Documentation/ABI/testing/debugfs-aufs                            |    55 +
 Documentation/ABI/testing/sysfs-aufs                              |    31 +
 Documentation/admin-guide/kernel-parameters.txt                   |    11 +
 Documentation/admin-guide/sysctl/kernel.rst                       |    34 +
 Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt |    48 +
 Documentation/devicetree/bindings/usb/usb-device.txt              |     9 +-
 Documentation/filesystems/aufs/README                             |   401 ++
 Documentation/filesystems/aufs/design/01intro.txt                 |   171 +
 Documentation/filesystems/aufs/design/02struct.txt                |   258 ++
 Documentation/filesystems/aufs/design/03atomic_open.txt           |    85 +
 Documentation/filesystems/aufs/design/03lookup.txt                |   113 +
 Documentation/filesystems/aufs/design/04branch.txt                |    74 +
 Documentation/filesystems/aufs/design/05wbr_policy.txt            |    64 +
 Documentation/filesystems/aufs/design/06dirren.dot                |    31 +
 Documentation/filesystems/aufs/design/06dirren.txt                |   102 +
 Documentation/filesystems/aufs/design/06fhsm.txt                  |   120 +
 Documentation/filesystems/aufs/design/06mmap.txt                  |    72 +
 Documentation/filesystems/aufs/design/06xattr.txt                 |    96 +
 Documentation/filesystems/aufs/design/07export.txt                |    58 +
 Documentation/filesystems/aufs/design/08shwh.txt                  |    52 +
 Documentation/filesystems/aufs/design/10dynop.txt                 |    47 +
 Documentation/power/power-sequence/design.rst                     |    54 +
 Documentation/scheduler/sched-BFS.txt                             |   351 ++
 Documentation/scheduler/sched-MuQSS.txt                           |   373 ++
 Documentation/vm/uksm.txt                                         |    61 +
 MAINTAINERS                                                       |    22 +
 Makefile                                                          |     7 +-
 arch/alpha/Kconfig                                                |     2 +
 arch/arm/Kconfig                                                  |     2 +
 arch/arm/boot/Makefile                                            |     4 +
 arch/arm/boot/compressed/atags_to_fdt.c                           |     6 +
 arch/arm/boot/dts/Makefile                                        |    15 +
 arch/arm/boot/dts/am335x-abbbi.dts                                |   164 +
 arch/arm/boot/dts/am335x-bone-common-no-capemgr.dtsi              |   351 ++
 arch/arm/boot/dts/am335x-bone-common-univ.dtsi                    |  2940 ++++++++++++++
 arch/arm/boot/dts/am335x-bone-common.dtsi                         |    14 +-
 arch/arm/boot/dts/am335x-bone-jtag.dtsi                           |    15 +
 arch/arm/boot/dts/am335x-bone-uboot-univ.dts                      |    29 +
 arch/arm/boot/dts/am335x-bone.dts                                 |     6 +
 arch/arm/boot/dts/am335x-boneblack-common.dtsi                    |    10 +-
 arch/arm/boot/dts/am335x-boneblack-uboot-univ.dts                 |    40 +
 arch/arm/boot/dts/am335x-boneblack-uboot.dts                      |    40 +
 arch/arm/boot/dts/am335x-boneblack-wireless.dts                   |     6 +
 arch/arm/boot/dts/am335x-boneblack.dts                            |     6 +
 arch/arm/boot/dts/am335x-boneblue.dts                             |   425 +-
 arch/arm/boot/dts/am335x-bonegreen-gateway.dts                    |   193 +
 arch/arm/boot/dts/am335x-bonegreen-wireless-common-univ.dtsi      |  2814 +++++++++++++
 arch/arm/boot/dts/am335x-bonegreen-wireless-uboot-univ.dts        |    55 +
 arch/arm/boot/dts/am335x-bonegreen-wireless.dts                   |     6 +
 arch/arm/boot/dts/am335x-bonegreen.dts                            |     6 +
 arch/arm/boot/dts/am335x-osd3358-sm-red.dts                       |    25 +-
 arch/arm/boot/dts/am335x-pocketbeagle.dts                         |  2041 +++++++++-
 arch/arm/boot/dts/am335x-sancloud-bbe.dts                         |     5 +
 arch/arm/boot/dts/am33xx-l4.dtsi                                  |   159 +
 arch/arm/boot/dts/am33xx.dtsi                                     |     2 +-
 arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi                   |     4 +
 arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts                     |     5 +
 arch/arm/boot/dts/am57xx-beagle-x15-revc.dts                      |     5 +
 arch/arm/boot/dts/am57xx-beagle-x15.dts                           |     5 +
 arch/arm/boot/dts/armada-370-smileplug.dts                        |   173 +
 arch/arm/boot/dts/dove-d3plug.dts                                 |     7 +
 arch/arm/boot/dts/dra7.dtsi                                       |    10 +
 arch/arm/boot/dts/exynos4412-odroid-common.dtsi                   |     2 +-
 arch/arm/boot/dts/imx6q-evi.dts                                   |    25 +-
 arch/arm/boot/dts/imx6qdl-udoo.dtsi                               |    46 +-
 arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi                    |    19 +-
 arch/arm/boot/dts/imx6qdl-wandboard-revc1.dtsi                    |    18 +
 arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi                    |    20 +
 arch/arm/boot/dts/imx6qdl.dtsi                                    |     6 +
 arch/arm/boot/dts/imx6ul-usbarmory.dts                            |   255 ++
 arch/arm/boot/dts/imx6ull-usbarmory.dts                           |   256 ++
 arch/arm/boot/dts/imx7d-pico-pi.dts                               |     4 +
 arch/arm/boot/dts/omap3-beagle-xm.dts                             |    53 +-
 arch/arm/boot/dts/omap3-beagle.dts                                |    11 +
 arch/arm/boot/dts/omap4-panda-a4.dts                              |    12 +
 arch/arm/boot/dts/omap4-panda-common.dtsi                         |    10 -
 arch/arm/boot/dts/omap4-panda-es-b3.dts                           |    82 +
 arch/arm/boot/dts/omap4-panda-es.dts                              |    12 +
 arch/arm/boot/dts/omap4-panda.dts                                 |    12 +
 arch/arm/boot/dts/omap4-sdp.dts                                   |     2 +
 arch/arm/boot/dts/omap4.dtsi                                      |     2 +
 arch/arm/boot/dts/sun7i-a20-cubietruck.dts                        |     2 +
 arch/arm/boot/dts/twl6030.dtsi                                    |     5 +
 arch/arm/include/uapi/asm/setup.h                                 |    17 +
 arch/arm/mach-imx/devices/Kconfig                                 |     6 +
 arch/arm/mach-imx/devices/Makefile                                |     1 +
 arch/arm/mach-imx/devices/wand-rfkill.c                           |   290 ++
 arch/arm64/Kconfig                                                |     2 +
 arch/powerpc/Kconfig                                              |     2 +
 arch/powerpc/platforms/cell/spufs/sched.c                         |     2 +
 arch/x86/Kconfig                                                  |    18 +
 arch/x86/Kconfig.cpu                                              |   301 +-
 arch/x86/Makefile                                                 |    50 +-
 arch/x86/Makefile_32.cpu                                          |    32 +-
 arch/x86/include/asm/vermagic.h                                   |    56 +
 block/Kconfig.iosched                                             |     9 +
 block/elevator.c                                                  |     8 +
 block/mq-deadline.c                                               |     4 +
 drivers/Makefile                                                  |    19 +-
 drivers/block/loop.c                                              |    18 +
 drivers/block/swim.c                                              |     6 +-
 drivers/char/ipmi/ipmi_msghandler.c                               |     2 +-
 drivers/char/ipmi/ipmi_ssif.c                                     |     2 +-
 drivers/cpufreq/cpufreq_ondemand.c                                |    14 +-
 drivers/cpufreq/intel_pstate.c                                    |     2 +
 drivers/gpio/Kconfig                                              |    14 +
 drivers/gpio/Makefile                                             |     1 +
 drivers/gpio/gpio-of-helper.c                                     |   435 ++
 drivers/gpio/gpiolib-sysfs.c                                      |    35 +-
 drivers/gpio/gpiolib.c                                            |     8 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c                              |     2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c                               |     2 +-
 drivers/hwmon/fam15h_power.c                                      |     2 +-
 drivers/i2c/busses/Kconfig                                        |     9 +
 drivers/i2c/busses/Makefile                                       |     1 +
 drivers/i2c/busses/i2c-nct6775.c                                  |   647 +++
 drivers/i2c/busses/i2c-piix4.c                                    |     4 +-
 drivers/iio/light/tsl2563.c                                       |     2 +-
 drivers/input/misc/tps65218-pwrbutton.c                           |     2 +-
 drivers/input/touchscreen/ar1021_i2c.c                            |    81 +-
 drivers/input/touchscreen/atmel_mxt_ts.c                          |     2 +-
 drivers/input/touchscreen/ti_am335x_tsc.c                         |    11 +-
 drivers/media/i2c/msp3400-driver.c                                |     4 +-
 drivers/media/pci/cx18/cx18-gpio.c                                |     4 +-
 drivers/media/pci/ivtv/ivtv-gpio.c                                |     6 +-
 drivers/media/pci/ivtv/ivtv-ioctl.c                               |     2 +-
 drivers/media/pci/ivtv/ivtv-streams.c                             |     2 +-
 drivers/media/radio/radio-mr800.c                                 |     2 +-
 drivers/media/radio/radio-tea5777.c                               |     2 +-
 drivers/media/radio/tea575x.c                                     |     2 +-
 drivers/mfd/ucb1x00-core.c                                        |     2 +-
 drivers/misc/Kconfig                                              |     8 +
 drivers/misc/Makefile                                             |     2 +
 drivers/misc/cape/Kconfig                                         |     5 +
 drivers/misc/cape/Makefile                                        |     5 +
 drivers/misc/cape/beaglebone/Kconfig                              |    10 +
 drivers/misc/cape/beaglebone/Makefile                             |     5 +
 drivers/misc/cape/beaglebone/bone-pinmux-helper.c                 |   242 ++
 drivers/misc/sgi-xp/xpc_channel.c                                 |     2 +-
 drivers/misc/udoo_ard.c                                           |   417 ++
 drivers/mmc/host/mvsdio.c                                         |     4 +-
 drivers/net/caif/caif_hsi.c                                       |     2 +-
 drivers/net/can/usb/peak_usb/pcan_usb.c                           |     2 +-
 drivers/net/ethernet/ti/davinci_mdio.c                            |    97 +
 drivers/net/usb/lan78xx.c                                         |     2 +-
 drivers/net/usb/smsc95xx.c                                        |    56 +
 drivers/net/usb/usbnet.c                                          |     2 +-
 drivers/net/wireless/broadcom/brcm80211/Kconfig                   |    11 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c           |     4 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c         |   108 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h            |     3 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c       |   838 +++-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h       |    61 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c           |   187 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h           |     9 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c         |    78 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h         |    10 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c           |   394 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h           |    27 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c          |    83 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h          |    25 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c        |     6 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h        |     6 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h       |     2 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c       |     9 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c           |     2 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h           |    28 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h           |     4 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h     |    17 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c       |   105 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h       |    23 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c         |    61 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c             |    14 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c            |   158 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h            |     9 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c           |   288 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c            |     4 -
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c           |   439 ++-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h           |   114 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c            |    34 +-
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c         |    66 +
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h         |    19 +
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c        |    10 +-
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c    |     3 +-
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c           |    18 +-
 drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h     |     9 +-
 drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h      |   195 +-
 drivers/net/wireless/intel/ipw2x00/ipw2100.c                      |     4 +-
 drivers/net/wireless/intel/iwlwifi/pcie/drv.c                     |     2 +
 drivers/parport/ieee1284.c                                        |     2 +-
 drivers/parport/ieee1284_ops.c                                    |     2 +-
 drivers/pci/pcie/edr.c                                            |     4 +-
 drivers/platform/x86/intel_ips.c                                  |     8 +-
 drivers/power/Kconfig                                             |     1 +
 drivers/power/Makefile                                            |     1 +
 drivers/power/pwrseq/Kconfig                                      |    20 +
 drivers/power/pwrseq/Makefile                                     |     2 +
 drivers/power/pwrseq/core.c                                       |   335 ++
 drivers/power/pwrseq/pwrseq_generic.c                             |   234 ++
 drivers/regulator/twl6030-regulator.c                             |    23 +
 drivers/rtc/rtc-wm8350.c                                          |     6 +-
 drivers/scsi/Kconfig                                              |     2 +
 drivers/scsi/Makefile                                             |     1 +
 drivers/scsi/fnic/fnic_scsi.c                                     |     4 +-
 drivers/scsi/lpfc/lpfc_scsi.c                                     |     2 +-
 drivers/scsi/snic/snic_scsi.c                                     |     2 +-
 drivers/scsi/vhba/Kconfig                                         |     9 +
 drivers/scsi/vhba/Makefile                                        |     4 +
 drivers/scsi/vhba/vhba.c                                          |  1086 +++++
 drivers/spi/spidev.c                                              |     6 +-
 drivers/staging/comedi/drivers/ni_mio_common.c                    |     2 +-
 drivers/staging/rts5208/rtsx.c                                    |     2 +-
 drivers/staging/speakup/speakup_acntpc.c                          |     4 +-
 drivers/staging/speakup/speakup_apollo.c                          |     2 +-
 drivers/staging/speakup/speakup_decext.c                          |     2 +-
 drivers/staging/speakup/speakup_decpc.c                           |     2 +-
 drivers/staging/speakup/speakup_dectlk.c                          |     2 +-
 drivers/staging/speakup/speakup_dtlk.c                            |     4 +-
 drivers/staging/speakup/speakup_keypc.c                           |     4 +-
 drivers/staging/speakup/synth.c                                   |     8 +-
 drivers/staging/unisys/visornic/visornic_main.c                   |     6 +-
 drivers/tty/serial/8250/8250_omap.c                               |     6 +-
 drivers/usb/Kconfig                                               |     1 +
 drivers/usb/core/hub.c                                            |    49 +-
 drivers/usb/core/hub.h                                            |     1 +
 drivers/usb/gadget/function/u_serial.c                            |     5 +
 drivers/video/fbdev/omap/hwa742.c                                 |     2 +-
 drivers/video/fbdev/pxafb.c                                       |     2 +-
 drivers/video/logo/logo_linux_clut224.ppm                         | 16006 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
 drivers/video/logo/logo_linux_mono.pbm                            |   273 +-
 drivers/video/logo/logo_linux_vga16.ppm                           | 16006 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
 drivers/virt/vboxguest/vboxguest_core.c                           |   266 +-
 drivers/virt/vboxguest/vboxguest_core.h                           |    23 +-
 drivers/virt/vboxguest/vboxguest_utils.c                          |     1 +
 fs/Kconfig                                                        |     1 +
 fs/Makefile                                                       |     1 +
 fs/aufs/Kconfig                                                   |   199 +
 fs/aufs/Makefile                                                  |    46 +
 fs/aufs/aufs.h                                                    |    62 +
 fs/aufs/branch.c                                                  |  1427 +++++++
 fs/aufs/branch.h                                                  |   366 ++
 fs/aufs/conf.mk                                                   |    40 +
 fs/aufs/cpup.c                                                    |  1458 +++++++
 fs/aufs/cpup.h                                                    |   100 +
 fs/aufs/dbgaufs.c                                                 |   526 +++
 fs/aufs/dbgaufs.h                                                 |    53 +
 fs/aufs/dcsub.c                                                   |   225 ++
 fs/aufs/dcsub.h                                                   |   137 +
 fs/aufs/debug.c                                                   |   441 +++
 fs/aufs/debug.h                                                   |   226 ++
 fs/aufs/dentry.c                                                  |  1154 ++++++
 fs/aufs/dentry.h                                                  |   268 ++
 fs/aufs/dinfo.c                                                   |   554 +++
 fs/aufs/dir.c                                                     |   763 ++++
 fs/aufs/dir.h                                                     |   134 +
 fs/aufs/dirren.c                                                  |  1316 +++++++
 fs/aufs/dirren.h                                                  |   140 +
 fs/aufs/dynop.c                                                   |   367 ++
 fs/aufs/dynop.h                                                   |    77 +
 fs/aufs/export.c                                                  |   838 ++++
 fs/aufs/f_op.c                                                    |   819 ++++
 fs/aufs/fhsm.c                                                    |   427 ++
 fs/aufs/file.c                                                    |   863 ++++
 fs/aufs/file.h                                                    |   342 ++
 fs/aufs/finfo.c                                                   |   149 +
 fs/aufs/fstype.h                                                  |   401 ++
 fs/aufs/hbl.h                                                     |    65 +
 fs/aufs/hfsnotify.c                                               |   288 ++
 fs/aufs/hfsplus.c                                                 |    60 +
 fs/aufs/hnotify.c                                                 |   715 ++++
 fs/aufs/i_op.c                                                    |  1502 +++++++
 fs/aufs/i_op_add.c                                                |   936 +++++
 fs/aufs/i_op_del.c                                                |   513 +++
 fs/aufs/i_op_ren.c                                                |  1250 ++++++
 fs/aufs/iinfo.c                                                   |   286 ++
 fs/aufs/inode.c                                                   |   529 +++
 fs/aufs/inode.h                                                   |   698 ++++
 fs/aufs/ioctl.c                                                   |   220 ++
 fs/aufs/lcnt.h                                                    |   186 +
 fs/aufs/loop.c                                                    |   148 +
 fs/aufs/loop.h                                                    |    55 +
 fs/aufs/magic.mk                                                  |    31 +
 fs/aufs/module.c                                                  |   273 ++
 fs/aufs/module.h                                                  |   166 +
 fs/aufs/mvdown.c                                                  |   706 ++++
 fs/aufs/opts.c                                                    |  1880 +++++++++
 fs/aufs/opts.h                                                    |   225 ++
 fs/aufs/plink.c                                                   |   516 +++
 fs/aufs/poll.c                                                    |    51 +
 fs/aufs/posix_acl.c                                               |   105 +
 fs/aufs/procfs.c                                                  |   170 +
 fs/aufs/rdu.c                                                     |   384 ++
 fs/aufs/rwsem.h                                                   |    73 +
 fs/aufs/sbinfo.c                                                  |   314 ++
 fs/aufs/super.c                                                   |  1047 +++++
 fs/aufs/super.h                                                   |   589 +++
 fs/aufs/sysaufs.c                                                 |    93 +
 fs/aufs/sysaufs.h                                                 |   102 +
 fs/aufs/sysfs.c                                                   |   374 ++
 fs/aufs/sysrq.c                                                   |   149 +
 fs/aufs/vdir.c                                                    |   896 +++++
 fs/aufs/vfsub.c                                                   |   895 +++++
 fs/aufs/vfsub.h                                                   |   354 ++
 fs/aufs/wbr_policy.c                                              |   830 ++++
 fs/aufs/whout.c                                                   |  1062 +++++
 fs/aufs/whout.h                                                   |    86 +
 fs/aufs/wkq.c                                                     |   372 ++
 fs/aufs/wkq.h                                                     |    89 +
 fs/aufs/xattr.c                                                   |   356 ++
 fs/aufs/xino.c                                                    |  1966 +++++++++
 fs/block_dev.c                                                    |     2 +-
 fs/btrfs/inode-map.c                                              |     2 +-
 fs/dax.c                                                          |    14 +
 fs/dcache.c                                                       |     4 +-
 fs/drop_caches.c                                                  |     2 +-
 fs/exec.c                                                         |     2 +
 fs/fcntl.c                                                        |     5 +-
 fs/file_table.c                                                   |     2 +
 fs/inode.c                                                        |   115 +-
 fs/internal.h                                                     |     2 +-
 fs/namespace.c                                                    |     9 +
 fs/notify/group.c                                                 |     1 +
 fs/open.c                                                         |     1 +
 fs/proc/base.c                                                    |     4 +-
 fs/proc/meminfo.c                                                 |     5 +-
 fs/proc/nommu.c                                                   |     5 +-
 fs/proc/task_mmu.c                                                |     7 +-
 fs/proc/task_nommu.c                                              |     5 +-
 fs/read_write.c                                                   |    26 +
 fs/splice.c                                                       |    12 +-
 fs/sync.c                                                         |     3 +-
 fs/xattr.c                                                        |     1 +
 include/asm-generic/pgtable.h                                     |    17 +-
 include/dt-bindings/board/am335x-bbw-bbb-base.h                   |   103 +
 include/linux/freezer.h                                           |     3 +
 include/linux/fs.h                                                |    27 +
 include/linux/init_task.h                                         |     4 +
 include/linux/ioprio.h                                            |     2 +
 include/linux/ksm.h                                               |    43 +-
 include/linux/lockdep.h                                           |     3 +
 include/linux/mm.h                                                |    22 +
 include/linux/mm_types.h                                          |     5 +
 include/linux/mmzone.h                                            |     3 +
 include/linux/mnt_namespace.h                                     |     3 +
 include/linux/pagemap.h                                           |     2 +-
 include/linux/power/pwrseq.h                                      |    81 +
 include/linux/sched.h                                             |    88 +-
 include/linux/sched/deadline.h                                    |     9 +
 include/linux/sched/nohz.h                                        |     2 +-
 include/linux/sched/prio.h                                        |    12 +
 include/linux/sched/rt.h                                          |     2 +
 include/linux/sched/task.h                                        |     2 +-
 include/linux/skip_list.h                                         |    33 +
 include/linux/splice.h                                            |     6 +
 include/linux/sradix-tree.h                                       |    77 +
 include/linux/uksm.h                                              |   149 +
 include/linux/vbox_utils.h                                        |     1 +
 include/linux/vm_event_item.h                                     |     3 +-
 include/net/cfg80211.h                                            |     7 +
 include/uapi/linux/Kbuild                                         |     1 +
 include/uapi/linux/aufs_type.h                                    |   452 +++
 include/uapi/linux/can/isotp.h                                    |   135 +
 include/uapi/linux/futex.h                                        |    20 +
 include/uapi/linux/nl80211.h                                      |    56 +-
 include/uapi/linux/sched.h                                        |     9 +-
 include/uapi/linux/vbox_vmmdev_types.h                            |     3 +
 include/uapi/linux/vboxguest.h                                    |    24 +
 init/Kconfig                                                      |    74 +-
 init/init_task.c                                                  |    10 +
 init/main.c                                                       |     2 +
 kernel/Kconfig.MuQSS                                              |   105 +
 kernel/Makefile                                                   |     2 +-
 kernel/cpu.c                                                      |     4 +
 kernel/delayacct.c                                                |     2 +-
 kernel/exit.c                                                     |     4 +-
 kernel/fork.c                                                     |    19 +-
 kernel/futex.c                                                    |   363 +-
 kernel/irq/Kconfig                                                |    17 +
 kernel/irq/manage.c                                               |    11 +
 kernel/kthread.c                                                  |    30 +-
 kernel/locking/lockdep.c                                          |     4 +-
 kernel/sched/Makefile                                             |    10 +-
 kernel/sched/MuQSS.c                                              |  7638 +++++++++++++++++++++++++++++++++++
 kernel/sched/MuQSS.h                                              |  1051 +++++
 kernel/sched/core.c                                               |     4 +
 kernel/sched/cpufreq_schedutil.c                                  |    12 +-
 kernel/sched/cpupri.h                                             |     2 +
 kernel/sched/cputime.c                                            |    22 +-
 kernel/sched/fair.c                                               |    25 +
 kernel/sched/idle.c                                               |     2 +
 kernel/sched/sched.h                                              |    35 +
 kernel/sched/topology.c                                           |     8 +
 kernel/skip_list.c                                                |   148 +
 kernel/sysctl.c                                                   |    84 +-
 kernel/task_work.c                                                |     1 +
 kernel/time/Kconfig                                               |     2 +-
 kernel/time/clockevents.c                                         |     6 +
 kernel/time/hrtimer.c                                             |   112 +
 kernel/time/posix-cpu-timers.c                                    |     4 +-
 kernel/time/timer.c                                               |    47 +-
 kernel/trace/trace_selftest.c                                     |     5 +
 kernel/user_namespace.c                                           |     7 +
 lib/Makefile                                                      |     2 +-
 lib/sradix-tree.c                                                 |   476 +++
 mm/Kconfig                                                        |    26 +
 mm/Makefile                                                       |     5 +-
 mm/filemap.c                                                      |    41 +-
 mm/huge_memory.c                                                  |     7 +-
 mm/internal.h                                                     |     1 +
 mm/ksm.c                                                          |    11 -
 mm/memory.c                                                       |    33 +-
 mm/mmap.c                                                         |    70 +-
 mm/nommu.c                                                        |    10 +-
 mm/page-writeback.c                                               |     8 +
 mm/page_alloc.c                                                   |    38 +-
 mm/prfile.c                                                       |    86 +
 mm/truncate.c                                                     |    34 +-
 mm/uksm.c                                                         |  5613 ++++++++++++++++++++++++++
 mm/vmscan.c                                                       |     9 +-
 mm/vmstat.c                                                       |     7 +-
 mm/workingset.c                                                   |     4 +
 net/can/Kconfig                                                   |    10 +
 net/can/Makefile                                                  |     3 +
 net/can/isotp.c                                                   |  1537 ++++++++
 net/core/pktgen.c                                                 |     2 +-
 net/sched/Kconfig                                                 |     4 +
 net/wireless/nl80211.c                                            |    15 +-
 net/wireless/sme.c                                                |     2 +
 scripts/headers_install.sh                                        |     1 +
 scripts/mkcompile_h                                               |     4 +-
 scripts/package/builddeb                                          |     1 +
 security/security.c                                               |     8 +
 sound/pci/hda/Kconfig                                             |    14 +
 sound/pci/hda/patch_hdmi.c                                        |    64 +
 sound/pci/maestro3.c                                              |     4 +-
 sound/soc/codecs/rt5631.c                                         |     4 +-
 sound/soc/codecs/wm8350.c                                         |    12 +-
 sound/soc/codecs/wm8900.c                                         |     2 +-
 sound/soc/codecs/wm9713.c                                         |     4 +-
 sound/soc/soc-dapm.c                                              |     2 +-
 sound/usb/line6/pcm.c                                             |     2 +-
 tools/testing/selftests/futex/functional/.gitignore               |     1 +
 tools/testing/selftests/futex/functional/Makefile                 |     3 +-
 tools/testing/selftests/futex/functional/futex_wait_multiple.c    |   173 +
 tools/testing/selftests/futex/functional/futex_wait_timeout.c     |    38 +-
 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c  |    28 +-
 tools/testing/selftests/futex/functional/run.sh                   |     3 +
 tools/testing/selftests/futex/include/futextest.h                 |    22 +
 448 files changed, 105165 insertions(+), 4484 deletions(-)

diff --git a/Documentation/ABI/testing/debugfs-aufs b/Documentation/ABI/testing/debugfs-aufs
new file mode 100644
index 00000000000..4a6694194ba
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-aufs
@@ -0,0 +1,55 @@
+What:		/debug/aufs/si_<id>/
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		Under /debug/aufs, a directory named si_<id> is created
+		per aufs mount, where <id> is a unique id generated
+		internally.
+
+What:		/debug/aufs/si_<id>/plink
+Date:		Apr 2013
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It has three lines and shows information about the
+		pseudo-links. The first line is a single number
+		representing the number of buckets. The second line is
+		the number of pseudo-links per bucket (separated by
+		blanks). The last line is a single number representing
+		the total number of pseudo-links.
+		When the aufs mount option 'noplink' is specified, it
+		will show "1\n0\n0\n".
+
+What:		/debug/aufs/si_<id>/xib
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the blocks consumed by the xib (External Inode
+		Number Bitmap), its block size and file size.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. For details about XINO files, see the
+		aufs manual.
+
+What:		/debug/aufs/si_<id>/xi0, xi1 ... xiN and xiN-N
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the blocks consumed by the xino (External Inode
+		Number Translation Table), its link count, block size
+		and file size.
+		Due to the file size limit, there may be multiple
+		xino files per branch.  In this case, "-N" is added to
+		the filename and it corresponds to the index of the
+		internal xino array.  "-0" is omitted.
+		When the aufs mount option 'noxino' is specified, those
+		entries won't exist.  For details about XINO files, see
+		the aufs manual.
+
+What:		/debug/aufs/si_<id>/xigen
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the blocks consumed by the xigen (External Inode
+		Generation Table), its block size and file size.
+		If CONFIG_AUFS_EXPORT is disabled, this entry will not
+		be created.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. For details about XINO files, see the
+		aufs manual.
diff --git a/Documentation/ABI/testing/sysfs-aufs b/Documentation/ABI/testing/sysfs-aufs
new file mode 100644
index 00000000000..82f9518495e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-aufs
@@ -0,0 +1,31 @@
+What:		/sys/fs/aufs/si_<id>/
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		Under /sys/fs/aufs, a directory named si_<id> is created
+		per aufs mount, where <id> is a unique id generated
+		internally.
+
+What:		/sys/fs/aufs/si_<id>/br0, br1 ... brN
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the absolute path of a member directory (which
+		is called a branch) in aufs, and its permission.
+
+What:		/sys/fs/aufs/si_<id>/brid0, brid1 ... bridN
+Date:		July 2013
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the id of a member directory (which is called
+		a branch) in aufs.
+
+What:		/sys/fs/aufs/si_<id>/xi_path
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05g@gmail.com>
+Description:
+		It shows the absolute path of the XINO (External Inode
+		Number Bitmap, Translation Table and Generation Table)
+		file even if it is the default path.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. For details about XINO files, see the
+		aufs manual.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5e2ce88d6ed..24fd8725ec5 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1826,6 +1826,9 @@
 			disable
 			  Do not enable intel_pstate as the default
 			  scaling driver for the supported processors
+			enable
+			  Enable intel_pstate in case "disable" was passed
+			  previously in the kernel boot parameters
 			passive
 			  Use intel_pstate as a scaling driver, but configure it
 			  to work with generic cpufreq governors (instead of
@@ -4429,6 +4432,14 @@
 			Memory area to be used by remote processor image,
 			managed by CMA.
 
+	rqshare=	[X86] Select the MuQSS scheduler runqueue sharing type.
+			Format: <string>
+			smt -- Share SMT (hyperthread) sibling runqueues
+			mc -- Share MC (multicore) sibling runqueues
+			smp -- Share SMP runqueues
+			none -- Do not share any runqueues
+			Default value is mc
+
 	rw		[KNL] Mount root device read-write on boot
 
 	S		[KNL] Run init in single mode
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 0d427fd1094..5b3406a3d76 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -344,6 +344,16 @@ Controls whether the panic kmsg data should be reported to Hyper-V.
 = =========================================================
 
 
+iso_cpu: (MuQSS CPU scheduler only)
+===================================
+
+This sets the percentage of cpu time that unprivileged SCHED_ISO tasks
+can run effectively at realtime priority, averaged over a rolling five
+seconds across the -whole- system, meaning all cpus.
+
+Set to 70 (percent) by default.
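+
+A minimal example of inspecting and changing it at runtime (a sketch;
+it assumes this tunable is exposed as kernel.iso_cpu, as the section
+name suggests)::
+
+	# sysctl kernel.iso_cpu
+	kernel.iso_cpu = 70
+	# sysctl -w kernel.iso_cpu=50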
+
+
 kexec_load_disabled
 ===================
 
@@ -922,6 +932,20 @@ ROM/Flash boot loader. Maybe to tell it what to do after
 rebooting. ???
 
 
+rr_interval: (MuQSS CPU scheduler only)
+=======================================
+
+This is the smallest duration that any cpu process scheduling unit
+will run for. Increasing this value can increase throughput of cpu
+bound tasks substantially but at the expense of increased latencies
+overall. Conversely, decreasing it will decrease average and maximum
+latencies but at the expense of throughput. This value is in
+milliseconds and the default value chosen depends on the number of
+cpus available at scheduler initialisation, with a minimum of 6.
+
+Valid values are from 1 to 1000.
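+
+As a sketch, assuming the usual sysctl-to-procfs mapping under
+/proc/sys/kernel, a throughput-oriented tuning could be applied with::
+
+	$ cat /proc/sys/kernel/rr_interval
+	# echo 100 > /proc/sys/kernel/rr_interval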
+
+
 sched_energy_aware
 ==================
 
@@ -1230,3 +1254,13 @@ is 10 seconds.
 
 The softlockup threshold is (``2 * watchdog_thresh``). Setting this
 tunable to zero will disable lockup detection altogether.
+
+
+yield_type: (MuQSS CPU scheduler only)
+======================================
+
+This determines what type of yield calls to sched_yield will perform.
+
+ 0: No yield.
+ 1: Yield only to better priority/deadline tasks. (default)
+ 2: Expire timeslice and recalculate deadline.
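+
+To make the chosen value persistent across reboots, a sysctl drop-in
+file can be used (a sketch; the file name is only an example)::
+
+	# echo "kernel.yield_type = 0" > /etc/sysctl.d/90-muqss.conf
+	# sysctl --system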
diff --git a/Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt b/Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt
new file mode 100644
index 00000000000..ebf0d477b68
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt
@@ -0,0 +1,48 @@
+The generic power sequence library
+
+Some hard-wired devices (e.g. USB/MMC) need a power sequence before
+the device can be enumerated on the bus; a typical power sequence
+includes: enabling the USB PHY clock, toggling the reset pin, etc. But
+the current Linux device drivers lack such code, which may cause some
+hard-wired devices to work abnormally or not be recognized by the
+controller at all. The power sequence is carried out before the device
+can be found on the bus.
+
+The power sequence properties are placed under the device node.
+
+Optional properties:
+- clocks: the input clocks for the device.
+- reset-gpios: should specify the GPIO for reset.
+- reset-duration-us: the duration in microseconds to assert the reset signal.
+
+Below is an example of USB power sequence properties on USB device
+nodes with two levels of USB hubs.
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg1_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usb_otg1_id>;
+	status = "okay";
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	genesys: hub@1 {
+		compatible = "usb5e3,608";
+		reg = <1>;
+
+		clocks = <&clks IMX6SX_CLK_CKO>;
+		reset-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; /* hub reset pin */
+		reset-duration-us = <10>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+		asix: ethernet@1 {
+			compatible = "usbb95,1708";
+			reg = <1>;
+
+			clocks = <&clks IMX6SX_CLK_IPG>;
+			reset-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>; /* ethernet_rst */
+			reset-duration-us = <15>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
index 036be172b1a..cb85f82a12b 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
@@ -65,6 +65,9 @@ Required properties for host-controller nodes with device nodes:
 - #address-cells: shall be 1
 - #size-cells: shall be 0
 
+Optional properties:
+power sequence properties, see
+Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt for details
 
 Example:
 
@@ -72,9 +75,13 @@ Example:
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	hub@1 {		/* hub connected to port 1 */
+	genesys: hub@1 {	/* hub connected to port 1 */
 		compatible = "usb5e3,608";
 		reg = <1>;
+
+		clocks = <&clks IMX6SX_CLK_CKO>;
+		reset-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; /* hub reset pin */
+		reset-duration-us = <10>;
 	};
 
 	device@2 {	/* device connected to port 2 */
diff --git a/Documentation/filesystems/aufs/README b/Documentation/filesystems/aufs/README
new file mode 100644
index 00000000000..3e655d35713
--- /dev/null
+++ b/Documentation/filesystems/aufs/README
@@ -0,0 +1,401 @@
+
+Aufs5 -- advanced multi layered unification filesystem version 5.x
+http://aufs.sf.net
+Junjiro R. Okajima
+
+
+0. Introduction
+----------------------------------------
+In the early days, aufs was an entirely re-designed and re-implemented
+Unionfs Version 1.x series. Adding many original ideas, approaches,
+improvements and implementations, it became totally different from
+Unionfs while keeping the basic features.
+Later, the Unionfs Version 2.x series began taking some of the same
+approaches as aufs1.
+Unionfs was developed by Professor Erez Zadok at Stony Brook
+University and his team.
+
+Aufs5 supports linux-v5.0 and later. If you want support for older
+kernel versions,
+- for linux-v4.x series, try aufs4-linux.git or aufs4-standalone.git
+- for linux-v3.x series, try aufs3-linux.git or aufs3-standalone.git
+- for linux-v2.6.16 and later, try aufs2-2.6.git, aufs2-standalone.git
+  or aufs1 from CVS on SourceForge.
+
+Note: it became clear that "Aufs was rejected. Let's give it up."
+      According to Christoph Hellwig, linux rejects all union-type
+      filesystems but UnionMount.
+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
+
+PS. Al Viro seems to have a plan to merge aufs as well as overlayfs and
+    UnionMount; he pointed out an issue around a directory mutex lock,
+    and aufs addressed it. But it is still unclear whether aufs (or any
+    other union solution) will be merged.
+<http://marc.info/?l=linux-kernel&m=136312705029295&w=1>
+
+
+1. Features
+----------------------------------------
+- unite several directories into a single virtual filesystem. A member
+  directory is called a branch.
+- you can specify the permission flags for each branch, which are
+  'readonly', 'readwrite' and 'whiteout-able.'
+- via the upper writable branch, internal copyup and whiteout, files/dirs
+  on readonly branches become logically modifiable.
+- dynamic branch manipulation: add, del.
+- etc...
+
+Also there are many enhancements in aufs, such as:
+- test only the highest one for the directory permission (dirperm1)
+- copyup on open (coo=)
+- 'move' policy for copy-up between two writable branches, after
+  checking free space.
+- xattr, acl
+- readdir(3) in userspace.
+- keep inode number by external inode number table
+- keep the timestamps of file/dir in internal copyup operation
+- seekable directory, supporting NFS readdir.
+- whiteout is hardlinked in order to reduce the consumption of inodes
+  on branch
+- do not copyup, nor create a whiteout when it is unnecessary
+- revert a single system call when an error occurs in aufs
+- remount interface instead of ioctl
+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
+- loopback mounted filesystem as a branch
+- kernel thread for removing a dir which has plenty of whiteouts
+- support copyup sparse file (a file which has a 'hole' in it)
+- default permission flags for branches
+- selectable permission flags for ro branch, whether whiteout can
+  exist or not
+- export via NFS.
+- support <sysfs>/fs/aufs and <debugfs>/aufs.
+- support multiple writable branches, some policies to select one
+  among multiple writable branches.
+- a new semantics for link(2) and rename(2) to support multiple
+  writable branches.
+- no glibc changes are required.
+- pseudo hardlink (hardlink over branches)
+- allow direct manual access to a file on a branch, i.e. bypassing aufs,
+  including NFS or remote filesystem branches.
+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
+- and more...
+
+Currently these features are temporarily dropped from aufs5.
+See design/08plan.txt for details.
+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
+  (robr)
+- statistics of aufs thread (/sys/fs/aufs/stat)
+
+Features or just ideas for the future (see also design/*.txt):
+- reorder the branch index without del/re-add.
+- permanent xino files for NFSD
+- an option for refreshing the opened files after add/del branches
+- light version, without branch manipulation. (unnecessary?)
+- copyup in userspace
+- inotify in userspace
+- readv/writev
+
+
+2. Download
+----------------------------------------
+There are three GIT trees for aufs5, aufs5-linux.git,
+aufs5-standalone.git, and aufs-util.git. Note that there is no "5" in
+"aufs-util.git."
+While aufs-util is always necessary, you need either aufs5-linux
+or aufs5-standalone.
+
+The aufs5-linux tree includes the whole linux mainline GIT tree,
+git://git.kernel.org/.../torvalds/linux.git.
+And you cannot select CONFIG_AUFS_FS=m for this version, i.e. you cannot
+build aufs5 as an external kernel module.
+Several extra patches are not included in this tree. Only the
+aufs5-standalone tree contains them. They are described in the later
+section "Configuration and Compilation."
+
+On the other hand, the aufs5-standalone tree has only aufs source files
+and necessary patches, and you can select CONFIG_AUFS_FS=m.
+But you need to apply all aufs patches manually.
+
+You will find GIT branches whose names are in the form "aufs5.x" where
+"x" represents the linux kernel version, "linux-5.x". For instance,
+"aufs5.0" is for linux-5.0. For the latest "linux-5.x-rcN", use the
+"aufs5.x-rcN" branch.
+
+o aufs5-linux tree
+$ git clone --reference /your/linux/git/tree \
+	git://github.com/sfjro/aufs5-linux.git aufs5-linux.git
+- if you don't have linux GIT tree, then remove "--reference ..."
+$ cd aufs5-linux.git
+$ git checkout origin/aufs5.0
+
+Or you may want to directly git-pull aufs into your linux GIT tree, and
+leave the patch-work to GIT.
+$ cd /your/linux/git/tree
+$ git remote add aufs5 git://github.com/sfjro/aufs5-linux.git
+$ git fetch aufs5
+$ git checkout -b my5.0 v5.0
+$ (add your local change...)
+$ git pull aufs5 aufs5.0
+- now you have v5.0 + your_changes + aufs5.0 in your my5.0 branch.
+- you may need to solve some conflicts between your_changes and
+  aufs5.0. In this case, git-rerere is recommended so that you can
+  solve similar conflicts automatically when you upgrade to 5.1 or
+  later in the future.
+
+o aufs5-standalone tree
+$ git clone git://github.com/sfjro/aufs5-standalone.git aufs5-standalone.git
+$ cd aufs5-standalone.git
+$ git checkout origin/aufs5.0
+
+o aufs-util tree
+$ git clone git://git.code.sf.net/p/aufs/aufs-util aufs-util.git
+- note that the public aufs-util.git is on SourceForge instead of
+  GitHub.
+$ cd aufs-util.git
+$ git checkout origin/aufs5.0
+
+Note: The 5.x-rcN branch is to be used with `rc' kernel versions ONLY.
+The minor version number, 'x' in '5.x', of aufs may not always
+follow the minor version number of the kernel, because changes in
+the kernel that cause the use of a new minor version number do not
+always require changes to aufs-util.
+
+Since aufs-util has its own minor version number, you may not be
+able to find a GIT branch in aufs-util for your kernel's
+exact minor version number.
+In this case, you should git-checkout the branch for the
+nearest lower number.
+
+For (an unreleased) example:
+If you are using "linux-5.10" and the "aufs5.10" branch
+does not exist in the aufs-util repository, then "aufs5.9", "aufs5.8"
+or something numerically smaller is the branch for your kernel.
+
+Also you can view all branches by
+	$ git branch -a
+
+
+3. Configuration and Compilation
+----------------------------------------
+Make sure you have git-checkout'ed the correct branch.
+
+For the aufs5-linux tree,
+- enable CONFIG_AUFS_FS.
+- set other aufs configurations if necessary.
+
+For the aufs5-standalone tree,
+there are several ways to build.
+
+1.
+- apply ./aufs5-kbuild.patch to your kernel source files.
+- apply ./aufs5-base.patch too.
+- apply ./aufs5-mmap.patch too.
+- apply ./aufs5-standalone.patch too, if you plan to set
+  CONFIG_AUFS_FS=m; otherwise you don't need ./aufs5-standalone.patch.
+  (See the example commands after this list.)
+- copy ./{Documentation,fs,include/uapi/linux/aufs_type.h} files to your
+  kernel source tree. Never copy $PWD/include/uapi/linux/Kbuild.
+- enable CONFIG_AUFS_FS, you can select either
+  =m or =y.
+- and build your kernel as usual.
+- install the built kernel.
+- install the header files too by "make headers_install" into the
+  directory you specify. By default, it is $PWD/usr.
+  "make help" shows a brief note for headers_install.
+- and reboot your system.
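+
+As a sketch, the patch-and-copy steps above might look like the
+following when run from the top of your kernel source tree (the
+../aufs5-standalone.git path is only an example, and the copy commands
+assume the standalone tree mirrors the kernel layout):
+$ patch -p1 < ../aufs5-standalone.git/aufs5-kbuild.patch
+$ patch -p1 < ../aufs5-standalone.git/aufs5-base.patch
+$ patch -p1 < ../aufs5-standalone.git/aufs5-mmap.patch
+$ patch -p1 < ../aufs5-standalone.git/aufs5-standalone.patch
+$ cp -a ../aufs5-standalone.git/fs/aufs fs/
+$ cp -a ../aufs5-standalone.git/Documentation/filesystems/aufs \
+	Documentation/filesystems/
+$ cp ../aufs5-standalone.git/include/uapi/linux/aufs_type.h \
+	include/uapi/linux/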
+
+2.
+- module only (CONFIG_AUFS_FS=m).
+- apply ./aufs5-base.patch to your kernel source files.
+- apply ./aufs5-mmap.patch too.
+- apply ./aufs5-standalone.patch too.
+- build your kernel, don't forget "make headers_install", and reboot.
+- edit ./config.mk and set other aufs configurations if necessary.
+  Note: You should read $PWD/fs/aufs/Kconfig carefully, which describes
+  every aufs configuration.
+- build the module by a simple "make" (see the example after this list).
+- you can specify the ${KDIR} make variable, which points to your kernel
+  source tree.
+- install the files
+  + run "make install" to install the aufs module, or copy the built
+    $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
+  + run "make install_headers" (instead of headers_install) to install
+    the modified aufs header file (you can specify DESTDIR, which is
+    available in the aufs standalone version's Makefile only), or copy
+    $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
+    you like manually. By default, the target directory is $PWD/usr.
+- no need to apply aufs5-kbuild.patch, nor to copy source files to your
+  kernel source tree.
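+
+A typical module build and install run, as a sketch (the KDIR value is
+only an example):
+$ make KDIR=/lib/modules/$(uname -r)/build
+# make install
+# make install_headers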
+
+Note: The header file aufs_type.h is necessary to build aufs-util
+      as well as "make headers_install" in the kernel source tree.
+      headers_install is easy to forget, but it is essentially
+      necessary, not only for building aufs-util.
+      You may not run into problems without headers_install on some
+      older versions, though.
+
+And then,
+- read README in aufs-util, build and install it
+- note that your distribution may contain an obsolete version of
+  aufs_type.h in /usr/include/linux or somewhere else. When you build
+  aufs utilities, make sure that your compiler refers to the correct
+  aufs header file which is built by "make headers_install."
+- if you want to use readdir(3) in userspace or the pathconf(3) wrapper,
+  then run "make install_ulib" too. And refer to the aufs manual for
+  details.
+
+There are several other patches in aufs5-standalone.git. They are all
+optional. When you run into problems, they may help you.
+- aufs5-loopback.patch
+  Supports a nested loopback mount in a branch-fs. This patch is
+  unnecessary until aufs produces a message like "you may want to try
+  another patch for loopback file".
+- proc_mounts.patch
+  When there are many mountpoints and many mount(2)/umount(2) calls are
+  running, /proc/mounts may not show all the mountpoints.  This
+  patch makes /proc/mounts always show the full mountpoint list.
+  If you don't want to apply this patch and run into such a problem,
+  then you need to increase the value of the 'ProcMounts_Times'
+  make-variable in aufs-util.git as a second-best solution.
+- vfs-ino.patch
+  Modifies a system global kernel internal function get_next_ino() in
+  order to stop assigning 0 for an inode-number. Not directly related to
+  aufs, but recommended generally.
+- tmpfs-idr.patch
+  Keeps the tmpfs inode number as the lowest value. Effective to reduce
+  the size of aufs XINO files for a tmpfs branch. Also it prevents the
+  duplication of inode numbers, which is important for backup tools and
+  other utilities. When you find aufs XINO files for a tmpfs branch
+  growing too much, try this patch.
+- lockdep-debug.patch
+  Because aufs is not only an ordinary filesystem (callee of VFS), but
+  also a caller of VFS functions for branch filesystems, subclassing of
+  the internal locks for LOCKDEP is necessary. LOCKDEP is a debugging
+  feature of linux kernel. If you enable CONFIG_LOCKDEP, then you will
+  need to apply this debug patch to expand several constant values.
+  If you don't know what LOCKDEP is, then you don't have to apply this
+  patch.
+
+
+4. Usage
+----------------------------------------
+At first, make sure aufs-util is installed, and please read the aufs
+manual, aufs.5 in the aufs-util.git tree.
+$ man -l aufs.5
+
+And then,
+$ mkdir /tmp/rw /tmp/aufs
+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
+
+Here is another example. The result is equivalent.
+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
+  Or
+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
+# mount -o remount,append:${HOME} /tmp/aufs
+
+Then you can see the whole tree of your home dir through /tmp/aufs. If
+you modify a file under /tmp/aufs, the one in your home directory is
+not affected; instead a file with the same name is newly created under
+/tmp/rw, and all of your modifications to the file are applied to that
+copy under /tmp/rw. This is called the file-based Copy-on-Write (COW)
+method.
+Aufs mount options are described in aufs.5.
+If you run chroot or something similar and use aufs as the root
+directory, then you need to customize the shutdown script. See the aufs
+manual for details.
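+
+For example, here is a quick way to observe the COW behaviour, assuming
+the union mounted above and an existing file ~/memo.txt (the filename
+is only an example):
+$ echo hello >> /tmp/aufs/memo.txt
+$ ls -l /tmp/rw/memo.txt        # the modified copy lives on /tmp/rw
+$ tail -1 ${HOME}/memo.txt      # the original in your home is untouched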
+
+Additionally, there are some sample usages of aufs, such as a diskless
+system with network booting, and a LiveCD over NFS.
+See the sample dir in the CVS tree on SourceForge.
+
+
+5. Contact
+----------------------------------------
+When you have any problems or see strange behaviour in aufs, please let
+me know, together with:
+- /proc/mounts (instead of the output of mount(8))
+- /sys/module/aufs/*
+- /sys/fs/aufs/* (if you have them)
+- /debug/aufs/* (if you have them)
+- linux kernel version
+  if your kernel is not plain, for example modified by a distributor,
+  the URL where I can download its source is necessary too.
+- the aufs version which was printed when loading the module or booting
+  the system, instead of the date you downloaded it.
+- configuration (define/undefine CONFIG_AUFS_xxx)
+- kernel configuration or /proc/config.gz (if you have it)
+- LSM (Linux security module, if you are using one)
+- the behaviour which you think is incorrect
+- the actual operation; a reproducible one is better
+- mailto: aufs-users at lists.sourceforge.net
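+
+Most of the above can be collected with a few commands, for example
+(a sketch only; adjust the paths if debugfs or /proc/config.gz is not
+available on your system):
+$ cat /proc/mounts > aufs-report.txt
+$ uname -r >> aufs-report.txt
+$ grep -r . /sys/module/aufs/ /sys/fs/aufs/ >> aufs-report.txt 2>/dev/null
+$ zcat /proc/config.gz | grep AUFS >> aufs-report.txt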
+
+Usually, I don't watch the Public Areas (Bugs, Support Requests,
+Patches, and Feature Requests) on SourceForge. Please join and write to
+the aufs-users ML.
+
+
+6. Acknowledgements
+----------------------------------------
+Thanks to everyone who has tried and is using aufs, and to whoever has
+reported a bug or given any feedback.
+
+Especially donators:
+Tomas Matejicek (slax.org) made a donation (much more than once).
+	Since Apr 2010, Tomas M (the author of Slax and Linux Live
+	scripts) has been making "doubling" donations.
+	Unfortunately I cannot list all of the donators, but I really
+	appreciate them.
+	It ended in Aug 2010, but the ordinary donation URL is still
+	available.
+	<http://sourceforge.net/donate/index.php?group_id=167503>
+Dai Itasaka made a donation (2007/8).
+Chuck Smith made a donation (2008/4, 10 and 12).
+Henk Schoneveld made a donation (2008/9).
+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
+Francois Dupoux made a donation (2008/11).
+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
+	aufs2 GIT tree (2009/2).
+William Grant made a donation (2009/3).
+Patrick Lane made a donation (2009/4).
+The Mail Archive (mail-archive.com) made donations (2009/5).
+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
+Pavel Pronskiy made a donation (2011/2).
+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
+	Networks (Ed Wildgoose) made a donation for hardware (2011/3).
+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
+11).
+Sam Liddicott made a donation (2011/9).
+Era Scarecrow made a donation (2013/4).
+Bor Ratajc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+POIRETTE Marc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+lauri kasvandik made a donation (2013/5).
+"pemasu from Finland" made a donation (2013/7).
+The Parted Magic Project made a donation (2013/9 and 11).
+Pavel Barta made a donation (2013/10).
+Nikolay Pertsev made a donation (2014/5).
+James B made a donation (2014/7 and 2015/7).
+Stefano Di Biase made a donation (2014/8).
+Daniel Epellei made a donation (2015/1).
+OmegaPhil made a donation (2016/1, 2018/4).
+Tomasz Szewczyk made a donation (2016/4).
+James Burry made a donation (2016/12).
+Carsten Rose made a donation (2018/9).
+Porteus Kiosk made a donation (2018/10).
+
+Thank you very much.
+Donations, including future ones, are always very important and
+helpful for me to keep on developing aufs.
+
+
+7.
+----------------------------------------
+If you are an experienced user, no explanation is needed. Aufs is
+just a linux filesystem.
+
+
+Enjoy!
+
+# Local variables: ;
+# mode: text;
+# End: ;
diff --git a/Documentation/filesystems/aufs/design/01intro.txt b/Documentation/filesystems/aufs/design/01intro.txt
new file mode 100644
index 00000000000..47e0a01a8af
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/01intro.txt
@@ -0,0 +1,171 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Introduction
+----------------------------------------
+
+aufs [ei ju: ef es] | /ey-yoo-ef-es/ | [a u f s]
+1. abbrev. for "advanced multi-layered unification filesystem".
+2. abbrev. for "another unionfs".
+3. abbrev. for "auf das" in German which means "on the" in English.
+   Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
+   But "Filesystem aufs Filesystem" is hard to understand.
+4. abbrev. for "African Urban Fashion Show".
+
+AUFS is a filesystem with features:
+- multi-layered stackable unification filesystem; the member directory
+  is called a branch.
+- branch permission and attribute, 'readonly', 'real-readonly',
+  'readwrite', 'whiteout-able', 'link-able whiteout', etc. and their
+  combination.
+- internal "file copy-on-write".
+- logical deletion, whiteout.
+- dynamic branch manipulation, adding, deleting and changing permission.
+- allow bypassing aufs, user's direct branch access.
+- external inode number translation table and bitmap which maintains the
+  persistent aufs inode number.
+- seekable directory, including NFS readdir.
+- file mapping, mmap and sharing pages.
+- pseudo-link, hardlink over branches.
+- loopback mounted filesystem as a branch.
+- several policies to select one among multiple writable branches.
+- revert a single systemcall when an error occurs in aufs.
+- and more...
+
+
+Multi Layered Stackable Unification Filesystem
+----------------------------------------------------------------------
+Most people already know what it is.
+It is a filesystem which unifies several directories and provides a
+merged single directory. When users access a file, the access will be
+passed/re-directed/converted (sorry, I am not sure which English word is
+correct) to the real file on the member filesystem. The member
+filesystem is called a 'lower filesystem' or 'branch' and has a mode,
+'readonly' or 'readwrite.' The deletion of a file on the lower readonly
+branch is handled by creating a 'whiteout' on the upper writable
+branch.
+
+On LKML, there have been discussions about UnionMount (Jan Blunck,
+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
+different approaches to implement the merged-view.
+The former tries putting it into VFS, and the latter implements as a
+separate filesystem.
+(If I misunderstand these implementations, please let me know and I
+shall correct it, because it has been a long time since I last read
+their source files.)
+
+UnionMount's approach may be able to stay small, but it may be hard to
+share branches between several UnionMounts since its whiteout is
+implemented in the inode on the branch filesystem and is always
+shared. According to Bharata's post, readdir does not seem to be
+finished yet.
+There are several known missing features in these implementations,
+such as:
+- for users, the inode number may change silently. eg. copy-up.
+- link(2) may break by copy-up.
+- read(2) may get obsolete filedata (fstat(2) too).
+- fcntl(F_SETLK) may be broken by copy-up.
+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
+  open(O_RDWR).
+
+In linux-3.18, "overlay" filesystem (formerly known as "overlayfs") was
+merged into mainline. This is another implementation of UnionMount as a
+separated filesystem. All the limitations and known problems which
+UnionMount are equally inherited to "overlay" filesystem.
+
+Unionfs has a longer history. When I started implementing a stackable
+filesystem (Aug 2005), it already existed. It has virtual super_block,
+inode, dentry and file objects, and they have an array pointing to the
+same kind of objects on the lower layers. After contributing many
+patches to Unionfs, I re-started my own project, AUFS (Jun 2006).
+
+In AUFS, the structure of the filesystem resembles Unionfs's, but I
+implemented my own ideas, approaches and enhancements, and it became a
+totally different one.
+
+Comparing a DM snapshot and the fs-based implementation
+- the number of bytes to be copied between devices is much smaller.
+- only a single filesystem type can be used.
+- the fs must be writable; no readonly fs, even for the lower original
+  device, so a compression fs will not be usable. But if we use a
+  loopback mount, we may address this issue.
+  for instance,
+	mount /cdrom/squashfs.img /sq
+	losetup /sq/ext2.img
+	losetup /somewhere/cow
+	dmsetup "snapshot /dev/loop0 /dev/loop1 ..."
+- it will be difficult (or needs more operations) to extract the
+  difference between the original device and COW.
+- DM snapshot-merge may help a lot when users try merging. In the
+  fs-layer union, users will use rsync(1).
+
+You may want to read my old paper "Filesystems in LiveCD"
+(http://aufs.sourceforge.net/aufs2/report/sq/sq.pdf).
+
+
+Several characters/aspects/persona of aufs
+----------------------------------------------------------------------
+
+Aufs has several characters, aspects or personae.
+1. a filesystem, callee of VFS helpers
+2. sub-VFS, caller of VFS helpers for branches
+3. a virtual filesystem which maintains persistent inode numbers
+4. reader/writer of files on branches, much like an application
+
+1. Callee of VFS Helper
+As an ordinary Linux filesystem, aufs is a callee of VFS. For instance,
+unlink(2) from an application reaches the sys_unlink() kernel function
+and then vfs_unlink() is called. vfs_unlink() is one of the VFS helpers
+and it calls the filesystem-specific unlink operation. Aufs actually
+implements the unlink operation, but it behaves like a redirector.
+
+2. Caller of VFS Helper for Branches
+aufs_unlink() passes the unlink request to the branch filesystem as if
+it were called from VFS. So the called unlink operation of the branch
+filesystem acts as usual. As a caller of VFS helper, aufs should handle
+every necessary pre/post operation for the branch filesystem.
+- acquire the lock for the parent dir on a branch
+- lookup in a branch
+- revalidate dentry on a branch
+- mnt_want_write() for a branch
+- vfs_unlink() for a branch
+- mnt_drop_write() for a branch
+- release the lock on a branch
+
+3. Persistent Inode Number
+One of the most important issues for a filesystem is maintaining inode
+numbers. This is particularly important to support exporting a
+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
+backend block device of its own, but some storage is necessary to
+keep and maintain the inode numbers. It may require a large space and
+may not be suitable to keep in memory. Aufs rents some space from its
+first writable branch filesystem (by default) and creates file(s) on
+it. These files are created internally by aufs and are removed soon
+(currently) while being kept open.
+Note: Because these files are removed, they are totally gone after
+      unmounting aufs. It means the inode numbers are not persistent
+      across an unmount or reboot. I have a plan to make them really
+      persistent, which will be important for aufs on an NFS server.
+
+4. Read/Write Files Internally (copy-on-write)
+Because a branch can be readonly, when you write to a file on it, aufs
+will internally "copy-up" the file to the upper writable branch, and
+then write the originally requested data to that file. Generally the
+kernel doesn't open/read/write files actively. In aufs, even a single
+write may cause an internal "file copy". This behaviour is very similar
+to the cp(1) command.
+
+Some people may think it is better to pass such work to a user-space
+helper, instead of doing it in kernel space. Actually I am still
+thinking about it, but currently I have implemented it in kernel space.
diff --git a/Documentation/filesystems/aufs/design/02struct.txt b/Documentation/filesystems/aufs/design/02struct.txt
new file mode 100644
index 00000000000..0092b3dcfbe
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/02struct.txt
@@ -0,0 +1,258 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Basic Aufs Internal Structure
+
+Superblock/Inode/Dentry/File Objects
+----------------------------------------------------------------------
+Like an ordinary filesystem, aufs has its own
+superblock/inode/dentry/file objects. All these objects have a
+dynamically allocated array and store pointers to the same kind of
+objects on the lower filesystems, the branches.
+For example, when you build a union with one readwrite branch and one
+readonly branch, mounted on /au, /rw and /ro respectively:
+- /au = /rw + /ro
+- /ro/fileA exists but /rw/fileA does not
+
+The aufs lookup operation finds /ro/fileA and gets a dentry for it.
+These pointers are stored in an aufs dentry. The array in the aufs
+dentry will be:
+- [0] = NULL (because /rw/fileA doesn't exist)
+- [1] = /ro/fileA
+
+This style of array is essentially the same for the aufs
+superblock/inode/dentry/file objects.
+
+Because aufs supports manipulating branches, ie. adding/deleting/changing
+branches dynamically, these objects have their own generation. When
+branches are changed, the generation in the aufs superblock is
+incremented, and the generation in the other objects is compared with it
+when they are accessed. When the generation in an object is obsolete,
+aufs refreshes its internal array.
+
+
+Superblock
+----------------------------------------------------------------------
+Additionally the aufs superblock has some data for the policies to
+select one among multiple writable branches, the XIB file, pseudo-links
+and a kobject. See below for details.
+About the policies which support copy-down of a directory, see
+wbr_policy.txt too.
+
+
+Branch and XINO(External Inode Number Translation Table)
+----------------------------------------------------------------------
+Every branch has its own xino (external inode number translation table)
+file. The xino file is created and unlinked by aufs internally. When two
+members of a union exist on the same filesystem, they share a single
+xino file.
+The structure of a xino file is simple: just a sequence of aufs inode
+numbers, indexed by the lower inode number.
+In the above sample, assume the inode number of /ro/fileA is i111 and
+aufs assigns the inode number i999 to fileA. Then aufs writes 999 as a
+4(8)-byte value at offset 111 * 4(8) bytes in the xino file.
+
+When the inode numbers are not contiguous, the xino file will be sparse,
+with holes in it, and does not consume as much disk space as it might
+appear to. If your branch filesystem consumes disk space for such holes,
+then you should specify the 'xino=' option when mounting aufs.
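+
+For example, to place the xino file on another writable filesystem
+which handles sparse files well, something like the following may be
+used (the paths are only examples; see aufs.5 for the exact syntax of
+'xino='):
+# mount -t aufs -o br=/rw:/ro,xino=/ext2-vol/.aufs.xino none /au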
+
+Aufs has a mount option to free the disk blocks for such holes in XINO
+files on tmpfs or a ramdisk, but it is actually not so effective. If you
+meet a problem of disk shortage due to XINO files, then you should try
+"tmpfs-ino.patch" (and "vfs-ino.patch" too) in aufs4-standalone.git.
+The patch localizes the assignment of inode numbers per tmpfs-mount and
+avoids the holes in XINO files.
+
+Also a writable branch has three kinds of "whiteout bases". All of
+these exist when the branch is joined to aufs, and their names are
+doubly whiteout-ed, so that users will never see their names in the
+aufs hierarchy.
+1. a regular file which will be hardlinked to all whiteouts.
+2. a directory to store pseudo-links.
+3. a directory to store "orphan"-ed files temporarily.
+
+1. Whiteout Base
+   When you remove a file on a readonly branch, aufs handles it as a
+   logical deletion and creates a whiteout on the upper writable branch
+   as a hardlink of this base file, in order not to consume an inode on
+   the writable branch.
+2. Pseudo-link Dir
+   See below, Pseudo-link.
+3. Step-Parent Dir
+   When "fileC" exists on the lower readonly branch only, it is opened
+   and removed together with its parent dir, and then the user writes
+   something into it, aufs copies-up fileC to this directory, because
+   there is no other dir in which to store fileC. After creating a file
+   under this dir, the file is unlinked.
+
+Because aufs supports manipulating branches, ie. adding/deleting/changing
+them dynamically, a branch has its own id. When the branch order
+changes, aufs finds the new index by searching for the branch id.
+
+
+Pseudo-link
+----------------------------------------------------------------------
+Assume "fileA" exists on the lower readonly branch only and it is
+hardlinked to "fileB" on the branch. When you write something to fileA,
+aufs copies-up it to the upper writable branch. Additionally aufs
+creates a hardlink under the Pseudo-link Directory of the writable
+branch. The inode of a pseudo-link is kept in aufs super_block as a
+simple list. If fileB is read after unlinking fileA, aufs returns
+filedata from the pseudo-link instead of the lower readonly
+branch. Because the pseudo-link is based upon the inode, to keep the
+inode number by xino (see above) is essentially necessary.
+
+All the hardlinks under the Pseudo-link Directory of the writable branch
+should be restored to a proper location later. Aufs provides a utility
+to do this. The userspace helpers execute it at remounting and
+unmounting aufs by default.
+While this utility is running, it puts aufs into the pseudo-link
+maintenance mode. In this mode, only the process which began the
+maintenance mode (and its child processes) is allowed to operate in
+aufs. Some other processes which are not related to the pseudo-links
+will be allowed to run too, but the rest have to return an error or wait
+until the maintenance mode ends. If a process has already acquired an
+inode mutex (in VFS), it has to return an error.
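+
+The utility in aufs-util is called auplink. A minimal sketch of running
+it by hand (assuming /au is the aufs mountpoint; the mount/umount
+helpers normally invoke it for you, and the available sub-commands are
+described in the aufs manual):
+# auplink /au flush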
+
+
+XIB(external inode number bitmap)
+----------------------------------------------------------------------
+In addition to the xino file per branch, aufs has an external inode
+number bitmap in the superblock object. It is also an internal file,
+like a xino file.
+It is a simple bitmap to mark whether an aufs inode number is in use or
+not.
+To reduce file I/O, aufs prepares a single memory page to cache the xib.
+
+As well as XINO files, aufs has a feature to truncate/refresh XIB to
+reduce the number of consumed disk blocks for these files.
+
+
+Virtual or Vertical Dir, and Readdir in Userspace
+----------------------------------------------------------------------
+In order to support multiple layers (branches), the aufs readdir
+operation constructs a virtual dir block in memory. For readdir, aufs
+calls vfs_readdir() internally for each dir on the branches, merges
+their entries while eliminating the whiteout-ed ones, and sets the
+result in the file (dir) object. So the file object has its entry list
+until it is closed. The entry list will be updated when the file
+position is zero and the list has become obsolete. This decision is made
+automatically in aufs.
+
+The dynamically allocated memory block for the names of the entries has
+a unit of 512 bytes (by default) and stores the names contiguously (no
+padding). Another block for each entry is handled by kmem_cache too.
+While building dir blocks, aufs creates a hash list and judges whether
+each entry is whiteout-ed by an upper branch or already listed.
+The merged result is cached in the corresponding inode object and
+maintained by a customizable life-time option.
+
+Some people may say this can be a security hole or invite a DoS attack,
+since an opened and once readdir-ed dir (file object) holds its entry
+list and puts pressure on system memory. But I'd say it is similar
+to files under /proc or /sys. The virtual files there also hold a
+memory page (generally) while they are open. When an idea to reduce
+memory for them is introduced, it will be applied to aufs too.
+For those who really hate this situation, I've developed a readdir(3)
+library which performs this merging in userspace. You just need to set
+the LD_PRELOAD environment variable, and aufs will not consume any
+memory in kernel space for readdir(3).
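+
+A sketch of using that library (assuming "make install_ulib" in
+aufs-util installed it as /usr/lib/libau.so; the actual path and name
+on your system may differ):
+$ LD_PRELOAD=/usr/lib/libau.so ls /au > /dev/null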
+
+
+Workqueue
+----------------------------------------------------------------------
+Aufs sometimes requires privileged access to a branch, for instance
+in a copy-up/down operation. A user process may be going to make changes
+to a file which exists only in the lower readonly branch, while the mode
+of one of the ancestor directories is not writable by that user
+process. Here aufs copies-up the file together with its ancestors, and
+this may require privilege to set their owner/group/mode/etc.
+This is a typical case of the application character of aufs (see
+Introduction).
+
+Aufs uses a workqueue synchronously for this case. It creates its own
+workqueue. The workqueue is a kernel thread and has privilege. Aufs
+passes the request to call mkdir or write (for example), and waits for
+its completion. This approach simply solves the signal-handler problem.
+If aufs didn't adopt the workqueue and instead changed the privilege of
+the process, then the process might receive an unexpected SIGXFSZ or
+other signals.
+
+Aufs also uses the system global workqueue (the "events" kernel thread)
+for asynchronous tasks, such as handling inotify/fsnotify, re-creating a
+whiteout base, etc. This is unrelated to privilege.
+Most aufs operations try to acquire a rw_semaphore for the aufs
+superblock at the beginning, and at the same time wait for the
+completion of all queued asynchronous tasks.
+
+
+Whiteout
+----------------------------------------------------------------------
+The whiteout in aufs is very similar to Unionfs's. It is represented
+by its filename. UnionMount takes the approach of a file mode, but I am
+afraid several utilities (find(1) or something) will have to support it.
+
+Basically the whiteout represents "logical deletion", which stops aufs
+from looking up further, but it also represents "dir is opaque", which
+also stops further lookup.
+
+In aufs, rmdir(2) and rename(2) for a dir use a whiteout as an
+alternative.
+In order to make the several steps within a single systemcall
+revertible, aufs adopts an approach of renaming a directory to a
+temporary unique whiteout-ed name.
+For example, in rename(2) of a dir where the target dir already exists,
+aufs renames the target dir to a temporary unique whiteout-ed name
+before the actual rename on a branch, and then handles the other actions
+(make it opaque, update the attributes, etc). If an error happens in
+these actions, aufs simply renames the whiteout-ed name back and returns
+an error. If all succeed, aufs registers a function with the system
+global workqueue to remove the whiteout-ed unique temporary name
+completely and asynchronously.
+
+
+Copy-up
+----------------------------------------------------------------------
+It is a well-known feature or concept.
+When a user modifies a file on a readonly branch, aufs performs a
+"copy-up" internally and makes the change to the new file on the upper
+writable branch.
+When the triggering systemcall does not update the timestamps of the
+parent dir, aufs reverts them after the copy-up.
+
+
+Move-down (aufs3.9 and later)
+----------------------------------------------------------------------
+"Copy-up" is one of the essential feature in aufs. It copies a file from
+the lower readonly branch to the upper writable branch when a user
+changes something about the file.
+"Move-down" is an opposite action of copy-up. Basically this action is
+ran manually instead of automatically and internally.
+For desgin and implementation, aufs has to consider these issues.
+- whiteout for the file may exist on the lower branch.
+- ancestor directories may not exist on the lower branch.
+- diropq for the ancestor directories may exist on the upper branch.
+- free space on the lower branch will reduce.
+- another access to the file may happen during moving-down, including
+  UDBA (see "Revalidate Dentry and UDBA").
+- the file should not be hard-linked nor pseudo-linked. they should be
+  handled by auplink utility later.
+
+Sometimes users want to move a file down from the upper writable branch
+to the lower readonly or writable branch. For instance,
+- the free space of the upper writable branch is about to run out.
+- a new intermediate branch is being created between the upper and lower
+  branches.
+- etc.
+
+For this purpose, use "aumvdown" command in aufs-util.git.
diff --git a/Documentation/filesystems/aufs/design/03atomic_open.txt b/Documentation/filesystems/aufs/design/03atomic_open.txt
new file mode 100644
index 00000000000..fb8cf0bc8c7
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03atomic_open.txt
@@ -0,0 +1,85 @@
+
+# Copyright (C) 2015-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Support for a branch which has its own ->atomic_open()
+----------------------------------------------------------------------
+The filesystems which implement their own ->atomic_open() are not in the
+majority. For example NFSv4 does, and aufs should call NFSv4's
+->atomic_open(), particularly for the open(O_CREAT|O_EXCL, 0400) case.
+Without ->atomic_open(), NFSv4 returns an error for this open(2). I am
+not sure whether all filesystems which have ->atomic_open() behave like
+this, but NFSv4 surely returns the error.
+
+In order to support ->atomic_open() for aufs, there are a few
+approaches.
+
+A. Introduce aufs_atomic_open()
+   - calls one of VFS:do_last(), lookup_open() or atomic_open() for
+     branch fs.
+B. Introduce aufs_atomic_open() calling create, open and chmod. this is
+   an aufs user Pip Cet's approach
+   - calls aufs_create(), VFS finish_open() and notify_change().
+   - pass fake-mode to finish_open(), and then correct the mode by
+     notify_change().
+C. Extend aufs_open() to call branch fs's ->atomic_open()
+   - no aufs_atomic_open().
+   - aufs_lookup() registers the TID to an aufs internal object.
+   - aufs_create() does nothing when the matching TID is registered, but
+     registers the mode.
+   - aufs_open() calls branch fs's ->atomic_open() when the matching
+     TID is registered.
+D. Extend aufs_open() to re-try branch fs's ->open() with superuser's
+   credential
+   - no aufs_atomic_open().
+   - aufs_create() registers the TID to an internal object. this info
+     represents "this process created this file just now."
+   - when aufs gets EACCES from branch fs's ->open(), then confirm the
+     registered TID and re-try open() with superuser's credential.
+
+Pros and cons for each approach.
+
+A.
+   - straightforward but highly depends upon VFS internal.
+   - the atomic behaviour is kept.
+   - some of parameters such as nameidata are hard to reproduce for
+     branch fs.
+   - large overhead.
+B.
+   - easy to implement.
+   - the atomic behaviour is lost.
+C.
+   - the atomic behaviour is kept.
+   - dirty and tricky.
+   - VFS checks whether the file is created correctly after calling
+     ->create(), which means this approach doesn't work.
+D.
+   - easy to implement.
+   - the atomic behaviour is lost.
+   - to open a file with superuser's credential and give it to a user
+     process is a bad idea, since the file object keeps the credential
+     in it. It may affect LSM or something. This approach doesn't work
+     either.
+
+Approach A is ideal, but it is hard to implement. So here is a
+variation of A, which is the one to be implemented.
+
+A-1. Introduce aufs_atomic_open()
+     - calls the branch fs's ->atomic_open() if it exists; otherwise
+       calls vfs_create() and finish_open().
+     - the demerit is that the several checks after the branch fs's
+       ->atomic_open() are lost. In the ordinary case, the checks are
+       done by VFS:do_last(), lookup_open() and atomic_open(). Some can
+       be implemented in aufs, but not all, I am afraid.
diff --git a/Documentation/filesystems/aufs/design/03lookup.txt b/Documentation/filesystems/aufs/design/03lookup.txt
new file mode 100644
index 00000000000..5c3c97f11c5
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03lookup.txt
@@ -0,0 +1,113 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Lookup in a Branch
+----------------------------------------------------------------------
+Since aufs has the character of a sub-VFS (see Introduction), it
+performs lookup on branches as VFS does. It may be heavy work, but
+almost all lookup operations in aufs are the simplest case, ie. looking
+up only an entry directly connected to its parent. Digging down the
+directory hierarchy is unnecessary. VFS has a function lookup_one_len()
+for that use, and aufs calls it.
+
+When a branch is a remote filesystem, aufs basically relies upon its
+->d_revalidate(), and also forces the hardest revalidate tests for
+them.
+For d_revalidate, aufs implements three levels of revalidate tests. See
+"Revalidate Dentry and UDBA" for details.
+
+
+Test Only the Highest One for the Directory Permission (dirperm1 option)
+----------------------------------------------------------------------
+Let's try a case study.
+- aufs has two branches, the upper readwrite and the lower readonly.
+  /au = /rw + /ro
+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
+- the user invokes "chmod a+rx /au/dirA"
+- the internal copy-up is activated and "/rw/dirA" is created and its
+  permission bits are set to world readable.
+- then, does "/au/dirA" become world readable?
+
+In this case, /ro/dirA is still 0700 since it exists on a readonly
+branch, which may even be a natively readonly filesystem. If aufs
+respects the lower branch, it should not respond to readdir requests
+from other users. But the user allowed that by chmod. Should aufs really
+refuse to show the entries under /ro/dirA?
+
+To be honest, I don't have a good solution for this case. So aufs
+implements the 'dirperm1' and 'nodirperm1' mount options, and leaves it
+to users.
+When dirperm1 is specified, aufs checks only the highest one for the
+directory permission, and shows the entries. Otherwise, as usual, it
+checks every dir existing on all branches and rejects the request.
+
+As a side effect, the dirperm1 option improves the performance of aufs,
+because the number of permission checks is reduced when there are many
+branches.
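+
+For example, the option can be given at mount time or by remount (the
+branch paths below are only examples):
+# mount -t aufs -o br=/rw:/ro,dirperm1 none /au
+# mount -o remount,nodirperm1 /au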
+
+
+Revalidate Dentry and UDBA (User's Direct Branch Access)
+----------------------------------------------------------------------
+Generally VFS helpers re-validate a dentry as a part of lookup.
+0. digging down the directory hierarchy.
+1. lock the parent dir by its i_mutex.
+2. lookup the final (child) entry.
+3. revalidate it.
+4. call the actual operation (create, unlink, etc.)
+5. unlock the parent dir
+
+If the filesystem implements its ->d_revalidate() (step 3), then it is
+called. Aufs actually implements it and checks whether the dentry on a
+branch is still valid.
+But that is not enough, because aufs has to release the lock for the
+parent dir on a branch at the end of ->lookup() (step 2) and
+->d_revalidate() (step 3) while the i_mutex of the aufs dir is still
+held by VFS.
+If the file on a branch is changed directly, eg. bypassing aufs, after
+aufs has released the lock, then the subsequent operation may cause an
+unpleasant result.
+
+This situation is a result of the VFS architecture: ->lookup() and
+->d_revalidate() are separated. But I never say it is wrong. It is a
+good design from VFS's point of view. It is just not suitable for the
+sub-VFS character of aufs.
+
+Aufs supports such cases with three levels of revalidation, selectable
+by the user.
+1. Simple Revalidate
+   In addition to the native flow in VFS, confirm the child-parent
+   relationship on the branch just after locking the parent dir on the
+   branch in the "actual operation" (step 4). When this validation
+   fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
+   checks the validity of the dentry on the branches.
+2. Monitor Changes Internally by Inotify/Fsnotify
+   In addition to the above, in the "actual operation" (step 4) aufs
+   re-looks-up the dentry on the branch, and returns EBUSY if it finds a
+   different dentry.
+   Additionally, aufs sets an inotify/fsnotify watch for every dir on the
+   branches while it is cached. When an event is notified, aufs registers
+   a function with the kernel 'events' thread by schedule_work(), and the
+   function sets some special status in the cached aufs dentry and inode
+   private data. If they are not cached, then aufs has nothing to
+   do. When the same file is accessed through aufs (steps 0-3) later,
+   aufs will detect the status and refresh all necessary data.
+   In this mode, aufs has to ignore the events which are fired by aufs
+   itself.
+3. No Extra Validation
+   This is the simplest level; it doesn't add any additional revalidation
+   test and skips the revalidation in step 4. It is useful and improves
+   aufs performance when the system surely hides the aufs branches from
+   users, eg. by over-mounting something (or another method).
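+
+The level is selected by the 'udba=' mount option. As a sketch (the
+keywords reval, notify and none are taken from aufs.5; double-check
+them against your aufs version):
+# mount -t aufs -o br=/rw:/ro,udba=reval none /au
+# mount -o remount,udba=notify /au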
diff --git a/Documentation/filesystems/aufs/design/04branch.txt b/Documentation/filesystems/aufs/design/04branch.txt
new file mode 100644
index 00000000000..da5200be41a
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/04branch.txt
@@ -0,0 +1,74 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Branch Manipulation
+
+Since aufs supports dynamic branch manipulation, ie. adding/removing a
+branch and changing its permission/attribute, there is a lot of work to
+do.
+
+
+Add a Branch
+----------------------------------------------------------------------
+o Confirm that the dir being added exists outside of aufs (including via
+  loopback mount), and check its various attributes.
+o Initialize the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o Check the owner/group/mode of the directory
+  When the owner/group/mode of the directory being added differ from
+  those of the existing branches, aufs issues a warning because it may
+  pose a security risk.
+  For example, when an upper writable branch has a world-writable empty
+  top directory, a malicious user can create any file on the writable
+  branch directly, as if copying it up and modifying it manually. If
+  something like /etc/{passwd,shadow} exists on the lower readonly
+  branch but not on the upper writable branch, and the writable branch
+  is world-writable, then a malicious user may create /etc/passwd on the
+  writable branch directly and the forged file will be valid in aufs.
+  I am afraid this can be a security issue, but aufs can do nothing
+  except produce a warning.
+
+
+Delete a Branch
+----------------------------------------------------------------------
+o Confirm the branch being deleted is not busy
+  Generally, one merit of adopting the "remount" interface to manipulate
+  branches is that it discards caches. When deleting a branch, aufs
+  checks the still-cached (and connected) dentries and inodes. If there
+  are any, then they are all in use. An inode without its corresponding
+  dentry can be alive on its own (for example, in the inotify/fsnotify
+  case).
+
+  For each cached one, aufs checks whether an entry with the same name
+  exists on the other branches.
+  If the cached one is a directory, then because aufs provides a merged
+  view to users, as long as one dir is left on any branch aufs can still
+  show the dir to users. In this case, the branch can be removed from
+  aufs. Otherwise aufs rejects deleting the branch.
+
+  If any file on the branch being deleted is opened by aufs, then aufs
+  also rejects deleting it.
+
+
+Modify the Permission of a Branch
+----------------------------------------------------------------------
+o Re-initialize or remove the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o rw --> ro: Confirm the branch being modified is not busy
+  Aufs rejects the request if any of these conditions is true.
+  - a file on the branch is mmap-ed.
+  - a regular file on the branch is opened for write and there is no
+    entry with the same name on an upper branch.
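+
+As a sketch, these manipulations are typically requested via remount;
+the option keywords below (add, del, mod) follow aufs.5 and the paths
+are only examples:
+# mount -o remount,add:1:/new/branch=ro /au    # add at index 1
+# mount -o remount,mod:/another/branch=ro /au  # rw --> ro
+# mount -o remount,del:/old/branch /au         # delete the branch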
diff --git a/Documentation/filesystems/aufs/design/05wbr_policy.txt b/Documentation/filesystems/aufs/design/05wbr_policy.txt
new file mode 100644
index 00000000000..0262084bf63
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/05wbr_policy.txt
@@ -0,0 +1,64 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Policies to Select One among Multiple Writable Branches
+----------------------------------------------------------------------
+When the number of writable branches is more than one, aufs has to
+decide on the target branch for file creation or copy-up. By default,
+the highest writable branch which has the parent (or an ancestor) dir of
+the target file is chosen (the top-down-parent policy).
+At the user's request, aufs implements some other policies to select the
+writable branch: for file creation, round-robin, most-free-space and
+other policies; for copy-up, top-down-parent, bottom-up-parent,
+bottom-up and others.
+
+As expected, the round-robin policy selects the branches in a circular
+manner. When you have two writable branches and create 10 new files, 5
+files will be created on each branch. The mkdir(2) systemcall is an
+exception: when you create 10 new directories, all will be created on
+the same branch.
+The most-free-space policy selects the one which has the most free
+space among the writable branches. The amount of free space is checked
+by aufs internally, and users can specify the check interval.
+
+The policies for copy-up are simpler:
+top-down-parent is equivalent to the policy of the same name for
+creation;
+bottom-up-parent selects the writable branch which has the parent dir
+and is the nearest upper one from the copyup-source;
+bottom-up selects the nearest upper writable branch from the
+copyup-source, regardless of the existence of the parent dir.
+
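+These policies are selected by the 'create=' and 'cpup=' mount options.
+As a sketch (the keywords rr, mfs and bup are taken from aufs.5;
+double-check them against your aufs version):
+# mount -t aufs -o br=/rw1:/rw2:/ro,create=rr,cpup=bup none /au
+# mount -o remount,create=mfs /au
+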
+There are some rules or exceptions in applying these policies.
+- If there is a readonly branch above the policy-selected branch and
+  the parent dir is marked as opaque (a variation of whiteout), or the
+  target (to-be-created) file is whiteout-ed on that upper readonly
+  branch, then the result of the policy is ignored and the target file
+  will be created on the nearest writable branch above the readonly
+  branch.
+- If there is a writable branch above the policy-selected branch and
+  the parent dir is marked as opaque or the target file is whiteout-ed
+  on that branch, then the result of the policy is ignored and the
+  target file will be created on the highest one among the upper
+  writable branches which have the diropq or whiteout. In the whiteout
+  case, aufs removes it as usual.
+- The link(2) and rename(2) systemcalls are exceptions in every policy.
+  They try to select the branch where the source exists, as far as
+  possible, since copying-up a large file takes a long time. If that is
+  not possible, ie. the branch where the source exists is readonly, then
+  they follow the copy-up policy.
+- There is an exception for rename(2) when the target exists.
+  If the rename target exists, aufs compares the indices of the branches
+  where the source and the target exist and selects the higher one. If
+  the selected branch is readonly, then aufs follows the copy-up policy.
diff --git a/Documentation/filesystems/aufs/design/06dirren.dot b/Documentation/filesystems/aufs/design/06dirren.dot
new file mode 100644
index 00000000000..2d62bb6dd55
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06dirren.dot
@@ -0,0 +1,31 @@
+
+// to view this graph, run dot(1) command in GRAPHVIZ.
+
+digraph G {
+node [shape=box];
+whinfo [label="detailed info file\n(lower_brid_root-hinum, h_inum, namelen, old name)"];
+
+node [shape=oval];
+
+aufs_rename -> whinfo [label="store/remove"];
+
+node [shape=oval];
+inode_list [label="h_inum list in branch\ncache"];
+
+node [shape=box];
+whinode [label="h_inum list file"];
+
+node [shape=oval];
+brmgmt [label="br_add/del/mod/umount"];
+
+brmgmt -> inode_list [label="create/remove"];
+brmgmt -> whinode [label="load/store"];
+
+inode_list -> whinode [style=dashed,dir=both];
+
+aufs_rename -> inode_list [label="add/del"];
+
+aufs_lookup -> inode_list [label="search"];
+
+aufs_lookup -> whinfo [label="load/remove"];
+}
diff --git a/Documentation/filesystems/aufs/design/06dirren.txt b/Documentation/filesystems/aufs/design/06dirren.txt
new file mode 100644
index 00000000000..38ae77b2c84
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06dirren.txt
@@ -0,0 +1,102 @@
+
+# Copyright (C) 2017-2020 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Special handling for renaming a directory (DIRREN)
+----------------------------------------------------------------------
+First, let's assume we have a simple usecase.
+
+- /u = /rw + /ro
+- /rw/dirA exists
+- /ro/dirA and /ro/dirA/file exist too
+- there is no dirB on both branches
+- a user issues rename("dirA", "dirB")
+
+Now, how should aufs behave for this rename(2)?
+There are a few possible cases.
+
+A. returns EROFS,
+   since dirA exists on a readonly branch which cannot be renamed.
+B. returns EXDEV.
+   It is possible to copy-up dirA (only the dir itself), but the child
+   entries ("file" in this case) should not be copied up. It must be a
+   bad approach to copy-up recursively.
+C. returns success.
+   Even though the branch /ro is readonly, aufs tries renaming it.
+   Obviously this is a violation of aufs' policy.
+D. constructs extra information which indicates that /ro/dirA should
+   be handled as the name of dirB.
+   overlayfs has a similar feature called REDIRECT.
+
+Until now, aufs has implemented case B only, returning EXDEV, and
+expects the userspace application to behave like mv(1), which retries
+rename(2) recursively.
+
+A new aufs feature called DIRREN is introduced to implement case D.
+Several pieces of "extra information" are added.
+
+1. detailed info per renamed directory
+   path: /rw/dirB/$AUFS_WH_DR_INFO_PFX.<lower branch-id>
+2. the inode-number list of directories on a branch
+   path: /rw/dirB/$AUFS_WH_DR_BRHINO
+
+The filename of "detailed info per directory" represents the lower
+branch, and its format is
+- a type of the branch id
+  one of these.
+  + uuid (not implemented yet)
+  + fsid
+  + dev
+- the inode-number of the branch root dir
+
+And it contains this info in a single regular file.
+- magic number
+- branch's inode-number of the logically renamed dir
+- the name of the before-renamed dir
+
+The "detailed info per directory" file is created in aufs rename(2), and
+loaded in any lookup.
+The info is considered in lookup for the matching case only. Here
+"matching" means that the root of branch (in the info filename) is same
+to the current looking-up branch. After looking-up the before-renamed
+name, the inode-number is compared. And the matched dentry is used.
+
+The "inode-number list of directories" is a regular file which contains
+simply the inode-numbers on the branch. The file is created or updated
+in removing the branch, and loaded in adding the branch. Its lifetime is
+equal to the branch.
+The list is refered in lookup, and when the current target inode is
+found in the list, the aufs tries loading the "detailed info per
+directory" and get the changed and valid name of the dir.
+
+Theoretically these "extra informaiton" may be able to be put into XATTR
+in the dir inode. But aufs doesn't choose this way because
+1. XATTR may not be supported by the branch (or its configuration)
+2. XATTR may have its size limit.
+3. XATTR may be less easy to convert than a regular file, when the
+   format of the info is changed in the future.
+At the same time, I agree that the regular file approach is much slower
+than XATTR approach. So, in the future, aufs may take the XATTR or other
+better approach.
+
+This DIRREN feature is enabled by the aufs configuration, and is
+activated by a new mount option.
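+
+As a sketch, assuming the kconfig symbol is CONFIG_AUFS_DIRREN and the
+mount option keyword is simply 'dirren' (verify both against your aufs
+version and aufs.5):
+# mount -t aufs -o br=/rw:/ro,dirren none /u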
+
+For the more complicated cases, there is an interaction with the UDBA
+option, which detects direct access to the branches (bypassing aufs) and
+maintains the caches in aufs. Since a single cached aufs dentry may
+contain two names, before- and after-rename, the name comparison in the
+UDBA handler may not work correctly. In this case, the behaviour will be
+equivalent to the udba=reval case.
diff --git a/Documentation/filesystems/aufs/design/06fhsm.txt b/Documentation/filesystems/aufs/design/06fhsm.txt
new file mode 100644
index 00000000000..df985662bef
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06fhsm.txt
@@ -0,0 +1,120 @@
+
+# Copyright (C) 2011-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+File-based Hierarchical Storage Management (FHSM)
+----------------------------------------------------------------------
+Hierarchical Storage Management (or HSM) is a well-known feature in the
+storage world. Aufs provides this feature in a file-based manner with
+multiple writable branches, based upon the principle of "the Colder, the
+Lower". Here "colder" refers to less-used files, and "lower" refers to
+the position in the vertical order of the stacked branches.
+These multiple writable branches are prioritized, ie. the topmost one
+should be the fastest drive and be used heavily.
+
+o Characters in aufs FHSM story
+- aufs itself and a new branch attribute.
+- a new ioctl interface to move-down and to establish a connection with
+  the daemon ("move-down" is a converse of "copy-up").
+- userspace tool and daemon.
+
+The userspace daemon establishes a connection with aufs and waits for
+the notification. The notified information is very similar to struct
+statfs containing the number of consumed blocks and inodes.
+When the consumed blocks/inodes of a branch exceed the user-specified
+upper watermark, the daemon activates its move-down process until the
+consumed blocks/inodes reach the user-specified lower watermark.
+
+The actual move-down is done by aufs based upon the request from
+user-space since we need to maintain the inode number and the internal
+pointer arrays in aufs.
+
+Currently aufs FHSM handles the regular files only. Additionally they
+must not be hard-linked nor pseudo-linked.
+
+
+o Cooperation between aufs and the user-space daemon
+  While the userspace daemon has the connection established, aufs sends
+  a small notification to it whenever aufs writes something into a
+  writable branch. But this may be costly, since aufs issues statfs(2)
+  internally, so the user can specify a new option to cache the
+  info. The notification is actually controlled by these factors.
+  + the specified cache time.
+  + whether it is classified as "force" by aufs internally.
+  Until the specified time expires, aufs doesn't send the info
+  except in the forced cases. When aufs decides to force, the info is
+  always notified to userspace.
+  For example, the number of free inodes is generally large enough and
+  a shortage of them happens rarely, so aufs doesn't force the
+  notification when creating a new file, directory and so on. This is
+  the typical case where aufs doesn't force.
+  When aufs writes the actual filedata and the file consumes any new
+  blocks, aufs forces the notification.
+
+
+o Interfaces in aufs
+- New branch attribute.
+  + fhsm
+    Specifies that the branch is managed by the FHSM feature; in other
+    words, it is a participant in FHSM.
+    When nofhsm is set to the branch, it will not be the source/target
+    branch of the move-down operation. This attribute is set
+    independently from coo and moo attributes, and if you want full
+    FHSM, you should specify them as well.
+- New mount option.
+  + fhsm_sec
+    Specifies a number of seconds used to suppress the notification of
+    less important info.
+- New ioctl.
+  + AUFS_CTL_FHSM_FD
+    Creates a new file descriptor from which userspace can read the
+    notification (a subset of struct statfs) from aufs.
+- Module parameter 'brs'
+  It has to be set to 1. Otherwise the new mount option 'fhsm' will not
+  be set.
+- mount helpers /sbin/mount.aufs and /sbin/umount.aufs
+  When there are two or more branches with fhsm attributes,
+  /sbin/mount.aufs invokes the user-space daemon and /sbin/umount.aufs
+  terminates it. As a result of remounting and branch-manipulation, the
+  number of branches with fhsm attribute can be one. In this case,
+  /sbin/mount.aufs will terminate the user-space daemon.
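+
+Putting these pieces together, a sketch of enabling FHSM might look
+like this (the branch paths and the numbers are only examples; the
+'rw+fhsm' attribute and 'fhsm_sec=' syntax should be checked against
+aufs.5 for your version):
+# modprobe aufs brs=1
+# mount -t aufs -o br=/ssd=rw+fhsm:/hdd=rw+fhsm,fhsm_sec=30 none /au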
+
+
+Finally, the operation is done in kernel-space in these steps.
+- make sure that,
+  + no one else is using the file.
+  + the file is not hard-linked.
+  + the file is not pseudo-linked.
+  + the file is a regular file.
+  + the parent dir is not opaqued.
+- find the target writable branch.
+- make sure the file is not whiteout-ed by the upper (than the target)
+  branch.
+- make the parent dir on the target branch.
+- mutex lock the inode on the branch.
+- unlink the whiteout on the target branch (if exists).
+- lookup and create the whiteout-ed temporary name on the target branch.
+- copy the file as the whiteout-ed temporary name on the target branch.
+- rename the whiteout-ed temporary name to the original name.
+- unlink the file on the source branch.
+- maintain the internal pointer array and the external inode number
+  table (XINO).
+- maintain the timestamps and other attributes of the parent dir and the
+  file.
+
+And of course, in every step, an error may happen. So the operation
+should restore the original file state after an error happens.
diff --git a/Documentation/filesystems/aufs/design/06mmap.txt b/Documentation/filesystems/aufs/design/06mmap.txt
new file mode 100644
index 00000000000..9184f671043
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06mmap.txt
@@ -0,0 +1,72 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+mmap(2) -- File Memory Mapping
+----------------------------------------------------------------------
+In aufs, the file-mapped pages are handled by the branch fs directly,
+with no interaction with aufs. This means aufs_mmap() calls the branch
+fs's ->mmap().
+This approach is simple and good, but there is one problem.
+Under /proc, several entries show the mmapped files by their paths (with
+device and inode number), and the printed path will be the path on the
+branch fs instead of on the virtual aufs.
+This is not a problem in most cases, but some utilities such as lsof(1)
+(and their users) may expect the path on aufs.
+
+To address this issue, aufs adds a new member called vm_prfile to struct
+vm_area_struct (and struct vm_region). The original vm_file points to
+the file on the branch fs in order to handle everything correctly as
+usual. The new vm_prfile points to the virtual file in aufs, and the
+show-functions in procfs refer to vm_prfile if it is set.
+We also need to maintain several other places which touch vm_file,
+such as:
+- fork()/clone() copies the vma and the reference count of vm_file is
+  incremented.
+- merging vmas maintains the ref count too.
+
+This is not a good approach. It just fakes the printed path. But it
+leaves all behaviour around f_mapping unchanged. This is surely an
+advantage.
+Actually aufs had previously adopted another, more complicated approach
+which calls generic_file_mmap() and handles struct vm_operations_struct.
+With that approach, aufs met a hard problem and I could not solve it
+without switching approaches.
+
+There may be yet another approach:
+- bind-mount the branch-root onto the aufs-root internally
+- grab the new vfsmount (ie. struct mount)
+- lazy-umount the branch-root internally
+- in open(2) the aufs-file, open the branch-file with the hidden
+  vfsmount (instead of the original branch's vfsmount)
+- ideally this "bind-mount and lazy-umount" should be done atomically,
+  but it may be possible from userspace by the mount helper.
+
+Adding the internal hidden vfsmount and using it in opening a file, the
+file path under /proc will be printed correctly. This approach looks
+smarter, but I am afraid it is not feasible:
+- the aufs-root may be bind-mounted later. When that happens, another
+  hidden vfsmount will be required.
+- it is hard to get the chance to bind-mount and lazy-umount
+  + in kernel-space, FS can have vfsmount in open(2) via
+    file->f_path, and aufs can know its vfsmount. But several locks are
+    already acquired, and if aufs tries to bind-mount and lazy-umount
+    here, then it may cause a deadlock.
+  + in user-space, bind-mount doesn't invoke the mount helper.
+- since /proc shows the dev and ino, aufs has to give the vma this
+  information as well. It means a new member vm_prinode would be
+  necessary, which is essentially equivalent to vm_prfile described
+  above.
+
+I have to give up this "looks-smarter" approach.
diff --git a/Documentation/filesystems/aufs/design/06xattr.txt b/Documentation/filesystems/aufs/design/06xattr.txt
new file mode 100644
index 00000000000..d0f6aedfe2d
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06xattr.txt
@@ -0,0 +1,96 @@
+
+# Copyright (C) 2014-2020 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Listing XATTR/EA and getting the value
+----------------------------------------------------------------------
+For the inode standard attributes (owner, group, timestamps, etc.), aufs
+shows the values from the topmost existing file. This behaviour is good
+for the non-dir entries since the behaviour exactly matches the shown
+information. But for directories, aufs considers all the same-named
+entries on the lower branches. This means that if one of the lower
+entries rejects a readdir call, then aufs returns an error even if the
+topmost entry allows it. This behaviour is necessary to respect the
+branch fs's security, but can confuse users since the user-visible standard
+attributes don't match the behaviour.
+To address this issue, aufs has a mount option called dirperm1 which
+checks the permission for the topmost entry only, and ignores the lower
+entry's permission.
+
+A similar issue can happen around XATTR.
+getxattr(2) and listxattr(2) families behave as if dirperm1 option is
+always set. Otherwise these very unpleasant situations would happen:
+- listxattr(2) may return duplicated entries.
+- users may never be able to remove or reset the XATTR.
+
+
+XATTR/EA support in the internal (copy,move)-(up,down)
+----------------------------------------------------------------------
+Generally the extended attributes of an inode are categorized as follows:
+- "security" for LSM and capability.
+- "system" for posix ACL, 'acl' mount option is required for the branch
+  fs generally.
+- "trusted" for userspace, CAP_SYS_ADMIN is required.
+- "user" for userspace, 'user_xattr' mount option is required for the
+  branch fs generally.
+
+Moreover there are some other categories. Aufs handles these rather
+unpopular categories as the ordinary ones, ie. there is no special
+condition nor exception.
+
+In copy-up, the support for XATTR on the dst branch may differ from the
+src branch. In this case, the copy-up operation will get an error and
+the original user operation which triggered the copy-up will fail. In
+the worst case, every copy-up may fail.
+When both the src and dst branches support XATTR and an error occurs
+while copying XATTR, then the copy-up should obviously fail. That is a
+good reason and aufs should return an error to userspace. But when only
+the src branch supports that XATTR, aufs should not return an error.
+For example, the src branch supports ACL but the dst branch doesn't,
+because the dst branch may not support it natively or may temporarily
+not support it due to the "noacl" mount option. Of course, the dst
+branch fs may NOT return an error even if the XATTR is not supported; it
+is totally up to the branch fs.
+
+Anyway when the aufs internal copy-up gets an error from the dst branch
+fs, then aufs tries removing the just copied entry and returns the error
+to userspace. The worst case of this situation is that every copy-up
+will fail.
+
+For the copy-up operation, there are two basic approaches.
+- copy the specified XATTR only (by category above), and return the
+  error unconditionally if it happens.
+- copy all XATTR, and ignore the error on the specified category only.
+
+In order to support XATTR and to implement the correct behaviour, aufs
+chooses the latter approach and introduces some new branch attributes,
+"icexsec", "icexsys", "icextr", "icexusr", and "icexoth".
+They correspond to the XATTR namespaces (see above). Additionally, as a
+convenience, "icex" is also provided, which means all "icex*" attributes
+are set (here the word "icex" stands for "ignore copy-error on XATTR").
+
+The meaning of these attributes is to ignore the error from setting
+XATTR on that branch.
+Note that aufs tries copying all XATTR unconditionally, and ignores the
+error from the dst branch according to the specified attributes.
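+
+A rough, self-contained sketch of this "copy everything, ignore errors
+per category" policy follows. The xattr names, the category mapping and
+the icex bit values are illustrative only and are not taken from the
+aufs code.
+
+	#include <stdio.h>
+	#include <string.h>
+
+	/* Illustrative icex bits -- one per XATTR namespace listed above. */
+	enum { ICEX_SEC = 1, ICEX_SYS = 2, ICEX_TR = 4, ICEX_USR = 8, ICEX_OTH = 16 };
+
+	static int category_of(const char *name)
+	{
+		if (!strncmp(name, "security.", 9)) return ICEX_SEC;
+		if (!strncmp(name, "system.", 7))   return ICEX_SYS;
+		if (!strncmp(name, "trusted.", 8))  return ICEX_TR;
+		if (!strncmp(name, "user.", 5))     return ICEX_USR;
+		return ICEX_OTH;
+	}
+
+	/* Pretend the dst branch rejects "system.*" (e.g. mounted without ACL). */
+	static int set_xattr_on_dst(const char *name)
+	{
+		return strncmp(name, "system.", 7) ? 0 : -1;
+	}
+
+	/* Copy every XATTR; an error is fatal only when its category is not
+	 * covered by the branch's icex mask. */
+	static int copy_all_xattr(const char **names, int n, unsigned icex)
+	{
+		int i, err;
+
+		for (i = 0; i < n; i++) {
+			err = set_xattr_on_dst(names[i]);
+			if (err && !(category_of(names[i]) & icex))
+				return err; /* copy-up fails */
+			if (err)
+				printf("ignored (icex): %s\n", names[i]);
+		}
+		return 0;
+	}
+
+	int main(void)
+	{
+		const char *names[] = { "security.selinux",
+					"system.posix_acl_access", "user.comment" };
+
+		/* With icexsys set, the ACL failure is ignored, copy-up succeeds. */
+		return copy_all_xattr(names, 3, ICEX_SYS);
+	}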
+
+Some XATTR may have a default value, which may come from the parent dir
+or the environment. If the default value is set at file-creation time,
+it will be overwritten by copy-up.
+I am afraid some contradiction may happen here.
+Do we need another attribute to stop copying XATTR? I am unsure. For
+now, aufs implements the branch attributes to ignore the error.
diff --git a/Documentation/filesystems/aufs/design/07export.txt b/Documentation/filesystems/aufs/design/07export.txt
new file mode 100644
index 00000000000..6fcb00d7dbd
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/07export.txt
@@ -0,0 +1,58 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Export Aufs via NFS
+----------------------------------------------------------------------
+Here is an approach.
+- like xino/xib, add a new file 'xigen' which stores aufs inode
+  generation.
+- iget_locked(): initialize aufs inode generation for a new inode, and
+  store it in xigen file.
+- destroy_inode(): increment the aufs inode generation and store it in
+  the xigen file. This is necessary even if the inode is not unlinked,
+  because any data of the inode may be changed by UDBA.
+- encode_fh(): for a root dir, simply return FILEID_ROOT. Otherwise
+  build the file handle from the following fields (a sketch of this
+  layout appears after this list):
+  + branch id (4 bytes)
+  + superblock generation (4 bytes)
+  + inode number (4 or 8 bytes)
+  + parent dir inode number (4 or 8 bytes)
+  + inode generation (4 bytes)
+  + return value of exportfs_encode_fh() for the parent on a branch (4
+    bytes)
+  + file handle for a branch (by exportfs_encode_fh())
+- fh_to_dentry():
+  + find the index of a branch from its id in the handle, and check that
+    it still exists in aufs.
+  + 1st level: get the inode number from handle and search it in cache.
+  + 2nd level: if not found in cache, get the parent inode number from
+    the handle and search it in cache. and then open the found parent
+    dir, find the matching inode number by vfs_readdir() and get its
+    name, and call lookup_one_len() for the target dentry.
+  + 3rd level: if the parent dir is not cached, call
+    exportfs_decode_fh() for a branch and get the parent on a branch,
+    build a pathname of it, convert it to a pathname in aufs, and call
+    path_lookup(). Now aufs gets a parent dir dentry, and then handles
+    it as the 2nd level.
+  + to open the dir, aufs needs a struct vfsmount. Aufs keeps a vfsmount
+    for every branch, but not for itself. To get this, aufs (currently)
+    searches the current->nsproxy->mnt_ns list. It may not be a good
+    idea, but I did not find another approach.
+  + test the generation of the obtained inode.
+- every inode operation: they may get EBUSY due to UDBA. In this case,
+  convert it into ESTALE for NFSD.
+- readdir(): call lockdep_on/off() because filldir in NFSD calls
+  lookup_one_len(), vfs_getattr(), encode_fh() and others.
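+
+For illustration, the handle layout listed under encode_fh() above could
+be pictured as the struct below. The field names and the fixed sizes are
+guesses chosen for readability; they are not the actual aufs
+definitions.
+
+	#include <stdint.h>
+
+	/* Picture of the handle layout above -- not the aufs definition. */
+	struct aufs_nfs_handle {
+		uint32_t brid;      /* branch id */
+		uint32_t sbgen;     /* superblock generation */
+		uint64_t ino;       /* inode number ("4 or 8 bytes") */
+		uint64_t dir_ino;   /* parent dir inode number */
+		uint32_t igen;      /* aufs inode generation (xigen) */
+		int32_t  fh_type;   /* exportfs_encode_fh() result for the parent */
+		uint32_t h_fh[];    /* the branch's own file handle, variable size */
+	};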
diff --git a/Documentation/filesystems/aufs/design/08shwh.txt b/Documentation/filesystems/aufs/design/08shwh.txt
new file mode 100644
index 00000000000..d7e58319086
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/08shwh.txt
@@ -0,0 +1,52 @@
+
+# Copyright (C) 2005-2020 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Show Whiteout Mode (shwh)
+----------------------------------------------------------------------
+Generally aufs hides the names of whiteouts. But in some cases, showing
+them is very useful for users, for instance when creating a new middle
+layer (branch) by merging existing layers.
+
+(borrowing aufs1 HOW-TO from a user, Michael Towers)
+When you have three branches,
+- Bottom: 'system', squashfs (underlying base system), read-only
+- Middle: 'mods', squashfs, read-only
+- Top: 'overlay', ram (tmpfs), read-write
+
+The top layer is loaded at boot time and saved at shutdown, to preserve
+the changes made to the system during the session.
+When larger changes have been made, or smaller changes have accumulated,
+the size of the saved top layer data grows. At this point, it would be
+nice to be able to merge the two overlay branches ('mods' and 'overlay')
+and rewrite the 'mods' squashfs, clearing the top layer and thus
+restoring save and load speed.
+
+This merging is simplified by the use of another aufs mount, of just the
+two overlay branches using the 'shwh' option.
+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
+	aufs /livesys/merge_union
+
+A merged view of these two branches is then available at
+/livesys/merge_union, and the new feature is that the whiteouts are
+visible!
+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
+writing to all branches. Also the default mode for all branches is 'ro'.
+It is now possible to save the combined contents of the two overlay
+branches to a new squashfs, e.g.:
+# mksquashfs /livesys/merge_union /path/to/newmods.squash
+
+This new squashfs archive can be stored on the boot device and the
+initramfs will use it to replace the old one at the next boot.
diff --git a/Documentation/filesystems/aufs/design/10dynop.txt b/Documentation/filesystems/aufs/design/10dynop.txt
new file mode 100644
index 00000000000..d55cae285df
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/10dynop.txt
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2010-2020 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Dynamically customizable FS operations
+----------------------------------------------------------------------
+Generally FS operations (struct inode_operations, struct
+address_space_operations, struct file_operations, etc.) are defined as
+"static const", but that never means an FS has only one set of
+operations. Some FSs have multiple sets of them; for instance, ext2 has
+three sets: one for XIP, one for NOBH, and one for normal use.
+Since aufs overrides and redirects these operations, sometimes aufs has
+to change its behaviour according to the branch FS type. More
+importantly, VFS acts differently depending on whether a function (a
+member of the struct) is set or not. It means aufs should have several
+sets of operations and select one among them according to the branch FS
+definition.
+
+In order to solve this problem and not to affect the behaviour of VFS,
+aufs defines these operations dynamically. For instance, aufs defines a
+dummy direct_IO function for struct address_space_operations, but it may
+not actually be set in the address_space_operations. When the branch FS
+doesn't have it, aufs doesn't set it in its own address_space_operations
+while the function definition itself is still alive. So the behaviour
+itself will not change, and an error will be returned when direct_IO is
+not set.
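+
+As a self-contained illustration of the idea (mock types only, not how
+aufs or the VFS declare these operations):
+
+	#include <stdio.h>
+
+	/* Mock operation vector whose members VFS-like callers test for NULL. */
+	struct mock_aops {
+		int (*direct_IO)(void);
+	};
+
+	static int mock_direct_io(void)
+	{
+		return 0; /* would redirect the request to the branch fs */
+	}
+
+	/* Build the vector for one branch: the function definition always
+	 * exists, but the pointer is installed only when the branch has one,
+	 * so callers that test "is it set?" see the same answer as on the
+	 * branch fs itself. */
+	static struct mock_aops make_aops(int branch_has_direct_io)
+	{
+		struct mock_aops a = { NULL };
+
+		if (branch_has_direct_io)
+			a.direct_IO = mock_direct_io;
+		return a;
+	}
+
+	int main(void)
+	{
+		struct mock_aops br_a = make_aops(1);
+		struct mock_aops br_b = make_aops(0);
+
+		printf("branch A: %s\n", br_a.direct_IO ? "direct_IO set" : "error path");
+		printf("branch B: %s\n", br_b.direct_IO ? "direct_IO set" : "error path");
+		return 0;
+	}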
+
+The lifetime of these dynamically generated operation objects is
+maintained by the aufs branch object. When the branch is removed from
+aufs, the reference counter of the object is decremented. When it
+reaches zero, the dynamically generated operation object will be freed.
+
+This approach is designed to support AIO (io_submit), Direct I/O and
+XIP (DAX) mainly.
+Currently this approach is applied to address_space_operations for
+regular files only.
diff --git a/Documentation/power/power-sequence/design.rst b/Documentation/power/power-sequence/design.rst
new file mode 100644
index 00000000000..554608e5f3b
--- /dev/null
+++ b/Documentation/power/power-sequence/design.rst
@@ -0,0 +1,54 @@
+====================================
+Power Sequence Library
+====================================
+
+:Date: Feb, 2017
+:Author: Peter Chen <peter.chen@nxp.com>
+
+
+Introduction
+============
+
+There is a well-known problem: a device may need a power sequence
+before it can be recognized by the related host. Typical examples are
+hard-wired MMC devices and USB devices. The host controller can't know
+what kind of device is on its bus until the power sequence has been
+done, since the related device driver's probe call is determined at
+runtime according to the enumeration results. Besides, the devices may
+have custom power sequences, so a power sequence library which is
+independent of the devices is needed.
+
+Design
+============
+
+The power sequence library consists of the core file and the custom
+power sequence libraries. The core file exports interfaces which are
+called by the host controller driver to carry out a power sequence, and
+by the custom power sequence library files to register their power
+sequence instances on the global power sequence list. A custom power
+sequence library creates a power sequence instance and implements the
+custom power sequence.
+
+Since the power sequence describes the hardware design, the description
+is located in the board description file, e.g. the device tree dts file.
+A specific power sequence belongs to a device, so its description is
+placed under the device node; please refer to:
+Documentation/devicetree/bindings/power/pwrseq/pwrseq-generic.txt
+
+A custom power sequence library allocates one power sequence instance at
+boot time using postcore_initcall; this statically allocated instance is
+used to compare with a device-tree (DT) node to see whether this library
+can be used for the node or not. When a match is found, the core API
+will try to get the resources (->get, implemented by each library) for
+the power sequence; if all resources are obtained, it will try to
+allocate another instance for the next possible request from a host
+driver.
+
+Then the host controller driver can carry out the power-sequence-on
+operation for this DT node, and the library will do the corresponding
+operations, like enabling clocks, toggling GPIOs, etc. The
+power-sequence-off routine will close and free the resources, and is
+called when the parent is removed. The power sequence suspend and resume
+routines can be called from the host driver's suspend and resume
+routines if needed.
+
+The exported interfaces
+========================
+
+.. kernel-doc:: drivers/power/pwrseq/core.c
+   :export:
diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
new file mode 100644
index 00000000000..c0282002a07
--- /dev/null
+++ b/Documentation/scheduler/sched-BFS.txt
@@ -0,0 +1,351 @@
+BFS - The Brain Fuck Scheduler by Con Kolivas.
+
+Goals.
+
+The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
+completely do away with the complex designs of the past for the cpu process
+scheduler and instead implement one that is very simple in basic design.
+The main focus of BFS is to achieve excellent desktop interactivity and
+responsiveness without heuristics and tuning knobs that are difficult to
+understand, impossible to model and predict the effect of, and when tuned to
+one workload cause massive detriment to another.
+
+
+Design summary.
+
+BFS is best described as a single runqueue, O(n) lookup, earliest effective
+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
+deadline first) and my previous Staircase Deadline scheduler. Each component
+shall be described in order to understand the significance of, and reasoning for
+it. The codebase when the first stable version was released was approximately
+9000 lines less code than the existing mainline linux kernel scheduler (in
+2.6.31). This does not even take into account the removal of documentation and
+the cgroups code that is not used.
+
+Design reasoning.
+
+The single runqueue refers to the queued but not running processes for the
+entire system, regardless of the number of CPUs. The reason for going back to
+a single runqueue design is that once multiple runqueues are introduced,
+per-CPU or otherwise, there will be complex interactions as each runqueue will
+be responsible for the scheduling latency and fairness of the tasks only on its
+own runqueue, and to achieve fairness and low latency across multiple CPUs, any
+advantage in throughput of having CPU local tasks causes other disadvantages.
+This is due to requiring a very complex balancing system to at best achieve some
+semblance of fairness across CPUs and can only maintain relatively low latency
+for tasks bound to the same CPUs, not across them. To increase said fairness
+and latency across CPUs, the advantage of local runqueue locking, which makes
+for better scalability, is lost due to having to grab multiple locks.
+
+A significant feature of BFS is that all accounting is done purely based on CPU
+used and nowhere is sleep time used in any way to determine entitlement or
+interactivity. Interactivity "estimators" that use some kind of sleep/run
+algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
+tasks that aren't interactive as being so. The reason for this is that it is
+close to impossible to determine that when a task is sleeping, whether it is
+doing it voluntarily, as in a userspace application waiting for input in the
+form of a mouse click or otherwise, or involuntarily, because it is waiting for
+another thread, process, I/O, kernel activity or whatever. Thus, such an
+estimator will introduce corner cases, and more heuristics will be required to
+cope with those corner cases, introducing more corner cases and failed
+interactivity detection and so on. Interactivity in BFS is built into the design
+by virtue of the fact that tasks that are waking up have not used up their quota
+of CPU time, and have earlier effective deadlines, thereby making it very likely
+they will preempt any CPU bound task of equivalent nice level. See below for
+more information on the virtual deadline mechanism. Even if they do not preempt
+a running task, because the rr interval is guaranteed to have a bounded upper
+limit on how long a task will wait for, it will be scheduled within a timeframe
+that will not cause visible interface jitter.
+
+
+Design details.
+
+Task insertion.
+
+BFS inserts tasks into each relevant queue as an O(1) insertion into a double
+linked list. On insertion, *every* running queue is checked to see if the newly
+queued task can run on any idle queue, or preempt the lowest running task on the
+system. This is how the cross-CPU scheduling of BFS achieves significantly lower
+latency per extra CPU the system has. In this case the lookup is, in the worst
+case scenario, O(n) where n is the number of CPUs on the system.
+
+Data protection.
+
+BFS has one single lock protecting the process local data of every task in the
+global queue. Thus every insertion, removal and modification of task data in the
+global runqueue needs to grab the global lock. However, once a task is taken by
+a CPU, the CPU has its own local data copy of the running process' accounting
+information which only that CPU accesses and modifies (such as during a
+timer tick) thus allowing the accounting data to be updated lockless. Once a
+CPU has taken a task to run, it removes it from the global queue. Thus the
+global queue only ever has, at most,
+
+	(number of tasks requesting cpu time) - (number of logical CPUs) + 1
+
+tasks in the global queue. For example, with 20 runnable tasks on a fully busy
+4-CPU machine, at most 17 tasks sit in the global queue at any instant. This
+value is relevant for the time taken to look up tasks during scheduling. It
+will increase if many tasks have a CPU affinity set in their policy that limits
+which CPUs they're allowed to run on, and those tasks outnumber the CPUs. The +1
+is because when rescheduling a task, the CPU's
+currently running task is put back on the queue. Lookup will be described after
+the virtual deadline mechanism is explained.
+
+Virtual deadline.
+
+The key to achieving low latency, scheduling fairness, and "nice level"
+distribution in BFS is entirely in the virtual deadline mechanism. The one
+tunable in BFS is the rr_interval, or "round robin interval". This is the
+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
+tasks of the same nice level will be running for, or looking at it the other
+way around, the longest duration two tasks of the same nice level will be
+delayed for. When a task requests cpu time, it is given a quota (time_slice)
+equal to the rr_interval and a virtual deadline. The virtual deadline is
+offset from the current time in jiffies by this equation:
+
+	jiffies + (prio_ratio * rr_interval)
+
+The prio_ratio is determined as a ratio compared to the baseline of nice -20
+and increases by 10% per nice level. The deadline is a virtual one only in that
+no guarantee is placed that a task will actually be scheduled by this time, but
+it is used to compare which task should go next. There are three components to
+how a task is next chosen. First is time_slice expiration. If a task runs out
+of its time_slice, it is descheduled, the time_slice is refilled, and the
+deadline reset to that formula above. Second is sleep, where a task no longer
+is requesting CPU for whatever reason. The time_slice and deadline are _not_
+adjusted in this case and are just carried over for when the task is next
+scheduled. Third is preemption, and that is when a newly waking task is deemed
+higher priority than a currently running task on any cpu by virtue of the fact
+that it has an earlier virtual deadline than the currently running task. The
+earlier deadline is the key to which task is next chosen for the first and
+second cases. Once a task is descheduled, it is put back on the queue, and an
+O(n) lookup of all queued-but-not-running tasks is done to determine which has
+the earliest deadline and that task is chosen to receive CPU next.
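+
+A worked example of the deadline formula, under the assumption (one
+reading of the text above) that prio_ratio is 1.0 at nice -20 and grows
+by 10 percent per nice level; the in-kernel code uses its own
+fixed-point table, so treat this purely as a reading aid.
+
+	/* Build with: cc deadline.c -lm */
+	#include <math.h>
+	#include <stdio.h>
+
+	/* Reading of the text: 1.0 at nice -20, plus 10% per nice level. */
+	static double prio_ratio(int nice)
+	{
+		return pow(1.1, nice + 20);
+	}
+
+	int main(void)
+	{
+		const double rr_interval = 6.0; /* the default, used as-is */
+		const double now = 1000.0;      /* stand-in for jiffies */
+		int nice;
+
+		/* The nice 0 task ends up with the earlier deadline, so it is
+		 * the one picked (or the one that preempts) first. */
+		for (nice = 0; nice <= 10; nice += 10)
+			printf("nice %2d: deadline = %.1f\n", nice,
+			       now + prio_ratio(nice) * rr_interval);
+		return 0;
+	}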
+
+The CPU proportion of different nice tasks works out to be approximately the
+
+	(prio_ratio difference)^2
+
+The reason it is squared is that a task's deadline does not change while it is
+running unless it runs out of time_slice. Thus, even if the time actually
+passes the deadline of another task that is queued, it will not get CPU time
+unless the current running task deschedules, and the time "base" (jiffies) is
+constantly moving.
+
+Task lookup.
+
+BFS has 103 priority queues. 100 of these are dedicated to the static priority
+of realtime tasks, and the remaining 3 are, in order of best to worst priority,
+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
+scheduling). When a task of these priorities is queued, a bitmap of running
+priorities is set showing which of these priorities has tasks waiting for CPU
+time. When a CPU is made to reschedule, the lookup for the next task to get
+CPU time is performed in the following way:
+
+First the bitmap is checked to see what static priority tasks are queued. If
+any realtime priorities are found, the corresponding queue is checked and the
+first task listed there is taken (provided CPU affinity is suitable) and lookup
+is complete. If the priority corresponds to a SCHED_ISO task, they are also
+taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
+stage, every task in the runlist that corresponds to that priority is checked
+to see which has the earliest set deadline, and (provided it has suitable CPU
+affinity) it is taken off the runqueue and given the CPU. If a task has an
+expired deadline, it is taken and the rest of the lookup aborted (as they are
+chosen in FIFO order).
+
+Thus, the lookup is O(n) in the worst case only, where n is as described
+earlier, as tasks may be chosen before the whole task list is looked over.
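+
+A simplified toy model of that lookup order is sketched below. The
+priority numbering and the "FIFO for realtime/ISO, earliest deadline for
+the rest" split follow the description above; everything else is
+invented for the example.
+
+	#include <stddef.h>
+	#include <stdio.h>
+
+	#define NPRIO  103 /* 100 realtime + ISO + NORMAL + IDLEPRIO */
+	#define ISO    100
+	#define NORMAL 101
+
+	struct toy_task {
+		unsigned long deadline;
+		struct toy_task *next; /* FIFO order within one priority queue */
+	};
+
+	static struct toy_task *queue[NPRIO];
+
+	/* Realtime and ISO queues are taken FIFO; NORMAL/IDLEPRIO are scanned
+	 * for the earliest (or an already expired) virtual deadline. */
+	static struct toy_task *pick_next(unsigned long now)
+	{
+		int prio;
+
+		for (prio = 0; prio < NPRIO; prio++) {
+			struct toy_task *t = queue[prio], *best = t;
+
+			if (!t)
+				continue; /* the bitmap test in the real design */
+			if (prio <= ISO)
+				return t; /* FIFO: first queued task wins */
+			for (; t; t = t->next) {
+				if (t->deadline <= now)
+					return t; /* expired: stop the O(n) scan */
+				if (t->deadline < best->deadline)
+					best = t;
+			}
+			return best;
+		}
+		return NULL;
+	}
+
+	int main(void)
+	{
+		struct toy_task a = { 130, NULL }, b = { 120, &a }, c = { 150, &b };
+
+		queue[NORMAL] = &c;
+		printf("picked deadline %lu\n", pick_next(100)->deadline); /* 120 */
+		return 0;
+	}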
+
+
+Scalability.
+
+The major limitations of BFS will be that of scalability, as the separate
+runqueue designs will have less lock contention as the number of CPUs rises.
+However they do not scale linearly even with separate runqueues as multiple
+runqueues will need to be locked concurrently on such designs to be able to
+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
+across CPUs, and to achieve low enough latency for tasks on a busy CPU when
+other CPUs would be more suited. BFS has the advantage that it requires no
+balancing algorithm whatsoever, as balancing occurs by proxy simply because
+all CPUs draw off the global runqueue, in priority and deadline order. Despite
+the fact that scalability is _not_ the prime concern of BFS, it both shows very
+good scalability to smaller numbers of CPUs and is likely a more scalable design
+at these numbers of CPUs.
+
+It also has some very low overhead scalability features built into the design
+when it has been deemed their overhead is so marginal that they're worth adding.
+The first is the local copy of the running process' data to the CPU it's running
+on to allow that data to be updated lockless where possible. Then there is
+deference paid to the last CPU a task was running on, by trying that CPU first
+when looking for an idle CPU to use the next time it's scheduled. Finally there
+is the notion of cache locality beyond the last running CPU. The sched_domains
+information is used to determine the relative virtual "cache distance" that
+other CPUs have from the last CPU a task was running on. CPUs with shared
+caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
+as cache local. CPUs without shared caches are treated as not cache local, and
+CPUs on different NUMA nodes are treated as very distant. This "relative cache
+distance" is used by modifying the virtual deadline value when doing lookups.
+Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
+"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
+behind the doubling of deadlines is as follows. The real cost of migrating a
+task from one CPU to another is entirely dependent on the cache footprint of
+the task, how cache intensive the task is, how long it's been running on that
+CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
+how layered the CPU cache is, how fast a context switch is... and so on. In
+other words, it's close to random in the real world where we do more than just
+one sole workload. The only thing we can be sure of is that it's not free. So
+BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
+is more important than cache locality, and cache locality only plays a part
+after that. Doubling the effective deadline is based on the premise that the
+"cache local" CPUs will tend to work on the same tasks up to double the number
+of cache local CPUs, and once the workload is beyond that amount, it is likely
+that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
+is a value I pulled out of my arse.
+
+When choosing an idle CPU for a waking task, the cache locality is determined
+according to where the task last ran and then idle CPUs are ranked from best
+to worst to choose the most suitable idle CPU based on cache locality, NUMA
+node locality and hyperthread sibling busyness. They are chosen in the
+following preference (if idle):
+
+* Same core, idle or busy cache, idle threads
+* Other core, same cache, idle or busy cache, idle threads.
+* Same node, other CPU, idle cache, idle threads.
+* Same node, other CPU, busy cache, idle threads.
+* Same core, busy threads.
+* Other core, same cache, busy threads.
+* Same node, other CPU, busy threads.
+* Other node, other CPU, idle cache, idle threads.
+* Other node, other CPU, busy cache, idle threads.
+* Other node, other CPU, busy threads.
+
+This shows the SMT or "hyperthread" awareness in the design as well which will
+choose a real idle core first before a logical SMT sibling which already has
+tasks on the physical CPU.
+
+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
+However this benchmarking was performed on an earlier design that was far less
+scalable than the current one so it's hard to know how scalable it is in terms
+of both CPUs (due to the global runqueue) and heavily loaded machines (due to
+O(n) lookup) at this stage. Note that in terms of scalability, the number of
+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
+quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
+results are very promising indeed, without needing to tweak any knobs, features
+or options. Benchmark contributions are most welcome.
+
+
+Features
+
+As the initial prime target audience for BFS was the average desktop user, it
+was designed to not need tweaking, tuning or have features set to obtain benefit
+from it. Thus the number of knobs and features has been kept to an absolute
+minimum and should not require extra user input for the vast majority of cases.
+There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
+and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
+to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
+support for CGROUPS. The average user should neither need to know what these
+are, nor should they need to be using them to have good desktop behaviour.
+
+rr_interval
+
+There is only one "scheduler" tunable, the round robin interval. This can be
+accessed in
+
+	/proc/sys/kernel/rr_interval
+
+The value is in milliseconds, and the default value is set to 6 on a
+uniprocessor machine, and automatically set to a progressively higher value on
+multiprocessor machines. The reasoning behind increasing the value on more CPUs
+is that the effective latency is decreased by virtue of there being more CPUs on
+BFS (for reasons explained above), and increasing the value allows for less
+cache contention and more throughput. Valid values are from 1 to 1000.
+Decreasing the value will decrease latencies at the cost of decreasing
+throughput, while increasing it will improve throughput, but at the cost of
+worsening latencies. The accuracy of the rr interval is limited by HZ resolution
+of the kernel configuration. Thus, the worst case latencies are usually slightly
+higher than this actual value. The default value of 6 is not an arbitrary one.
+It is based on the fact that humans can detect jitter at approximately 7ms, so
+aiming for much lower latencies is pointless under most circumstances. It is
+worth noting this fact when comparing the latency performance of BFS to other
+schedulers. Worst case latencies being higher than 7ms are far worse than
+average latencies not being in the microsecond range.
+
+Isochronous scheduling.
+
+Isochronous scheduling is a unique scheduling policy designed to provide
+near-real-time performance to unprivileged (ie non-root) users without the
+ability to starve the machine indefinitely. Isochronous tasks (which means
+"same time") are set using, for example, the schedtool application like so:
+
+	schedtool -I -e amarok
+
+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
+is that it has a priority level between true realtime tasks and SCHED_NORMAL
+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
+rate). However if ISO tasks run for more than a tunable finite amount of time,
+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
+time is the percentage of _total CPU_ available across the machine, configurable
+as a percentage in the following "resource handling" tunable (as opposed to a
+scheduler tunable):
+
+	/proc/sys/kernel/iso_cpu
+
+and is set to 70% by default. It is calculated over a rolling 5 second average.
+Because it is the total CPU available, it means that on a multi CPU machine, it
+is possible to have an ISO task running as realtime scheduling indefinitely on
+just one CPU, as the other CPUs will be available. Setting this to 100 is the
+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
+ability to run any pseudo-realtime tasks.
+
+A feature of BFS is that it detects when an application tries to obtain a
+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
+appropriate privileges to use those policies. When it detects this, it will
+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
+Because some applications constantly set their policy as well as their nice
+level, there is potential for them to undo the override specified by the user
+on the command line of setting the policy to SCHED_ISO. To counter this, once
+a task has been set to SCHED_ISO policy, it needs superuser privileges to set
+it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
+processes and threads will also inherit the ISO policy.
+
+Idleprio scheduling.
+
+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
+ultra low priority tasks to be run in the background that have virtually no
+effect on the foreground tasks. This is ideally suited to distributed computing
+clients (like setiathome, folding, mprime etc) but can also be used to start
+a video encode or so on without any slowdown of other tasks. To prevent this
+policy from grabbing shared resources and holding them indefinitely, if it
+detects a state where the task is waiting on I/O, the machine is about to
+suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
+per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
+it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
+be set to start as SCHED_IDLEPRIO with the schedtool command like so:
+
+	schedtool -D -e ./mprime
+
+Subtick accounting.
+
+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
+the accounting is done by simply determining what is happening at the precise
+moment a timer tick fires off. This becomes increasingly inaccurate as the
+timer tick frequency (HZ) is lowered. It is possible to create an application
+which uses almost 100% CPU, yet by being descheduled at the right time, records
+zero CPU usage. While the main problem with this is that there are possible
+security implications, it is also difficult to determine how much CPU a task
+really does use. BFS tries to use the sub-tick accounting from the TSC clock,
+where possible, to determine real CPU usage. This is not entirely reliable, but
+is far more likely to produce accurate CPU usage data than the existing designs
+and will not show tasks as consuming no CPU usage when they actually are. Thus,
+the amount of CPU reported as being used by BFS will more accurately represent
+how much CPU the task itself is using (as is shown for example by the 'time'
+application), so the reported values may be quite different to other schedulers.
+Values reported as the 'load' are more prone to problems with this design, but
+per process values are closer to real usage. When comparing throughput of BFS
+to other designs, it is important to compare the actual completed work in terms
+of total wall clock time taken and total work done, rather than the reported
+"cpu usage".
+
+
+Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
diff --git a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
new file mode 100644
index 00000000000..ae28b85c999
--- /dev/null
+++ b/Documentation/scheduler/sched-MuQSS.txt
@@ -0,0 +1,373 @@
+MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
+
+MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
+one 8 level skiplist per runqueue, and fine grained locking for much more
+scalability.
+
+
+Goals.
+
+The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
+here on (pronounced mux) is to completely do away with the complex designs of
+the past for the cpu process scheduler and instead implement one that is very
+simple in basic design. The main focus of MuQSS is to achieve excellent desktop
+interactivity and responsiveness without heuristics and tuning knobs that are
+difficult to understand, impossible to model and predict the effect of, and when
+tuned to one workload cause massive detriment to another, while still being
+scalable to many CPUs and processes.
+
+
+Design summary.
+
+MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
+lookup, earliest effective virtual deadline first tickless design, loosely based
+on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
+Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
+Each component shall be described in order to understand the significance of,
+and reasoning for it.
+
+
+Design reasoning.
+
+In BFS, the use of a single runqueue across all CPUs meant that each CPU would
+need to scan the entire runqueue looking for the process with the earliest
+deadline and schedule that next, regardless of which CPU it originally came
+from. This made BFS deterministic with respect to latency and provided
+guaranteed latencies dependent on number of processes and CPUs. The single
+runqueue, however, meant that all CPUs would compete for the single lock
+protecting it, which would lead to increasing lock contention as the number of
+CPUs rose and appeared to limit scalability of common workloads beyond 16
+logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
+increased overhead proportionate to the number of queued processes and led to
+cache thrashing while iterating over the linked list.
+
+MuQSS is an evolution of BFS, designed to maintain the same scheduling
+decision mechanism and be virtually deterministic without relying on the
+constrained design of the single runqueue by splitting out the single runqueue
+to be per-CPU and use skiplists instead of linked lists.
+
+The original reason for going back to a single runqueue design for BFS was that
+once multiple runqueues are introduced, per-CPU or otherwise, there will be
+complex interactions as each runqueue will be responsible for the scheduling
+latency and fairness of the tasks only on its own runqueue, and to achieve
+fairness and low latency across multiple CPUs, any advantage in throughput of
+having CPU local tasks causes other disadvantages. This is due to requiring a
+very complex balancing system to at best achieve some semblance of fairness
+across CPUs and can only maintain relatively low latency for tasks bound to the
+same CPUs, not across them. To increase said fairness and latency across CPUs,
+the advantage of local runqueue locking, which makes for better scalability, is
+lost due to having to grab multiple locks.
+
+MuQSS works around the problems inherent in multiple runqueue designs by
+making its skip lists priority ordered and through novel use of lockless
+examination of each other runqueue it can decide if it should take the earliest
+deadline task from another runqueue for latency reasons, or for CPU balancing
+reasons. It still does not have a balancing system, instead letting
+balancing happen by virtue of its next-task scheduling decisions and its
+task-wakeup CPU choices.
+
+As a further evolution of the design, MuQSS normally configures sharing of
+runqueues in a logical fashion for when CPU resources are shared for improved
+latency and throughput. By default it shares runqueues and locks between
+multicore siblings. Optionally it can be configured to run with sharing of
+SMT siblings only, all SMP packages or no sharing at all. Additionally it can
+be selected at boot time.
+
+
+Design details.
+
+Custom skip list implementation:
+
+To avoid the overhead of building up and tearing down skip list structures,
+the variant used by MuQSS has a number of optimisations making it specific for
+its use case in the scheduler. It uses static arrays of 8 'levels' instead of
+building up and tearing down structures dynamically. This makes each runqueue
+only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
+it means that it scales O(log N) up to 64k x number of logical CPUs which is
+far beyond the realistic task limits each CPU could handle. By being 8 levels
+it also makes the array exactly one cacheline in size. Additionally, each
+skip list node is bidirectional making insertion and removal amortised O(1),
+being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
+first entry in each list at all times with MuQSS, so there is never a need to
+do a search and thus look up is always O(1). In interactive mode, the queues
+will be searched beyond their first entry if the first task is not suitable
+for affinity or SMT nice reasons.
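+
+The shape of such a fixed-height node might look like the sketch below
+(illustrative only, not the MuQSS definition):
+
+	#define SKIPLIST_LEVELS 8
+
+	/* Fixed-height, bidirectional node: exactly the properties described
+	 * above (8 static levels, O(1) removal, best entry always first). */
+	struct toy_skiplist_node {
+		int level;              /* 1..8, chosen at insertion */
+		unsigned long long key; /* static priority, then deadline */
+		struct toy_skiplist_node *next[SKIPLIST_LEVELS];
+		struct toy_skiplist_node *prev[SKIPLIST_LEVELS];
+	};
+
+	/* With the list kept sorted at insertion time, "lookup" is only ever
+	 * reading the level-0 successor of the list head. */
+	struct toy_skiplist_node *toy_first(struct toy_skiplist_node *head)
+	{
+		return head->next[0];
+	}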
+
+Task insertion:
+
+MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
+a custom skip list as described above (based on the original design by William
+Pugh). Insertion is ordered in such a way that there is never a need to do a
+search by ordering tasks according to static priority primarily, and then
+virtual deadline at the time of insertion.
+
+Niffies:
+
+Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
+of nanosecond resolution. Niffies are calculated per-runqueue from the high
+resolution TSC timers, and in order to maintain fairness are synchronised
+between CPUs whenever both runqueues are locked concurrently.
+
+Virtual deadline:
+
+The key to achieving low latency, scheduling fairness, and "nice level"
+distribution in MuQSS is entirely in the virtual deadline mechanism. The one
+tunable in MuQSS is the rr_interval, or "round robin interval". This is the
+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
+tasks of the same nice level will be running for, or looking at it the other
+way around, the longest duration two tasks of the same nice level will be
+delayed for. When a task requests cpu time, it is given a quota (time_slice)
+equal to the rr_interval and a virtual deadline. The virtual deadline is
+offset from the current time in niffies by this equation:
+
+	niffies + (prio_ratio * rr_interval)
+
+The prio_ratio is determined as a ratio compared to the baseline of nice -20
+and increases by 10% per nice level. The deadline is a virtual one only in that
+no guarantee is placed that a task will actually be scheduled by this time, but
+it is used to compare which task should go next. There are three components to
+how a task is next chosen. First is time_slice expiration. If a task runs out
+of its time_slice, it is descheduled, the time_slice is refilled, and the
+deadline reset to that formula above. Second is sleep, where a task no longer
+is requesting CPU for whatever reason. The time_slice and deadline are _not_
+adjusted in this case and are just carried over for when the task is next
+scheduled. Third is preemption, and that is when a newly waking task is deemed
+higher priority than a currently running task on any cpu by virtue of the fact
+that it has an earlier virtual deadline than the currently running task. The
+earlier deadline is the key to which task is next chosen for the first and
+second cases.
+
+The CPU proportion of different nice tasks works out to be approximately the
+
+	(prio_ratio difference)^2
+
+The reason it is squared is that a task's deadline does not change while it is
+running unless it runs out of time_slice. Thus, even if the time actually
+passes the deadline of another task that is queued, it will not get CPU time
+unless the current running task deschedules, and the time "base" (niffies) is
+constantly moving.
+
+Task lookup:
+
+As tasks are already pre-ordered according to anticipated scheduling order in
+the skip lists, lookup for the next suitable task per-runqueue is always a
+matter of simply selecting the first task in the 0th level skip list entry.
+In order to maintain optimal latency and fairness across CPUs, MuQSS does a
+novel examination of every other runqueue in cache locality order, choosing the
+best task across all runqueues. This provides near-determinism of how long any
+task across the entire system may wait before receiving CPU time. The other
+runqueues are first examined locklessly and then trylocked to minimise the
+potential lock contention if they are likely to have a suitable better task.
+Each other runqueue lock is only held for as long as it takes to examine the
+entry for suitability. In "interactive" mode, the default setting, MuQSS will
+look for the best deadline task across all CPUs, while in !interactive mode,
+it will only select a better deadline task from another CPU if it is more
+heavily laden than the current one.
+
+Lookup is therefore O(k) where k is the number of CPUs.
+
+
+Latency.
+
+Through the use of virtual deadlines to govern the scheduling order of normal
+tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
+the rr_interval tunable which is set to 6ms by default. This means that the
+longest a CPU bound task will wait for more CPU is proportional to the number
+of running tasks and in the common case of 0-2 running tasks per CPU, will be
+under the 7ms threshold for human perception of jitter. Additionally, as newly
+woken tasks will have an early deadline from their previous runtime, the very
+tasks that are usually latency sensitive will have the shortest interval for
+activation, usually preempting any existing CPU bound tasks.
+
+Tickless expiry:
+
+A feature of MuQSS is that it is not tied to the resolution of the chosen tick
+rate in Hz, instead depending entirely on the high resolution timers where
+possible for sub-millisecond accuracy on timeouts regardless of the underlying
+tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
+such as 100 by default, benefiting from the improved throughput and lower
+power usage it provides. Another advantage of this approach is that in
+combination with the Full No HZ option, which disables ticks on running task
+CPUs instead of just idle CPUs, the tick can be disabled at all times
+regardless of how many tasks are running instead of being limited to just one
+running task. Note that this option is NOT recommended for regular desktop
+users.
+
+
+Scalability and balancing.
+
+Unlike traditional approaches where balancing is a combination of CPU selection
+at task wakeup and intermittent balancing based on a vast array of rules set
+according to architecture, busyness calculations and special case management,
+MuQSS indirectly balances on the fly at task wakeup and next task selection.
+During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
+each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
+Additionally it selects any idle CPUs, if they are available, at any time over
+busy CPUs according to the following preference:
+
+ * Same thread, idle or busy cache, idle or busy threads
+ * Other core, same cache, idle or busy cache, idle threads.
+ * Same node, other CPU, idle cache, idle threads.
+ * Same node, other CPU, busy cache, idle threads.
+ * Other core, same cache, busy threads.
+ * Same node, other CPU, busy threads.
+ * Other node, other CPU, idle cache, idle threads.
+ * Other node, other CPU, busy cache, idle threads.
+ * Other node, other CPU, busy threads.
+
+Mux is therefore SMT, MC and NUMA aware without the need for extra
+intermittent balancing to maintain CPUs busy and make the most of cache
+coherency.
+
+
+Features
+
+As the initial prime target audience for MuQSS was the average desktop user, it
+was designed to not need tweaking, tuning or have features set to obtain benefit
+from it. Thus the number of knobs and features has been kept to an absolute
+minimum and should not require extra user input for the vast majority of cases.
+There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
+interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
+policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
+does _not_ now feature is support for CGROUPS. The average user should neither
+need to know what these are, nor should they need to be using them to have good
+desktop behaviour. However since some applications refuse to work without
+cgroups, one can enable them with MuQSS as a stub and the filesystem will be
+created which will allow the applications to work.
+
+rr_interval:
+
+	/proc/sys/kernel/rr_interval
+
+The value is in milliseconds, and the default value is set to 6. Valid values
+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
+decreasing throughput, while increasing it will improve throughput, but at the
+cost of worsening latencies. It is based on the fact that humans can detect
+jitter at approximately 7ms, so aiming for much lower latencies is pointless
+under most circumstances. It is worth noting this fact when comparing the
+latency performance of MuQSS to other schedulers. Worst case latencies being
+higher than 7ms are far worse than average latencies not being in the
+microsecond range.
+
+interactive:
+
+	/proc/sys/kernel/interactive
+
+The value is a simple boolean of 1 for on and 0 for off and is set to on by
+default. Disabling this will disable the near-determinism of MuQSS when
+selecting the next task by not examining all CPUs for the earliest deadline
+task, or which CPU to wake to, instead prioritising CPU balancing for improved
+throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
+instead of across the whole system.
+
+Runqueue sharing.
+
+By default MuQSS chooses to share runqueue resources (specifically the skip
+list and locking) between multicore siblings. It is configurable at build time
+to select between None, SMT, MC and SMP, corresponding to no sharing, sharing
+only between simultaneous multithreading siblings, multicore siblings, or
+symmetric multiprocessing physical packages. Additionally it can be set at
+boot time with the use of the rqshare parameter. The reason for configurability
+is that some architectures have CPUs with many multicore siblings (>= 16)
+where it may be detrimental to throughput to share runqueues and another
+sharing option may be desirable. Additionally, more sharing than usual can
+improve latency on a system-wide level at the expense of throughput if desired.
+
+The options are:
+none, smt, mc, smp
+
+eg:
+	rqshare=mc
+
+Isochronous scheduling:
+
+Isochronous scheduling is a unique scheduling policy designed to provide
+near-real-time performance to unprivileged (ie non-root) users without the
+ability to starve the machine indefinitely. Isochronous tasks (which means
+"same time") are set using, for example, the schedtool application like so:
+
+	schedtool -I -e amarok
+
+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
+is that it has a priority level between true realtime tasks and SCHED_NORMAL
+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
+rate). However if ISO tasks run for more than a tunable finite amount of time,
+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
+time is the percentage of CPU available per CPU, configurable as a percentage in
+the following "resource handling" tunable (as opposed to a scheduler tunable):
+
+iso_cpu:
+
+	/proc/sys/kernel/iso_cpu
+
+and is set to 70% by default. It is calculated over a rolling 5 second average.
+Because it is the total CPU available, it means that on a multi CPU machine, it
+is possible to have an ISO task running as realtime scheduling indefinitely on
+just one CPU, as the other CPUs will be available. Setting this to 100 is the
+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
+ability to run any pseudo-realtime tasks.
+
+A feature of MuQSS is that it detects when an application tries to obtain a
+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
+appropriate privileges to use those policies. When it detects this, it will
+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
+
+
+Idleprio scheduling:
+
+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
+ultra low priority tasks to be run in the background that have virtually no
+effect on the foreground tasks. This is ideally suited to distributed computing
+clients (like setiathome, folding, mprime etc) but can also be used to start a
+video encode or so on without any slowdown of other tasks. To prevent this policy
+from grabbing shared resources and holding them indefinitely, if it detects a
+state where the task is waiting on I/O, the machine is about to suspend to ram
+and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
+been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
+superuser privileges since it is effectively a lower scheduling policy. Tasks
+can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
+
+	schedtool -D -e ./mprime
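+
+Equivalently, an already-running task can be moved to idleprio directly with
+sched_setscheduler(); as with SCHED_ISO above, the numeric SCHED_IDLEPRIO value
+is not in the glibc headers, and the 5 used below (the mainline SCHED_IDLE
+slot) is an assumption about how MuQSS numbers it. A minimal sketch:
+
+	#include <sched.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	#ifndef SCHED_IDLEPRIO
+	#define SCHED_IDLEPRIO 5	/* assumed value, see note above */
+	#endif
+
+	int main(int argc, char **argv)
+	{
+		struct sched_param sp = { .sched_priority = 0 };
+		pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 0;
+
+		/* 0 means "this process"; otherwise use the pid given. */
+		if (sched_setscheduler(pid, SCHED_IDLEPRIO, &sp) == -1)
+			perror("sched_setscheduler");
+		return 0;
+	}
+
+Remember that, as described above, moving the task back to SCHED_NORMAL
+afterwards requires superuser privileges.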
+
+Subtick accounting:
+
+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
+the accounting is done by simply determining what is happening at the precise
+moment a timer tick fires off. This becomes increasingly inaccurate as the timer
+tick frequency (HZ) is lowered. It is possible to create an application which
+uses almost 100% CPU, yet by being descheduled at the right time, records zero
+CPU usage. While the main problem with this is that there are possible security
+implications, it is also difficult to determine how much CPU a task really does
+use. MuQSS uses sub-tick accounting from the TSC clock to determine real CPU
+usage. Thus, the amount of CPU reported as being used by MuQSS will more
+accurately represent how much CPU the task itself is using (as is shown for
+example by the 'time' application), so the reported values may be quite
+different to other schedulers. When comparing throughput of MuQSS to other
+designs, it is important to compare the actual completed work in terms of total
+wall clock time taken and total work done, rather than the reported "cpu usage".
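+
+A minimal sketch (not MuQSS-specific) of timing a workload by wall clock
+rather than by reported CPU usage:
+
+	#include <stdio.h>
+	#include <time.h>
+
+	static void workload(void)
+	{
+		/* stand-in for the real benchmark */
+		volatile unsigned long i;
+		for (i = 0; i < 100000000UL; i++)
+			;
+	}
+
+	int main(void)
+	{
+		struct timespec a, b;
+
+		clock_gettime(CLOCK_MONOTONIC, &a);
+		workload();
+		clock_gettime(CLOCK_MONOTONIC, &b);
+		printf("wall clock: %.3f s\n",
+		       (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9);
+		return 0;
+	}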
+
+Symmetric MultiThreading (SMT) aware nice:
+
+SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
+logical CPU count rises by adding thread units to each CPU core, allowing more
+than one task to be run simultaneously on the same core, the disadvantage is
+that the CPU power is shared between the tasks rather than summing to the power
+of two CPUs. The practical upshot of this is that two tasks running on
+separate threads of the same core run significantly slower than if they had one
+core each to run on. While smart CPU selection allows each task to have a core
+to itself whenever available (as is done on MuQSS), it cannot offset the
+slowdown that occurs when the cores are all loaded and only a thread is left.
+Most of the time this is harmless as the CPU is effectively overloaded at this
+point and the extra thread is of benefit. However when running a niced task in
+the presence of an un-niced task (say nice 19 vs nice 0), the niced task gets
+precisely the same amount of CPU power as the un-niced one. MuQSS has an
+optional configuration feature known as SMT-NICE which selectively idles the
+secondary niced thread for a period proportional to the nice difference,
+allowing CPU distribution according to nice level to be maintained, at the
+expense of a small amount of extra overhead. If this is configured in on a
+machine without SMT threads, the overhead is minimal.
+
+
+Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
diff --git a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt
new file mode 100644
index 00000000000..be19a312700
--- /dev/null
+++ b/Documentation/vm/uksm.txt
@@ -0,0 +1,61 @@
+The Ultra Kernel Samepage Merging feature
+----------------------------------------------
+/*
+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
+ *
+ * This is an improvement upon KSM. Some basic data structures and routines
+ * are borrowed from ksm.c .
+ *
+ * Its new features:
+ * 1. Full system scan:
+ *      It automatically scans all user processes' anonymous VMAs. Kernel-user
+ *      interaction to submit a memory area to KSM is no longer needed.
+ *
+ * 2. Rich area detection:
+ *      It automatically detects rich areas containing abundant duplicated
+ *      pages. Rich areas are given a full scan speed. Poor areas are
+ *      sampled at a reasonable speed with very low CPU consumption.
+ *
+ * 3. Ultra Per-page scan speed improvement:
+ *      A new hash algorithm is proposed. As a result, on a machine with
+ *      Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHz DDR2 main memory, it
+ *      can scan memory areas that do not contain duplicated pages at a speed
+ *      of 627MB/sec ~ 2445MB/sec and can merge duplicated areas at a speed of
+ *      477MB/sec ~ 923MB/sec.
+ *
+ * 4. Thrashing area avoidance:
+ *      A thrashing area (a VMA that has frequent KSM page break-outs) can be
+ *      filtered out. My benchmark shows it's more efficient than KSM's per-page
+ *      hash value based volatile page detection.
+ *
+ *
+ * 5. Misc changes upon KSM:
+ *      * It has a fully x86-optimized memcmp dedicated for 4-byte-aligned page
+ *        comparison. It's much faster than the default C version on x86.
+ *      * rmap_item now has a struct page * member to loosely cache an
+ *        address-->page mapping, which greatly reduces the number of
+ *        time-costly follow_page() calls.
+ *      * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
+ *      * try_to_merge_two_pages() now can revert a pte if it fails. No
+ *        break_ksm is needed for this case.
+ *
+ * 6. Full Zero Page consideration (contributed by Figo Zhang)
+ *    Now uksmd considers full zero pages as special pages and merges them into
+ *    a special unswappable uksm zero page.
+ */
+
+ChangeLog:
+
+2012-05-05 The creation of this Doc
+2012-05-08 UKSM 0.1.1.1 libc crash bug fix, api clean up, doc clean up.
+2012-05-28 UKSM 0.1.1.2 bug fix release
+2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2
+2012-07-2  UKSM 0.1.2-beta2
+2012-07-10 UKSM 0.1.2-beta3
+2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization.
+2012-10-13 UKSM 0.1.2.1 Bug fixes.
+2012-12-31 UKSM 0.1.2.2 Minor bug fixes.
+2014-07-02 UKSM 0.1.2.3 Fix a "__this_cpu_read() in preemptible" bug.
+2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger annoying warnings.
+2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
+2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration.
diff --git a/MAINTAINERS b/MAINTAINERS
index 50659d76976..51682ee15ac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2946,6 +2946,19 @@ F:	include/linux/audit.h
 F:	include/uapi/linux/audit.h
 F:	kernel/audit*
 
+AUFS (advanced multi layered unification filesystem) FILESYSTEM
+M:	"J. R. Okajima" <hooanon05g@gmail.com>
+L:	aufs-users@lists.sourceforge.net (members only)
+L:	linux-unionfs@vger.kernel.org
+W:	http://aufs.sourceforge.net
+T:	git://github.com/sfjro/aufs4-linux.git
+S:	Supported
+F:	Documentation/filesystems/aufs/
+F:	Documentation/ABI/testing/debugfs-aufs
+F:	Documentation/ABI/testing/sysfs-aufs
+F:	fs/aufs/
+F:	include/uapi/linux/aufs_type.h
+
 AUXILIARY DISPLAY DRIVERS
 M:	Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
 S:	Maintained
@@ -13529,6 +13542,15 @@ F:	drivers/firmware/psci/
 F:	include/linux/psci.h
 F:	include/uapi/linux/psci.h
 
+POWER SEQUENCE LIBRARY
+M:	Peter Chen <Peter.Chen@nxp.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/power/pwrseq/
+F:	drivers/power/pwrseq/
+F:	include/linux/power/pwrseq.h
+
 POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
 M:	Sebastian Reichel <sre@kernel.org>
 L:	linux-pm@vger.kernel.org
diff --git a/Makefile b/Makefile
index 2b9400e21a4..21333cebf11 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 7
 SUBLEVEL = 10
-EXTRAVERSION = -gnu
-NAME = Kleptomaniac Octopus
+EXTRAVERSION = -gnu-pck1
+NAME = Tea Storm
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -899,9 +899,6 @@ KBUILD_CFLAGS	+= $(call cc-option,-fmerge-constants)
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
 
-# conserve stack if available
-KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
-
 # Prohibit date/time macros, which would make the build non-deterministic
 KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index ef179033a7c..14b576a531a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -665,6 +665,8 @@ config HZ
 	default 1200 if HZ_1200
 	default 1024
 
+source "kernel/Kconfig.MuQSS"
+
 config SRM_ENV
 	tristate "SRM environment through procfs"
 	depends on PROC_FS
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c77c93c485a..c16a89549ff 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1237,6 +1237,8 @@ config SCHED_SMT
 	  MultiThreading at a cost of slightly increased overhead in some
 	  places. If unsure say N here.
 
+source "kernel/Kconfig.MuQSS"
+
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 0b3cd7a33a2..87a8b8db341 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -29,6 +29,10 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
+ifeq ($(CONFIG_OF_OVERLAY),y)
+DTC_FLAGS += -@
+endif
+
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
 cmd_deflate_xip_data = $(CONFIG_SHELL) -c \
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 8452753efeb..3393a10741b 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -204,6 +204,12 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 			hex_str(serno, atag->u.serialnr.high);
 			hex_str(serno+8, atag->u.serialnr.low);
 			setprop_string(fdt, "/", "serial-number", serno);
+		} else if (atag->hdr.tag == ATAG_MV_UBOOT) {
+			/* This ATAG provides up to 4 MAC addresses */
+			setprop(fdt, "eth0", "mac-address", atag->u.mv_uboot.macAddr[0], 6);
+			setprop(fdt, "eth1", "mac-address", atag->u.mv_uboot.macAddr[1], 6);
+			setprop(fdt, "eth2", "mac-address", atag->u.mv_uboot.macAddr[2], 6);
+			setprop(fdt, "eth3", "mac-address", atag->u.mv_uboot.macAddr[3], 6);
 		}
 	}
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index e8dd9920139..edb5520891d 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
+
+ifeq ($(CONFIG_OF_OVERLAY),y)
+DTC_FLAGS += -@
+endif
+
 dtb-$(CONFIG_ARCH_ALPINE) += \
 	alpine-db.dtb
 dtb-$(CONFIG_MACH_ARTPEC6) += \
@@ -609,10 +614,12 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
 	imx6ul-tx6ul-0010.dtb \
 	imx6ul-tx6ul-0011.dtb \
 	imx6ul-tx6ul-mainboard.dtb \
+	imx6ul-usbarmory.dtb \
 	imx6ull-14x14-evk.dtb \
 	imx6ull-colibri-eval-v3.dtb \
 	imx6ull-colibri-wifi-eval-v3.dtb \
 	imx6ull-opos6uldev.dtb \
+	imx6ull-usbarmory.dtb \
 	imx6ull-phytec-segin-ff-rdk-nand.dtb \
 	imx6ull-phytec-segin-ff-rdk-emmc.dtb \
 	imx6ull-phytec-segin-lc-rdk-nand.dtb \
@@ -776,6 +783,12 @@ dtb-$(CONFIG_SOC_AM33XX) += \
 	am335x-base0033.dtb \
 	am335x-bone.dtb \
 	am335x-boneblack.dtb \
+	am335x-bonegreen-wireless-uboot-univ.dtb \
+	am335x-boneblack-uboot-univ.dtb \
+	am335x-bone-uboot-univ.dtb \
+	am335x-boneblack-uboot.dtb \
+	am335x-bonegreen-gateway.dtb \
+	am335x-abbbi.dtb \
 	am335x-boneblack-wireless.dtb \
 	am335x-boneblue.dtb \
 	am335x-bonegreen.dtb \
@@ -810,6 +823,7 @@ dtb-$(CONFIG_ARCH_OMAP4) += \
 	omap4-duovero-parlor.dtb \
 	omap4-kc1.dtb \
 	omap4-panda.dtb \
+	omap4-panda-es-b3.dtb \
 	omap4-panda-a4.dtb \
 	omap4-panda-es.dtb \
 	omap4-sdp.dtb \
@@ -1265,6 +1279,7 @@ dtb-$(CONFIG_MACH_ARMADA_370) += \
 	armada-370-db.dtb \
 	armada-370-dlink-dns327l.dtb \
 	armada-370-mirabox.dtb \
+	armada-370-smileplug.dtb \
 	armada-370-netgear-rn102.dtb \
 	armada-370-netgear-rn104.dtb \
 	armada-370-rd.dtb \
diff --git a/arch/arm/boot/dts/am335x-abbbi.dts b/arch/arm/boot/dts/am335x-abbbi.dts
new file mode 100644
index 00000000000..870ce856747
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-abbbi.dts
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright 2015 Konsulko Group
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+
+/ {
+	model = "Arrow BeagleBone Black Industrial";
+	compatible = "arrow,am335x-abbbi", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-abbbi.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+};
+
+&ldo3_reg {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmcsd_fixed>;
+};
+
+&mmc2 {
+	vmmc-supply = <&vmmcsd_fixed>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_pins>;
+	bus-width = <8>;
+	status = "okay";
+};
+
+&am33xx_pinmux {
+	adi_hdmi_bbbi_pins: adi_hdmi_bbbi_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+		>;
+	};
+
+	mcasp0_pins: mcasp0_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0) /* mcasp0_ahclkx.mcasp0_ahclkx */
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2 */
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+		>;
+	};
+};
+
+&lcdc {
+	status = "okay";
+
+	/* If you want to get 24 bit RGB and 16 BGR mode instead of the
+	 * current 16 bit RGB and 24 BGR modes, set the property
+	 * below to "crossed" and uncomment the video-ports property
+	 * in the adv7511 node.
+	 */
+	blue-and-red-wiring = "straight";
+
+	port {
+		lcdc_0: endpoint@0 {
+			remote-endpoint = <&hdmi_0>;
+		};
+	};
+};
+
+&i2c0 {
+	adv7511: adv7511@39 {
+		compatible = "adi,adv7511";
+		reg = <0x39>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&adi_hdmi_bbbi_pins>;
+
+		/* Convert 24bit BGR to RGB, e.g. cross red and blue wiring */
+		/* video-ports = <0x234501>; */
+
+		#sound-dai-cells = <0>;
+
+		ports {
+			port@0 {
+				hdmi_0: endpoint@0 {
+					remote-endpoint = <&lcdc_0>;
+				};
+			};
+		};
+	};
+};
+
+&rtc {
+	system-power-controller;
+};
+
+&mcasp0	{
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcasp0_pins>;
+	status = "okay";
+	op-mode = <0>;	/* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
+			0 0 1 0
+		>;
+	tx-num-evt = <1>;
+	rx-num-evt = <1>;
+};
+
+/ {
+	clk_mcasp0_fixed: clk_mcasp0_fixed {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24576000>;
+	};
+
+	clk_mcasp0: clk_mcasp0 {
+		#clock-cells = <0>;
+		compatible = "gpio-gate-clock";
+		clocks = <&clk_mcasp0_fixed>;
+		enable-gpios = <&gpio1 27 0>; /* BeagleBone Black Clk enable on GPIO1_27 */
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "TI BeagleBone Black";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&dailink0_master>;
+		simple-audio-card,frame-master = <&dailink0_master>;
+
+		dailink0_master: simple-audio-card,cpu {
+			sound-dai = <&mcasp0>;
+			clocks = <&clk_mcasp0>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&adv7511>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bone-common-no-capemgr.dtsi b/arch/arm/boot/dts/am335x-bone-common-no-capemgr.dtsi
new file mode 100644
index 00000000000..ce9f7cf3d25
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-common-no-capemgr.dtsi
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+/ {
+	cpus {
+		cpu@0 {
+			cpu0-supply = <&dcdc2_reg>;
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>; /* 256 MB */
+	};
+
+	chosen {
+		stdout-path = &uart0;
+	};
+
+	leds {
+		pinctrl-names = "default";
+		pinctrl-0 = <&user_leds_s0>;
+
+		compatible = "gpio-leds";
+
+		led2 {
+			label = "beaglebone:green:usr0";
+			gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+
+		led3 {
+			label = "beaglebone:green:usr1";
+			gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc0";
+			default-state = "off";
+		};
+
+		led4 {
+			label = "beaglebone:green:usr2";
+			gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "cpu0";
+			default-state = "off";
+		};
+
+		led5 {
+			label = "beaglebone:green:usr3";
+			gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc1";
+			default-state = "off";
+		};
+	};
+
+	vmmcsd_fixed: fixedregulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmcsd_fixed";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+};
+
+&am33xx_pinmux {
+	user_leds_s0: user_leds_s0 {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7)	/* gpmc_a5.gpio1_21 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLUP, MUX_MODE7)	/* gpmc_a6.gpio1_22 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT_PULLDOWN, MUX_MODE7)	/* gpmc_a7.gpio1_23 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT_PULLUP, MUX_MODE7)	/* gpmc_a8.gpio1_24 */
+		>;
+	};
+
+	i2c0_pins: pinmux_i2c0_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)	/* i2c0_sda.i2c0_sda */
+			AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)	/* i2c0_scl.i2c0_scl */
+		>;
+	};
+
+	i2c2_pins: pinmux_i2c2_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* uart1_ctsn.i2c2_sda */
+			AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* uart1_rtsn.i2c2_scl */
+		>;
+	};
+
+	uart0_pins: pinmux_uart0_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+		>;
+	};
+
+	cpsw_default: cpsw_default {
+		pinctrl-single,pins = <
+			/* Slave 1 */
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE0)
+		>;
+	};
+
+	cpsw_sleep: cpsw_sleep {
+		pinctrl-single,pins = <
+			/* Slave 1 reset value */
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+		>;
+	};
+
+	davinci_mdio_default: davinci_mdio_default {
+		pinctrl-single,pins = <
+			/* MDIO */
+			AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
+		>;
+	};
+
+	davinci_mdio_sleep: davinci_mdio_sleep {
+		pinctrl-single,pins = <
+			/* MDIO reset value */
+			AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+			AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+		>;
+	};
+
+	mmc1_pins: pinmux_mmc1_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7)		/* spio0_cs1.gpio0_6 */
+			AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+			AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+		>;
+	};
+
+	emmc_pins: pinmux_emmc_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+		>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+
+	status = "okay";
+};
+
+&usb0 {
+	dr_mode = "peripheral";
+	interrupts-extended = <&intc 18 &tps 0>;
+	interrupt-names = "mc", "vbus";
+};
+
+&usb1 {
+	dr_mode = "host";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+
+	tps: tps@24 {
+		reg = <0x24>;
+	};
+
+	baseboard_eeprom: baseboard_eeprom@50 {
+		compatible = "atmel,24c256";
+		reg = <0x50>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+		baseboard_data: baseboard_data@0 {
+			reg = <0 0x100>;
+		};
+	};
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins>;
+
+	status = "okay";
+	clock-frequency = <100000>;
+};
+
+
+/include/ "tps65217.dtsi"
+
+&tps {
+	/*
+	 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
+	 * mode") at poweroff.  Most BeagleBone versions do not support RTC-only
+	 * mode and risk hardware damage if this mode is entered.
+	 *
+	 * For details, see linux-omap mailing list May 2015 thread
+	 *	[PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
+	 * In particular, messages:
+	 *	http://www.spinics.net/lists/linux-omap/msg118585.html
+	 *	http://www.spinics.net/lists/linux-omap/msg118615.html
+	 *
+	 * You can override this later with
+	 *	&tps {  /delete-property/ ti,pmic-shutdown-controller;  }
+	 * if you want to use RTC-only mode and made sure you are not affected
+	 * by the hardware problems. (Tip: double-check by performing a current
+	 * measurement after shutdown: it should be less than 1 mA.)
+	 */
+
+	interrupts = <7>; /* NMI */
+	interrupt-parent = <&intc>;
+
+	ti,pmic-shutdown-controller;
+
+	charger {
+		status = "okay";
+	};
+
+	pwrbutton {
+		status = "okay";
+	};
+
+	regulators {
+		dcdc1_reg: regulator@0 {
+			regulator-name = "vdds_dpr";
+			regulator-always-on;
+		};
+
+		dcdc2_reg: regulator@1 {
+			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <1351500>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		dcdc3_reg: regulator@2 {
+			/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <1150000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		ldo1_reg: regulator@3 {
+			regulator-name = "vio,vrtc,vdds";
+			regulator-always-on;
+		};
+
+		ldo2_reg: regulator@4 {
+			regulator-name = "vdd_3v3aux";
+			regulator-always-on;
+		};
+
+		ldo3_reg: regulator@5 {
+			regulator-name = "vdd_1v8";
+			regulator-always-on;
+		};
+
+		ldo4_reg: regulator@6 {
+			regulator-name = "vdd_3v3a";
+			regulator-always-on;
+		};
+	};
+};
+
+&cpsw_emac0 {
+	phy-handle = <&ethphy0>;
+	phy-mode = "mii";
+};
+
+&mac {
+	slaves = <1>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&cpsw_default>;
+	pinctrl-1 = <&cpsw_sleep>;
+	status = "okay";
+};
+
+&davinci_mdio {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&davinci_mdio_default>;
+	pinctrl-1 = <&davinci_mdio_sleep>;
+	status = "okay";
+
+	ethphy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
+&mmc1 {
+	status = "okay";
+	bus-width = <0x4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>;
+	cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+};
+
+&aes {
+	status = "okay";
+};
+
+&sham {
+	status = "okay";
+};
+
+&rtc {
+	clocks = <&clk_32768_ck>, <&clk_24mhz_clkctrl AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL 0>;
+	clock-names = "ext-clk", "int-clk";
+	system-power-controller;
+};
diff --git a/arch/arm/boot/dts/am335x-bone-common-univ.dtsi b/arch/arm/boot/dts/am335x-bone-common-univ.dtsi
new file mode 100644
index 00000000000..2c448f836ef
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-common-univ.dtsi
@@ -0,0 +1,2940 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+&am33xx_pinmux {
+	/************************/
+	/* P8 Header */
+	/************************/
+
+	/* P8_01                GND */
+
+	/* P8_02                GND */
+
+
+	/* P8_03 (ZCZ ball R9) emmc */
+	P8_03_default_pin: pinmux_P8_03_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pin: pinmux_P8_03_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pu_pin: pinmux_P8_03_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pd_pin: pinmux_P8_03_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_input_pin: pinmux_P8_03_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad6.gpio1_6 */
+
+	/* P8_04 (ZCZ ball T9) emmc */
+	P8_04_default_pin: pinmux_P8_04_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pin: pinmux_P8_04_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pu_pin: pinmux_P8_04_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pd_pin: pinmux_P8_04_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_input_pin: pinmux_P8_04_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad7.gpio1_7 */
+
+	/* P8_05 (ZCZ ball R8) emmc */
+	P8_05_default_pin: pinmux_P8_05_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pin: pinmux_P8_05_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pu_pin: pinmux_P8_05_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pd_pin: pinmux_P8_05_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_input_pin: pinmux_P8_05_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad2.gpio1_2 */
+
+	/* P8_06 (ZCZ ball T8) emmc */
+	P8_06_default_pin: pinmux_P8_06_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pin: pinmux_P8_06_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pu_pin: pinmux_P8_06_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pd_pin: pinmux_P8_06_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_input_pin: pinmux_P8_06_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad3.gpio1_3 */
+
+	/* P8_07 (ZCZ ball R7) gpio2_2 */
+	P8_07_default_pin: pinmux_P8_07_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pin: pinmux_P8_07_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pu_pin: pinmux_P8_07_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pd_pin: pinmux_P8_07_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_input_pin: pinmux_P8_07_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_advn_ale.gpio2_2 */
+	P8_07_timer_pin: pinmux_P8_07_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_advn_ale.timer4 */
+
+	/* P8_08 (ZCZ ball T7) gpio2_3 */
+	P8_08_default_pin: pinmux_P8_08_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pin: pinmux_P8_08_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pu_pin: pinmux_P8_08_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pd_pin: pinmux_P8_08_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_input_pin: pinmux_P8_08_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_oen_ren.gpio2_3 */
+	P8_08_timer_pin: pinmux_P8_08_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_oen_ren.timer7 */
+
+	/* P8_09 (ZCZ ball T6) gpio2_5 */
+	P8_09_default_pin: pinmux_P8_09_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pin: pinmux_P8_09_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pu_pin: pinmux_P8_09_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pd_pin: pinmux_P8_09_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_input_pin: pinmux_P8_09_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_timer_pin: pinmux_P8_09_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_be0n_cle.timer5 */
+
+	/* P8_10 (ZCZ ball U6) gpio2_4 */
+	P8_10_default_pin: pinmux_P8_10_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pin: pinmux_P8_10_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pu_pin: pinmux_P8_10_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pd_pin: pinmux_P8_10_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_input_pin: pinmux_P8_10_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wen.gpio2_4 */
+	P8_10_timer_pin: pinmux_P8_10_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_wen.timer6 */
+
+	/* P8_11 (ZCZ ball R12) gpio1_13 */
+	P8_11_default_pin: pinmux_P8_11_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pin: pinmux_P8_11_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pu_pin: pinmux_P8_11_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pd_pin: pinmux_P8_11_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_input_pin: pinmux_P8_11_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad13.gpio1_13 */
+	P8_11_qep_pin: pinmux_P8_11_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad13.eqep2b_in */
+	P8_11_pruout_pin: pinmux_P8_11_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad13.pru0_out15 */
+
+	/* P8_12 (ZCZ ball T12) gpio1_12 */
+	P8_12_default_pin: pinmux_P8_12_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pin: pinmux_P8_12_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pu_pin: pinmux_P8_12_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pd_pin: pinmux_P8_12_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_input_pin: pinmux_P8_12_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad12.gpio1_12 */
+	P8_12_qep_pin: pinmux_P8_12_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad12.eqep2a_in */
+	P8_12_pruout_pin: pinmux_P8_12_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad12.pru0_out14 */
+
+	/* P8_13 (ZCZ ball T10) gpio0_23 */
+	P8_13_default_pin: pinmux_P8_13_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pin: pinmux_P8_13_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pu_pin: pinmux_P8_13_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pd_pin: pinmux_P8_13_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_input_pin: pinmux_P8_13_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad9.gpio0_23 */
+	P8_13_pwm_pin: pinmux_P8_13_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad9.ehrpwm2b */
+
+	/* P8_14 (ZCZ ball T11) gpio0_26 */
+	P8_14_default_pin: pinmux_P8_14_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P8_14_gpio_pin: pinmux_P8_14_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad10.gpio0_26 */
+	P8_14_gpio_pu_pin: pinmux_P8_14_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P8_14_gpio_pd_pin: pinmux_P8_14_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P8_14_gpio_input_pin: pinmux_P8_14_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad10.gpio0_26 */
+	P8_14_pwm_pin: pinmux_P8_14_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad10.ehrpwm2_tripzone_input */
+
+	/* P8_15 (ZCZ ball U13) gpio1_15 */
+	P8_15_default_pin: pinmux_P8_15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pin: pinmux_P8_15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pu_pin: pinmux_P8_15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pd_pin: pinmux_P8_15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_input_pin: pinmux_P8_15_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad15.gpio1_15 */
+	P8_15_qep_pin: pinmux_P8_15_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad15.eqep2_strobe */
+	P8_15_pru_ecap_pin: pinmux_P8_15_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_ad15.pr1_ecap0_ecap_capin_apwm_o */
+	P8_15_pruin_pin: pinmux_P8_15_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad15.pru0_in15 */
+
+	/* P8_16 (ZCZ ball V13) gpio1_14 */
+	P8_16_default_pin: pinmux_P8_16_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pin: pinmux_P8_16_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pu_pin: pinmux_P8_16_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pd_pin: pinmux_P8_16_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_input_pin: pinmux_P8_16_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad14.gpio1_14 */
+	P8_16_qep_pin: pinmux_P8_16_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad14.eqep2_index */
+	P8_16_pruin_pin: pinmux_P8_16_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad14.pru0_in14 */
+
+	/* P8_17 (ZCZ ball U12) gpio0_27 */
+	P8_17_default_pin: pinmux_P8_17_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P8_17_gpio_pin: pinmux_P8_17_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad11.gpio0_27 */
+	P8_17_gpio_pu_pin: pinmux_P8_17_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P8_17_gpio_pd_pin: pinmux_P8_17_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P8_17_gpio_input_pin: pinmux_P8_17_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad11.gpio0_27 */
+	P8_17_pwm_pin: pinmux_P8_17_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad11.ehrpwm0_synco */
+
+	/* P8_18 (ZCZ ball V12) gpio2_1 */
+	P8_18_default_pin: pinmux_P8_18_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pin: pinmux_P8_18_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pu_pin: pinmux_P8_18_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pd_pin: pinmux_P8_18_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_input_pin: pinmux_P8_18_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_clk.gpio2_1 */
+
+	/* P8_19 (ZCZ ball U10) gpio0_22 */
+	P8_19_default_pin: pinmux_P8_19_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pin: pinmux_P8_19_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pu_pin: pinmux_P8_19_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pd_pin: pinmux_P8_19_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_input_pin: pinmux_P8_19_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad8.gpio0_22 */
+	P8_19_pwm_pin: pinmux_P8_19_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad8.ehrpwm2a */
+
+	/* P8_20 (ZCZ ball V9) emmc */
+	P8_20_default_pin: pinmux_P8_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pin: pinmux_P8_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pu_pin: pinmux_P8_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pd_pin: pinmux_P8_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_input_pin: pinmux_P8_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn2.gpio1_31 */
+	P8_20_pruout_pin: pinmux_P8_20_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_csn2.pru1_out13 */
+	P8_20_pruin_pin: pinmux_P8_20_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_csn2.pru1_in13 */
+
+	/* P8_21 (ZCZ ball U9) emmc */
+	P8_21_default_pin: pinmux_P8_21_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pin: pinmux_P8_21_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pu_pin: pinmux_P8_21_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pd_pin: pinmux_P8_21_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_input_pin: pinmux_P8_21_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn1.gpio1_30 */
+	P8_21_pruout_pin: pinmux_P8_21_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_csn1.pru1_out12 */
+	P8_21_pruin_pin: pinmux_P8_21_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_csn1.pru1_in12 */
+
+	/* P8_22 (ZCZ ball V8) emmc */
+	P8_22_default_pin: pinmux_P8_22_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pin: pinmux_P8_22_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pu_pin: pinmux_P8_22_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pd_pin: pinmux_P8_22_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_input_pin: pinmux_P8_22_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad5.gpio1_5 */
+
+	/* P8_23 (ZCZ ball U8) emmc */
+	P8_23_default_pin: pinmux_P8_23_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pin: pinmux_P8_23_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pu_pin: pinmux_P8_23_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pd_pin: pinmux_P8_23_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_input_pin: pinmux_P8_23_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad4.gpio1_4 */
+
+	/* P8_24 (ZCZ ball V7) emmc */
+	P8_24_default_pin: pinmux_P8_24_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pin: pinmux_P8_24_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pu_pin: pinmux_P8_24_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pd_pin: pinmux_P8_24_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_input_pin: pinmux_P8_24_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad1.gpio1_1 */
+
+	/* P8_25 (ZCZ ball U7) emmc */
+	P8_25_default_pin: pinmux_P8_25_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pin: pinmux_P8_25_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pu_pin: pinmux_P8_25_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pd_pin: pinmux_P8_25_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_input_pin: pinmux_P8_25_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad0.gpio1_0 */
+
+	/* P8_26 (ZCZ ball V6) gpio1_29 */
+	P8_26_default_pin: pinmux_P8_26_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x087c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn0.gpio1_29 */
+	P8_26_gpio_pin: pinmux_P8_26_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x087c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn0.gpio1_29 */
+	P8_26_gpio_pu_pin: pinmux_P8_26_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x087c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn0.gpio1_29 */
+	P8_26_gpio_pd_pin: pinmux_P8_26_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x087c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn0.gpio1_29 */
+	P8_26_gpio_input_pin: pinmux_P8_26_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x087c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn0.gpio1_29 */
+
+	/* P8_27 (ZCZ ball U5) hdmi */
+	P8_27_default_pin: pinmux_P8_27_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pin: pinmux_P8_27_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pu_pin: pinmux_P8_27_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pd_pin: pinmux_P8_27_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_input_pin: pinmux_P8_27_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_vsync.gpio2_22 */
+	P8_27_pruout_pin: pinmux_P8_27_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_vsync.pru1_out8 */
+	P8_27_pruin_pin: pinmux_P8_27_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_vsync.pru1_in8 */
+
+	/* P8_28 (ZCZ ball V5) hdmi */
+	P8_28_default_pin: pinmux_P8_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pin: pinmux_P8_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pu_pin: pinmux_P8_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pd_pin: pinmux_P8_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_input_pin: pinmux_P8_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_pclk.gpio2_24 */
+	P8_28_pruout_pin: pinmux_P8_28_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_pclk.pru1_out10 */
+	P8_28_pruin_pin: pinmux_P8_28_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_pclk.pru1_in10 */
+
+	/* P8_29 (ZCZ ball R5) hdmi */
+	P8_29_default_pin: pinmux_P8_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pin: pinmux_P8_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pu_pin: pinmux_P8_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pd_pin: pinmux_P8_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_input_pin: pinmux_P8_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_hsync.gpio2_23 */
+	P8_29_pruout_pin: pinmux_P8_29_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_hsync.pru1_out9 */
+	P8_29_pruin_pin: pinmux_P8_29_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_hsync.pru1_in9 */
+
+	/* P8_30 (ZCZ ball R6) hdmi */
+	P8_30_default_pin: pinmux_P8_30_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pin: pinmux_P8_30_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pu_pin: pinmux_P8_30_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pd_pin: pinmux_P8_30_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_input_pin: pinmux_P8_30_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE7) >; };			/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_pruout_pin: pinmux_P8_30_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_ac_bias_en.pru1_out11 */
+	P8_30_pruin_pin: pinmux_P8_30_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE6) >; };			/* lcd_ac_bias_en.pru1_in11 */
+
+	/* P8_31 (ZCZ ball V4) hdmi */
+	P8_31_default_pin: pinmux_P8_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pin: pinmux_P8_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pu_pin: pinmux_P8_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pd_pin: pinmux_P8_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_input_pin: pinmux_P8_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data14.gpio0_10 */
+	P8_31_qep_pin: pinmux_P8_31_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data14.eqep1_index */
+	P8_31_uart_pin: pinmux_P8_31_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data14.uart5_rxd */
+
+	/* P8_32 (ZCZ ball T5) hdmi */
+	P8_32_default_pin: pinmux_P8_32_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pin: pinmux_P8_32_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pu_pin: pinmux_P8_32_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pd_pin: pinmux_P8_32_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_input_pin: pinmux_P8_32_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data15.gpio0_11 */
+	P8_32_qep_pin: pinmux_P8_32_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data15.eqep1_strobe */
+
+	/* P8_33 (ZCZ ball V3) hdmi */
+	P8_33_default_pin: pinmux_P8_33_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pin: pinmux_P8_33_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pu_pin: pinmux_P8_33_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pd_pin: pinmux_P8_33_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_input_pin: pinmux_P8_33_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data13.gpio0_9 */
+	P8_33_qep_pin: pinmux_P8_33_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data13.eqep1b_in */
+
+	/* P8_34 (ZCZ ball U4) hdmi */
+	P8_34_default_pin: pinmux_P8_34_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pin: pinmux_P8_34_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pu_pin: pinmux_P8_34_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pd_pin: pinmux_P8_34_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_input_pin: pinmux_P8_34_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data11.gpio2_17 */
+	P8_34_pwm_pin: pinmux_P8_34_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data11.ehrpwm1b */
+
+	/* P8_35 (ZCZ ball V2) hdmi */
+	P8_35_default_pin: pinmux_P8_35_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pin: pinmux_P8_35_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pu_pin: pinmux_P8_35_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pd_pin: pinmux_P8_35_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_input_pin: pinmux_P8_35_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data12.gpio0_8 */
+	P8_35_qep_pin: pinmux_P8_35_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data12.eqep1a_in */
+
+	/* P8_36 (ZCZ ball U3) hdmi */
+	P8_36_default_pin: pinmux_P8_36_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pin: pinmux_P8_36_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pu_pin: pinmux_P8_36_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pd_pin: pinmux_P8_36_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_input_pin: pinmux_P8_36_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data10.gpio2_16 */
+	P8_36_pwm_pin: pinmux_P8_36_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data10.ehrpwm1a */
+
+	/* P8_37 (ZCZ ball U1) hdmi */
+	P8_37_default_pin: pinmux_P8_37_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pin: pinmux_P8_37_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pu_pin: pinmux_P8_37_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pd_pin: pinmux_P8_37_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_input_pin: pinmux_P8_37_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data8.gpio2_14 */
+	P8_37_pwm_pin: pinmux_P8_37_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data8.ehrpwm1_tripzone_input */
+	P8_37_uart_pin: pinmux_P8_37_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data8.uart5_txd */
+
+	/* P8_38 (ZCZ ball U2) hdmi */
+	P8_38_default_pin: pinmux_P8_38_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pin: pinmux_P8_38_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pu_pin: pinmux_P8_38_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pd_pin: pinmux_P8_38_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_input_pin: pinmux_P8_38_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data9.gpio2_15 */
+	P8_38_pwm_pin: pinmux_P8_38_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data9.ehrpwm0_synco */
+	P8_38_uart_pin: pinmux_P8_38_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data9.uart5_rxd */
+
+	/* P8_39 (ZCZ ball T3) hdmi */
+	P8_39_default_pin: pinmux_P8_39_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pin: pinmux_P8_39_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pu_pin: pinmux_P8_39_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pd_pin: pinmux_P8_39_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_input_pin: pinmux_P8_39_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data6.gpio2_12 */
+	P8_39_qep_pin: pinmux_P8_39_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data6.eqep2_index */
+	P8_39_pruout_pin: pinmux_P8_39_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data6.pru1_out6 */
+	P8_39_pruin_pin: pinmux_P8_39_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data6.pru1_in6 */
+
+	/* P8_40 (ZCZ ball T4) hdmi */
+	P8_40_default_pin: pinmux_P8_40_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pin: pinmux_P8_40_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pu_pin: pinmux_P8_40_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pd_pin: pinmux_P8_40_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_input_pin: pinmux_P8_40_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data7.gpio2_13 */
+	P8_40_qep_pin: pinmux_P8_40_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data7.eqep2_strobe */
+	P8_40_pruout_pin: pinmux_P8_40_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data7.pru1_out7 */
+	P8_40_pruin_pin: pinmux_P8_40_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data7.pru1_in7 */
+
+	/* P8_41 (ZCZ ball T1) hdmi */
+	P8_41_default_pin: pinmux_P8_41_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pin: pinmux_P8_41_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pu_pin: pinmux_P8_41_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pd_pin: pinmux_P8_41_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_input_pin: pinmux_P8_41_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data4.gpio2_10 */
+	P8_41_qep_pin: pinmux_P8_41_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data4.eqep2a_in */
+	P8_41_pruout_pin: pinmux_P8_41_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data4.pru1_out4 */
+	P8_41_pruin_pin: pinmux_P8_41_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data4.pru1_in4 */
+
+	/* P8_42 (ZCZ ball T2) hdmi */
+	P8_42_default_pin: pinmux_P8_42_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pin: pinmux_P8_42_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pu_pin: pinmux_P8_42_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pd_pin: pinmux_P8_42_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_input_pin: pinmux_P8_42_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data5.gpio2_11 */
+	P8_42_qep_pin: pinmux_P8_42_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data5.eqep2b_in */
+	P8_42_pruout_pin: pinmux_P8_42_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data5.pru1_out5 */
+	P8_42_pruin_pin: pinmux_P8_42_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data5.pru1_in5 */
+
+	/* P8_43 (ZCZ ball R3) hdmi */
+	P8_43_default_pin: pinmux_P8_43_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pin: pinmux_P8_43_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pu_pin: pinmux_P8_43_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pd_pin: pinmux_P8_43_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_input_pin: pinmux_P8_43_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data2.gpio2_8 */
+	P8_43_pwm_pin: pinmux_P8_43_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data2.ehrpwm2_tripzone_input */
+	P8_43_pruout_pin: pinmux_P8_43_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data2.pru1_out2 */
+	P8_43_pruin_pin: pinmux_P8_43_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data2.pru1_in2 */
+
+	/* P8_44 (ZCZ ball R4) hdmi */
+	P8_44_default_pin: pinmux_P8_44_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pin: pinmux_P8_44_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pu_pin: pinmux_P8_44_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pd_pin: pinmux_P8_44_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_input_pin: pinmux_P8_44_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data3.gpio2_9 */
+	P8_44_pwm_pin: pinmux_P8_44_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data3.ehrpwm0_synco */
+	P8_44_pruout_pin: pinmux_P8_44_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data3.pru1_out3 */
+	P8_44_pruin_pin: pinmux_P8_44_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data3.pru1_in3 */
+
+	/* P8_45 (ZCZ ball R1) hdmi */
+	P8_45_default_pin: pinmux_P8_45_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pin: pinmux_P8_45_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pu_pin: pinmux_P8_45_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pd_pin: pinmux_P8_45_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_input_pin: pinmux_P8_45_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data0.gpio2_6 */
+	P8_45_pwm_pin: pinmux_P8_45_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data0.ehrpwm2a */
+	P8_45_pruout_pin: pinmux_P8_45_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data0.pru1_out0 */
+	P8_45_pruin_pin: pinmux_P8_45_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data0.pru1_in0 */
+
+	/* P8_46 (ZCZ ball R2) hdmi */
+	P8_46_default_pin: pinmux_P8_46_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pin: pinmux_P8_46_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pu_pin: pinmux_P8_46_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pd_pin: pinmux_P8_46_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_input_pin: pinmux_P8_46_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data1.gpio2_7 */
+	P8_46_pwm_pin: pinmux_P8_46_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data1.ehrpwm2b */
+	P8_46_pruout_pin: pinmux_P8_46_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data1.pru1_out1 */
+	P8_46_pruin_pin: pinmux_P8_46_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data1.pru1_in1 */
+
+	/************************/
+	/* P9 Header */
+	/************************/
+
+	/* P9_01                GND */
+
+	/* P9_02                GND */
+
+	/* P9_03                3V3 */
+
+	/* P9_04                3V3 */
+
+	/* P9_05                VDD_5V */
+
+	/* P9_06                VDD_5V */
+
+	/* P9_07                SYS_5V */
+
+	/* P9_08                SYS_5V */
+
+	/* P9_09                PWR_BUT */
+
+	/* P9_10                RSTn */
+
+	/* P9_11 (ZCZ ball T17) gpio0_30 */
+	P9_11_default_pin: pinmux_P9_11_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pin: pinmux_P9_11_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pu_pin: pinmux_P9_11_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pd_pin: pinmux_P9_11_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_input_pin: pinmux_P9_11_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wait0.gpio0_30 */
+	P9_11_uart_pin: pinmux_P9_11_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wait0.uart4_rxd */
+
+	/* P9_12 (ZCZ ball U18) gpio1_28 */
+	P9_12_default_pin: pinmux_P9_12_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pin: pinmux_P9_12_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pu_pin: pinmux_P9_12_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pd_pin: pinmux_P9_12_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_input_pin: pinmux_P9_12_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_be1n.gpio1_28 */
+
+	/* P9_13 (ZCZ ball U17) gpio0_31 */
+	P9_13_default_pin: pinmux_P9_13_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pin: pinmux_P9_13_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pu_pin: pinmux_P9_13_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pd_pin: pinmux_P9_13_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_input_pin: pinmux_P9_13_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wpn.gpio0_31 */
+	P9_13_uart_pin: pinmux_P9_13_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wpn.uart4_txd */
+
+	/* P9_14 (ZCZ ball U14) gpio1_18 */
+	P9_14_default_pin: pinmux_P9_14_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pin: pinmux_P9_14_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pu_pin: pinmux_P9_14_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pd_pin: pinmux_P9_14_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_input_pin: pinmux_P9_14_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a2.gpio1_18 */
+	P9_14_pwm_pin: pinmux_P9_14_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a2.ehrpwm1a */
+
+	/* P9_15 (ZCZ ball R13) gpio1_16 */
+	P9_15_default_pin: pinmux_P9_15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pin: pinmux_P9_15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pu_pin: pinmux_P9_15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pd_pin: pinmux_P9_15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_input_pin: pinmux_P9_15_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a0.gpio1_16 */
+	P9_15_pwm_pin: pinmux_P9_15_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a0.ehrpwm1_tripzone_input */
+
+	/* P9_16 (ZCZ ball T14) gpio1_19 */
+	P9_16_default_pin: pinmux_P9_16_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pin: pinmux_P9_16_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pu_pin: pinmux_P9_16_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pd_pin: pinmux_P9_16_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_input_pin: pinmux_P9_16_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a3.gpio1_19 */
+	P9_16_pwm_pin: pinmux_P9_16_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a3.ehrpwm1b */
+
+	/* P9_17 (ZCZ ball A16) gpio0_5 */
+	P9_17_default_pin: pinmux_P9_17_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pin: pinmux_P9_17_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pu_pin: pinmux_P9_17_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pd_pin: pinmux_P9_17_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_input_pin: pinmux_P9_17_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_INPUT | MUX_MODE7) >; };			/* spi0_cs0.gpio0_5 */
+	P9_17_spi_cs_pin: pinmux_P9_17_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_cs0.spi0_cs0 */
+	P9_17_i2c_pin: pinmux_P9_17_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_cs0.i2c1_scl */
+	P9_17_pwm_pin: pinmux_P9_17_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_cs0.ehrpwm0_synci */
+	P9_17_pru_uart_pin: pinmux_P9_17_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_cs0.pr1_uart0_txd */
+
+	/* P9_18 (ZCZ ball B16) gpio0_4 */
+	P9_18_default_pin: pinmux_P9_18_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pin: pinmux_P9_18_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pu_pin: pinmux_P9_18_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pd_pin: pinmux_P9_18_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_input_pin: pinmux_P9_18_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d1.gpio0_4 */
+	P9_18_spi_pin: pinmux_P9_18_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d1.spi0_d1 */
+	P9_18_i2c_pin: pinmux_P9_18_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d1.i2c1_sda */
+	P9_18_pwm_pin: pinmux_P9_18_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d1.ehrpwm0_tripzone_input */
+	P9_18_pru_uart_pin: pinmux_P9_18_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d1.pr1_uart0_rxd */
+
+	/* P9_19 (ZCZ ball D17) i2c2_scl */
+	P9_19_default_pin: pinmux_P9_19_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P9_19_gpio_pin: pinmux_P9_19_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_pu_pin: pinmux_P9_19_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_pd_pin: pinmux_P9_19_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_input_pin: pinmux_P9_19_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rtsn.gpio0_13 */
+	P9_19_timer_pin: pinmux_P9_19_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart1_rtsn.timer5 */
+	P9_19_can_pin: pinmux_P9_19_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rtsn.dcan0_rx */
+	P9_19_i2c_pin: pinmux_P9_19_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P9_19_spi_cs_pin: pinmux_P9_19_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_rtsn.spi1_cs1 */
+	P9_19_pru_uart_pin: pinmux_P9_19_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rtsn.pr1_uart0_rts_n */
+
+	/* P9_20 (ZCZ ball D18) i2c2_sda */
+	P9_20_default_pin: pinmux_P9_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P9_20_gpio_pin: pinmux_P9_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_pu_pin: pinmux_P9_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_pd_pin: pinmux_P9_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_input_pin: pinmux_P9_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_INPUT | MUX_MODE7) >; };			/* uart1_ctsn.gpio0_12 */
+	P9_20_timer_pin: pinmux_P9_20_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart1_ctsn.timer6 */
+	P9_20_can_pin: pinmux_P9_20_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_ctsn.dcan0_tx */
+	P9_20_i2c_pin: pinmux_P9_20_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P9_20_spi_cs_pin: pinmux_P9_20_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_ctsn.spi1_cs0 */
+	P9_20_pru_uart_pin: pinmux_P9_20_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_ctsn.pr1_uart0_cts_n */
+
+	/* P9_21 (ZCZ ball B17) gpio0_3 */
+	P9_21_default_pin: pinmux_P9_21_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pin: pinmux_P9_21_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pu_pin: pinmux_P9_21_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pd_pin: pinmux_P9_21_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_input_pin: pinmux_P9_21_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d0.gpio0_3 */
+	P9_21_spi_pin: pinmux_P9_21_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d0.spi0_d0 */
+	P9_21_uart_pin: pinmux_P9_21_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_d0.uart2_txd */
+	P9_21_i2c_pin: pinmux_P9_21_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d0.i2c2_scl */
+	P9_21_pwm_pin: pinmux_P9_21_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d0.ehrpwm0b */
+	P9_21_pru_uart_pin: pinmux_P9_21_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d0.pr1_uart0_rts_n */
+
+	/* P9_22 (ZCZ ball A17) gpio0_2 */
+	P9_22_default_pin: pinmux_P9_22_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pin: pinmux_P9_22_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pu_pin: pinmux_P9_22_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pd_pin: pinmux_P9_22_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_input_pin: pinmux_P9_22_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_INPUT | MUX_MODE7) >; };			/* spi0_sclk.gpio0_2 */
+	P9_22_spi_sclk_pin: pinmux_P9_22_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_sclk.spi0_sclk */
+	P9_22_uart_pin: pinmux_P9_22_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_sclk.uart2_rxd */
+	P9_22_i2c_pin: pinmux_P9_22_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_sclk.i2c2_sda */
+	P9_22_pwm_pin: pinmux_P9_22_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_sclk.ehrpwm0a */
+	P9_22_pru_uart_pin: pinmux_P9_22_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_sclk.pr1_uart0_cts_n */
+
+	/* P9_23 (ZCZ ball V14) gpio1_17 */
+	P9_23_default_pin: pinmux_P9_23_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pin: pinmux_P9_23_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pu_pin: pinmux_P9_23_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pd_pin: pinmux_P9_23_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_input_pin: pinmux_P9_23_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a1.gpio1_17 */
+	P9_23_pwm_pin: pinmux_P9_23_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a1.ehrpwm0_synco */
+
+	/* P9_24 (ZCZ ball D15) gpio0_15 */
+	P9_24_default_pin: pinmux_P9_24_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pin: pinmux_P9_24_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pu_pin: pinmux_P9_24_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pd_pin: pinmux_P9_24_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_input_pin: pinmux_P9_24_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE7) >; };			/* uart1_txd.gpio0_15 */
+	P9_24_uart_pin: pinmux_P9_24_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_txd.uart1_txd */
+	P9_24_can_pin: pinmux_P9_24_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_txd.dcan1_rx */
+	P9_24_i2c_pin: pinmux_P9_24_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_txd.i2c1_scl */
+	P9_24_pru_uart_pin: pinmux_P9_24_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_txd.pr1_uart0_txd */
+	P9_24_pruin_pin: pinmux_P9_24_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE6) >; };			/* uart1_txd.pru0_in16 */
+
+	/* P9_25 (ZCZ ball A14) audio */
+	P9_25_default_pin: pinmux_P9_25_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pin: pinmux_P9_25_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pu_pin: pinmux_P9_25_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pd_pin: pinmux_P9_25_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_input_pin: pinmux_P9_25_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_qep_pin: pinmux_P9_25_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkx.eqep0_strobe */
+	P9_25_pruout_pin: pinmux_P9_25_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkx.pru0_out7 */
+	P9_25_pruin_pin: pinmux_P9_25_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkx.pru0_in7 */
+
+	/* P9_26 (ZCZ ball D16) gpio0_14 */
+	P9_26_default_pin: pinmux_P9_26_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pin: pinmux_P9_26_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pu_pin: pinmux_P9_26_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pd_pin: pinmux_P9_26_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_input_pin: pinmux_P9_26_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rxd.gpio0_14 */
+	P9_26_uart_pin: pinmux_P9_26_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_rxd.uart1_rxd */
+	P9_26_can_pin: pinmux_P9_26_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rxd.dcan1_tx */
+	P9_26_i2c_pin: pinmux_P9_26_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rxd.i2c1_sda */
+	P9_26_pru_uart_pin: pinmux_P9_26_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rxd.pr1_uart0_rxd */
+	P9_26_pruin_pin: pinmux_P9_26_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE6) >; };			/* uart1_rxd.pru1_in16 */
+
+	/* P9_27 (ZCZ ball C13) gpio3_19 */
+	P9_27_default_pin: pinmux_P9_27_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pin: pinmux_P9_27_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pu_pin: pinmux_P9_27_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pd_pin: pinmux_P9_27_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_input_pin: pinmux_P9_27_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsr.gpio3_19 */
+	P9_27_qep_pin: pinmux_P9_27_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsr.eqep0b_in */
+	P9_27_pruout_pin: pinmux_P9_27_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsr.pru0_out5 */
+	P9_27_pruin_pin: pinmux_P9_27_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsr.pru0_in5 */
+
+	/* P9_28 (ZCZ ball C12) audio */
+	P9_28_default_pin: pinmux_P9_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pin: pinmux_P9_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pu_pin: pinmux_P9_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pd_pin: pinmux_P9_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_input_pin: pinmux_P9_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_pwm_pin: pinmux_P9_28_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkr.ehrpwm0_synci */
+	P9_28_spi_cs_pin: pinmux_P9_28_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_ahclkr.spi1_cs0 */
+	P9_28_pwm2_pin: pinmux_P9_28_pwm2_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* mcasp0_ahclkr.ecap2_in_pwm2_out */
+	P9_28_pruout_pin: pinmux_P9_28_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkr.pru0_out3 */
+	P9_28_pruin_pin: pinmux_P9_28_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkr.pru0_in3 */
+
+	/* P9_29 (ZCZ ball B13) audio */
+	P9_29_default_pin: pinmux_P9_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pin: pinmux_P9_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pu_pin: pinmux_P9_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pd_pin: pinmux_P9_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_input_pin: pinmux_P9_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsx.gpio3_15 */
+	P9_29_pwm_pin: pinmux_P9_29_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsx.ehrpwm0b */
+	P9_29_spi_pin: pinmux_P9_29_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_fsx.spi1_d0 */
+	P9_29_pruout_pin: pinmux_P9_29_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsx.pru0_out1 */
+	P9_29_pruin_pin: pinmux_P9_29_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsx.pru0_in1 */
+
+	/* P9_30 (ZCZ ball D12) gpio3_16 */
+	P9_30_default_pin: pinmux_P9_30_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr0.gpio3_16 */
+	P9_30_gpio_pin: pinmux_P9_30_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_axr0.gpio3_16 */
+	P9_30_gpio_pu_pin: pinmux_P9_30_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr0.gpio3_16 */
+	P9_30_gpio_pd_pin: pinmux_P9_30_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr0.gpio3_16 */
+	P9_30_gpio_input_pin: pinmux_P9_30_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_axr0.gpio3_16 */
+	P9_30_pwm_pin: pinmux_P9_30_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_axr0.ehrpwm0_tripzone_input */
+	P9_30_spi_pin: pinmux_P9_30_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_axr0.spi1_d1 */
+	P9_30_pruout_pin: pinmux_P9_30_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_axr0.pru0_out2 */
+	P9_30_pruin_pin: pinmux_P9_30_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr0.pru0_in2 */
+
+	/* P9_31 (ZCZ ball A13) audio */
+	P9_31_default_pin: pinmux_P9_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pin: pinmux_P9_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pu_pin: pinmux_P9_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pd_pin: pinmux_P9_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_input_pin: pinmux_P9_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkx.gpio3_14 */
+	P9_31_pwm_pin: pinmux_P9_31_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkx.ehrpwm0a */
+	P9_31_spi_sclk_pin: pinmux_P9_31_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_aclkx.spi1_sclk */
+	P9_31_pruout_pin: pinmux_P9_31_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkx.pru0_out0 */
+	P9_31_pruin_pin: pinmux_P9_31_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkx.pru0_in0 */
+
+	/* P9_32                VADC */
+
+	/* P9_33 (ZCZ ball C8)  AIN4         */
+
+	/* P9_34                AGND */
+
+	/* P9_35 (ZCZ ball A8)  AIN6         */
+
+	/* P9_36 (ZCZ ball B8)  AIN5         */
+
+	/* P9_37 (ZCZ ball B7)  AIN2         */
+
+	/* P9_38 (ZCZ ball A7)  AIN3         */
+
+	/* P9_39 (ZCZ ball B6)  AIN0         */
+
+	/* P9_40 (ZCZ ball C7)  AIN1         */
+
+	/* P9_41 (ZCZ ball D14) gpio0_20 */
+	P9_41_default_pin: pinmux_P9_41_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pin: pinmux_P9_41_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pu_pin: pinmux_P9_41_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pd_pin: pinmux_P9_41_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_input_pin: pinmux_P9_41_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE7) >; };			/* xdma_event_intr1.gpio0_20 */
+	P9_41_timer_pin: pinmux_P9_41_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* xdma_event_intr1.timer7 */
+	P9_41_pruin_pin: pinmux_P9_41_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE5) >; };			/* xdma_event_intr1.pru0_in16 */
+
+	/* P9_41.1: header pin P9_41 is also wired to a second SoC ball, muxed below as "P9_91" */
+	/* P9_91 (ZCZ ball D13) gpio3_20 */
+	P9_91_default_pin: pinmux_P9_91_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pin: pinmux_P9_91_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pu_pin: pinmux_P9_91_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pd_pin: pinmux_P9_91_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_input_pin: pinmux_P9_91_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_axr1.gpio3_20 */
+	P9_91_qep_pin: pinmux_P9_91_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_axr1.eqep0_index */
+	P9_91_pruout_pin: pinmux_P9_91_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_axr1.pru0_out6 */
+	P9_91_pruin_pin: pinmux_P9_91_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr1.pru0_in6 */
+
+	/* P9_42 (ZCZ ball C18) gpio0_7 */
+	P9_42_default_pin: pinmux_P9_42_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pin: pinmux_P9_42_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pu_pin: pinmux_P9_42_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pd_pin: pinmux_P9_42_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_input_pin: pinmux_P9_42_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_INPUT | MUX_MODE7) >; };			/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_pwm_pin: pinmux_P9_42_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE0) >; };	/* eCAP0_in_PWM0_out.ecap0_in_pwm0_out */
+	P9_42_uart_pin: pinmux_P9_42_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* eCAP0_in_PWM0_out.uart3_txd */
+	P9_42_spi_cs_pin: pinmux_P9_42_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* eCAP0_in_PWM0_out.spi1_cs1 */
+	P9_42_pru_ecap_pin: pinmux_P9_42_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* eCAP0_in_PWM0_out.pr1_ecap0_ecap_capin_apwm_o */
+	P9_42_spi_sclk_pin: pinmux_P9_42_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* eCAP0_in_PWM0_out.spi1_sclk */
+
+	/* P9_42.1: header pin P9_42 is also wired to a second SoC ball, muxed below as "P9_92" */
+	/* P9_92 (ZCZ ball B12) gpio3_18 */
+	P9_92_default_pin: pinmux_P9_92_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pin: pinmux_P9_92_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pu_pin: pinmux_P9_92_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pd_pin: pinmux_P9_92_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_input_pin: pinmux_P9_92_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkr.gpio3_18 */
+	P9_92_qep_pin: pinmux_P9_92_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkr.eqep0a_in */
+	P9_92_pruout_pin: pinmux_P9_92_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkr.pru0_out4 */
+	P9_92_pruin_pin: pinmux_P9_92_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkr.pru0_in4 */
+
+	/* P9_43                GND */
+
+	/* P9_44                GND */
+
+	/* P9_45                GND */
+
+	/* P9_46                GND */
+
+	/*       (ZCZ ball A15) */
+	A15_default_pin: pinmux_A15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | MUX_MODE7) >; };		/* Mode 7, Pull-Up */
+	A15_clkout_pin: pinmux_A15_clkout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT | MUX_MODE3) >; };			/* Mode 3 */
+	A15_gpio_pin: pinmux_A15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* Mode 7, RxActive */
+	A15_gpio_pu_pin: pinmux_A15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* Mode 7, Pull-Up, RxActive */
+	A15_gpio_pd_pin: pinmux_A15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* Mode 7, Pull-Down, RxActive */
+};
+
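+/*
+ * The peripheral overrides below enable each bus with an empty pinctrl-0,
+ * so no pads are claimed at boot; pad ownership is presumably left to the
+ * bone-pinmux-helper states defined further down, selected at runtime.
+ */
+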
+&i2c1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	clock-frequency = <100000>;
+	symlink = "bone/i2c/1";
+};
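+/*
+ * Note: "symlink" is not a mainline devicetree property; it is assumed here
+ * to be the BeagleBone kernel extension that creates convenience device
+ * links such as /dev/bone/i2c/1 for the enabled buses.
+ */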
+
+&i2c2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	clock-frequency = <100000>;
+	symlink = "bone/i2c/2";
+};
+
+&uart1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/1";
+};
+
+&uart2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/2";
+};
+
+&uart3 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/3";
+};
+
+&uart4 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/4";
+};
+
+&uart5 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/5";
+};
+
+&dcan0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/can/0";
+};
+
+&dcan1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/can/1";
+};
+
+&eqep0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/0";
+};
+
+&eqep1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/1";
+};
+
+&eqep2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/2";
+};
+
+&epwmss0 {
+	status = "okay";
+};
+
+&epwmss1 {
+	status = "okay";
+};
+
+&epwmss2 {
+	status = "okay";
+};
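+
+/*
+ * The eQEP nodes above and the eHRPWM/eCAP nodes that follow are children of
+ * these PWM subsystem (epwmss) blocks, so the parents are enabled here even
+ * though they take no pinmux of their own.
+ */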
+
+&ehrpwm0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ehrpwm1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ehrpwm2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&spi0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/0.0";
+
+		reg = <0>;
+		spi-max-frequency = <16000000>;
+		spi-cpha;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/0.1";
+
+		reg = <1>;
+		spi-max-frequency = <16000000>;
+	};
+};
+
+&spi1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/1.0";
+
+		reg = <0>;
+		spi-max-frequency = <16000000>;
+		spi-cpha;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/1.1";
+
+		reg = <1>;
+		spi-max-frequency = <16000000>;
+	};
+};
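+
+/* Each spidev channel above appears to userspace as /dev/spidevB.C,     */
+/* where B is the SPI bus number assigned by the kernel and C is the     */
+/* chip-select (reg) value.                                              */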
+
+/**********************************************************************/
+/* Pin Multiplex Helpers                                              */
+/*                                                                    */
+/* These provide userspace runtime pin configuration for the          */
+/* BeagleBone cape expansion headers                                  */
+/**********************************************************************/
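+/* A helper's active state is selected at run time by writing one of its */
+/* pinctrl-names strings to its sysfs "state" attribute, for example     */
+/* (the exact sysfs path varies with the kernel version):                */
+/*   echo gpio > /sys/devices/platform/ocp/ocp:P9_14_pinmux/state        */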
+
+&ocp {
+	/************************/
+	/* P8 Header */
+	/************************/
+
+	/* P8_01                GND */
+
+	/* P8_02                GND */
+
+
+	/* P8_03 (ZCZ ball R9) emmc */
+	P8_03_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_03_default_pin>;
+		pinctrl-1 = <&P8_03_gpio_pin>;
+		pinctrl-2 = <&P8_03_gpio_pu_pin>;
+		pinctrl-3 = <&P8_03_gpio_pd_pin>;
+		pinctrl-4 = <&P8_03_gpio_input_pin>;
+	};
+
+	/* P8_04 (ZCZ ball T9) emmc */
+	P8_04_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_04_default_pin>;
+		pinctrl-1 = <&P8_04_gpio_pin>;
+		pinctrl-2 = <&P8_04_gpio_pu_pin>;
+		pinctrl-3 = <&P8_04_gpio_pd_pin>;
+		pinctrl-4 = <&P8_04_gpio_input_pin>;
+	};
+
+	/* P8_05 (ZCZ ball R8) emmc */
+	P8_05_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_05_default_pin>;
+		pinctrl-1 = <&P8_05_gpio_pin>;
+		pinctrl-2 = <&P8_05_gpio_pu_pin>;
+		pinctrl-3 = <&P8_05_gpio_pd_pin>;
+		pinctrl-4 = <&P8_05_gpio_input_pin>;
+	};
+
+	/* P8_06 (ZCZ ball T8) emmc */
+	P8_06_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_06_default_pin>;
+		pinctrl-1 = <&P8_06_gpio_pin>;
+		pinctrl-2 = <&P8_06_gpio_pu_pin>;
+		pinctrl-3 = <&P8_06_gpio_pd_pin>;
+		pinctrl-4 = <&P8_06_gpio_input_pin>;
+	};
+
+	/* P8_07 (ZCZ ball R7) */
+	P8_07_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_07_default_pin>;
+		pinctrl-1 = <&P8_07_gpio_pin>;
+		pinctrl-2 = <&P8_07_gpio_pu_pin>;
+		pinctrl-3 = <&P8_07_gpio_pd_pin>;
+		pinctrl-4 = <&P8_07_gpio_input_pin>;
+		pinctrl-5 = <&P8_07_timer_pin>;
+	};
+
+	/* P8_08 (ZCZ ball T7) */
+	P8_08_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_08_default_pin>;
+		pinctrl-1 = <&P8_08_gpio_pin>;
+		pinctrl-2 = <&P8_08_gpio_pu_pin>;
+		pinctrl-3 = <&P8_08_gpio_pd_pin>;
+		pinctrl-4 = <&P8_08_gpio_input_pin>;
+		pinctrl-5 = <&P8_08_timer_pin>;
+	};
+
+	/* P8_09 (ZCZ ball T6) */
+	P8_09_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_09_default_pin>;
+		pinctrl-1 = <&P8_09_gpio_pin>;
+		pinctrl-2 = <&P8_09_gpio_pu_pin>;
+		pinctrl-3 = <&P8_09_gpio_pd_pin>;
+		pinctrl-4 = <&P8_09_gpio_input_pin>;
+		pinctrl-5 = <&P8_09_timer_pin>;
+	};
+
+	/* P8_10 (ZCZ ball U6) */
+	P8_10_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_10_default_pin>;
+		pinctrl-1 = <&P8_10_gpio_pin>;
+		pinctrl-2 = <&P8_10_gpio_pu_pin>;
+		pinctrl-3 = <&P8_10_gpio_pd_pin>;
+		pinctrl-4 = <&P8_10_gpio_input_pin>;
+		pinctrl-5 = <&P8_10_timer_pin>;
+	};
+
+	/* P8_11 (ZCZ ball R12) */
+	P8_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P8_11_default_pin>;
+		pinctrl-1 = <&P8_11_gpio_pin>;
+		pinctrl-2 = <&P8_11_gpio_pu_pin>;
+		pinctrl-3 = <&P8_11_gpio_pd_pin>;
+		pinctrl-4 = <&P8_11_gpio_input_pin>;
+		pinctrl-5 = <&P8_11_qep_pin>;
+		pinctrl-6 = <&P8_11_pruout_pin>;
+	};
+
+	/* P8_12 (ZCZ ball T12) */
+	P8_12_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P8_12_default_pin>;
+		pinctrl-1 = <&P8_12_gpio_pin>;
+		pinctrl-2 = <&P8_12_gpio_pu_pin>;
+		pinctrl-3 = <&P8_12_gpio_pd_pin>;
+		pinctrl-4 = <&P8_12_gpio_input_pin>;
+		pinctrl-5 = <&P8_12_qep_pin>;
+		pinctrl-6 = <&P8_12_pruout_pin>;
+	};
+
+	/* P8_13 (ZCZ ball T10) */
+	P8_13_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_13_default_pin>;
+		pinctrl-1 = <&P8_13_gpio_pin>;
+		pinctrl-2 = <&P8_13_gpio_pu_pin>;
+		pinctrl-3 = <&P8_13_gpio_pd_pin>;
+		pinctrl-4 = <&P8_13_gpio_input_pin>;
+		pinctrl-5 = <&P8_13_pwm_pin>;
+	};
+
+	/* P8_14 (ZCZ ball T11) */
+	P8_14_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_14_default_pin>;
+		pinctrl-1 = <&P8_14_gpio_pin>;
+		pinctrl-2 = <&P8_14_gpio_pu_pin>;
+		pinctrl-3 = <&P8_14_gpio_pd_pin>;
+		pinctrl-4 = <&P8_14_gpio_input_pin>;
+		pinctrl-5 = <&P8_14_pwm_pin>;
+	};
+
+	/* P8_15 (ZCZ ball U13) */
+	P8_15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pru_ecap", "pruin";
+		pinctrl-0 = <&P8_15_default_pin>;
+		pinctrl-1 = <&P8_15_gpio_pin>;
+		pinctrl-2 = <&P8_15_gpio_pu_pin>;
+		pinctrl-3 = <&P8_15_gpio_pd_pin>;
+		pinctrl-4 = <&P8_15_gpio_input_pin>;
+		pinctrl-5 = <&P8_15_qep_pin>;
+		pinctrl-6 = <&P8_15_pru_ecap_pin>;
+		pinctrl-7 = <&P8_15_pruin_pin>;
+	};
+
+	/* P8_16 (ZCZ ball V13) */
+	P8_16_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruin";
+		pinctrl-0 = <&P8_16_default_pin>;
+		pinctrl-1 = <&P8_16_gpio_pin>;
+		pinctrl-2 = <&P8_16_gpio_pu_pin>;
+		pinctrl-3 = <&P8_16_gpio_pd_pin>;
+		pinctrl-4 = <&P8_16_gpio_input_pin>;
+		pinctrl-5 = <&P8_16_qep_pin>;
+		pinctrl-6 = <&P8_16_pruin_pin>;
+	};
+
+	/* P8_17 (ZCZ ball U12) */
+	P8_17_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_17_default_pin>;
+		pinctrl-1 = <&P8_17_gpio_pin>;
+		pinctrl-2 = <&P8_17_gpio_pu_pin>;
+		pinctrl-3 = <&P8_17_gpio_pd_pin>;
+		pinctrl-4 = <&P8_17_gpio_input_pin>;
+		pinctrl-5 = <&P8_17_pwm_pin>;
+	};
+
+	/* P8_18 (ZCZ ball V12) */
+	P8_18_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_18_default_pin>;
+		pinctrl-1 = <&P8_18_gpio_pin>;
+		pinctrl-2 = <&P8_18_gpio_pu_pin>;
+		pinctrl-3 = <&P8_18_gpio_pd_pin>;
+		pinctrl-4 = <&P8_18_gpio_input_pin>;
+	};
+
+	/* P8_19 (ZCZ ball U10) */
+	P8_19_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_19_default_pin>;
+		pinctrl-1 = <&P8_19_gpio_pin>;
+		pinctrl-2 = <&P8_19_gpio_pu_pin>;
+		pinctrl-3 = <&P8_19_gpio_pd_pin>;
+		pinctrl-4 = <&P8_19_gpio_input_pin>;
+		pinctrl-5 = <&P8_19_pwm_pin>;
+	};
+
+	/* P8_20 (ZCZ ball V9) emmc */
+	P8_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_20_default_pin>;
+		pinctrl-1 = <&P8_20_gpio_pin>;
+		pinctrl-2 = <&P8_20_gpio_pu_pin>;
+		pinctrl-3 = <&P8_20_gpio_pd_pin>;
+		pinctrl-4 = <&P8_20_gpio_input_pin>;
+		pinctrl-5 = <&P8_20_pruout_pin>;
+		pinctrl-6 = <&P8_20_pruin_pin>;
+	};
+
+	/* P8_21 (ZCZ ball U9) emmc */
+	P8_21_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_21_default_pin>;
+		pinctrl-1 = <&P8_21_gpio_pin>;
+		pinctrl-2 = <&P8_21_gpio_pu_pin>;
+		pinctrl-3 = <&P8_21_gpio_pd_pin>;
+		pinctrl-4 = <&P8_21_gpio_input_pin>;
+		pinctrl-5 = <&P8_21_pruout_pin>;
+		pinctrl-6 = <&P8_21_pruin_pin>;
+	};
+
+	/* P8_22 (ZCZ ball V8) emmc */
+	P8_22_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_22_default_pin>;
+		pinctrl-1 = <&P8_22_gpio_pin>;
+		pinctrl-2 = <&P8_22_gpio_pu_pin>;
+		pinctrl-3 = <&P8_22_gpio_pd_pin>;
+		pinctrl-4 = <&P8_22_gpio_input_pin>;
+	};
+
+	/* P8_23 (ZCZ ball U8) emmc */
+	P8_23_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_23_default_pin>;
+		pinctrl-1 = <&P8_23_gpio_pin>;
+		pinctrl-2 = <&P8_23_gpio_pu_pin>;
+		pinctrl-3 = <&P8_23_gpio_pd_pin>;
+		pinctrl-4 = <&P8_23_gpio_input_pin>;
+	};
+
+	/* P8_24 (ZCZ ball V7) emmc */
+	P8_24_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_24_default_pin>;
+		pinctrl-1 = <&P8_24_gpio_pin>;
+		pinctrl-2 = <&P8_24_gpio_pu_pin>;
+		pinctrl-3 = <&P8_24_gpio_pd_pin>;
+		pinctrl-4 = <&P8_24_gpio_input_pin>;
+	};
+
+	/* P8_25 (ZCZ ball U7) emmc */
+	P8_25_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_25_default_pin>;
+		pinctrl-1 = <&P8_25_gpio_pin>;
+		pinctrl-2 = <&P8_25_gpio_pu_pin>;
+		pinctrl-3 = <&P8_25_gpio_pd_pin>;
+		pinctrl-4 = <&P8_25_gpio_input_pin>;
+	};
+
+	/* P8_26 (ZCZ ball V6) */
+	P8_26_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_26_default_pin>;
+		pinctrl-1 = <&P8_26_gpio_pin>;
+		pinctrl-2 = <&P8_26_gpio_pu_pin>;
+		pinctrl-3 = <&P8_26_gpio_pd_pin>;
+		pinctrl-4 = <&P8_26_gpio_input_pin>;
+	};
+
+	/* P8_27 (ZCZ ball U5) hdmi */
+	P8_27_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_27_default_pin>;
+		pinctrl-1 = <&P8_27_gpio_pin>;
+		pinctrl-2 = <&P8_27_gpio_pu_pin>;
+		pinctrl-3 = <&P8_27_gpio_pd_pin>;
+		pinctrl-4 = <&P8_27_gpio_input_pin>;
+		pinctrl-5 = <&P8_27_pruout_pin>;
+		pinctrl-6 = <&P8_27_pruin_pin>;
+	};
+
+	/* P8_28 (ZCZ ball V5) hdmi */
+	P8_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_28_default_pin>;
+		pinctrl-1 = <&P8_28_gpio_pin>;
+		pinctrl-2 = <&P8_28_gpio_pu_pin>;
+		pinctrl-3 = <&P8_28_gpio_pd_pin>;
+		pinctrl-4 = <&P8_28_gpio_input_pin>;
+		pinctrl-5 = <&P8_28_pruout_pin>;
+		pinctrl-6 = <&P8_28_pruin_pin>;
+	};
+
+	/* P8_29 (ZCZ ball R5) hdmi */
+	P8_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_29_default_pin>;
+		pinctrl-1 = <&P8_29_gpio_pin>;
+		pinctrl-2 = <&P8_29_gpio_pu_pin>;
+		pinctrl-3 = <&P8_29_gpio_pd_pin>;
+		pinctrl-4 = <&P8_29_gpio_input_pin>;
+		pinctrl-5 = <&P8_29_pruout_pin>;
+		pinctrl-6 = <&P8_29_pruin_pin>;
+	};
+
+	/* P8_30 (ZCZ ball R6) hdmi */
+	P8_30_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_30_default_pin>;
+		pinctrl-1 = <&P8_30_gpio_pin>;
+		pinctrl-2 = <&P8_30_gpio_pu_pin>;
+		pinctrl-3 = <&P8_30_gpio_pd_pin>;
+		pinctrl-4 = <&P8_30_gpio_input_pin>;
+		pinctrl-5 = <&P8_30_pruout_pin>;
+		pinctrl-6 = <&P8_30_pruin_pin>;
+	};
+
+	/* P8_31 (ZCZ ball V4) hdmi */
+	P8_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "qep";
+		pinctrl-0 = <&P8_31_default_pin>;
+		pinctrl-1 = <&P8_31_gpio_pin>;
+		pinctrl-2 = <&P8_31_gpio_pu_pin>;
+		pinctrl-3 = <&P8_31_gpio_pd_pin>;
+		pinctrl-4 = <&P8_31_gpio_input_pin>;
+		pinctrl-5 = <&P8_31_uart_pin>;
+		pinctrl-6 = <&P8_31_qep_pin>;
+	};
+
+	/* P8_32 (ZCZ ball T5) hdmi */
+	P8_32_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_32_default_pin>;
+		pinctrl-1 = <&P8_32_gpio_pin>;
+		pinctrl-2 = <&P8_32_gpio_pu_pin>;
+		pinctrl-3 = <&P8_32_gpio_pd_pin>;
+		pinctrl-4 = <&P8_32_gpio_input_pin>;
+		pinctrl-5 = <&P8_32_qep_pin>;
+	};
+
+	/* P8_33 (ZCZ ball V3) hdmi */
+	P8_33_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_33_default_pin>;
+		pinctrl-1 = <&P8_33_gpio_pin>;
+		pinctrl-2 = <&P8_33_gpio_pu_pin>;
+		pinctrl-3 = <&P8_33_gpio_pd_pin>;
+		pinctrl-4 = <&P8_33_gpio_input_pin>;
+		pinctrl-5 = <&P8_33_qep_pin>;
+	};
+
+	/* P8_34 (ZCZ ball U4) hdmi */
+	P8_34_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_34_default_pin>;
+		pinctrl-1 = <&P8_34_gpio_pin>;
+		pinctrl-2 = <&P8_34_gpio_pu_pin>;
+		pinctrl-3 = <&P8_34_gpio_pd_pin>;
+		pinctrl-4 = <&P8_34_gpio_input_pin>;
+		pinctrl-5 = <&P8_34_pwm_pin>;
+	};
+
+	/* P8_35 (ZCZ ball V2) hdmi */
+	P8_35_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_35_default_pin>;
+		pinctrl-1 = <&P8_35_gpio_pin>;
+		pinctrl-2 = <&P8_35_gpio_pu_pin>;
+		pinctrl-3 = <&P8_35_gpio_pd_pin>;
+		pinctrl-4 = <&P8_35_gpio_input_pin>;
+		pinctrl-5 = <&P8_35_qep_pin>;
+	};
+
+	/* P8_36 (ZCZ ball U3) hdmi */
+	P8_36_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_36_default_pin>;
+		pinctrl-1 = <&P8_36_gpio_pin>;
+		pinctrl-2 = <&P8_36_gpio_pu_pin>;
+		pinctrl-3 = <&P8_36_gpio_pd_pin>;
+		pinctrl-4 = <&P8_36_gpio_input_pin>;
+		pinctrl-5 = <&P8_36_pwm_pin>;
+	};
+
+	/* P8_37 (ZCZ ball U1) hdmi */
+	P8_37_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "pwm";
+		pinctrl-0 = <&P8_37_default_pin>;
+		pinctrl-1 = <&P8_37_gpio_pin>;
+		pinctrl-2 = <&P8_37_gpio_pu_pin>;
+		pinctrl-3 = <&P8_37_gpio_pd_pin>;
+		pinctrl-4 = <&P8_37_gpio_input_pin>;
+		pinctrl-5 = <&P8_37_uart_pin>;
+		pinctrl-6 = <&P8_37_pwm_pin>;
+	};
+
+	/* P8_38 (ZCZ ball U2) hdmi */
+	P8_38_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "pwm";
+		pinctrl-0 = <&P8_38_default_pin>;
+		pinctrl-1 = <&P8_38_gpio_pin>;
+		pinctrl-2 = <&P8_38_gpio_pu_pin>;
+		pinctrl-3 = <&P8_38_gpio_pd_pin>;
+		pinctrl-4 = <&P8_38_gpio_input_pin>;
+		pinctrl-5 = <&P8_38_uart_pin>;
+		pinctrl-6 = <&P8_38_pwm_pin>;
+	};
+
+	/* P8_39 (ZCZ ball T3) hdmi */
+	P8_39_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_39_default_pin>;
+		pinctrl-1 = <&P8_39_gpio_pin>;
+		pinctrl-2 = <&P8_39_gpio_pu_pin>;
+		pinctrl-3 = <&P8_39_gpio_pd_pin>;
+		pinctrl-4 = <&P8_39_gpio_input_pin>;
+		pinctrl-5 = <&P8_39_qep_pin>;
+		pinctrl-6 = <&P8_39_pruout_pin>;
+		pinctrl-7 = <&P8_39_pruin_pin>;
+	};
+
+	/* P8_40 (ZCZ ball T4) hdmi */
+	P8_40_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_40_default_pin>;
+		pinctrl-1 = <&P8_40_gpio_pin>;
+		pinctrl-2 = <&P8_40_gpio_pu_pin>;
+		pinctrl-3 = <&P8_40_gpio_pd_pin>;
+		pinctrl-4 = <&P8_40_gpio_input_pin>;
+		pinctrl-5 = <&P8_40_qep_pin>;
+		pinctrl-6 = <&P8_40_pruout_pin>;
+		pinctrl-7 = <&P8_40_pruin_pin>;
+	};
+
+	/* P8_41 (ZCZ ball T1) hdmi */
+	P8_41_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_41_default_pin>;
+		pinctrl-1 = <&P8_41_gpio_pin>;
+		pinctrl-2 = <&P8_41_gpio_pu_pin>;
+		pinctrl-3 = <&P8_41_gpio_pd_pin>;
+		pinctrl-4 = <&P8_41_gpio_input_pin>;
+		pinctrl-5 = <&P8_41_qep_pin>;
+		pinctrl-6 = <&P8_41_pruout_pin>;
+		pinctrl-7 = <&P8_41_pruin_pin>;
+	};
+
+	/* P8_42 (ZCZ ball T2) hdmi */
+	P8_42_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_42_default_pin>;
+		pinctrl-1 = <&P8_42_gpio_pin>;
+		pinctrl-2 = <&P8_42_gpio_pu_pin>;
+		pinctrl-3 = <&P8_42_gpio_pd_pin>;
+		pinctrl-4 = <&P8_42_gpio_input_pin>;
+		pinctrl-5 = <&P8_42_qep_pin>;
+		pinctrl-6 = <&P8_42_pruout_pin>;
+		pinctrl-7 = <&P8_42_pruin_pin>;
+	};
+
+	/* P8_43 (ZCZ ball R3) hdmi */
+	P8_43_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_43_default_pin>;
+		pinctrl-1 = <&P8_43_gpio_pin>;
+		pinctrl-2 = <&P8_43_gpio_pu_pin>;
+		pinctrl-3 = <&P8_43_gpio_pd_pin>;
+		pinctrl-4 = <&P8_43_gpio_input_pin>;
+		pinctrl-5 = <&P8_43_pwm_pin>;
+		pinctrl-6 = <&P8_43_pruout_pin>;
+		pinctrl-7 = <&P8_43_pruin_pin>;
+	};
+
+	/* P8_44 (ZCZ ball R4) hdmi */
+	P8_44_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_44_default_pin>;
+		pinctrl-1 = <&P8_44_gpio_pin>;
+		pinctrl-2 = <&P8_44_gpio_pu_pin>;
+		pinctrl-3 = <&P8_44_gpio_pd_pin>;
+		pinctrl-4 = <&P8_44_gpio_input_pin>;
+		pinctrl-5 = <&P8_44_pwm_pin>;
+		pinctrl-6 = <&P8_44_pruout_pin>;
+		pinctrl-7 = <&P8_44_pruin_pin>;
+	};
+
+	/* P8_45 (ZCZ ball R1) hdmi */
+	P8_45_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_45_default_pin>;
+		pinctrl-1 = <&P8_45_gpio_pin>;
+		pinctrl-2 = <&P8_45_gpio_pu_pin>;
+		pinctrl-3 = <&P8_45_gpio_pd_pin>;
+		pinctrl-4 = <&P8_45_gpio_input_pin>;
+		pinctrl-5 = <&P8_45_pwm_pin>;
+		pinctrl-6 = <&P8_45_pruout_pin>;
+		pinctrl-7 = <&P8_45_pruin_pin>;
+	};
+
+	/* P8_46 (ZCZ ball R2) hdmi */
+	P8_46_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_46_default_pin>;
+		pinctrl-1 = <&P8_46_gpio_pin>;
+		pinctrl-2 = <&P8_46_gpio_pu_pin>;
+		pinctrl-3 = <&P8_46_gpio_pd_pin>;
+		pinctrl-4 = <&P8_46_gpio_input_pin>;
+		pinctrl-5 = <&P8_46_pwm_pin>;
+		pinctrl-6 = <&P8_46_pruout_pin>;
+		pinctrl-7 = <&P8_46_pruin_pin>;
+	};
+
+	/************************/
+	/* P9 Header */
+	/************************/
+
+	/* P9_01                GND */
+
+	/* P9_02                GND */
+
+	/* P9_03                3V3 */
+
+	/* P9_04                3V3 */
+
+	/* P9_05                VDD_5V */
+
+	/* P9_06                VDD_5V */
+
+	/* P9_07                SYS_5V */
+
+	/* P9_08                SYS_5V */
+
+	/* P9_09                PWR_BUT */
+
+	/* P9_10                RSTn */
+
+	/* P9_11 (ZCZ ball T17) */
+	P9_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P9_11_default_pin>;
+		pinctrl-1 = <&P9_11_gpio_pin>;
+		pinctrl-2 = <&P9_11_gpio_pu_pin>;
+		pinctrl-3 = <&P9_11_gpio_pd_pin>;
+		pinctrl-4 = <&P9_11_gpio_input_pin>;
+		pinctrl-5 = <&P9_11_uart_pin>;
+	};
+
+	/* P9_12 (ZCZ ball U18) */
+	P9_12_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P9_12_default_pin>;
+		pinctrl-1 = <&P9_12_gpio_pin>;
+		pinctrl-2 = <&P9_12_gpio_pu_pin>;
+		pinctrl-3 = <&P9_12_gpio_pd_pin>;
+		pinctrl-4 = <&P9_12_gpio_input_pin>;
+	};
+
+	/* P9_13 (ZCZ ball U17) */
+	P9_13_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P9_13_default_pin>;
+		pinctrl-1 = <&P9_13_gpio_pin>;
+		pinctrl-2 = <&P9_13_gpio_pu_pin>;
+		pinctrl-3 = <&P9_13_gpio_pd_pin>;
+		pinctrl-4 = <&P9_13_gpio_input_pin>;
+		pinctrl-5 = <&P9_13_uart_pin>;
+	};
+
+	/* P9_14 (ZCZ ball U14) */
+	P9_14_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_14_default_pin>;
+		pinctrl-1 = <&P9_14_gpio_pin>;
+		pinctrl-2 = <&P9_14_gpio_pu_pin>;
+		pinctrl-3 = <&P9_14_gpio_pd_pin>;
+		pinctrl-4 = <&P9_14_gpio_input_pin>;
+		pinctrl-5 = <&P9_14_pwm_pin>;
+	};
+
+	/* P9_15 (ZCZ ball R13) */
+	P9_15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_15_default_pin>;
+		pinctrl-1 = <&P9_15_gpio_pin>;
+		pinctrl-2 = <&P9_15_gpio_pu_pin>;
+		pinctrl-3 = <&P9_15_gpio_pd_pin>;
+		pinctrl-4 = <&P9_15_gpio_input_pin>;
+		pinctrl-5 = <&P9_15_pwm_pin>;
+	};
+
+	/* P9_16 (ZCZ ball T14) */
+	P9_16_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_16_default_pin>;
+		pinctrl-1 = <&P9_16_gpio_pin>;
+		pinctrl-2 = <&P9_16_gpio_pu_pin>;
+		pinctrl-3 = <&P9_16_gpio_pd_pin>;
+		pinctrl-4 = <&P9_16_gpio_input_pin>;
+		pinctrl-5 = <&P9_16_pwm_pin>;
+	};
+
+	/* P9_17 (ZCZ ball A16) */
+	P9_17_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_17_default_pin>;
+		pinctrl-1 = <&P9_17_gpio_pin>;
+		pinctrl-2 = <&P9_17_gpio_pu_pin>;
+		pinctrl-3 = <&P9_17_gpio_pd_pin>;
+		pinctrl-4 = <&P9_17_gpio_input_pin>;
+		pinctrl-5 = <&P9_17_spi_cs_pin>;
+		pinctrl-6 = <&P9_17_i2c_pin>;
+		pinctrl-7 = <&P9_17_pwm_pin>;
+		pinctrl-8 = <&P9_17_pru_uart_pin>;
+	};
+
+	/* P9_18 (ZCZ ball B16) */
+	P9_18_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_18_default_pin>;
+		pinctrl-1 = <&P9_18_gpio_pin>;
+		pinctrl-2 = <&P9_18_gpio_pu_pin>;
+		pinctrl-3 = <&P9_18_gpio_pd_pin>;
+		pinctrl-4 = <&P9_18_gpio_input_pin>;
+		pinctrl-5 = <&P9_18_spi_pin>;
+		pinctrl-6 = <&P9_18_i2c_pin>;
+		pinctrl-7 = <&P9_18_pwm_pin>;
+		pinctrl-8 = <&P9_18_pru_uart_pin>;
+	};
+
+	/* P9_19 (ZCZ ball D17) i2c */
+	P9_19_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart", "timer";
+		pinctrl-0 = <&P9_19_default_pin>;
+		pinctrl-1 = <&P9_19_gpio_pin>;
+		pinctrl-2 = <&P9_19_gpio_pu_pin>;
+		pinctrl-3 = <&P9_19_gpio_pd_pin>;
+		pinctrl-4 = <&P9_19_gpio_input_pin>;
+		pinctrl-5 = <&P9_19_spi_cs_pin>;
+		pinctrl-6 = <&P9_19_can_pin>;
+		pinctrl-7 = <&P9_19_i2c_pin>;
+		pinctrl-8 = <&P9_19_pru_uart_pin>;
+		pinctrl-9 = <&P9_19_timer_pin>;
+	};
+
+	/* P9_20 (ZCZ ball D18) i2c */
+	P9_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart", "timer";
+		pinctrl-0 = <&P9_20_default_pin>;
+		pinctrl-1 = <&P9_20_gpio_pin>;
+		pinctrl-2 = <&P9_20_gpio_pu_pin>;
+		pinctrl-3 = <&P9_20_gpio_pd_pin>;
+		pinctrl-4 = <&P9_20_gpio_input_pin>;
+		pinctrl-5 = <&P9_20_spi_cs_pin>;
+		pinctrl-6 = <&P9_20_can_pin>;
+		pinctrl-7 = <&P9_20_i2c_pin>;
+		pinctrl-8 = <&P9_20_pru_uart_pin>;
+		pinctrl-9 = <&P9_20_timer_pin>;
+	};
+
+	/* P9_21 (ZCZ ball B17) */
+	P9_21_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_21_default_pin>;
+		pinctrl-1 = <&P9_21_gpio_pin>;
+		pinctrl-2 = <&P9_21_gpio_pu_pin>;
+		pinctrl-3 = <&P9_21_gpio_pd_pin>;
+		pinctrl-4 = <&P9_21_gpio_input_pin>;
+		pinctrl-5 = <&P9_21_spi_pin>;
+		pinctrl-6 = <&P9_21_uart_pin>;
+		pinctrl-7 = <&P9_21_i2c_pin>;
+		pinctrl-8 = <&P9_21_pwm_pin>;
+		pinctrl-9 = <&P9_21_pru_uart_pin>;
+	};
+
+	/* P9_22 (ZCZ ball A17) */
+	P9_22_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_22_default_pin>;
+		pinctrl-1 = <&P9_22_gpio_pin>;
+		pinctrl-2 = <&P9_22_gpio_pu_pin>;
+		pinctrl-3 = <&P9_22_gpio_pd_pin>;
+		pinctrl-4 = <&P9_22_gpio_input_pin>;
+		pinctrl-5 = <&P9_22_spi_sclk_pin>;
+		pinctrl-6 = <&P9_22_uart_pin>;
+		pinctrl-7 = <&P9_22_i2c_pin>;
+		pinctrl-8 = <&P9_22_pwm_pin>;
+		pinctrl-9 = <&P9_22_pru_uart_pin>;
+	};
+
+	/* P9_23 (ZCZ ball V14) */
+	P9_23_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_23_default_pin>;
+		pinctrl-1 = <&P9_23_gpio_pin>;
+		pinctrl-2 = <&P9_23_gpio_pu_pin>;
+		pinctrl-3 = <&P9_23_gpio_pd_pin>;
+		pinctrl-4 = <&P9_23_gpio_input_pin>;
+		pinctrl-5 = <&P9_23_pwm_pin>;
+	};
+
+	/* P9_24 (ZCZ ball D15) */
+	P9_24_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P9_24_default_pin>;
+		pinctrl-1 = <&P9_24_gpio_pin>;
+		pinctrl-2 = <&P9_24_gpio_pu_pin>;
+		pinctrl-3 = <&P9_24_gpio_pd_pin>;
+		pinctrl-4 = <&P9_24_gpio_input_pin>;
+		pinctrl-5 = <&P9_24_uart_pin>;
+		pinctrl-6 = <&P9_24_can_pin>;
+		pinctrl-7 = <&P9_24_i2c_pin>;
+		pinctrl-8 = <&P9_24_pru_uart_pin>;
+		pinctrl-9 = <&P9_24_pruin_pin>;
+	};
+
+	/* P9_25 (ZCZ ball A14) audio */
+	P9_25_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_25_default_pin>;
+		pinctrl-1 = <&P9_25_gpio_pin>;
+		pinctrl-2 = <&P9_25_gpio_pu_pin>;
+		pinctrl-3 = <&P9_25_gpio_pd_pin>;
+		pinctrl-4 = <&P9_25_gpio_input_pin>;
+		pinctrl-5 = <&P9_25_qep_pin>;
+		pinctrl-6 = <&P9_25_pruout_pin>;
+		pinctrl-7 = <&P9_25_pruin_pin>;
+	};
+
+	/* P9_26 (ZCZ ball D16) */
+	P9_26_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P9_26_default_pin>;
+		pinctrl-1 = <&P9_26_gpio_pin>;
+		pinctrl-2 = <&P9_26_gpio_pu_pin>;
+		pinctrl-3 = <&P9_26_gpio_pd_pin>;
+		pinctrl-4 = <&P9_26_gpio_input_pin>;
+		pinctrl-5 = <&P9_26_uart_pin>;
+		pinctrl-6 = <&P9_26_can_pin>;
+		pinctrl-7 = <&P9_26_i2c_pin>;
+		pinctrl-8 = <&P9_26_pru_uart_pin>;
+		pinctrl-9 = <&P9_26_pruin_pin>;
+	};
+
+	/* P9_27 (ZCZ ball C13) */
+	P9_27_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_27_default_pin>;
+		pinctrl-1 = <&P9_27_gpio_pin>;
+		pinctrl-2 = <&P9_27_gpio_pu_pin>;
+		pinctrl-3 = <&P9_27_gpio_pd_pin>;
+		pinctrl-4 = <&P9_27_gpio_input_pin>;
+		pinctrl-5 = <&P9_27_qep_pin>;
+		pinctrl-6 = <&P9_27_pruout_pin>;
+		pinctrl-7 = <&P9_27_pruin_pin>;
+	};
+
+	/* P9_28 (ZCZ ball C12) audio */
+	P9_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "pwm", "pwm2", "pruout", "pruin";
+		pinctrl-0 = <&P9_28_default_pin>;
+		pinctrl-1 = <&P9_28_gpio_pin>;
+		pinctrl-2 = <&P9_28_gpio_pu_pin>;
+		pinctrl-3 = <&P9_28_gpio_pd_pin>;
+		pinctrl-4 = <&P9_28_gpio_input_pin>;
+		pinctrl-5 = <&P9_28_spi_cs_pin>;
+		pinctrl-6 = <&P9_28_pwm_pin>;
+		pinctrl-7 = <&P9_28_pwm2_pin>;
+		pinctrl-8 = <&P9_28_pruout_pin>;
+		pinctrl-9 = <&P9_28_pruin_pin>;
+	};
+
+	/* P9_29 (ZCZ ball B13) audio */
+	P9_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P9_29_default_pin>;
+		pinctrl-1 = <&P9_29_gpio_pin>;
+		pinctrl-2 = <&P9_29_gpio_pu_pin>;
+		pinctrl-3 = <&P9_29_gpio_pd_pin>;
+		pinctrl-4 = <&P9_29_gpio_input_pin>;
+		pinctrl-5 = <&P9_29_spi_pin>;
+		pinctrl-6 = <&P9_29_pwm_pin>;
+		pinctrl-7 = <&P9_29_pruout_pin>;
+		pinctrl-8 = <&P9_29_pruin_pin>;
+	};
+
+	/* P9_30 (ZCZ ball D12) */
+	P9_30_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P9_30_default_pin>;
+		pinctrl-1 = <&P9_30_gpio_pin>;
+		pinctrl-2 = <&P9_30_gpio_pu_pin>;
+		pinctrl-3 = <&P9_30_gpio_pd_pin>;
+		pinctrl-4 = <&P9_30_gpio_input_pin>;
+		pinctrl-5 = <&P9_30_spi_pin>;
+		pinctrl-6 = <&P9_30_pwm_pin>;
+		pinctrl-7 = <&P9_30_pruout_pin>;
+		pinctrl-8 = <&P9_30_pruin_pin>;
+	};
+
+	/* P9_31 (ZCZ ball A13) audio */
+	P9_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P9_31_default_pin>;
+		pinctrl-1 = <&P9_31_gpio_pin>;
+		pinctrl-2 = <&P9_31_gpio_pu_pin>;
+		pinctrl-3 = <&P9_31_gpio_pd_pin>;
+		pinctrl-4 = <&P9_31_gpio_input_pin>;
+		pinctrl-5 = <&P9_31_spi_sclk_pin>;
+		pinctrl-6 = <&P9_31_pwm_pin>;
+		pinctrl-7 = <&P9_31_pruout_pin>;
+		pinctrl-8 = <&P9_31_pruin_pin>;
+	};
+
+	/* P9_32                VADC */
+
+	/* P9_33 (ZCZ ball C8)  AIN4         */
+
+	/* P9_34                AGND */
+
+	/* P9_35 (ZCZ ball A8)  AIN6         */
+
+	/* P9_36 (ZCZ ball B8)  AIN5         */
+
+	/* P9_37 (ZCZ ball B7)  AIN2         */
+
+	/* P9_38 (ZCZ ball A7)  AIN3         */
+
+	/* P9_39 (ZCZ ball B6)  AIN0         */
+
+	/* P9_40 (ZCZ ball C7)  AIN1         */
+
+	/* P9_41 (ZCZ ball D14) */
+	P9_41_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer", "pruin";
+		pinctrl-0 = <&P9_41_default_pin>;
+		pinctrl-1 = <&P9_41_gpio_pin>;
+		pinctrl-2 = <&P9_41_gpio_pu_pin>;
+		pinctrl-3 = <&P9_41_gpio_pd_pin>;
+		pinctrl-4 = <&P9_41_gpio_input_pin>;
+		pinctrl-5 = <&P9_41_timer_pin>;
+		pinctrl-6 = <&P9_41_pruin_pin>;
+	};
+
+	/* P9_41.1 */
+	/* P9_91 (ZCZ ball D13) */
+	P9_91_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_91_default_pin>;
+		pinctrl-1 = <&P9_91_gpio_pin>;
+		pinctrl-2 = <&P9_91_gpio_pu_pin>;
+		pinctrl-3 = <&P9_91_gpio_pd_pin>;
+		pinctrl-4 = <&P9_91_gpio_input_pin>;
+		pinctrl-5 = <&P9_91_qep_pin>;
+		pinctrl-6 = <&P9_91_pruout_pin>;
+		pinctrl-7 = <&P9_91_pruin_pin>;
+	};
+
+	/* P9_42 (ZCZ ball C18) */
+	P9_42_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "spi_sclk", "uart", "pwm", "pru_ecap";
+		pinctrl-0 = <&P9_42_default_pin>;
+		pinctrl-1 = <&P9_42_gpio_pin>;
+		pinctrl-2 = <&P9_42_gpio_pu_pin>;
+		pinctrl-3 = <&P9_42_gpio_pd_pin>;
+		pinctrl-4 = <&P9_42_gpio_input_pin>;
+		pinctrl-5 = <&P9_42_spi_cs_pin>;
+		pinctrl-6 = <&P9_42_spi_sclk_pin>;
+		pinctrl-7 = <&P9_42_uart_pin>;
+		pinctrl-8 = <&P9_42_pwm_pin>;
+		pinctrl-9 = <&P9_42_pru_ecap_pin>;
+	};
+
+	/* P9_42.1 */
+	/* P9_92 (ZCZ ball B12) */
+	P9_92_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_92_default_pin>;
+		pinctrl-1 = <&P9_92_gpio_pin>;
+		pinctrl-2 = <&P9_92_gpio_pu_pin>;
+		pinctrl-3 = <&P9_92_gpio_pd_pin>;
+		pinctrl-4 = <&P9_92_gpio_input_pin>;
+		pinctrl-5 = <&P9_92_qep_pin>;
+		pinctrl-6 = <&P9_92_pruout_pin>;
+		pinctrl-7 = <&P9_92_pruin_pin>;
+	};
+
+	/* P9_43                GND */
+
+	/* P9_44                GND */
+
+	/* P9_45                GND */
+
+	/* P9_46                GND */
+
+	/*       (ZCZ ball A15) */
+	A15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "clkout", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&A15_default_pin>;
+		pinctrl-1 = <&A15_clkout_pin>;
+		pinctrl-2 = <&A15_gpio_pin>;
+		pinctrl-3 = <&A15_gpio_pu_pin>;
+		pinctrl-4 = <&A15_gpio_pd_pin>;
+	};
+
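+	/* Export the header GPIOs through the (out-of-tree) gpio-of-helper  */
+	/* driver: each child node claims the named line, configures it as   */
+	/* an input, and allows userspace to change its direction later      */
+	/* ("dir-changeable").                                               */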
+	cape-universal {
+		compatible = "gpio-of-helper";
+		status = "okay";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		P8_03 {
+			gpio-name = "P8_03";
+			gpio = <&gpio1 6 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_04 {
+			gpio-name = "P8_04";
+			gpio = <&gpio1 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_05 {
+			gpio-name = "P8_05";
+			gpio = <&gpio1 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_06 {
+			gpio-name = "P8_06";
+			gpio = <&gpio1 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_07 {
+			gpio-name = "P8_07";
+			gpio = <&gpio2 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_08 {
+			gpio-name = "P8_08";
+			gpio = <&gpio2 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_09 {
+			gpio-name = "P8_09";
+			gpio = <&gpio2 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_10 {
+			gpio-name = "P8_10";
+			gpio = <&gpio2 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_11 {
+			gpio-name = "P8_11";
+			gpio = <&gpio1 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_12 {
+			gpio-name = "P8_12";
+			gpio = <&gpio1 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_13 {
+			gpio-name = "P8_13";
+			gpio = <&gpio0 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_14 {
+			gpio-name = "P8_14";
+			gpio = <&gpio0 26 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_15 {
+			gpio-name = "P8_15";
+			gpio = <&gpio1 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_16 {
+			gpio-name = "P8_16";
+			gpio = <&gpio1 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_17 {
+			gpio-name = "P8_17";
+			gpio = <&gpio0 27 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_18 {
+			gpio-name = "P8_18";
+			gpio = <&gpio2 1 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_19 {
+			gpio-name = "P8_19";
+			gpio = <&gpio0 22 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_20 {
+			gpio-name = "P8_20";
+			gpio = <&gpio1 31 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_21 {
+			gpio-name = "P8_21";
+			gpio = <&gpio1 30 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_22 {
+			gpio-name = "P8_22";
+			gpio = <&gpio1 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_23 {
+			gpio-name = "P8_23";
+			gpio = <&gpio1 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_24 {
+			gpio-name = "P8_24";
+			gpio = <&gpio1 1 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_25 {
+			gpio-name = "P8_25";
+			gpio = <&gpio1 0 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_26 {
+			gpio-name = "P8_26";
+			gpio = <&gpio1 29 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_27 {
+			gpio-name = "P8_27";
+			gpio = <&gpio2 22 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_28 {
+			gpio-name = "P8_28";
+			gpio = <&gpio2 24 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_29 {
+			gpio-name = "P8_29";
+			gpio = <&gpio2 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_30 {
+			gpio-name = "P8_30";
+			gpio = <&gpio2 25 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_31 {
+			gpio-name = "P8_31";
+			gpio = <&gpio0 10 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_32 {
+			gpio-name = "P8_32";
+			gpio = <&gpio0 11 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_33 {
+			gpio-name = "P8_33";
+			gpio = <&gpio0 9 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_34 {
+			gpio-name = "P8_34";
+			gpio = <&gpio2 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_35 {
+			gpio-name = "P8_35";
+			gpio = <&gpio0 8 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_36 {
+			gpio-name = "P8_36";
+			gpio = <&gpio2 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_37 {
+			gpio-name = "P8_37";
+			gpio = <&gpio2 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_38 {
+			gpio-name = "P8_38";
+			gpio = <&gpio2 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_39 {
+			gpio-name = "P8_39";
+			gpio = <&gpio2 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_40 {
+			gpio-name = "P8_40";
+			gpio = <&gpio2 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_41 {
+			gpio-name = "P8_41";
+			gpio = <&gpio2 10 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_42 {
+			gpio-name = "P8_42";
+			gpio = <&gpio2 11 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_43 {
+			gpio-name = "P8_43";
+			gpio = <&gpio2 8 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_44 {
+			gpio-name = "P8_44";
+			gpio = <&gpio2 9 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_45 {
+			gpio-name = "P8_45";
+			gpio = <&gpio2 6 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_46 {
+			gpio-name = "P8_46";
+			gpio = <&gpio2 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_11 {
+			gpio-name = "P9_11";
+			gpio = <&gpio0 30 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_12 {
+			gpio-name = "P9_12";
+			gpio = <&gpio1 28 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_13 {
+			gpio-name = "P9_13";
+			gpio = <&gpio0 31 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_14 {
+			gpio-name = "P9_14";
+			gpio = <&gpio1 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_15 {
+			gpio-name = "P9_15";
+			gpio = <&gpio1 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_16 {
+			gpio-name = "P9_16";
+			gpio = <&gpio1 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_17 {
+			gpio-name = "P9_17";
+			gpio = <&gpio0 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_18 {
+			gpio-name = "P9_18";
+			gpio = <&gpio0 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_19 {
+			gpio-name = "P9_19";
+			gpio = <&gpio0 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_20 {
+			gpio-name = "P9_20";
+			gpio = <&gpio0 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_21 {
+			gpio-name = "P9_21";
+			gpio = <&gpio0 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_22 {
+			gpio-name = "P9_22";
+			gpio = <&gpio0 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_23 {
+			gpio-name = "P9_23";
+			gpio = <&gpio1 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_24 {
+			gpio-name = "P9_24";
+			gpio = <&gpio0 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_25 {
+			gpio-name = "P9_25";
+			gpio = <&gpio3 21 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_26 {
+			gpio-name = "P9_26";
+			gpio = <&gpio0 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_27 {
+			gpio-name = "P9_27";
+			gpio = <&gpio3 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_28 {
+			gpio-name = "P9_28";
+			gpio = <&gpio3 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_29 {
+			gpio-name = "P9_29";
+			gpio = <&gpio3 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_30 {
+			gpio-name = "P9_30";
+			gpio = <&gpio3 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_31 {
+			gpio-name = "P9_31";
+			gpio = <&gpio3 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_41 {
+			gpio-name = "P9_41";
+			gpio = <&gpio0 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_91 {
+			gpio-name = "P9_91";
+			gpio = <&gpio3 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_42 {
+			gpio-name = "P9_42";
+			gpio = <&gpio0 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_92 {
+			gpio-name = "P9_92";
+			gpio = <&gpio3 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		A15 {
+			gpio-name = "A15";
+			gpio = <&gpio0 19 0>;
+			input;
+			dir-changeable;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 6c9187bc0f1..f4063d8c7d3 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -26,14 +26,14 @@ leds {
 		compatible = "gpio-leds";
 
 		led2 {
-			label = "beaglebone:green:heartbeat";
+			label = "beaglebone:green:usr0";
 			gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
 			linux,default-trigger = "heartbeat";
 			default-state = "off";
 		};
 
 		led3 {
-			label = "beaglebone:green:mmc0";
+			label = "beaglebone:green:usr1";
 			gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
 			linux,default-trigger = "mmc0";
 			default-state = "off";
@@ -63,9 +63,6 @@ vmmcsd_fixed: fixedregulator0 {
 };
 
 &am33xx_pinmux {
-	pinctrl-names = "default";
-	pinctrl-0 = <&clkout2_pin>;
-
 	user_leds_s0: user_leds_s0 {
 		pinctrl-single,pins = <
 			AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7)	/* gpmc_a5.gpio1_21 */
@@ -96,12 +93,6 @@ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
 		>;
 	};
 
-	clkout2_pin: pinmux_clkout2_pin {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3)	/* xdma_event_intr1.clkout2 */
-		>;
-	};
-
 	cpsw_default: cpsw_default {
 		pinctrl-single,pins = <
 			/* Slave 1 */
@@ -396,4 +387,5 @@ &sham {
 &rtc {
 	clocks = <&clk_32768_ck>, <&clk_24mhz_clkctrl AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL 0>;
 	clock-names = "ext-clk", "int-clk";
+	system-power-controller;
 };
diff --git a/arch/arm/boot/dts/am335x-bone-jtag.dtsi b/arch/arm/boot/dts/am335x-bone-jtag.dtsi
new file mode 100644
index 00000000000..2cdca014171
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-jtag.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Robert Nelson <robertcnelson@gmail.com>
+ */
+
+&am33xx_pinmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&clkout2_pin>;
+
+	clkout2_pin: pinmux_clkout2_pin {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3)     /* xdma_event_intr1.clkout2 */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bone-uboot-univ.dts b/arch/arm/boot/dts/am335x-bone-uboot-univ.dts
new file mode 100644
index 00000000000..87559166d38
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-uboot-univ.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+#include "am335x-bone-common-univ.dtsi"
+
+/ {
+	model = "TI AM335x BeagleBone";
+	compatible = "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-bone-uboot-univ.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+};
+
+&ldo3_reg {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-always-on;
+};
+
+&mmc1 {
+	vmmc-supply = <&ldo3_reg>;
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 43bfbce4104..c7ae3e59d29 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -6,10 +6,16 @@
 
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "TI AM335x BeagleBone";
 	compatible = "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-bone.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &ldo3_reg {
diff --git a/arch/arm/boot/dts/am335x-boneblack-common.dtsi b/arch/arm/boot/dts/am335x-boneblack-common.dtsi
index 91f93bc8971..94f4b6d437f 100644
--- a/arch/arm/boot/dts/am335x-boneblack-common.dtsi
+++ b/arch/arm/boot/dts/am335x-boneblack-common.dtsi
@@ -27,7 +27,6 @@ &mmc2 {
 &am33xx_pinmux {
 	nxp_hdmi_bonelt_pins: nxp_hdmi_bonelt_pins {
 		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
@@ -51,12 +50,6 @@ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
 		>;
 	};
 
-	nxp_hdmi_bonelt_off_pins: nxp_hdmi_bonelt_off_pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
-		>;
-	};
-
 	mcasp0_pins: mcasp0_pins {
 		pinctrl-single,pins = <
 			AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
@@ -92,9 +85,8 @@ tda19988: tda19988@70 {
 		nxp,calib-gpios = <&gpio1 25 0>;
 		interrupts-extended = <&gpio1 25 IRQ_TYPE_LEVEL_LOW>;
 
-		pinctrl-names = "default", "off";
+		pinctrl-names = "default";
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
-		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
 		/* Convert 24bit BGR to RGB, e.g. cross red and blue wiring */
 		/* video-ports = <0x234501>; */
diff --git a/arch/arm/boot/dts/am335x-boneblack-uboot-univ.dts b/arch/arm/boot/dts/am335x-boneblack-uboot-univ.dts
new file mode 100644
index 00000000000..4e5633e3869
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-boneblack-uboot-univ.dts
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+#include "am335x-bone-common-univ.dtsi"
+
+/ {
+	model = "TI AM335x BeagleBone Black";
+	compatible = "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-boneblack-uboot-univ.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+};
+
+&cpu0_opp_table {
+	/*
+	 * Not all PG 2.0 silicon is rated for 1GHz, but the PG 2.0 parts
+	 * fitted to early BeagleBone Blacks are guaranteed to support the
+	 * 1GHz OPP, so enable it for PG 2.0 on this board.
+	 */
+	oppnitro-1000000000 {
+		opp-supported-hw = <0x06 0x0100>;
+	};
+};
+
+&ldo3_reg {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmcsd_fixed>;
+};
diff --git a/arch/arm/boot/dts/am335x-boneblack-uboot.dts b/arch/arm/boot/dts/am335x-boneblack-uboot.dts
new file mode 100644
index 00000000000..285a9212685
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-boneblack-uboot.dts
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+/* #include "am335x-bone-jtag.dtsi" */
+
+/ {
+	model = "TI AM335x BeagleBone Black";
+	compatible = "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-boneblack-uboot.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+};
+
+&cpu0_opp_table {
+	/*
+	 * Not all PG 2.0 silicon is rated for 1GHz, but the PG 2.0 parts
+	 * fitted to early BeagleBone Blacks are guaranteed to support the
+	 * 1GHz OPP, so enable it for PG 2.0 on this board.
+	 */
+	oppnitro-1000000000 {
+		opp-supported-hw = <0x06 0x0100>;
+	};
+};
+
+&ldo3_reg {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmcsd_fixed>;
+};
diff --git a/arch/arm/boot/dts/am335x-boneblack-wireless.dts b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
index 3124d94c0b3..f98e0cfe14a 100644
--- a/arch/arm/boot/dts/am335x-boneblack-wireless.dts
+++ b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
@@ -8,11 +8,17 @@
 #include "am335x-bone-common.dtsi"
 #include "am335x-boneblack-common.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "TI AM335x BeagleBone Black Wireless";
 	compatible = "ti,am335x-bone-black-wireless", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
 
+	chosen {
+		base_dtb = "am335x-boneblack-wireless.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+
 	wlan_en_reg: fixedregulator@2 {
 		compatible = "regulator-fixed";
 		regulator-name = "wlan-en-regulator";
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index d3928662aed..40b17b95610 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -7,10 +7,16 @@
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
 #include "am335x-boneblack-common.dtsi"
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "TI AM335x BeagleBone Black";
 	compatible = "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-boneblack.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &cpu0_opp_table {
diff --git a/arch/arm/boot/dts/am335x-boneblue.dts b/arch/arm/boot/dts/am335x-boneblue.dts
index 5811fb8d4fd..304a2aeff8b 100644
--- a/arch/arm/boot/dts/am335x-boneblue.dts
+++ b/arch/arm/boot/dts/am335x-boneblue.dts
@@ -14,6 +14,8 @@ / {
 
 	chosen {
 		stdout-path = &uart0;
+		base_dtb = "am335x-boneblue.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
 	};
 
 	leds {
@@ -115,6 +117,153 @@ wlan_en_reg: fixedregulator@2 {
 };
 
 &am33xx_pinmux {
+	/***************************************************************************
+	* Static Pinmux
+	***************************************************************************/
+	mux_helper_pins: pins {
+		pinctrl-single,pins = <
+
+			/* GPIO Inputs */
+			0x09c 0x37	/*P8.9  Pause BUTTON, input pullup*/
+			0x098 0x37	/*P8.10 MODE BUTTON input pullup*/
+			0x1AC 0x37	/*P9.25 MPU-9150 INTERRUPT IN*/
+
+			/* Motor Control GPIO Out*/
+			0x088 ( PIN_OUTPUT | MUX_MODE7 ) /* (T13) gpmc_csn3.gpio2[0] - MDIR_1A different from cape! */
+			0x074 ( PIN_OUTPUT | MUX_MODE7 ) /* (U17) gpmc_wpn.gpio0[31] - P9.13, MDIR_1B */
+			0x040 ( PIN_OUTPUT | MUX_MODE7 ) /* (R13) gpmc_a0.gpio1[16] - P9.15, MDIR_2A */
+			0x0D8 ( PIN_OUTPUT | MUX_MODE7 ) /* (V4) lcd_data14.gpio0[10] - P8.31, MDIR_2B different from cape! */
+			0x0AC ( PIN_OUTPUT | MUX_MODE7 ) /* (R4) lcd_data3.gpio2[9] - P8.44, MDIR_3A */
+			0x0A8 ( PIN_OUTPUT | MUX_MODE7 ) /* (R3) lcd_data2.gpio2[8] - P8.43, MDIR_3B */
+			0x0A0 ( PIN_OUTPUT | MUX_MODE7 ) /* (R1) lcd_data0.gpio2[6] - P8.45, MDIR_4A */
+			0x0A4 ( PIN_OUTPUT | MUX_MODE7 ) /* (R2) lcd_data1.gpio2[7] - P8.46, MDIR_4B */
+			0x1B4 ( PIN_OUTPUT | MUX_MODE7 ) /* (D14) xdma_event_intr1.gpio0[20] - P9.41, MOT_STBY */
+
+			/* PRU encoder input */
+			0x038 0x36	/* P8_16,PRU0_r31_16,MODE6 */
+
+			/* PRU Servo output */
+			0x0e0 0x05	/*pru1_pru_r30_8, MODE5*/
+			0x0e8 0x05	/*pru1_pru_r30_10, MODE5 */
+			0x0e4 0x05	/*pr1_pru1_pru_r30_9, MODE5 */
+			0x0ec 0x05	/*pru1_pru_r30_11, MODE5 */
+			0x0b8 0x05	/*pru1_pru_r30_6, MODE5 */
+			0x0bc 0x05	/*pru1_pru_r30_7, MODE5 */
+			0x0b0 0x05	/*pru1_pru_r30_4, MODE5 */
+			0x0b4 0x05	/*pru1_pru_r30_5, MODE5 */
+			0x0C8 0x0F	/*P8.36, SERVO_PWR GPIO OUT*/
+
+			/* WILINK 8 */
+			0x08c 0x0F	/*P8.18 V12 A2DP FSYNC */
+			0x078 0x0F	/*P9.12 A2DP_CLOCK*/
+		>;
+
+		/* D13 BLUE_GP0_PIN_5 gpio 3_20  */
+		D13_default_pin: pinmux_D13_default_pin {
+			pinctrl-single,pins = < 0x1A8 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		D13_gpio_pin: pinmux_D13_gpio_pin {
+			pinctrl-single,pins = < 0x1A8 ( PIN_OUTPUT | MUX_MODE7 ) >; };
+		D13_gpio_pu_pin: pinmux_D13_gpio_pu_pin {
+			pinctrl-single,pins = < 0x1A8 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		D13_gpio_pd_pin: pinmux_D13_gpio_pd_pin {
+			pinctrl-single,pins = < 0x1A8 ( PIN_INPUT_PULLDOWN | MUX_MODE7 ) >; };
+
+		/* H17 BLUE_GP1_PIN_4 gpio 3_1  */
+		H17_default_pin: pinmux_H17_default_pin {
+			pinctrl-single,pins = < 0x10C ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		H17_gpio_pin: pinmux_H17_gpio_pin {
+			pinctrl-single,pins = < 0x10C ( PIN_OUTPUT | MUX_MODE7 ) >; };
+		H17_gpio_pu_pin: pinmux_H17_gpio_pu_pin {
+			pinctrl-single,pins = < 0x10C ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		H17_gpio_pd_pin: pinmux_H17_gpio_pd_pin {
+			pinctrl-single,pins = < 0x10C ( PIN_INPUT_PULLDOWN | MUX_MODE7 ) >; };
+
+		/* J15 BLUE_GP1_PIN_3 gpio 3_2  */
+		J15_default_pin: pinmux_J15_default_pin {
+			pinctrl-single,pins = < 0x110 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		J15_gpio_pin: pinmux_J15_gpio_pin {
+			pinctrl-single,pins = < 0x110 ( PIN_OUTPUT | MUX_MODE7 ) >; };
+		J15_gpio_pu_pin: pinmux_J15_gpio_pu_pin {
+			pinctrl-single,pins = < 0x110 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		J15_gpio_pd_pin: pinmux_J15_gpio_pd_pin {
+			pinctrl-single,pins = < 0x110 ( PIN_INPUT_PULLDOWN | MUX_MODE7 ) >; };
+
+		/* P8_15 (ZCZ ball U13) */
+		P8_15_default_pin: pinmux_P8_15_default_pin {
+			pinctrl-single,pins = <0x03c  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P8_15_gpio_pin: pinmux_P8_15_gpio_pin {
+			pinctrl-single,pins = <0x03c  0x2F>; };     /* Mode 7, RxActive */
+		P8_15_gpio_pu_pin: pinmux_P8_15_gpio_pu_pin {
+			pinctrl-single,pins = <0x03c  0x37>; };     /* Mode 7, Pull-Up, RxActive */
+		P8_15_gpio_pd_pin: pinmux_P8_15_gpio_pd_pin {
+			pinctrl-single,pins = <0x03c  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P8_15_pruin_pin: pinmux_P8_15_pruin_pin {
+			pinctrl-single,pins = <0x03c  0x26>; };     /* Mode 6, Pull-Down, RxActive */
+		P8_15_qep_pin: pinmux_P8_15_qep_pin {
+			pinctrl-single,pins = <0x03c  0x24>; };     /* Mode 4, Pull-Down, RxActive */
+		P8_15_pruin_pu_pin: pinmux_P8_15_pruin_pu_pin {
+			pinctrl-single,pins = <0x03c  0x36>; };     /* Mode 6, Pull-Up, RxActive */
+		P8_15_pruecapin_pu_pin: pinmux_P8_15_pruecapin_pu_pin {
+			pinctrl-single,pins = <0x03c  0x35>; };     /* Mode 5, Pull-Up, RxActive */
+
+		/* P9_11 (ZCZ ball T17) */
+		P9_11_default_pin: pinmux_P9_11_default_pin {
+			pinctrl-single,pins = <0x070  0x37>; };     /* Mode 7, Pull-Up, RxActive */
+		P9_11_gpio_pin: pinmux_P9_11_gpio_pin {
+			pinctrl-single,pins = <0x070  0x2F>; };     /* Mode 7, RxActive */
+		P9_11_gpio_pu_pin: pinmux_P9_11_gpio_pu_pin {
+			pinctrl-single,pins = <0x070  0x37>; };     /* Mode 7, Pull-Up, RxActive */
+		P9_11_gpio_pd_pin: pinmux_P9_11_gpio_pd_pin {
+			pinctrl-single,pins = <0x070  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P9_11_uart_pin: pinmux_P9_11_uart_pin {
+			pinctrl-single,pins = <0x070  0x36>; };     /* Mode 6, Pull-Up, RxActive */
+
+		/* P9_23 (ZCZ ball V14) */
+		P9_23_default_pin: pinmux_P9_23_default_pin {
+			pinctrl-single,pins = <0x044  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P9_23_gpio_pin: pinmux_P9_23_gpio_pin {
+			pinctrl-single,pins = <0x044  0x2F>; };     /* Mode 7, RxActive */
+		P9_23_gpio_pu_pin: pinmux_P9_23_gpio_pu_pin {
+			pinctrl-single,pins = <0x044  0x37>; };     /* Mode 7, Pull-Up, RxActive */
+		P9_23_gpio_pd_pin: pinmux_P9_23_gpio_pd_pin {
+			pinctrl-single,pins = <0x044  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P9_23_pwm_pin: pinmux_P9_23_pwm_pin {
+			pinctrl-single,pins = <0x044  0x26>; };     /* Mode 6, Pull-Down, RxActive */
+
+		/* P9_28 (ZCZ ball C12) Audio   */
+		P9_28_default_pin: pinmux_P9_28_default_pin {
+			pinctrl-single,pins = <0x19c  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P9_28_gpio_pin: pinmux_P9_28_gpio_pin {
+			pinctrl-single,pins = <0x19c  0x2F>; };     /* Mode 7, RxActive */
+		P9_28_gpio_pu_pin: pinmux_P9_28_gpio_pu_pin {
+			pinctrl-single,pins = <0x19c  0x37>; };     /* Mode 7, Pull-Up, RxActive */
+		P9_28_gpio_pd_pin: pinmux_P9_28_gpio_pd_pin {
+			pinctrl-single,pins = <0x19c  0x27>; };     /* Mode 7, Pull-Down, RxActive */
+		P9_28_pwm_pin: pinmux_P9_28_pwm_pin {
+			pinctrl-single,pins = <0x19c  0x21>; };     /* Mode 1, Pull-Down, RxActive */
+		P9_28_spi_pin: pinmux_P9_28_spi_pin {
+			pinctrl-single,pins = <0x19c  0x23>; };     /* Mode 3, Pull-Down, RxActive */
+		P9_28_pwm2_pin: pinmux_P9_28_pwm2_pin {
+			pinctrl-single,pins = <0x19c  0x24>; };     /* Mode 4, Pull-Down, RxActive */
+		P9_28_pruout_pin: pinmux_P9_28_pruout_pin {
+			pinctrl-single,pins = <0x19c  0x25>; };     /* Mode 5, Pull-Down, RxActive */
+		P9_28_pruin_pin: pinmux_P9_28_pruin_pin {
+			pinctrl-single,pins = <0x19c  0x26>; };     /* Mode 6, Pull-Down, RxActive */
+		P9_28_audio_pin: pinmux_P9_28_audio_pin {
+			pinctrl-single,pins = <0x19c  (PIN_OUTPUT_PULLDOWN | MUX_MODE2)>; };	/* mcasp0_ahclkr.mcasp0_axr2 */
+
+		/* U16 BLUE_GP0_PIN_3 gpio 1_25   */
+		U16_default_pin: pinmux_U16_default_pin {
+			pinctrl-single,pins = < 0x064 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		U16_gpio_pin: pinmux_U16_gpio_pin {
+			pinctrl-single,pins = < 0x064 ( PIN_OUTPUT | MUX_MODE7 ) >; };
+		U16_gpio_pu_pin: pinmux_U16_gpio_pu_pin {
+			pinctrl-single,pins = < 0x064 ( PIN_INPUT_PULLUP | MUX_MODE7 ) >; };
+		U16_gpio_pd_pin: pinmux_U16_gpio_pd_pin {
+			pinctrl-single,pins = < 0x064 ( PIN_INPUT_PULLDOWN | MUX_MODE7 ) >; };
+
+	};
+
 	user_leds_s0: user_leds_s0 {
 		pinctrl-single,pins = <
 			AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
@@ -164,11 +313,11 @@ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT_PULLDOWN, MUX_MODE1)	/* (B17) spi0
 	};
 
 	/* DSM2 */
-	uart4_pins: pinmux_uart4_pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6)	/* (T17) gpmc_wait0.uart4_rxd */
-		>;
-	};
+	//uart4_pins: pinmux_uart4_pins {
+	//	pinctrl-single,pins = <
+	//		AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6)	/* (T17) gpmc_wait0.uart4_rxd */
+	//	>;
+	//};
 
 	/* UT5 */
 	uart5_pins: pinmux_uart5_pins {
@@ -241,6 +390,54 @@ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT, MUX_MODE2)		/* (E18) uart0_cts
 			AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_OUTPUT, MUX_MODE7)		/* (M16) gmii1_rxd0.gpio2[21] */
 		>;
 	};
+
+	/* E1 */
+	eqep0_pins: pinmux_eqep0_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR0, PIN_INPUT, MUX_MODE1)		/* (B12) mcasp0_aclkr.eQEP0A_in */
+			AM33XX_PADCONF(AM335X_PIN_MCASP0_FSR, PIN_INPUT, MUX_MODE1)		/* (C13) mcasp0_fsr.eQEP0B_in */
+		>;
+	};
+
+	/* E2 */
+	eqep1_pins: pinmux_eqep1_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_INPUT, MUX_MODE2)		/* (V2) lcd_data12.eQEP1A_in */
+			AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_INPUT, MUX_MODE2)		/* (V3) lcd_data13.eQEP1B_in */
+		>;
+	};
+
+	/* E3 */
+	eqep2_pins: pinmux_eqep2_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT, MUX_MODE4)		/* (T12) gpmc_ad12.eQEP2A_in */
+			AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT, MUX_MODE4)		/* (R12) gpmc_ad13.eQEP2B_in */
+		>;
+	};
+
+	ehrpwm1_pins: pinmux_ehrpwm1_pins {
+		pinctrl-single,pins = <
+			0x020  0x24 /* P8_19_pwm_pin */
+			0x024  0x24 /* P8_13_pwm_pin */
+		>;
+	};
+
+	ehrpwm2_pins: pinmux_ehrpwm2_pins {
+		pinctrl-single,pins = <
+			0x048  0x26 /* P9_14_pwm_pin */
+			0x04c  0x26 /* P9_16_pwm_pin */
+		>;
+	};
+
+	spi1_pins: pinmux_spi1_pins {
+		pinctrl-single,pins = <
+			0x190  0x23 /* spi1_sclk */
+			0x194  0x23 /* spi1_d0 */
+			0x198  0x23 /* spi1_d1 */
+			0x144 ( PIN_OUTPUT | MUX_MODE2 ) /* spi1_cs0 */
+			0x164 ( PIN_OUTPUT | MUX_MODE2 ) /* spi1_cs1 */
+		>;
+	};
 };
 
 &uart0 {
@@ -264,12 +461,12 @@ &uart2 {
 	status = "okay";
 };
 
-&uart4 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart4_pins>;
-
-	status = "okay";
-};
+//&uart4 {
+//	pinctrl-names = "default";
+//	pinctrl-0 = <&uart4_pins>;
+//
+//	status = "okay";
+//};
 
 &uart5 {
 	pinctrl-names = "default";
@@ -382,12 +579,12 @@ wlcore: wlcore@2 {
 	};
 };
 
-&tscadc {
-	status = "okay";
-	adc {
-		ti,adc-channels = <0 1 2 3 4 5 6 7>;
-	};
-};
+//&tscadc {
+//	status = "okay";
+//	adc {
+//		ti,adc-channels = <0 1 2 3 4 5 6 7>;
+//	};
+//};
 
 &uart3 {
 	pinctrl-names = "default";
@@ -406,6 +603,10 @@ &rtc {
 	clock-names = "ext-clk", "int-clk";
 };
 
+&wkup_m3_ipc {
+	ti,scale-data-fw = "/*(DEBLOBBED)*/";
+};
+
 &dcan1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&dcan1_pins>;
@@ -420,3 +621,193 @@ ls_buf_en {
 		line-name = "LS_BUF_EN";
 	};
 };
+
+&epwmss0 {
+	status = "okay";
+};
+
+&eqep0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&eqep0_pins>;
+};
+
+&epwmss1 {
+	status = "okay";
+};
+
+&eqep1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&eqep1_pins>;
+};
+
+&epwmss2 {
+	status = "okay";
+};
+
+&eqep2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&eqep2_pins>;
+};
+
+&ocp {
+	/* activate the static pinmux helper list of pin modes above */
+	test_helper: helper {
+		compatible = "bone-pinmux-helper";
+		pinctrl-names = "default";
+		pinctrl-0 = <&mux_helper_pins>;
+
+		status = "okay";
+	};
+
+	/* Encoder 4 (U13) */
+	P8_15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "pruin_pu", "gpio", "gpio_pu", "gpio_pd", "pruin", "qep", "pruecapin_pu";
+		pinctrl-0 = <&P8_15_pruin_pu_pin>;
+		pinctrl-1 = <&P8_15_pruin_pu_pin>;
+		pinctrl-2 = <&P8_15_gpio_pin>;
+		pinctrl-3 = <&P8_15_gpio_pu_pin>;
+		pinctrl-4 = <&P8_15_gpio_pd_pin>;
+		pinctrl-5 = <&P8_15_pruin_pin>;
+		pinctrl-6 = <&P8_15_qep_pin>;
+		pinctrl-7 = <&P8_15_pruecapin_pu_pin>;
+	};
+
+	/* UART4 RX DSM */
+	P9_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "uart", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&P9_11_uart_pin>;
+		pinctrl-1 = <&P9_11_uart_pin>;
+		pinctrl-2 = <&P9_11_gpio_pin>;
+		pinctrl-3 = <&P9_11_gpio_pu_pin>;
+		pinctrl-4 = <&P9_11_gpio_pd_pin>;
+	};
+
+	/* U16 BLUE_GP0_PIN_3 gpio1_25 */
+	U16_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&U16_default_pin>;
+		pinctrl-1 = <&U16_gpio_pin>;
+		pinctrl-2 = <&U16_gpio_pu_pin>;
+		pinctrl-3 = <&U16_gpio_pd_pin>;
+	};
+
+
+	/* BLUE_GP0_PIN_4 gpio1_17 */
+	P9_23_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "pwm";
+		pinctrl-0 = <&P9_23_default_pin>;
+		pinctrl-1 = <&P9_23_gpio_pin>;
+		pinctrl-2 = <&P9_23_gpio_pu_pin>;
+		pinctrl-3 = <&P9_23_gpio_pd_pin>;
+		pinctrl-4 = <&P9_23_pwm_pin>;
+	};
+
+	/* BLUE_GP0_PIN_5 gpio3_20 */
+	D13_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&D13_default_pin>;
+		pinctrl-1 = <&D13_gpio_pin>;
+		pinctrl-2 = <&D13_gpio_pu_pin>;
+		pinctrl-3 = <&D13_gpio_pd_pin>;
+	};
+
+	/* BLUE_GP0_PIN_6 gpio3_17 */
+	P9_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "pwm", "spi", "pwm2", "pruout", "pruin";
+		pinctrl-0 = <&P9_28_default_pin>;
+		pinctrl-1 = <&P9_28_gpio_pin>;
+		pinctrl-2 = <&P9_28_gpio_pu_pin>;
+		pinctrl-3 = <&P9_28_gpio_pd_pin>;
+		pinctrl-4 = <&P9_28_pwm_pin>;
+		pinctrl-5 = <&P9_28_spi_pin>;
+		pinctrl-6 = <&P9_28_pwm2_pin>;
+		pinctrl-7 = <&P9_28_pruout_pin>;
+		pinctrl-8 = <&P9_28_pruin_pin>;
+	};
+
+	/* BLUE_GP1_PIN_3 gpio3_2 */
+	J15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&J15_default_pin>;
+		pinctrl-1 = <&J15_gpio_pin>;
+		pinctrl-2 = <&J15_gpio_pu_pin>;
+		pinctrl-3 = <&J15_gpio_pd_pin>;
+	};
+
+	/* BLUE_GP1_PIN_4 gpio3_1 */
+	H17_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&H17_default_pin>;
+		pinctrl-1 = <&H17_gpio_pin>;
+		pinctrl-2 = <&H17_gpio_pu_pin>;
+		pinctrl-3 = <&H17_gpio_pd_pin>;
+	};
+};
+
+/*******************************************************************************
+*	PWMSS
+*******************************************************************************/
+&ehrpwm0 {
+	status = "okay";
+};
+
+&ehrpwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ehrpwm1_pins>;
+	status = "okay";
+};
+
+&ehrpwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ehrpwm2_pins>;
+	status = "okay";
+};
+
+/*******************************************************************************
+*	SPI
+*******************************************************************************/
+&spi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi1_pins>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "okay";
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/1.0";
+		reg = <0>;
+		spi-max-frequency = <24000000>;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/1.1";
+		reg = <1>;
+		spi-max-frequency = <24000000>;
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-gateway.dts b/arch/arm/boot/dts/am335x-bonegreen-gateway.dts
new file mode 100644
index 00000000000..dd399ff97c0
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bonegreen-gateway.dts
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+#include "am335x-bonegreen-common.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+/* #include "am335x-bone-jtag.dtsi" */
+
+/ {
+	model = "SeeedStudio BeagleBone Green Gateway";
+	compatible = "ti,am335x-bone-green-gateway", "ti,am335x-bone-green", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	aliases {
+		rtc0 = &extrtc;
+		rtc1 = &rtc;
+	};
+
+	chosen {
+		base_dtb = "am335x-bonegreen-gateway.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+
+	wlan_en_reg: fixedregulator@2 {
+		compatible = "regulator-fixed";
+		regulator-name = "wlan-en-regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		startup-delay-us = <70000>;
+
+		/* WL_EN */
+		gpio = <&gpio3 9 0>;
+		enable-active-high;
+	};
+};
+
+&cpu0_opp_table {
+	/*
+	 * Octavo Systems:
+	 * The EFUSE_SMA register is not programmed for any of the AM335x wafers
+	 * we get and we are not programming them during our production test.
+	 * Therefore, from a DEVICE_ID revision point of view, the silicon looks
+	 * like it is Revision 2.1.  However, from an EFUSE_SMA point of view for
+	 * the HW OPP table, the silicon looks like it is Revision 1.0 (i.e. the
+	 * EFUSE_SMA register reads as all zeros).
+	 */
+	oppnitro-1000000000 {
+		opp-supported-hw = <0x06 0x0100>;
+	};
+};
+
+&am33xx_pinmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usbhost_pins>;
+
+	bt_pins: pinmux_bt_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLUP, MUX_MODE7)	/* gmii1_txd0.gpio0_28 - BT_EN */
+		>;
+	};
+
+	mmc3_pins: pinmux_mmc3_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE6) /* (L15) gmii1_rxd1.mmc2_clk */
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLUP, MUX_MODE6) /* (J16) gmii1_txen.mmc2_cmd */
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE5) /* (J17) gmii1_rxdv.mmc2_dat0 */
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLUP, MUX_MODE5) /* (J18) gmii1_txd3.mmc2_dat1 */
+			AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLUP, MUX_MODE5) /* (K15) gmii1_txd2.mmc2_dat2 */
+			AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLUP, MUX_MODE5) /* (H16) gmii1_col.mmc2_dat3 */
+		>;
+	};
+
+	uart2_grove_pins: pinmux_uart2_grove_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x90c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6)
+			AM33XX_IOPAD(0x910, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6)
+		>;
+	};
+
+	uart3_pins: pinmux_uart3_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1)	/* gmii1_rxd3.uart3_rxd */
+			AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE1)	/* gmii1_rxd2.uart3_txd */
+			AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT, MUX_MODE3)		/* mdio_data.uart3_ctsn */
+			AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLDOWN, MUX_MODE3)	/* mdio_clk.uart3_rtsn */
+		>;
+	};
+
+	wl18xx_pins: pinmux_wl18xx_pins {
+		pinctrl-single,pins = <
+			AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7)	/* gmii1_txclk.gpio3_9 WL_EN */
+			AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)	/* rmii1_refclk.gpio0_29 WL_IRQ */
+			AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_OUTPUT_PULLUP, MUX_MODE7)	/* gmii1_rxclk.gpio3_10 LS_BUF_EN */
+		>;
+	};
+
+	usbhost_pins: pinmux_usbhost_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gmii1_txd1.gpio0[21] */
+		>;
+	};
+};
+
+&mac {
+	status = "disabled";
+};
+
+&mmc3 {
+	dmas = <&edma_xbar 12 0 1
+		&edma_xbar 13 0 2>;
+	dma-names = "tx", "rx";
+	status = "okay";
+	vmmc-supply = <&wlan_en_reg>;
+	bus-width = <4>;
+	non-removable;
+	cap-power-off-card;
+	ti,needs-special-hs-handling;
+	keep-power-in-suspend;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1835";
+		reg = <2>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <29 IRQ_TYPE_EDGE_RISING>;
+	};
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_grove_pins>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins &bt_pins>;
+	status = "okay";
+
+	bluetooth {
+		compatible = "ti,wl1835-st";
+		enable-gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&i2c0 {
+	extrtc: rtc@68 {
+		compatible = "dallas,ds1340";
+		reg = <0x68>;
+	};
+};
+
+// (K16) gmii1_txd1.gpio0[21]
+&gpio0 {
+	usb_reset {
+		gpio-hog;
+		gpios = <21 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "usb_reset";
+	};
+};
+
+&gpio3 {
+	ls_buf_en {
+		gpio-hog;
+		gpios = <10 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "LS_BUF_EN";
+	};
+};
+
+&usb1 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	hub@1 {
+		compatible = "usb424,9512";
+		reg = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethernet: ethernet@1 {
+			compatible = "usb424,ec00";
+			reg = <1>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless-common-univ.dtsi b/arch/arm/boot/dts/am335x-bonegreen-wireless-common-univ.dtsi
new file mode 100644
index 00000000000..6e372b4876d
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless-common-univ.dtsi
@@ -0,0 +1,2814 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+&am33xx_pinmux {
+	/************************/
+	/* P8 Header */
+	/************************/
+
+	/* P8_01                GND */
+
+	/* P8_02                GND */
+
+
+	/* P8_03 (ZCZ ball R9) emmc */
+	P8_03_default_pin: pinmux_P8_03_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pin: pinmux_P8_03_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pu_pin: pinmux_P8_03_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_pd_pin: pinmux_P8_03_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad6.gpio1_6 */
+	P8_03_gpio_input_pin: pinmux_P8_03_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0818, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad6.gpio1_6 */
+
+	/* P8_04 (ZCZ ball T9) emmc */
+	P8_04_default_pin: pinmux_P8_04_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pin: pinmux_P8_04_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pu_pin: pinmux_P8_04_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_pd_pin: pinmux_P8_04_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad7.gpio1_7 */
+	P8_04_gpio_input_pin: pinmux_P8_04_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x081c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad7.gpio1_7 */
+
+	/* P8_05 (ZCZ ball R8) emmc */
+	P8_05_default_pin: pinmux_P8_05_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pin: pinmux_P8_05_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pu_pin: pinmux_P8_05_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_pd_pin: pinmux_P8_05_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad2.gpio1_2 */
+	P8_05_gpio_input_pin: pinmux_P8_05_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0808, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad2.gpio1_2 */
+
+	/* P8_06 (ZCZ ball T8) emmc */
+	P8_06_default_pin: pinmux_P8_06_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pin: pinmux_P8_06_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pu_pin: pinmux_P8_06_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_pd_pin: pinmux_P8_06_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad3.gpio1_3 */
+	P8_06_gpio_input_pin: pinmux_P8_06_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x080c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad3.gpio1_3 */
+
+	/* P8_07 (ZCZ ball R7) gpio2_2 */
+	P8_07_default_pin: pinmux_P8_07_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pin: pinmux_P8_07_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pu_pin: pinmux_P8_07_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_pd_pin: pinmux_P8_07_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_advn_ale.gpio2_2 */
+	P8_07_gpio_input_pin: pinmux_P8_07_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_advn_ale.gpio2_2 */
+	P8_07_timer_pin: pinmux_P8_07_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0890, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_advn_ale.timer4 */
+
+	/* P8_08 (ZCZ ball T7) gpio2_3 */
+	P8_08_default_pin: pinmux_P8_08_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pin: pinmux_P8_08_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pu_pin: pinmux_P8_08_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_pd_pin: pinmux_P8_08_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_oen_ren.gpio2_3 */
+	P8_08_gpio_input_pin: pinmux_P8_08_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_oen_ren.gpio2_3 */
+	P8_08_timer_pin: pinmux_P8_08_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0894, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_oen_ren.timer7 */
+
+	/* P8_09 (ZCZ ball T6) gpio2_5 */
+	P8_09_default_pin: pinmux_P8_09_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pin: pinmux_P8_09_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pu_pin: pinmux_P8_09_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_pd_pin: pinmux_P8_09_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_gpio_input_pin: pinmux_P8_09_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_be0n_cle.gpio2_5 */
+	P8_09_timer_pin: pinmux_P8_09_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x089c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_be0n_cle.timer5 */
+
+	/* P8_10 (ZCZ ball U6) gpio2_4 */
+	P8_10_default_pin: pinmux_P8_10_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pin: pinmux_P8_10_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pu_pin: pinmux_P8_10_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_pd_pin: pinmux_P8_10_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wen.gpio2_4 */
+	P8_10_gpio_input_pin: pinmux_P8_10_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wen.gpio2_4 */
+	P8_10_timer_pin: pinmux_P8_10_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0898, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* gpmc_wen.timer6 */
+
+	/* P8_11 (ZCZ ball R12) gpio1_13 */
+	P8_11_default_pin: pinmux_P8_11_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pin: pinmux_P8_11_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pu_pin: pinmux_P8_11_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_pd_pin: pinmux_P8_11_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P8_11_gpio_input_pin: pinmux_P8_11_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad13.gpio1_13 */
+	P8_11_qep_pin: pinmux_P8_11_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad13.eqep2b_in */
+	P8_11_pruout_pin: pinmux_P8_11_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad13.pru0_out15 */
+
+	/* P8_12 (ZCZ ball T12) gpio1_12 */
+	P8_12_default_pin: pinmux_P8_12_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pin: pinmux_P8_12_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pu_pin: pinmux_P8_12_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_pd_pin: pinmux_P8_12_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P8_12_gpio_input_pin: pinmux_P8_12_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad12.gpio1_12 */
+	P8_12_qep_pin: pinmux_P8_12_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad12.eqep2a_in */
+	P8_12_pruout_pin: pinmux_P8_12_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad12.pru0_out14 */
+
+	/* P8_13 (ZCZ ball T10) gpio0_23 */
+	P8_13_default_pin: pinmux_P8_13_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pin: pinmux_P8_13_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pu_pin: pinmux_P8_13_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_pd_pin: pinmux_P8_13_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P8_13_gpio_input_pin: pinmux_P8_13_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad9.gpio0_23 */
+	P8_13_pwm_pin: pinmux_P8_13_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad9.ehrpwm2b */
+
+	/* P8_14 (ZCZ ball T11) wl1835: wl_en */
+
+	/* P8_15 (ZCZ ball U13) gpio1_15 */
+	P8_15_default_pin: pinmux_P8_15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pin: pinmux_P8_15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pu_pin: pinmux_P8_15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_pd_pin: pinmux_P8_15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P8_15_gpio_input_pin: pinmux_P8_15_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad15.gpio1_15 */
+	P8_15_qep_pin: pinmux_P8_15_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad15.eqep2_strobe */
+	P8_15_pru_ecap_pin: pinmux_P8_15_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_ad15.pr1_ecap0_ecap_capin_apwm_o */
+	P8_15_pruin_pin: pinmux_P8_15_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad15.pru0_in15 */
+
+	/* P8_16 (ZCZ ball V13) gpio1_14 */
+	P8_16_default_pin: pinmux_P8_16_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pin: pinmux_P8_16_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pu_pin: pinmux_P8_16_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_pd_pin: pinmux_P8_16_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P8_16_gpio_input_pin: pinmux_P8_16_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad14.gpio1_14 */
+	P8_16_qep_pin: pinmux_P8_16_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad14.eqep2_index */
+	P8_16_pruin_pin: pinmux_P8_16_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad14.pru0_in14 */
+
+	/* P8_17 (ZCZ ball U12) wl1835: wl_irq */
+
+	/* P8_18 (ZCZ ball V12) gpio2_1 */
+	P8_18_default_pin: pinmux_P8_18_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pin: pinmux_P8_18_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pu_pin: pinmux_P8_18_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_pd_pin: pinmux_P8_18_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P8_18_gpio_input_pin: pinmux_P8_18_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_clk.gpio2_1 */
+
+	/* P8_19 (ZCZ ball U10) gpio0_22 */
+	P8_19_default_pin: pinmux_P8_19_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pin: pinmux_P8_19_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pu_pin: pinmux_P8_19_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_pd_pin: pinmux_P8_19_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad8.gpio0_22 */
+	P8_19_gpio_input_pin: pinmux_P8_19_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad8.gpio0_22 */
+	P8_19_pwm_pin: pinmux_P8_19_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0820, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad8.ehrpwm2a */
+
+	/* P8_20 (ZCZ ball V9) emmc */
+	P8_20_default_pin: pinmux_P8_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pin: pinmux_P8_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pu_pin: pinmux_P8_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_pd_pin: pinmux_P8_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn2.gpio1_31 */
+	P8_20_gpio_input_pin: pinmux_P8_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn2.gpio1_31 */
+	P8_20_pruout_pin: pinmux_P8_20_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_csn2.pru1_out13 */
+	P8_20_pruin_pin: pinmux_P8_20_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0884, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_csn2.pru1_in13 */
+
+	/* P8_21 (ZCZ ball U9) emmc */
+	P8_21_default_pin: pinmux_P8_21_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pin: pinmux_P8_21_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pu_pin: pinmux_P8_21_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_pd_pin: pinmux_P8_21_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn1.gpio1_30 */
+	P8_21_gpio_input_pin: pinmux_P8_21_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn1.gpio1_30 */
+	P8_21_pruout_pin: pinmux_P8_21_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_csn1.pru1_out12 */
+	P8_21_pruin_pin: pinmux_P8_21_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0880, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_csn1.pru1_in12 */
+
+	/* P8_22 (ZCZ ball V8) emmc */
+	P8_22_default_pin: pinmux_P8_22_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pin: pinmux_P8_22_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pu_pin: pinmux_P8_22_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_pd_pin: pinmux_P8_22_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad5.gpio1_5 */
+	P8_22_gpio_input_pin: pinmux_P8_22_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0814, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad5.gpio1_5 */
+
+	/* P8_23 (ZCZ ball U8) emmc */
+	P8_23_default_pin: pinmux_P8_23_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pin: pinmux_P8_23_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pu_pin: pinmux_P8_23_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_pd_pin: pinmux_P8_23_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad4.gpio1_4 */
+	P8_23_gpio_input_pin: pinmux_P8_23_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0810, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad4.gpio1_4 */
+
+	/* P8_24 (ZCZ ball V7) emmc */
+	P8_24_default_pin: pinmux_P8_24_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pin: pinmux_P8_24_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pu_pin: pinmux_P8_24_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_pd_pin: pinmux_P8_24_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad1.gpio1_1 */
+	P8_24_gpio_input_pin: pinmux_P8_24_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0804, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad1.gpio1_1 */
+
+	/* P8_25 (ZCZ ball U7) emmc */
+	P8_25_default_pin: pinmux_P8_25_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pin: pinmux_P8_25_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pu_pin: pinmux_P8_25_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_pd_pin: pinmux_P8_25_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad0.gpio1_0 */
+	P8_25_gpio_input_pin: pinmux_P8_25_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0800, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad0.gpio1_0 */
+
+	/* P8_26 (ZCZ ball V6) gpio-hog wl1835 */
+
+	/* P8_27 (ZCZ ball U5) hdmi */
+	P8_27_default_pin: pinmux_P8_27_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pin: pinmux_P8_27_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pu_pin: pinmux_P8_27_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_pd_pin: pinmux_P8_27_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P8_27_gpio_input_pin: pinmux_P8_27_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_vsync.gpio2_22 */
+	P8_27_pruout_pin: pinmux_P8_27_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_vsync.pru1_out8 */
+	P8_27_pruin_pin: pinmux_P8_27_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_vsync.pru1_in8 */
+
+	/* P8_28 (ZCZ ball V5) hdmi */
+	P8_28_default_pin: pinmux_P8_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pin: pinmux_P8_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pu_pin: pinmux_P8_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_pd_pin: pinmux_P8_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P8_28_gpio_input_pin: pinmux_P8_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_pclk.gpio2_24 */
+	P8_28_pruout_pin: pinmux_P8_28_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_pclk.pru1_out10 */
+	P8_28_pruin_pin: pinmux_P8_28_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_pclk.pru1_in10 */
+
+	/* P8_29 (ZCZ ball R5) hdmi */
+	P8_29_default_pin: pinmux_P8_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pin: pinmux_P8_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pu_pin: pinmux_P8_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_pd_pin: pinmux_P8_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P8_29_gpio_input_pin: pinmux_P8_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_hsync.gpio2_23 */
+	P8_29_pruout_pin: pinmux_P8_29_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_hsync.pru1_out9 */
+	P8_29_pruin_pin: pinmux_P8_29_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_hsync.pru1_in9 */
+
+	/* P8_30 (ZCZ ball R6) hdmi */
+	P8_30_default_pin: pinmux_P8_30_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pin: pinmux_P8_30_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pu_pin: pinmux_P8_30_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_pd_pin: pinmux_P8_30_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_gpio_input_pin: pinmux_P8_30_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE7) >; };			/* lcd_ac_bias_en.gpio2_25 */
+	P8_30_pruout_pin: pinmux_P8_30_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_ac_bias_en.pru1_out11 */
+	P8_30_pruin_pin: pinmux_P8_30_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE6) >; };			/* lcd_ac_bias_en.pru1_in11 */
+
+	/* P8_31 (ZCZ ball V4) hdmi */
+	P8_31_default_pin: pinmux_P8_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pin: pinmux_P8_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pu_pin: pinmux_P8_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_pd_pin: pinmux_P8_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data14.gpio0_10 */
+	P8_31_gpio_input_pin: pinmux_P8_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data14.gpio0_10 */
+	P8_31_qep_pin: pinmux_P8_31_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data14.eqep1_index */
+	P8_31_uart_pin: pinmux_P8_31_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data14.uart5_rxd */
+
+	/* P8_32 (ZCZ ball T5) hdmi */
+	P8_32_default_pin: pinmux_P8_32_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pin: pinmux_P8_32_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pu_pin: pinmux_P8_32_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_pd_pin: pinmux_P8_32_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data15.gpio0_11 */
+	P8_32_gpio_input_pin: pinmux_P8_32_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data15.gpio0_11 */
+	P8_32_qep_pin: pinmux_P8_32_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08dc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data15.eqep1_strobe */
+
+	/* P8_33 (ZCZ ball V3) hdmi */
+	P8_33_default_pin: pinmux_P8_33_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pin: pinmux_P8_33_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pu_pin: pinmux_P8_33_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_pd_pin: pinmux_P8_33_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data13.gpio0_9 */
+	P8_33_gpio_input_pin: pinmux_P8_33_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data13.gpio0_9 */
+	P8_33_qep_pin: pinmux_P8_33_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data13.eqep1b_in */
+
+	/* P8_34 (ZCZ ball U4) hdmi */
+	P8_34_default_pin: pinmux_P8_34_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pin: pinmux_P8_34_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pu_pin: pinmux_P8_34_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_pd_pin: pinmux_P8_34_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data11.gpio2_17 */
+	P8_34_gpio_input_pin: pinmux_P8_34_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data11.gpio2_17 */
+	P8_34_pwm_pin: pinmux_P8_34_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08cc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data11.ehrpwm1b */
+
+	/* P8_35 (ZCZ ball V2) hdmi */
+	P8_35_default_pin: pinmux_P8_35_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pin: pinmux_P8_35_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pu_pin: pinmux_P8_35_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_pd_pin: pinmux_P8_35_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data12.gpio0_8 */
+	P8_35_gpio_input_pin: pinmux_P8_35_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data12.gpio0_8 */
+	P8_35_qep_pin: pinmux_P8_35_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08d0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* lcd_data12.eqep1a_in */
+
+	/* P8_36 (ZCZ ball U3) hdmi */
+	P8_36_default_pin: pinmux_P8_36_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pin: pinmux_P8_36_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pu_pin: pinmux_P8_36_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_pd_pin: pinmux_P8_36_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data10.gpio2_16 */
+	P8_36_gpio_input_pin: pinmux_P8_36_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data10.gpio2_16 */
+	P8_36_pwm_pin: pinmux_P8_36_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data10.ehrpwm1a */
+
+	/* P8_37 (ZCZ ball U1) hdmi */
+	P8_37_default_pin: pinmux_P8_37_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pin: pinmux_P8_37_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pu_pin: pinmux_P8_37_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_pd_pin: pinmux_P8_37_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data8.gpio2_14 */
+	P8_37_gpio_input_pin: pinmux_P8_37_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data8.gpio2_14 */
+	P8_37_pwm_pin: pinmux_P8_37_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data8.ehrpwm1_tripzone_input */
+	P8_37_uart_pin: pinmux_P8_37_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data8.uart5_txd */
+
+	/* P8_38 (ZCZ ball U2) hdmi */
+	P8_38_default_pin: pinmux_P8_38_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pin: pinmux_P8_38_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pu_pin: pinmux_P8_38_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_pd_pin: pinmux_P8_38_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data9.gpio2_15 */
+	P8_38_gpio_input_pin: pinmux_P8_38_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data9.gpio2_15 */
+	P8_38_pwm_pin: pinmux_P8_38_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE2) >; };	/* lcd_data9.ehrpwm0_synco */
+	P8_38_uart_pin: pinmux_P8_38_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08c4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* lcd_data9.uart5_rxd */
+
+	/* P8_39 (ZCZ ball T3) hdmi */
+	P8_39_default_pin: pinmux_P8_39_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pin: pinmux_P8_39_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pu_pin: pinmux_P8_39_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_pd_pin: pinmux_P8_39_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data6.gpio2_12 */
+	P8_39_gpio_input_pin: pinmux_P8_39_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data6.gpio2_12 */
+	P8_39_qep_pin: pinmux_P8_39_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data6.eqep2_index */
+	P8_39_pruout_pin: pinmux_P8_39_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data6.pru1_out6 */
+	P8_39_pruin_pin: pinmux_P8_39_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data6.pru1_in6 */
+
+	/* P8_40 (ZCZ ball T4) hdmi */
+	P8_40_default_pin: pinmux_P8_40_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pin: pinmux_P8_40_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pu_pin: pinmux_P8_40_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_pd_pin: pinmux_P8_40_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data7.gpio2_13 */
+	P8_40_gpio_input_pin: pinmux_P8_40_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data7.gpio2_13 */
+	P8_40_qep_pin: pinmux_P8_40_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data7.eqep2_strobe */
+	P8_40_pruout_pin: pinmux_P8_40_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data7.pru1_out7 */
+	P8_40_pruin_pin: pinmux_P8_40_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08bc, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data7.pru1_in7 */
+
+	/* P8_41 (ZCZ ball T1) hdmi */
+	P8_41_default_pin: pinmux_P8_41_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pin: pinmux_P8_41_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pu_pin: pinmux_P8_41_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_pd_pin: pinmux_P8_41_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data4.gpio2_10 */
+	P8_41_gpio_input_pin: pinmux_P8_41_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data4.gpio2_10 */
+	P8_41_qep_pin: pinmux_P8_41_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data4.eqep2a_in */
+	P8_41_pruout_pin: pinmux_P8_41_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data4.pru1_out4 */
+	P8_41_pruin_pin: pinmux_P8_41_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data4.pru1_in4 */
+
+	/* P8_42 (ZCZ ball T2) hdmi */
+	P8_42_default_pin: pinmux_P8_42_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pin: pinmux_P8_42_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pu_pin: pinmux_P8_42_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_pd_pin: pinmux_P8_42_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data5.gpio2_11 */
+	P8_42_gpio_input_pin: pinmux_P8_42_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data5.gpio2_11 */
+	P8_42_qep_pin: pinmux_P8_42_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* lcd_data5.eqep2b_in */
+	P8_42_pruout_pin: pinmux_P8_42_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data5.pru1_out5 */
+	P8_42_pruin_pin: pinmux_P8_42_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08b4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data5.pru1_in5 */
+
+	/* P8_43 (ZCZ ball R3) hdmi */
+	P8_43_default_pin: pinmux_P8_43_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pin: pinmux_P8_43_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pu_pin: pinmux_P8_43_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_pd_pin: pinmux_P8_43_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data2.gpio2_8 */
+	P8_43_gpio_input_pin: pinmux_P8_43_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data2.gpio2_8 */
+	P8_43_pwm_pin: pinmux_P8_43_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data2.ehrpwm2_tripzone_input */
+	P8_43_pruout_pin: pinmux_P8_43_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data2.pru1_out2 */
+	P8_43_pruin_pin: pinmux_P8_43_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data2.pru1_in2 */
+
+	/* P8_44 (ZCZ ball R4) hdmi */
+	P8_44_default_pin: pinmux_P8_44_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pin: pinmux_P8_44_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pu_pin: pinmux_P8_44_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_pd_pin: pinmux_P8_44_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data3.gpio2_9 */
+	P8_44_gpio_input_pin: pinmux_P8_44_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data3.gpio2_9 */
+	P8_44_pwm_pin: pinmux_P8_44_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data3.ehrpwm0_synco */
+	P8_44_pruout_pin: pinmux_P8_44_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data3.pru1_out3 */
+	P8_44_pruin_pin: pinmux_P8_44_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ac, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data3.pru1_in3 */
+
+	/* P8_45 (ZCZ ball R1) hdmi */
+	P8_45_default_pin: pinmux_P8_45_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pin: pinmux_P8_45_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pu_pin: pinmux_P8_45_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_pd_pin: pinmux_P8_45_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data0.gpio2_6 */
+	P8_45_gpio_input_pin: pinmux_P8_45_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data0.gpio2_6 */
+	P8_45_pwm_pin: pinmux_P8_45_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data0.ehrpwm2a */
+	P8_45_pruout_pin: pinmux_P8_45_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data0.pru1_out0 */
+	P8_45_pruin_pin: pinmux_P8_45_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data0.pru1_in0 */
+
+	/* P8_46 (ZCZ ball R2) hdmi */
+	P8_46_default_pin: pinmux_P8_46_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pin: pinmux_P8_46_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pu_pin: pinmux_P8_46_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_pd_pin: pinmux_P8_46_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_data1.gpio2_7 */
+	P8_46_gpio_input_pin: pinmux_P8_46_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_data1.gpio2_7 */
+	P8_46_pwm_pin: pinmux_P8_46_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* lcd_data1.ehrpwm2b */
+	P8_46_pruout_pin: pinmux_P8_46_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_data1.pru1_out1 */
+	P8_46_pruin_pin: pinmux_P8_46_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08a4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_data1.pru1_in1 */
+
+	/************************/
+	/* P9 Header */
+	/************************/
+
+	/* P9_01                GND */
+
+	/* P9_02                GND */
+
+	/* P9_03                3V3 */
+
+	/* P9_04                3V3 */
+
+	/* P9_05                VDD_5V */
+
+	/* P9_06                VDD_5V */
+
+	/* P9_07                SYS_5V */
+
+	/* P9_08                SYS_5V */
+
+	/* P9_09                PWR_BUT */
+
+	/* P9_10                RSTn */
+
+	/* P9_11 (ZCZ ball T17) gpio0_30 */
+	P9_11_default_pin: pinmux_P9_11_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pin: pinmux_P9_11_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pu_pin: pinmux_P9_11_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_pd_pin: pinmux_P9_11_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P9_11_gpio_input_pin: pinmux_P9_11_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wait0.gpio0_30 */
+	P9_11_uart_pin: pinmux_P9_11_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wait0.uart4_rxd */
+
+	/* P9_12 (ZCZ ball U18) gpio1_28 */
+	P9_12_default_pin: pinmux_P9_12_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pin: pinmux_P9_12_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pu_pin: pinmux_P9_12_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_pd_pin: pinmux_P9_12_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P9_12_gpio_input_pin: pinmux_P9_12_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_be1n.gpio1_28 */
+
+	/* P9_13 (ZCZ ball U17) gpio0_31 */
+	P9_13_default_pin: pinmux_P9_13_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pin: pinmux_P9_13_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pu_pin: pinmux_P9_13_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_pd_pin: pinmux_P9_13_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P9_13_gpio_input_pin: pinmux_P9_13_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wpn.gpio0_31 */
+	P9_13_uart_pin: pinmux_P9_13_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wpn.uart4_txd */
+
+	/* P9_14 (ZCZ ball U14) gpio1_18 */
+	P9_14_default_pin: pinmux_P9_14_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pin: pinmux_P9_14_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pu_pin: pinmux_P9_14_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_pd_pin: pinmux_P9_14_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P9_14_gpio_input_pin: pinmux_P9_14_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a2.gpio1_18 */
+	P9_14_pwm_pin: pinmux_P9_14_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a2.ehrpwm1a */
+
+	/* P9_15 (ZCZ ball R13) gpio1_16 */
+	P9_15_default_pin: pinmux_P9_15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pin: pinmux_P9_15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pu_pin: pinmux_P9_15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_pd_pin: pinmux_P9_15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a0.gpio1_16 */
+	P9_15_gpio_input_pin: pinmux_P9_15_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a0.gpio1_16 */
+	P9_15_pwm_pin: pinmux_P9_15_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0840, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a0.ehrpwm1_tripzone_input */
+
+	/* P9_16 (ZCZ ball T14) gpio1_19 */
+	P9_16_default_pin: pinmux_P9_16_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pin: pinmux_P9_16_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pu_pin: pinmux_P9_16_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_pd_pin: pinmux_P9_16_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a3.gpio1_19 */
+	P9_16_gpio_input_pin: pinmux_P9_16_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a3.gpio1_19 */
+	P9_16_pwm_pin: pinmux_P9_16_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x084c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a3.ehrpwm1b */
+
+	/* P9_17 (ZCZ ball A16) gpio0_5 */
+	P9_17_default_pin: pinmux_P9_17_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pin: pinmux_P9_17_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pu_pin: pinmux_P9_17_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_pd_pin: pinmux_P9_17_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P9_17_gpio_input_pin: pinmux_P9_17_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_INPUT | MUX_MODE7) >; };			/* spi0_cs0.gpio0_5 */
+	P9_17_spi_cs_pin: pinmux_P9_17_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_cs0.spi0_cs0 */
+	P9_17_i2c_pin: pinmux_P9_17_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_cs0.i2c1_scl */
+	P9_17_pwm_pin: pinmux_P9_17_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_cs0.ehrpwm0_synci */
+	P9_17_pru_uart_pin: pinmux_P9_17_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_cs0.pr1_uart0_txd */
+
+	/* P9_18 (ZCZ ball B16) gpio0_4 */
+	P9_18_default_pin: pinmux_P9_18_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pin: pinmux_P9_18_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pu_pin: pinmux_P9_18_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_pd_pin: pinmux_P9_18_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P9_18_gpio_input_pin: pinmux_P9_18_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d1.gpio0_4 */
+	P9_18_spi_pin: pinmux_P9_18_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d1.spi0_d1 */
+	P9_18_i2c_pin: pinmux_P9_18_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d1.i2c1_sda */
+	P9_18_pwm_pin: pinmux_P9_18_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d1.ehrpwm0_tripzone_input */
+	P9_18_pru_uart_pin: pinmux_P9_18_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d1.pr1_uart0_rxd */
+
+	/* P9_19 (ZCZ ball D17) i2c2_scl */
+	P9_19_default_pin: pinmux_P9_19_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P9_19_gpio_pin: pinmux_P9_19_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_pu_pin: pinmux_P9_19_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_pd_pin: pinmux_P9_19_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P9_19_gpio_input_pin: pinmux_P9_19_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rtsn.gpio0_13 */
+	P9_19_timer_pin: pinmux_P9_19_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart1_rtsn.timer5 */
+	P9_19_can_pin: pinmux_P9_19_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rtsn.dcan0_rx */
+	P9_19_i2c_pin: pinmux_P9_19_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P9_19_spi_cs_pin: pinmux_P9_19_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_rtsn.spi1_cs1 */
+	P9_19_pru_uart_pin: pinmux_P9_19_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rtsn.pr1_uart0_rts_n */
+
+	/* P9_20 (ZCZ ball D18) i2c2_sda */
+	P9_20_default_pin: pinmux_P9_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P9_20_gpio_pin: pinmux_P9_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_pu_pin: pinmux_P9_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_pd_pin: pinmux_P9_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P9_20_gpio_input_pin: pinmux_P9_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_INPUT | MUX_MODE7) >; };			/* uart1_ctsn.gpio0_12 */
+	P9_20_timer_pin: pinmux_P9_20_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart1_ctsn.timer6 */
+	P9_20_can_pin: pinmux_P9_20_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_ctsn.dcan0_tx */
+	P9_20_i2c_pin: pinmux_P9_20_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P9_20_spi_cs_pin: pinmux_P9_20_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_ctsn.spi1_cs0 */
+	P9_20_pru_uart_pin: pinmux_P9_20_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_ctsn.pr1_uart0_cts_n */
+
+	/* P9_21 (ZCZ ball B17) gpio0_3 */
+	P9_21_default_pin: pinmux_P9_21_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pin: pinmux_P9_21_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pu_pin: pinmux_P9_21_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_pd_pin: pinmux_P9_21_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P9_21_gpio_input_pin: pinmux_P9_21_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d0.gpio0_3 */
+	P9_21_spi_pin: pinmux_P9_21_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d0.spi0_d0 */
+	P9_21_uart_pin: pinmux_P9_21_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_d0.uart2_txd */
+	P9_21_i2c_pin: pinmux_P9_21_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d0.i2c2_scl */
+	P9_21_pwm_pin: pinmux_P9_21_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d0.ehrpwm0b */
+	P9_21_pru_uart_pin: pinmux_P9_21_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d0.pr1_uart0_rts_n */
+
+	/* P9_22 (ZCZ ball A17) gpio0_2 */
+	P9_22_default_pin: pinmux_P9_22_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pin: pinmux_P9_22_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pu_pin: pinmux_P9_22_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_pd_pin: pinmux_P9_22_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P9_22_gpio_input_pin: pinmux_P9_22_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_INPUT | MUX_MODE7) >; };			/* spi0_sclk.gpio0_2 */
+	P9_22_spi_sclk_pin: pinmux_P9_22_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_sclk.spi0_sclk */
+	P9_22_uart_pin: pinmux_P9_22_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_sclk.uart2_rxd */
+	P9_22_i2c_pin: pinmux_P9_22_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_sclk.i2c2_sda */
+	P9_22_pwm_pin: pinmux_P9_22_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_sclk.ehrpwm0a */
+	P9_22_pru_uart_pin: pinmux_P9_22_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_sclk.pr1_uart0_cts_n */
+
+	/* P9_23 (ZCZ ball V14) gpio1_17 */
+	P9_23_default_pin: pinmux_P9_23_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pin: pinmux_P9_23_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pu_pin: pinmux_P9_23_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_pd_pin: pinmux_P9_23_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a1.gpio1_17 */
+	P9_23_gpio_input_pin: pinmux_P9_23_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a1.gpio1_17 */
+	P9_23_pwm_pin: pinmux_P9_23_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0844, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a1.ehrpwm0_synco */
+
+	/* P9_24 (ZCZ ball D15) gpio0_15 */
+	P9_24_default_pin: pinmux_P9_24_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pin: pinmux_P9_24_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pu_pin: pinmux_P9_24_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_pd_pin: pinmux_P9_24_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P9_24_gpio_input_pin: pinmux_P9_24_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE7) >; };			/* uart1_txd.gpio0_15 */
+	P9_24_uart_pin: pinmux_P9_24_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_txd.uart1_txd */
+	P9_24_can_pin: pinmux_P9_24_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_txd.dcan1_rx */
+	P9_24_i2c_pin: pinmux_P9_24_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_txd.i2c1_scl */
+	P9_24_pru_uart_pin: pinmux_P9_24_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_txd.pr1_uart0_txd */
+	P9_24_pruin_pin: pinmux_P9_24_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE6) >; };			/* uart1_txd.pru0_in16 */
+
+	/* P9_25 (ZCZ ball A14) audio */
+	P9_25_default_pin: pinmux_P9_25_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pin: pinmux_P9_25_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pu_pin: pinmux_P9_25_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_pd_pin: pinmux_P9_25_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_gpio_input_pin: pinmux_P9_25_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkx.gpio3_21 */
+	P9_25_qep_pin: pinmux_P9_25_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkx.eqep0_strobe */
+	P9_25_pruout_pin: pinmux_P9_25_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkx.pru0_out7 */
+	P9_25_pruin_pin: pinmux_P9_25_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkx.pru0_in7 */
+
+	/* P9_26 (ZCZ ball D16) gpio0_14 */
+	P9_26_default_pin: pinmux_P9_26_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pin: pinmux_P9_26_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pu_pin: pinmux_P9_26_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_pd_pin: pinmux_P9_26_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P9_26_gpio_input_pin: pinmux_P9_26_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rxd.gpio0_14 */
+	P9_26_uart_pin: pinmux_P9_26_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_rxd.uart1_rxd */
+	P9_26_can_pin: pinmux_P9_26_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rxd.dcan1_tx */
+	P9_26_i2c_pin: pinmux_P9_26_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rxd.i2c1_sda */
+	P9_26_pru_uart_pin: pinmux_P9_26_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rxd.pr1_uart0_rxd */
+	P9_26_pruin_pin: pinmux_P9_26_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE6) >; };			/* uart1_rxd.pru1_in16 */
+
+	/* P9_27 (ZCZ ball C13) gpio3_19 */
+	P9_27_default_pin: pinmux_P9_27_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pin: pinmux_P9_27_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pu_pin: pinmux_P9_27_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_pd_pin: pinmux_P9_27_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P9_27_gpio_input_pin: pinmux_P9_27_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsr.gpio3_19 */
+	P9_27_qep_pin: pinmux_P9_27_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsr.eqep0b_in */
+	P9_27_pruout_pin: pinmux_P9_27_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsr.pru0_out5 */
+	P9_27_pruin_pin: pinmux_P9_27_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsr.pru0_in5 */
+
+	/* P9_28 (ZCZ ball C12) audio */
+	P9_28_default_pin: pinmux_P9_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pin: pinmux_P9_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pu_pin: pinmux_P9_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_pd_pin: pinmux_P9_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_gpio_input_pin: pinmux_P9_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkr.gpio3_17 */
+	P9_28_pwm_pin: pinmux_P9_28_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkr.ehrpwm0_synci */
+	P9_28_spi_cs_pin: pinmux_P9_28_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_ahclkr.spi1_cs0 */
+	P9_28_pwm2_pin: pinmux_P9_28_pwm2_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* mcasp0_ahclkr.ecap2_in_pwm2_out */
+	P9_28_pruout_pin: pinmux_P9_28_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkr.pru0_out3 */
+	P9_28_pruin_pin: pinmux_P9_28_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkr.pru0_in3 */
+
+	/* P9_29 (ZCZ ball B13) audio */
+	P9_29_default_pin: pinmux_P9_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pin: pinmux_P9_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pu_pin: pinmux_P9_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_pd_pin: pinmux_P9_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P9_29_gpio_input_pin: pinmux_P9_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsx.gpio3_15 */
+	P9_29_pwm_pin: pinmux_P9_29_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsx.ehrpwm0b */
+	P9_29_spi_pin: pinmux_P9_29_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_fsx.spi1_d0 */
+	P9_29_pruout_pin: pinmux_P9_29_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsx.pru0_out1 */
+	P9_29_pruin_pin: pinmux_P9_29_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsx.pru0_in1 */
+
+	/* P9_30 (ZCZ ball D12) gpio-hog wl1835 */
+
+	/* P9_31 (ZCZ ball A13) audio */
+	P9_31_default_pin: pinmux_P9_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pin: pinmux_P9_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pu_pin: pinmux_P9_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_pd_pin: pinmux_P9_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P9_31_gpio_input_pin: pinmux_P9_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkx.gpio3_14 */
+	P9_31_pwm_pin: pinmux_P9_31_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkx.ehrpwm0a */
+	P9_31_spi_sclk_pin: pinmux_P9_31_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_aclkx.spi1_sclk */
+	P9_31_pruout_pin: pinmux_P9_31_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkx.pru0_out0 */
+	P9_31_pruin_pin: pinmux_P9_31_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkx.pru0_in0 */
+
+	/* P9_32                VADC */
+
+	/* P9_33 (ZCZ ball C8)  AIN4         */
+
+	/* P9_34                AGND */
+
+	/* P9_35 (ZCZ ball A8)  AIN6         */
+
+	/* P9_36 (ZCZ ball B8)  AIN5         */
+
+	/* P9_37 (ZCZ ball B7)  AIN2         */
+
+	/* P9_38 (ZCZ ball A7)  AIN3         */
+
+	/* P9_39 (ZCZ ball B6)  AIN0         */
+
+	/* P9_40 (ZCZ ball C7)  AIN1         */
+
+	/* P9_41 (ZCZ ball D14) gpio0_20 */
+	P9_41_default_pin: pinmux_P9_41_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pin: pinmux_P9_41_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pu_pin: pinmux_P9_41_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_pd_pin: pinmux_P9_41_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P9_41_gpio_input_pin: pinmux_P9_41_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE7) >; };			/* xdma_event_intr1.gpio0_20 */
+	P9_41_timer_pin: pinmux_P9_41_timer_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* xdma_event_intr1.timer7 */
+	P9_41_pruin_pin: pinmux_P9_41_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE5) >; };			/* xdma_event_intr1.pru0_in16 */
+
+	/* P9_41.1 */
+	/* P9_91 (ZCZ ball D13) gpio3_20 */
+	P9_91_default_pin: pinmux_P9_91_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pin: pinmux_P9_91_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pu_pin: pinmux_P9_91_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_pd_pin: pinmux_P9_91_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P9_91_gpio_input_pin: pinmux_P9_91_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_axr1.gpio3_20 */
+	P9_91_qep_pin: pinmux_P9_91_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_axr1.eqep0_index */
+	P9_91_pruout_pin: pinmux_P9_91_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_axr1.pru0_out6 */
+	P9_91_pruin_pin: pinmux_P9_91_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr1.pru0_in6 */
+
+	/* P9_42 (ZCZ ball C18) gpio0_7 */
+	P9_42_default_pin: pinmux_P9_42_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pin: pinmux_P9_42_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pu_pin: pinmux_P9_42_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_pd_pin: pinmux_P9_42_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_gpio_input_pin: pinmux_P9_42_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_INPUT | MUX_MODE7) >; };			/* eCAP0_in_PWM0_out.gpio0_7 */
+	P9_42_pwm_pin: pinmux_P9_42_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE0) >; };	/* eCAP0_in_PWM0_out.ecap0_in_pwm0_out */
+	P9_42_uart_pin: pinmux_P9_42_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* eCAP0_in_PWM0_out.uart3_txd */
+	P9_42_spi_cs_pin: pinmux_P9_42_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* eCAP0_in_PWM0_out.spi1_cs1 */
+	P9_42_pru_ecap_pin: pinmux_P9_42_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* eCAP0_in_PWM0_out.pr1_ecap0_ecap_capin_apwm_o */
+	P9_42_spi_sclk_pin: pinmux_P9_42_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* eCAP0_in_PWM0_out.spi1_sclk */
+
+	/* P9_42.1 */
+	/* P9_92 (ZCZ ball B12) gpio3_18 */
+	P9_92_default_pin: pinmux_P9_92_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pin: pinmux_P9_92_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pu_pin: pinmux_P9_92_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_pd_pin: pinmux_P9_92_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P9_92_gpio_input_pin: pinmux_P9_92_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkr.gpio3_18 */
+	P9_92_qep_pin: pinmux_P9_92_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkr.eqep0a_in */
+	P9_92_pruout_pin: pinmux_P9_92_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkr.pru0_out4 */
+	P9_92_pruin_pin: pinmux_P9_92_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkr.pru0_in4 */
+
+	/* P9_43                GND */
+
+	/* P9_44                GND */
+
+	/* P9_45                GND */
+
+	/* P9_46                GND */
+
+	/*       (ZCZ ball A15) */
+	A15_default_pin: pinmux_A15_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | MUX_MODE7) >; };		/* Mode 7, Pull-Up */
+	A15_clkout_pin: pinmux_A15_clkout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT | MUX_MODE3) >; };			/* Mode 3 */
+	A15_gpio_pin: pinmux_A15_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* Mode 7, RxActive */
+	A15_gpio_pu_pin: pinmux_A15_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* Mode 7, Pull-Up, RxActive */
+	A15_gpio_pd_pin: pinmux_A15_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* Mode 7, Pull-Down, RxActive */
+};
+
+&i2c1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	clock-frequency = <100000>;
+	symlink = "bone/i2c/1";
+};
+
+&i2c2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	clock-frequency = <100000>;
+	symlink = "bone/i2c/2";
+};
+
+&uart1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/1";
+};
+
+&uart2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/2";
+};
+
+&uart3 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/3";
+};
+
+&uart4 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/4";
+};
+
+&uart5 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/uart/5";
+};
+
+&dcan0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/can/0";
+};
+
+&dcan1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/can/1";
+};
+
+&eqep0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/0";
+};
+
+&eqep1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/1";
+};
+
+&eqep2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+	symlink = "bone/eqep/2";
+};
+
+&epwmss0 {
+	status = "okay";
+};
+
+&epwmss1 {
+	status = "okay";
+};
+
+&epwmss2 {
+	status = "okay";
+};
+
+&ehrpwm0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ehrpwm1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ehrpwm2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ecap2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&spi0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/0.0";
+
+		reg = <0>;
+		spi-max-frequency = <16000000>;
+		spi-cpha;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/0.1";
+
+		reg = <1>;
+		spi-max-frequency = <16000000>;
+	};
+};
+
+&spi1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/1.0";
+
+		reg = <0>;
+		spi-max-frequency = <16000000>;
+		spi-cpha;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "spidev";
+		symlink = "spi/1.1";
+
+		reg = <1>;
+		spi-max-frequency = <16000000>;
+	};
+};
+
+/**********************************************************************/
+/* Pin Multiplex Helpers                                              */
+/*                                                                    */
+/* These provide userspace runtime pin configuration for the          */
+/* BeagleBone cape expansion headers                                  */
+/**********************************************************************/
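+
+/*
+ * Each *_pinmux helper below is a "bone-pinmux-helper" device exposing the
+ * pinctrl states listed in its pinctrl-names as a writable "state" attribute
+ * in sysfs, so a pin's mux can be switched at runtime without rebuilding the
+ * device tree.  A minimal usage sketch (the sysfs path shown is illustrative
+ * and depends on the kernel's ocp node naming):
+ *
+ *   # select the pull-up GPIO mux for P8_07 at runtime
+ *   echo gpio_pu > /sys/devices/platform/ocp/ocp:P8_07_pinmux/state
+ *
+ * Utilities such as config-pin wrap this interface.
+ */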
+
+&ocp {
+	/************************/
+	/* P8 Header */
+	/************************/
+
+	/* P8_01                GND */
+
+	/* P8_02                GND */
+
+	/* P8_03 (ZCZ ball R9) emmc */
+	P8_03_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_03_default_pin>;
+		pinctrl-1 = <&P8_03_gpio_pin>;
+		pinctrl-2 = <&P8_03_gpio_pu_pin>;
+		pinctrl-3 = <&P8_03_gpio_pd_pin>;
+		pinctrl-4 = <&P8_03_gpio_input_pin>;
+	};
+
+	/* P8_04 (ZCZ ball T9) emmc */
+	P8_04_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_04_default_pin>;
+		pinctrl-1 = <&P8_04_gpio_pin>;
+		pinctrl-2 = <&P8_04_gpio_pu_pin>;
+		pinctrl-3 = <&P8_04_gpio_pd_pin>;
+		pinctrl-4 = <&P8_04_gpio_input_pin>;
+	};
+
+	/* P8_05 (ZCZ ball R8) emmc */
+	P8_05_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_05_default_pin>;
+		pinctrl-1 = <&P8_05_gpio_pin>;
+		pinctrl-2 = <&P8_05_gpio_pu_pin>;
+		pinctrl-3 = <&P8_05_gpio_pd_pin>;
+		pinctrl-4 = <&P8_05_gpio_input_pin>;
+	};
+
+	/* P8_06 (ZCZ ball T8) emmc */
+	P8_06_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_06_default_pin>;
+		pinctrl-1 = <&P8_06_gpio_pin>;
+		pinctrl-2 = <&P8_06_gpio_pu_pin>;
+		pinctrl-3 = <&P8_06_gpio_pd_pin>;
+		pinctrl-4 = <&P8_06_gpio_input_pin>;
+	};
+
+	/* P8_07 (ZCZ ball R7) */
+	P8_07_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_07_default_pin>;
+		pinctrl-1 = <&P8_07_gpio_pin>;
+		pinctrl-2 = <&P8_07_gpio_pu_pin>;
+		pinctrl-3 = <&P8_07_gpio_pd_pin>;
+		pinctrl-4 = <&P8_07_gpio_input_pin>;
+		pinctrl-5 = <&P8_07_timer_pin>;
+	};
+
+	/* P8_08 (ZCZ ball T7) */
+	P8_08_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_08_default_pin>;
+		pinctrl-1 = <&P8_08_gpio_pin>;
+		pinctrl-2 = <&P8_08_gpio_pu_pin>;
+		pinctrl-3 = <&P8_08_gpio_pd_pin>;
+		pinctrl-4 = <&P8_08_gpio_input_pin>;
+		pinctrl-5 = <&P8_08_timer_pin>;
+	};
+
+	/* P8_09 (ZCZ ball T6) */
+	P8_09_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_09_default_pin>;
+		pinctrl-1 = <&P8_09_gpio_pin>;
+		pinctrl-2 = <&P8_09_gpio_pu_pin>;
+		pinctrl-3 = <&P8_09_gpio_pd_pin>;
+		pinctrl-4 = <&P8_09_gpio_input_pin>;
+		pinctrl-5 = <&P8_09_timer_pin>;
+	};
+
+	/* P8_10 (ZCZ ball U6) */
+	P8_10_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer";
+		pinctrl-0 = <&P8_10_default_pin>;
+		pinctrl-1 = <&P8_10_gpio_pin>;
+		pinctrl-2 = <&P8_10_gpio_pu_pin>;
+		pinctrl-3 = <&P8_10_gpio_pd_pin>;
+		pinctrl-4 = <&P8_10_gpio_input_pin>;
+		pinctrl-5 = <&P8_10_timer_pin>;
+	};
+
+	/* P8_11 (ZCZ ball R12) */
+	P8_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P8_11_default_pin>;
+		pinctrl-1 = <&P8_11_gpio_pin>;
+		pinctrl-2 = <&P8_11_gpio_pu_pin>;
+		pinctrl-3 = <&P8_11_gpio_pd_pin>;
+		pinctrl-4 = <&P8_11_gpio_input_pin>;
+		pinctrl-5 = <&P8_11_qep_pin>;
+		pinctrl-6 = <&P8_11_pruout_pin>;
+	};
+
+	/* P8_12 (ZCZ ball T12) */
+	P8_12_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P8_12_default_pin>;
+		pinctrl-1 = <&P8_12_gpio_pin>;
+		pinctrl-2 = <&P8_12_gpio_pu_pin>;
+		pinctrl-3 = <&P8_12_gpio_pd_pin>;
+		pinctrl-4 = <&P8_12_gpio_input_pin>;
+		pinctrl-5 = <&P8_12_qep_pin>;
+		pinctrl-6 = <&P8_12_pruout_pin>;
+	};
+
+	/* P8_13 (ZCZ ball T10) */
+	P8_13_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_13_default_pin>;
+		pinctrl-1 = <&P8_13_gpio_pin>;
+		pinctrl-2 = <&P8_13_gpio_pu_pin>;
+		pinctrl-3 = <&P8_13_gpio_pd_pin>;
+		pinctrl-4 = <&P8_13_gpio_input_pin>;
+		pinctrl-5 = <&P8_13_pwm_pin>;
+	};
+
+	/* P8_14 (ZCZ ball T11) wl1835: wl_en */
+
+	/* P8_15 (ZCZ ball U13) */
+	P8_15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pru_ecap", "pruin";
+		pinctrl-0 = <&P8_15_default_pin>;
+		pinctrl-1 = <&P8_15_gpio_pin>;
+		pinctrl-2 = <&P8_15_gpio_pu_pin>;
+		pinctrl-3 = <&P8_15_gpio_pd_pin>;
+		pinctrl-4 = <&P8_15_gpio_input_pin>;
+		pinctrl-5 = <&P8_15_qep_pin>;
+		pinctrl-6 = <&P8_15_pru_ecap_pin>;
+		pinctrl-7 = <&P8_15_pruin_pin>;
+	};
+
+	/* P8_16 (ZCZ ball V13) */
+	P8_16_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruin";
+		pinctrl-0 = <&P8_16_default_pin>;
+		pinctrl-1 = <&P8_16_gpio_pin>;
+		pinctrl-2 = <&P8_16_gpio_pu_pin>;
+		pinctrl-3 = <&P8_16_gpio_pd_pin>;
+		pinctrl-4 = <&P8_16_gpio_input_pin>;
+		pinctrl-5 = <&P8_16_qep_pin>;
+		pinctrl-6 = <&P8_16_pruin_pin>;
+	};
+
+	/* P8_17 (ZCZ ball U12) wl1835: wl_irq */
+
+	/* P8_18 (ZCZ ball V12) */
+	P8_18_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_18_default_pin>;
+		pinctrl-1 = <&P8_18_gpio_pin>;
+		pinctrl-2 = <&P8_18_gpio_pu_pin>;
+		pinctrl-3 = <&P8_18_gpio_pd_pin>;
+		pinctrl-4 = <&P8_18_gpio_input_pin>;
+	};
+
+	/* P8_19 (ZCZ ball U10) */
+	P8_19_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_19_default_pin>;
+		pinctrl-1 = <&P8_19_gpio_pin>;
+		pinctrl-2 = <&P8_19_gpio_pu_pin>;
+		pinctrl-3 = <&P8_19_gpio_pd_pin>;
+		pinctrl-4 = <&P8_19_gpio_input_pin>;
+		pinctrl-5 = <&P8_19_pwm_pin>;
+	};
+
+	/* P8_20 (ZCZ ball V9) emmc */
+	P8_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_20_default_pin>;
+		pinctrl-1 = <&P8_20_gpio_pin>;
+		pinctrl-2 = <&P8_20_gpio_pu_pin>;
+		pinctrl-3 = <&P8_20_gpio_pd_pin>;
+		pinctrl-4 = <&P8_20_gpio_input_pin>;
+		pinctrl-5 = <&P8_20_pruout_pin>;
+		pinctrl-6 = <&P8_20_pruin_pin>;
+	};
+
+	/* P8_21 (ZCZ ball U9) emmc */
+	P8_21_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_21_default_pin>;
+		pinctrl-1 = <&P8_21_gpio_pin>;
+		pinctrl-2 = <&P8_21_gpio_pu_pin>;
+		pinctrl-3 = <&P8_21_gpio_pd_pin>;
+		pinctrl-4 = <&P8_21_gpio_input_pin>;
+		pinctrl-5 = <&P8_21_pruout_pin>;
+		pinctrl-6 = <&P8_21_pruin_pin>;
+	};
+
+	/* P8_22 (ZCZ ball V8) emmc */
+	P8_22_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_22_default_pin>;
+		pinctrl-1 = <&P8_22_gpio_pin>;
+		pinctrl-2 = <&P8_22_gpio_pu_pin>;
+		pinctrl-3 = <&P8_22_gpio_pd_pin>;
+		pinctrl-4 = <&P8_22_gpio_input_pin>;
+	};
+
+	/* P8_23 (ZCZ ball U8) emmc */
+	P8_23_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_23_default_pin>;
+		pinctrl-1 = <&P8_23_gpio_pin>;
+		pinctrl-2 = <&P8_23_gpio_pu_pin>;
+		pinctrl-3 = <&P8_23_gpio_pd_pin>;
+		pinctrl-4 = <&P8_23_gpio_input_pin>;
+	};
+
+	/* P8_24 (ZCZ ball V7) emmc */
+	P8_24_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_24_default_pin>;
+		pinctrl-1 = <&P8_24_gpio_pin>;
+		pinctrl-2 = <&P8_24_gpio_pu_pin>;
+		pinctrl-3 = <&P8_24_gpio_pd_pin>;
+		pinctrl-4 = <&P8_24_gpio_input_pin>;
+	};
+
+	/* P8_25 (ZCZ ball U7) emmc */
+	P8_25_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P8_25_default_pin>;
+		pinctrl-1 = <&P8_25_gpio_pin>;
+		pinctrl-2 = <&P8_25_gpio_pu_pin>;
+		pinctrl-3 = <&P8_25_gpio_pd_pin>;
+		pinctrl-4 = <&P8_25_gpio_input_pin>;
+	};
+
+	/* P8_26 (ZCZ ball V6) gpio-hog wl1835 */
+
+	/* P8_27 (ZCZ ball U5) hdmi */
+	P8_27_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_27_default_pin>;
+		pinctrl-1 = <&P8_27_gpio_pin>;
+		pinctrl-2 = <&P8_27_gpio_pu_pin>;
+		pinctrl-3 = <&P8_27_gpio_pd_pin>;
+		pinctrl-4 = <&P8_27_gpio_input_pin>;
+		pinctrl-5 = <&P8_27_pruout_pin>;
+		pinctrl-6 = <&P8_27_pruin_pin>;
+	};
+
+	/* P8_28 (ZCZ ball V5) hdmi */
+	P8_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_28_default_pin>;
+		pinctrl-1 = <&P8_28_gpio_pin>;
+		pinctrl-2 = <&P8_28_gpio_pu_pin>;
+		pinctrl-3 = <&P8_28_gpio_pd_pin>;
+		pinctrl-4 = <&P8_28_gpio_input_pin>;
+		pinctrl-5 = <&P8_28_pruout_pin>;
+		pinctrl-6 = <&P8_28_pruin_pin>;
+	};
+
+	/* P8_29 (ZCZ ball R5) hdmi */
+	P8_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_29_default_pin>;
+		pinctrl-1 = <&P8_29_gpio_pin>;
+		pinctrl-2 = <&P8_29_gpio_pu_pin>;
+		pinctrl-3 = <&P8_29_gpio_pd_pin>;
+		pinctrl-4 = <&P8_29_gpio_input_pin>;
+		pinctrl-5 = <&P8_29_pruout_pin>;
+		pinctrl-6 = <&P8_29_pruin_pin>;
+	};
+
+	/* P8_30 (ZCZ ball R6) hdmi */
+	P8_30_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P8_30_default_pin>;
+		pinctrl-1 = <&P8_30_gpio_pin>;
+		pinctrl-2 = <&P8_30_gpio_pu_pin>;
+		pinctrl-3 = <&P8_30_gpio_pd_pin>;
+		pinctrl-4 = <&P8_30_gpio_input_pin>;
+		pinctrl-5 = <&P8_30_pruout_pin>;
+		pinctrl-6 = <&P8_30_pruin_pin>;
+	};
+
+	/* P8_31 (ZCZ ball V4) hdmi */
+	P8_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "qep";
+		pinctrl-0 = <&P8_31_default_pin>;
+		pinctrl-1 = <&P8_31_gpio_pin>;
+		pinctrl-2 = <&P8_31_gpio_pu_pin>;
+		pinctrl-3 = <&P8_31_gpio_pd_pin>;
+		pinctrl-4 = <&P8_31_gpio_input_pin>;
+		pinctrl-5 = <&P8_31_uart_pin>;
+		pinctrl-6 = <&P8_31_qep_pin>;
+	};
+
+	/* P8_32 (ZCZ ball T5) hdmi */
+	P8_32_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_32_default_pin>;
+		pinctrl-1 = <&P8_32_gpio_pin>;
+		pinctrl-2 = <&P8_32_gpio_pu_pin>;
+		pinctrl-3 = <&P8_32_gpio_pd_pin>;
+		pinctrl-4 = <&P8_32_gpio_input_pin>;
+		pinctrl-5 = <&P8_32_qep_pin>;
+	};
+
+	/* P8_33 (ZCZ ball V3) hdmi */
+	P8_33_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_33_default_pin>;
+		pinctrl-1 = <&P8_33_gpio_pin>;
+		pinctrl-2 = <&P8_33_gpio_pu_pin>;
+		pinctrl-3 = <&P8_33_gpio_pd_pin>;
+		pinctrl-4 = <&P8_33_gpio_input_pin>;
+		pinctrl-5 = <&P8_33_qep_pin>;
+	};
+
+	/* P8_34 (ZCZ ball U4) hdmi */
+	P8_34_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_34_default_pin>;
+		pinctrl-1 = <&P8_34_gpio_pin>;
+		pinctrl-2 = <&P8_34_gpio_pu_pin>;
+		pinctrl-3 = <&P8_34_gpio_pd_pin>;
+		pinctrl-4 = <&P8_34_gpio_input_pin>;
+		pinctrl-5 = <&P8_34_pwm_pin>;
+	};
+
+	/* P8_35 (ZCZ ball V2) hdmi */
+	P8_35_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P8_35_default_pin>;
+		pinctrl-1 = <&P8_35_gpio_pin>;
+		pinctrl-2 = <&P8_35_gpio_pu_pin>;
+		pinctrl-3 = <&P8_35_gpio_pd_pin>;
+		pinctrl-4 = <&P8_35_gpio_input_pin>;
+		pinctrl-5 = <&P8_35_qep_pin>;
+	};
+
+	/* P8_36 (ZCZ ball U3) hdmi */
+	P8_36_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P8_36_default_pin>;
+		pinctrl-1 = <&P8_36_gpio_pin>;
+		pinctrl-2 = <&P8_36_gpio_pu_pin>;
+		pinctrl-3 = <&P8_36_gpio_pd_pin>;
+		pinctrl-4 = <&P8_36_gpio_input_pin>;
+		pinctrl-5 = <&P8_36_pwm_pin>;
+	};
+
+	/* P8_37 (ZCZ ball U1) hdmi */
+	P8_37_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "pwm";
+		pinctrl-0 = <&P8_37_default_pin>;
+		pinctrl-1 = <&P8_37_gpio_pin>;
+		pinctrl-2 = <&P8_37_gpio_pu_pin>;
+		pinctrl-3 = <&P8_37_gpio_pd_pin>;
+		pinctrl-4 = <&P8_37_gpio_input_pin>;
+		pinctrl-5 = <&P8_37_uart_pin>;
+		pinctrl-6 = <&P8_37_pwm_pin>;
+	};
+
+	/* P8_38 (ZCZ ball U2) hdmi */
+	P8_38_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "pwm";
+		pinctrl-0 = <&P8_38_default_pin>;
+		pinctrl-1 = <&P8_38_gpio_pin>;
+		pinctrl-2 = <&P8_38_gpio_pu_pin>;
+		pinctrl-3 = <&P8_38_gpio_pd_pin>;
+		pinctrl-4 = <&P8_38_gpio_input_pin>;
+		pinctrl-5 = <&P8_38_uart_pin>;
+		pinctrl-6 = <&P8_38_pwm_pin>;
+	};
+
+	/* P8_39 (ZCZ ball T3) hdmi */
+	P8_39_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_39_default_pin>;
+		pinctrl-1 = <&P8_39_gpio_pin>;
+		pinctrl-2 = <&P8_39_gpio_pu_pin>;
+		pinctrl-3 = <&P8_39_gpio_pd_pin>;
+		pinctrl-4 = <&P8_39_gpio_input_pin>;
+		pinctrl-5 = <&P8_39_qep_pin>;
+		pinctrl-6 = <&P8_39_pruout_pin>;
+		pinctrl-7 = <&P8_39_pruin_pin>;
+	};
+
+	/* P8_40 (ZCZ ball T4) hdmi */
+	P8_40_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_40_default_pin>;
+		pinctrl-1 = <&P8_40_gpio_pin>;
+		pinctrl-2 = <&P8_40_gpio_pu_pin>;
+		pinctrl-3 = <&P8_40_gpio_pd_pin>;
+		pinctrl-4 = <&P8_40_gpio_input_pin>;
+		pinctrl-5 = <&P8_40_qep_pin>;
+		pinctrl-6 = <&P8_40_pruout_pin>;
+		pinctrl-7 = <&P8_40_pruin_pin>;
+	};
+
+	/* P8_41 (ZCZ ball T1) hdmi */
+	P8_41_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_41_default_pin>;
+		pinctrl-1 = <&P8_41_gpio_pin>;
+		pinctrl-2 = <&P8_41_gpio_pu_pin>;
+		pinctrl-3 = <&P8_41_gpio_pd_pin>;
+		pinctrl-4 = <&P8_41_gpio_input_pin>;
+		pinctrl-5 = <&P8_41_qep_pin>;
+		pinctrl-6 = <&P8_41_pruout_pin>;
+		pinctrl-7 = <&P8_41_pruin_pin>;
+	};
+
+	/* P8_42 (ZCZ ball T2) hdmi */
+	P8_42_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P8_42_default_pin>;
+		pinctrl-1 = <&P8_42_gpio_pin>;
+		pinctrl-2 = <&P8_42_gpio_pu_pin>;
+		pinctrl-3 = <&P8_42_gpio_pd_pin>;
+		pinctrl-4 = <&P8_42_gpio_input_pin>;
+		pinctrl-5 = <&P8_42_qep_pin>;
+		pinctrl-6 = <&P8_42_pruout_pin>;
+		pinctrl-7 = <&P8_42_pruin_pin>;
+	};
+
+	/* P8_43 (ZCZ ball R3) hdmi */
+	P8_43_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_43_default_pin>;
+		pinctrl-1 = <&P8_43_gpio_pin>;
+		pinctrl-2 = <&P8_43_gpio_pu_pin>;
+		pinctrl-3 = <&P8_43_gpio_pd_pin>;
+		pinctrl-4 = <&P8_43_gpio_input_pin>;
+		pinctrl-5 = <&P8_43_pwm_pin>;
+		pinctrl-6 = <&P8_43_pruout_pin>;
+		pinctrl-7 = <&P8_43_pruin_pin>;
+	};
+
+	/* P8_44 (ZCZ ball R4) hdmi */
+	P8_44_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_44_default_pin>;
+		pinctrl-1 = <&P8_44_gpio_pin>;
+		pinctrl-2 = <&P8_44_gpio_pu_pin>;
+		pinctrl-3 = <&P8_44_gpio_pd_pin>;
+		pinctrl-4 = <&P8_44_gpio_input_pin>;
+		pinctrl-5 = <&P8_44_pwm_pin>;
+		pinctrl-6 = <&P8_44_pruout_pin>;
+		pinctrl-7 = <&P8_44_pruin_pin>;
+	};
+
+	/* P8_45 (ZCZ ball R1) hdmi */
+	P8_45_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_45_default_pin>;
+		pinctrl-1 = <&P8_45_gpio_pin>;
+		pinctrl-2 = <&P8_45_gpio_pu_pin>;
+		pinctrl-3 = <&P8_45_gpio_pd_pin>;
+		pinctrl-4 = <&P8_45_gpio_input_pin>;
+		pinctrl-5 = <&P8_45_pwm_pin>;
+		pinctrl-6 = <&P8_45_pruout_pin>;
+		pinctrl-7 = <&P8_45_pruin_pin>;
+	};
+
+	/* P8_46 (ZCZ ball R2) hdmi */
+	P8_46_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P8_46_default_pin>;
+		pinctrl-1 = <&P8_46_gpio_pin>;
+		pinctrl-2 = <&P8_46_gpio_pu_pin>;
+		pinctrl-3 = <&P8_46_gpio_pd_pin>;
+		pinctrl-4 = <&P8_46_gpio_input_pin>;
+		pinctrl-5 = <&P8_46_pwm_pin>;
+		pinctrl-6 = <&P8_46_pruout_pin>;
+		pinctrl-7 = <&P8_46_pruin_pin>;
+	};
+
+	/************************/
+	/* P9 Header */
+	/************************/
+
+	/* P9_01                GND */
+
+	/* P9_02                GND */
+
+	/* P9_03                3V3 */
+
+	/* P9_04                3V3 */
+
+	/* P9_05                VDD_5V */
+
+	/* P9_06                VDD_5V */
+
+	/* P9_07                SYS_5V */
+
+	/* P9_08                SYS_5V */
+
+	/* P9_09                PWR_BUT */
+
+	/* P9_10                RSTn */
+
+	/* P9_11 (ZCZ ball T17) */
+	P9_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P9_11_default_pin>;
+		pinctrl-1 = <&P9_11_gpio_pin>;
+		pinctrl-2 = <&P9_11_gpio_pu_pin>;
+		pinctrl-3 = <&P9_11_gpio_pd_pin>;
+		pinctrl-4 = <&P9_11_gpio_input_pin>;
+		pinctrl-5 = <&P9_11_uart_pin>;
+	};
+
+	/* P9_12 (ZCZ ball U18) */
+	P9_12_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P9_12_default_pin>;
+		pinctrl-1 = <&P9_12_gpio_pin>;
+		pinctrl-2 = <&P9_12_gpio_pu_pin>;
+		pinctrl-3 = <&P9_12_gpio_pd_pin>;
+		pinctrl-4 = <&P9_12_gpio_input_pin>;
+	};
+
+	/* P9_13 (ZCZ ball U17) */
+	P9_13_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P9_13_default_pin>;
+		pinctrl-1 = <&P9_13_gpio_pin>;
+		pinctrl-2 = <&P9_13_gpio_pu_pin>;
+		pinctrl-3 = <&P9_13_gpio_pd_pin>;
+		pinctrl-4 = <&P9_13_gpio_input_pin>;
+		pinctrl-5 = <&P9_13_uart_pin>;
+	};
+
+	/* P9_14 (ZCZ ball U14) */
+	P9_14_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_14_default_pin>;
+		pinctrl-1 = <&P9_14_gpio_pin>;
+		pinctrl-2 = <&P9_14_gpio_pu_pin>;
+		pinctrl-3 = <&P9_14_gpio_pd_pin>;
+		pinctrl-4 = <&P9_14_gpio_input_pin>;
+		pinctrl-5 = <&P9_14_pwm_pin>;
+	};
+
+	/* P9_15 (ZCZ ball R13) */
+	P9_15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_15_default_pin>;
+		pinctrl-1 = <&P9_15_gpio_pin>;
+		pinctrl-2 = <&P9_15_gpio_pu_pin>;
+		pinctrl-3 = <&P9_15_gpio_pd_pin>;
+		pinctrl-4 = <&P9_15_gpio_input_pin>;
+		pinctrl-5 = <&P9_15_pwm_pin>;
+	};
+
+	/* P9_16 (ZCZ ball T14) */
+	P9_16_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_16_default_pin>;
+		pinctrl-1 = <&P9_16_gpio_pin>;
+		pinctrl-2 = <&P9_16_gpio_pu_pin>;
+		pinctrl-3 = <&P9_16_gpio_pd_pin>;
+		pinctrl-4 = <&P9_16_gpio_input_pin>;
+		pinctrl-5 = <&P9_16_pwm_pin>;
+	};
+
+	/* P9_17 (ZCZ ball A16) */
+	P9_17_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_17_default_pin>;
+		pinctrl-1 = <&P9_17_gpio_pin>;
+		pinctrl-2 = <&P9_17_gpio_pu_pin>;
+		pinctrl-3 = <&P9_17_gpio_pd_pin>;
+		pinctrl-4 = <&P9_17_gpio_input_pin>;
+		pinctrl-5 = <&P9_17_spi_cs_pin>;
+		pinctrl-6 = <&P9_17_i2c_pin>;
+		pinctrl-7 = <&P9_17_pwm_pin>;
+		pinctrl-8 = <&P9_17_pru_uart_pin>;
+	};
+
+	/* P9_18 (ZCZ ball B16) */
+	P9_18_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_18_default_pin>;
+		pinctrl-1 = <&P9_18_gpio_pin>;
+		pinctrl-2 = <&P9_18_gpio_pu_pin>;
+		pinctrl-3 = <&P9_18_gpio_pd_pin>;
+		pinctrl-4 = <&P9_18_gpio_input_pin>;
+		pinctrl-5 = <&P9_18_spi_pin>;
+		pinctrl-6 = <&P9_18_i2c_pin>;
+		pinctrl-7 = <&P9_18_pwm_pin>;
+		pinctrl-8 = <&P9_18_pru_uart_pin>;
+	};
+
+	/* P9_19 (ZCZ ball D17) i2c */
+	P9_19_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart", "timer";
+		pinctrl-0 = <&P9_19_default_pin>;
+		pinctrl-1 = <&P9_19_gpio_pin>;
+		pinctrl-2 = <&P9_19_gpio_pu_pin>;
+		pinctrl-3 = <&P9_19_gpio_pd_pin>;
+		pinctrl-4 = <&P9_19_gpio_input_pin>;
+		pinctrl-5 = <&P9_19_spi_cs_pin>;
+		pinctrl-6 = <&P9_19_can_pin>;
+		pinctrl-7 = <&P9_19_i2c_pin>;
+		pinctrl-8 = <&P9_19_pru_uart_pin>;
+		pinctrl-9 = <&P9_19_timer_pin>;
+	};
+
+	/* P9_20 (ZCZ ball D18) i2c */
+	P9_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart", "timer";
+		pinctrl-0 = <&P9_20_default_pin>;
+		pinctrl-1 = <&P9_20_gpio_pin>;
+		pinctrl-2 = <&P9_20_gpio_pu_pin>;
+		pinctrl-3 = <&P9_20_gpio_pd_pin>;
+		pinctrl-4 = <&P9_20_gpio_input_pin>;
+		pinctrl-5 = <&P9_20_spi_cs_pin>;
+		pinctrl-6 = <&P9_20_can_pin>;
+		pinctrl-7 = <&P9_20_i2c_pin>;
+		pinctrl-8 = <&P9_20_pru_uart_pin>;
+		pinctrl-9 = <&P9_20_timer_pin>;
+	};
+
+	/* P9_21 (ZCZ ball B17) */
+	P9_21_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_21_default_pin>;
+		pinctrl-1 = <&P9_21_gpio_pin>;
+		pinctrl-2 = <&P9_21_gpio_pu_pin>;
+		pinctrl-3 = <&P9_21_gpio_pd_pin>;
+		pinctrl-4 = <&P9_21_gpio_input_pin>;
+		pinctrl-5 = <&P9_21_spi_pin>;
+		pinctrl-6 = <&P9_21_uart_pin>;
+		pinctrl-7 = <&P9_21_i2c_pin>;
+		pinctrl-8 = <&P9_21_pwm_pin>;
+		pinctrl-9 = <&P9_21_pru_uart_pin>;
+	};
+
+	/* P9_22 (ZCZ ball A17) */
+	P9_22_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P9_22_default_pin>;
+		pinctrl-1 = <&P9_22_gpio_pin>;
+		pinctrl-2 = <&P9_22_gpio_pu_pin>;
+		pinctrl-3 = <&P9_22_gpio_pd_pin>;
+		pinctrl-4 = <&P9_22_gpio_input_pin>;
+		pinctrl-5 = <&P9_22_spi_sclk_pin>;
+		pinctrl-6 = <&P9_22_uart_pin>;
+		pinctrl-7 = <&P9_22_i2c_pin>;
+		pinctrl-8 = <&P9_22_pwm_pin>;
+		pinctrl-9 = <&P9_22_pru_uart_pin>;
+	};
+
+	/* P9_23 (ZCZ ball V14) */
+	P9_23_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P9_23_default_pin>;
+		pinctrl-1 = <&P9_23_gpio_pin>;
+		pinctrl-2 = <&P9_23_gpio_pu_pin>;
+		pinctrl-3 = <&P9_23_gpio_pd_pin>;
+		pinctrl-4 = <&P9_23_gpio_input_pin>;
+		pinctrl-5 = <&P9_23_pwm_pin>;
+	};
+
+	/* P9_24 (ZCZ ball D15) */
+	P9_24_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P9_24_default_pin>;
+		pinctrl-1 = <&P9_24_gpio_pin>;
+		pinctrl-2 = <&P9_24_gpio_pu_pin>;
+		pinctrl-3 = <&P9_24_gpio_pd_pin>;
+		pinctrl-4 = <&P9_24_gpio_input_pin>;
+		pinctrl-5 = <&P9_24_uart_pin>;
+		pinctrl-6 = <&P9_24_can_pin>;
+		pinctrl-7 = <&P9_24_i2c_pin>;
+		pinctrl-8 = <&P9_24_pru_uart_pin>;
+		pinctrl-9 = <&P9_24_pruin_pin>;
+	};
+
+	/* P9_25 (ZCZ ball A14) audio */
+	P9_25_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_25_default_pin>;
+		pinctrl-1 = <&P9_25_gpio_pin>;
+		pinctrl-2 = <&P9_25_gpio_pu_pin>;
+		pinctrl-3 = <&P9_25_gpio_pd_pin>;
+		pinctrl-4 = <&P9_25_gpio_input_pin>;
+		pinctrl-5 = <&P9_25_qep_pin>;
+		pinctrl-6 = <&P9_25_pruout_pin>;
+		pinctrl-7 = <&P9_25_pruin_pin>;
+	};
+
+	/* P9_26 (ZCZ ball D16) */
+	P9_26_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P9_26_default_pin>;
+		pinctrl-1 = <&P9_26_gpio_pin>;
+		pinctrl-2 = <&P9_26_gpio_pu_pin>;
+		pinctrl-3 = <&P9_26_gpio_pd_pin>;
+		pinctrl-4 = <&P9_26_gpio_input_pin>;
+		pinctrl-5 = <&P9_26_uart_pin>;
+		pinctrl-6 = <&P9_26_can_pin>;
+		pinctrl-7 = <&P9_26_i2c_pin>;
+		pinctrl-8 = <&P9_26_pru_uart_pin>;
+		pinctrl-9 = <&P9_26_pruin_pin>;
+	};
+
+	/* P9_27 (ZCZ ball C13) */
+	P9_27_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_27_default_pin>;
+		pinctrl-1 = <&P9_27_gpio_pin>;
+		pinctrl-2 = <&P9_27_gpio_pu_pin>;
+		pinctrl-3 = <&P9_27_gpio_pd_pin>;
+		pinctrl-4 = <&P9_27_gpio_input_pin>;
+		pinctrl-5 = <&P9_27_qep_pin>;
+		pinctrl-6 = <&P9_27_pruout_pin>;
+		pinctrl-7 = <&P9_27_pruin_pin>;
+	};
+
+	/* P9_28 (ZCZ ball C12) audio */
+	P9_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "pwm", "pwm2", "pruout", "pruin";
+		pinctrl-0 = <&P9_28_default_pin>;
+		pinctrl-1 = <&P9_28_gpio_pin>;
+		pinctrl-2 = <&P9_28_gpio_pu_pin>;
+		pinctrl-3 = <&P9_28_gpio_pd_pin>;
+		pinctrl-4 = <&P9_28_gpio_input_pin>;
+		pinctrl-5 = <&P9_28_spi_cs_pin>;
+		pinctrl-6 = <&P9_28_pwm_pin>;
+		pinctrl-7 = <&P9_28_pwm2_pin>;
+		pinctrl-8 = <&P9_28_pruout_pin>;
+		pinctrl-9 = <&P9_28_pruin_pin>;
+	};
+
+	/* P9_29 (ZCZ ball B13) audio */
+	P9_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P9_29_default_pin>;
+		pinctrl-1 = <&P9_29_gpio_pin>;
+		pinctrl-2 = <&P9_29_gpio_pu_pin>;
+		pinctrl-3 = <&P9_29_gpio_pd_pin>;
+		pinctrl-4 = <&P9_29_gpio_input_pin>;
+		pinctrl-5 = <&P9_29_spi_pin>;
+		pinctrl-6 = <&P9_29_pwm_pin>;
+		pinctrl-7 = <&P9_29_pruout_pin>;
+		pinctrl-8 = <&P9_29_pruin_pin>;
+	};
+
+	/* P9_30 (ZCZ ball D12) gpio-hog wl1835 */
+
+	/* P9_31 (ZCZ ball A13) audio */
+	P9_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P9_31_default_pin>;
+		pinctrl-1 = <&P9_31_gpio_pin>;
+		pinctrl-2 = <&P9_31_gpio_pu_pin>;
+		pinctrl-3 = <&P9_31_gpio_pd_pin>;
+		pinctrl-4 = <&P9_31_gpio_input_pin>;
+		pinctrl-5 = <&P9_31_spi_sclk_pin>;
+		pinctrl-6 = <&P9_31_pwm_pin>;
+		pinctrl-7 = <&P9_31_pruout_pin>;
+		pinctrl-8 = <&P9_31_pruin_pin>;
+	};
+
+	/* P9_32                VADC */
+
+	/* P9_33 (ZCZ ball C8)  AIN4         */
+
+	/* P9_34                AGND */
+
+	/* P9_35 (ZCZ ball A8)  AIN6         */
+
+	/* P9_36 (ZCZ ball B8)  AIN5         */
+
+	/* P9_37 (ZCZ ball B7)  AIN2         */
+
+	/* P9_38 (ZCZ ball A7)  AIN3         */
+
+	/* P9_39 (ZCZ ball B6)  AIN0         */
+
+	/* P9_40 (ZCZ ball C7)  AIN1         */
+
+	/* P9_41 (ZCZ ball D14) */
+	P9_41_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "timer", "pruin";
+		pinctrl-0 = <&P9_41_default_pin>;
+		pinctrl-1 = <&P9_41_gpio_pin>;
+		pinctrl-2 = <&P9_41_gpio_pu_pin>;
+		pinctrl-3 = <&P9_41_gpio_pd_pin>;
+		pinctrl-4 = <&P9_41_gpio_input_pin>;
+		pinctrl-5 = <&P9_41_timer_pin>;
+		pinctrl-6 = <&P9_41_pruin_pin>;
+	};
+
+	/* P9_41.1 */
+	/* P9_91 (ZCZ ball D13) */
+	P9_91_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_91_default_pin>;
+		pinctrl-1 = <&P9_91_gpio_pin>;
+		pinctrl-2 = <&P9_91_gpio_pu_pin>;
+		pinctrl-3 = <&P9_91_gpio_pd_pin>;
+		pinctrl-4 = <&P9_91_gpio_input_pin>;
+		pinctrl-5 = <&P9_91_qep_pin>;
+		pinctrl-6 = <&P9_91_pruout_pin>;
+		pinctrl-7 = <&P9_91_pruin_pin>;
+	};
+
+	/* P9_42 (ZCZ ball C18) */
+	P9_42_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "spi_sclk", "uart", "pwm", "pru_ecap";
+		pinctrl-0 = <&P9_42_default_pin>;
+		pinctrl-1 = <&P9_42_gpio_pin>;
+		pinctrl-2 = <&P9_42_gpio_pu_pin>;
+		pinctrl-3 = <&P9_42_gpio_pd_pin>;
+		pinctrl-4 = <&P9_42_gpio_input_pin>;
+		pinctrl-5 = <&P9_42_spi_cs_pin>;
+		pinctrl-6 = <&P9_42_spi_sclk_pin>;
+		pinctrl-7 = <&P9_42_uart_pin>;
+		pinctrl-8 = <&P9_42_pwm_pin>;
+		pinctrl-9 = <&P9_42_pru_ecap_pin>;
+	};
+
+	/* P9_42.1 */
+	/* P9_92 (ZCZ ball B12) */
+	P9_92_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P9_92_default_pin>;
+		pinctrl-1 = <&P9_92_gpio_pin>;
+		pinctrl-2 = <&P9_92_gpio_pu_pin>;
+		pinctrl-3 = <&P9_92_gpio_pd_pin>;
+		pinctrl-4 = <&P9_92_gpio_input_pin>;
+		pinctrl-5 = <&P9_92_qep_pin>;
+		pinctrl-6 = <&P9_92_pruout_pin>;
+		pinctrl-7 = <&P9_92_pruin_pin>;
+	};
+
+	/* P9_43                GND */
+
+	/* P9_44                GND */
+
+	/* P9_45                GND */
+
+	/* P9_46                GND */
+
+	/*       (ZCZ ball A15) */
+	A15_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "clkout", "gpio", "gpio_pu", "gpio_pd";
+		pinctrl-0 = <&A15_default_pin>;
+		pinctrl-1 = <&A15_clkout_pin>;
+		pinctrl-2 = <&A15_gpio_pin>;
+		pinctrl-3 = <&A15_gpio_pu_pin>;
+		pinctrl-4 = <&A15_gpio_pd_pin>;
+	};
+
+	cape-universal {
+		compatible = "gpio-of-helper";
+		status = "okay";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		P8_03 {
+			gpio-name = "P8_03";
+			gpio = <&gpio1 6 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_04 {
+			gpio-name = "P8_04";
+			gpio = <&gpio1 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_05 {
+			gpio-name = "P8_05";
+			gpio = <&gpio1 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_06 {
+			gpio-name = "P8_06";
+			gpio = <&gpio1 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_07 {
+			gpio-name = "P8_07";
+			gpio = <&gpio2 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_08 {
+			gpio-name = "P8_08";
+			gpio = <&gpio2 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_09 {
+			gpio-name = "P8_09";
+			gpio = <&gpio2 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_10 {
+			gpio-name = "P8_10";
+			gpio = <&gpio2 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_11 {
+			gpio-name = "P8_11";
+			gpio = <&gpio1 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_12 {
+			gpio-name = "P8_12";
+			gpio = <&gpio1 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_13 {
+			gpio-name = "P8_13";
+			gpio = <&gpio0 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_15 {
+			gpio-name = "P8_15";
+			gpio = <&gpio1 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_16 {
+			gpio-name = "P8_16";
+			gpio = <&gpio1 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_18 {
+			gpio-name = "P8_18";
+			gpio = <&gpio2 1 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_19 {
+			gpio-name = "P8_19";
+			gpio = <&gpio0 22 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_20 {
+			gpio-name = "P8_20";
+			gpio = <&gpio1 31 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_21 {
+			gpio-name = "P8_21";
+			gpio = <&gpio1 30 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_22 {
+			gpio-name = "P8_22";
+			gpio = <&gpio1 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_23 {
+			gpio-name = "P8_23";
+			gpio = <&gpio1 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_24 {
+			gpio-name = "P8_24";
+			gpio = <&gpio1 1 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_25 {
+			gpio-name = "P8_25";
+			gpio = <&gpio1 0 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_27 {
+			gpio-name = "P8_27";
+			gpio = <&gpio2 22 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_28 {
+			gpio-name = "P8_28";
+			gpio = <&gpio2 24 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_29 {
+			gpio-name = "P8_29";
+			gpio = <&gpio2 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_30 {
+			gpio-name = "P8_30";
+			gpio = <&gpio2 25 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_31 {
+			gpio-name = "P8_31";
+			gpio = <&gpio0 10 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_32 {
+			gpio-name = "P8_32";
+			gpio = <&gpio0 11 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_33 {
+			gpio-name = "P8_33";
+			gpio = <&gpio0 9 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_34 {
+			gpio-name = "P8_34";
+			gpio = <&gpio2 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_35 {
+			gpio-name = "P8_35";
+			gpio = <&gpio0 8 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_36 {
+			gpio-name = "P8_36";
+			gpio = <&gpio2 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_37 {
+			gpio-name = "P8_37";
+			gpio = <&gpio2 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_38 {
+			gpio-name = "P8_38";
+			gpio = <&gpio2 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_39 {
+			gpio-name = "P8_39";
+			gpio = <&gpio2 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_40 {
+			gpio-name = "P8_40";
+			gpio = <&gpio2 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_41 {
+			gpio-name = "P8_41";
+			gpio = <&gpio2 10 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_42 {
+			gpio-name = "P8_42";
+			gpio = <&gpio2 11 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_43 {
+			gpio-name = "P8_43";
+			gpio = <&gpio2 8 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_44 {
+			gpio-name = "P8_44";
+			gpio = <&gpio2 9 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_45 {
+			gpio-name = "P8_45";
+			gpio = <&gpio2 6 0>;
+			input;
+			dir-changeable;
+		};
+
+		P8_46 {
+			gpio-name = "P8_46";
+			gpio = <&gpio2 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_11 {
+			gpio-name = "P9_11";
+			gpio = <&gpio0 30 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_12 {
+			gpio-name = "P9_12";
+			gpio = <&gpio1 28 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_13 {
+			gpio-name = "P9_13";
+			gpio = <&gpio0 31 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_14 {
+			gpio-name = "P9_14";
+			gpio = <&gpio1 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_15 {
+			gpio-name = "P9_15";
+			gpio = <&gpio1 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_16 {
+			gpio-name = "P9_16";
+			gpio = <&gpio1 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_17 {
+			gpio-name = "P9_17";
+			gpio = <&gpio0 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_18 {
+			gpio-name = "P9_18";
+			gpio = <&gpio0 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_19 {
+			gpio-name = "P9_19";
+			gpio = <&gpio0 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_20 {
+			gpio-name = "P9_20";
+			gpio = <&gpio0 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_21 {
+			gpio-name = "P9_21";
+			gpio = <&gpio0 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_22 {
+			gpio-name = "P9_22";
+			gpio = <&gpio0 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_23 {
+			gpio-name = "P9_23";
+			gpio = <&gpio1 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_24 {
+			gpio-name = "P9_24";
+			gpio = <&gpio0 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_25 {
+			gpio-name = "P9_25";
+			gpio = <&gpio3 21 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_26 {
+			gpio-name = "P9_26";
+			gpio = <&gpio0 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_27 {
+			gpio-name = "P9_27";
+			gpio = <&gpio3 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_28 {
+			gpio-name = "P9_28";
+			gpio = <&gpio3 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_29 {
+			gpio-name = "P9_29";
+			gpio = <&gpio3 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_31 {
+			gpio-name = "P9_31";
+			gpio = <&gpio3 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_41 {
+			gpio-name = "P9_41";
+			gpio = <&gpio0 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_91 {
+			gpio-name = "P9_91";
+			gpio = <&gpio3 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_42 {
+			gpio-name = "P9_42";
+			gpio = <&gpio0 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P9_92 {
+			gpio-name = "P9_92";
+			gpio = <&gpio3 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		A15 {
+			gpio-name = "A15";
+			gpio = <&gpio0 19 0>;
+			input;
+			dir-changeable;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless-uboot-univ.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless-uboot-univ.dts
new file mode 100644
index 00000000000..ac63c249b19
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless-uboot-univ.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+#include "am335x-bonegreen-wireless-common-univ.dtsi"
+
+/ {
+	model = "TI AM335x BeagleBone Green Wireless";
+	compatible = "ti,am335x-bone-green-wireless", "ti,am335x-bone-green", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-bonegreen-wireless-uboot-univ.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+};
+
+&ldo3_reg {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmcsd_fixed>;
+};
+
+&gpio1 {
+	ls_buf_en {
+		gpio-hog;
+		gpios = <29 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "LS_BUF_EN";
+	};
+};
+
+/* BT_AUD_OUT from wl1835 has to be pulled low when WL_EN is activated.*/
+/* In case it isn't, wilink8 ends up in one of the test modes that     */
+/* introduces various issues (ELP wakeup timeouts etc.).               */
+/* On the BBGW this pin is routed through the level shifter (U21) that */
+/* introduces a pullup on the line and wilink8 ends up in a bad state. */
+/* Use a gpio hog to force this pin low. An alternative may be adding  */
+/* an external pulldown on U21 pin 4.                                  */
+
+&gpio3 {
+	bt_aud_in {
+		gpio-hog;
+		gpios = <16 GPIO_ACTIVE_HIGH>;
+		output-low;
+		line-name = "MCASP0_AHCLKR";
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
index 4092cd193b8..a0a841d9716 100644
--- a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
@@ -8,11 +8,17 @@
 #include "am335x-bone-common.dtsi"
 #include "am335x-bonegreen-common.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "TI AM335x BeagleBone Green Wireless";
 	compatible = "ti,am335x-bone-green-wireless", "ti,am335x-bone-green", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
 
+	chosen {
+		base_dtb = "am335x-bonegreen-wireless.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
+
 	wlan_en_reg: fixedregulator@2 {
 		compatible = "regulator-fixed";
 		regulator-name = "wlan-en-regulator";
diff --git a/arch/arm/boot/dts/am335x-bonegreen.dts b/arch/arm/boot/dts/am335x-bonegreen.dts
index c12bb071777..d3009aeeb64 100644
--- a/arch/arm/boot/dts/am335x-bonegreen.dts
+++ b/arch/arm/boot/dts/am335x-bonegreen.dts
@@ -7,8 +7,14 @@
 #include "am33xx.dtsi"
 #include "am335x-bone-common.dtsi"
 #include "am335x-bonegreen-common.dtsi"
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "TI AM335x BeagleBone Green";
 	compatible = "ti,am335x-bone-green", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-bonegreen.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index 1d290208348..e9cbbb341bc 100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
@@ -13,10 +13,16 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 
 #include <dt-bindings/display/tda998x.h>
+/* #include "am335x-bone-jtag.dtsi" */
 
 / {
 	model = "Octavo Systems OSD3358-SM-RED";
 	compatible = "oct,osd3358-sm-refdesign", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-osd3358-sm-red.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &ldo3_reg {
@@ -40,7 +46,6 @@ &mmc2 {
 &am33xx_pinmux {
 	nxp_hdmi_bonelt_pins: nxp-hdmi-bonelt-pins {
 		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
 			AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
@@ -64,12 +69,6 @@ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
 		>;
 	};
 
-	nxp_hdmi_bonelt_off_pins: nxp-hdmi-bonelt-off-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
-		>;
-	};
-
 	mcasp0_pins: mcasp0-pins {
 		pinctrl-single,pins = <
 			AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0)
@@ -124,9 +123,8 @@ tda19988: hdmi-encoder@70 {
 		compatible = "nxp,tda998x";
 		reg = <0x70>;
 
-		pinctrl-names = "default", "off";
+		pinctrl-names = "default";
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
-		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
 		/* Convert 24bit BGR to RGB, e.g. cross red and blue wiring */
 		/* video-ports = <0x234501>; */
@@ -264,9 +262,6 @@ vmmcsd_fixed: fixedregulator0 {
 };
 
 &am33xx_pinmux {
-	pinctrl-names = "default";
-	pinctrl-0 = <&clkout2_pin>;
-
 	user_leds_s0: user-leds-s0 {
 		pinctrl-single,pins = <
 			AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7)	/* gpmc_a5.gpio1_21 */
@@ -290,12 +285,6 @@ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
 		>;
 	};
 
-	clkout2_pin: pinmux-clkout2-pin {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3)	/* xdma_event_intr1.clkout2 */
-		>;
-	};
-
 	cpsw_default: cpsw-default {
 		pinctrl-single,pins = <
 			/* Slave 1 */
diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
index f0b222201b8..72818913366 100644
--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
+++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
@@ -15,6 +15,8 @@ / {
 
 	chosen {
 		stdout-path = &uart0;
+		base_dtb = "am335x-pocketbeagle.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
 	};
 
 	leds {
@@ -60,24 +62,24 @@ vmmcsd_fixed: fixedregulator0 {
 };
 
 &am33xx_pinmux {
-	i2c2_pins: pinmux-i2c2-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* (D17) uart1_rtsn.I2C2_SCL */
-			AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* (D18) uart1_ctsn.I2C2_SDA */
-		>;
-	};
+//	i2c2_pins: pinmux-i2c2-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* (D17) uart1_rtsn.I2C2_SCL */
+//			AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3)	/* (D18) uart1_ctsn.I2C2_SDA */
+//		>;
+//	};
 
-	ehrpwm0_pins: pinmux-ehrpwm0-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE1)	/* (A13) mcasp0_aclkx.ehrpwm0A */
-		>;
-	};
+//	ehrpwm0_pins: pinmux-ehrpwm0-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE1)	/* (A13) mcasp0_aclkx.ehrpwm0A */
+//		>;
+//	};
 
-	ehrpwm1_pins: pinmux-ehrpwm1-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE6)	/* (U14) gpmc_a2.ehrpwm1A */
-		>;
-	};
+//	ehrpwm1_pins: pinmux-ehrpwm1-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE6)	/* (U14) gpmc_a2.ehrpwm1A */
+//		>;
+//	};
 
 	mmc0_pins: pinmux-mmc0-pins {
 		pinctrl-single,pins = <
@@ -91,23 +93,23 @@ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
 		>;
 	};
 
-	spi0_pins: pinmux-spi0-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
-			AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
-			AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
-			AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
-		>;
-	};
+//	spi0_pins: pinmux-spi0-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)	/* (A17) spi0_sclk.spi0_sclk */
+//			AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)	/* (B17) spi0_d0.spi0_d0 */
+//			AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)	/* (B16) spi0_d1.spi0_d1 */
+//			AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)	/* (A16) spi0_cs0.spi0_cs0 */
+//		>;
+//	};
 
-	spi1_pins: pinmux-spi1-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_INPUT_PULLUP, MUX_MODE4)	/* (C18) eCAP0_in_PWM0_out.spi1_sclk */
-			AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE4)	/* (E18) uart0_ctsn.spi1_d0 */
-			AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE4)	/* (E17) uart0_rtsn.spi1_d1 */
-			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_INPUT_PULLUP, MUX_MODE4)	/* (A15) xdma_event_intr0.spi1_cs1 */
-		>;
-	};
+//	spi1_pins: pinmux-spi1-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_INPUT_PULLUP, MUX_MODE4)	/* (C18) eCAP0_in_PWM0_out.spi1_sclk */
+//			AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE4)	/* (E18) uart0_ctsn.spi1_d0 */
+//			AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE4)	/* (E17) uart0_rtsn.spi1_d1 */
+//			AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_INPUT_PULLUP, MUX_MODE4)	/* (A15) xdma_event_intr0.spi1_cs1 */
+//		>;
+//	};
 
 	usr_leds_pins: pinmux-usr-leds-pins {
 		pinctrl-single,pins = <
@@ -125,12 +127,839 @@ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
 		>;
 	};
 
-	uart4_pins: pinmux-uart4-pins {
-		pinctrl-single,pins = <
-			AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6)	/* (T17) gpmc_wait0.uart4_rxd */
-			AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLDOWN, MUX_MODE6)	/* (U17) gpmc_wpn.uart4_txd */
-		>;
-	};
+//	uart4_pins: pinmux-uart4-pins {
+//		pinctrl-single,pins = <
+//			AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6)	/* (T17) gpmc_wait0.uart4_rxd */
+//			AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLDOWN, MUX_MODE6)	/* (U17) gpmc_wpn.uart4_txd */
+//		>;
+//	};
+
+	/************************/
+	/* P1 Header */
+	/************************/
+
+	/* P1_01                VIN-AC */
+
+	/* P1_02 (ZCZ ball R5) gpio2_23 */
+	P1_02_default_pin: pinmux_P1_02_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_hsync.gpio2_23 */
+	P1_02_gpio_pin: pinmux_P1_02_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_hsync.gpio2_23 */
+	P1_02_gpio_pu_pin: pinmux_P1_02_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P1_02_gpio_pd_pin: pinmux_P1_02_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_hsync.gpio2_23 */
+	P1_02_gpio_input_pin: pinmux_P1_02_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE7) >; };			/* lcd_hsync.gpio2_23 */
+	P1_02_pruout_pin: pinmux_P1_02_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_hsync.pru1_out9 */
+	P1_02_pruin_pin: pinmux_P1_02_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e4, PIN_INPUT | MUX_MODE6) >; };			/* lcd_hsync.pru1_in9 */
+
+	/* P1_03 (ZCZ ball F15)  usb1_vbus_out         */
+
+	/* P1_04 (ZCZ ball R6) gpio2_25 */
+	P1_04_default_pin: pinmux_P1_04_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P1_04_gpio_pin: pinmux_P1_04_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_ac_bias_en.gpio2_25 */
+	P1_04_gpio_pu_pin: pinmux_P1_04_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P1_04_gpio_pd_pin: pinmux_P1_04_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_ac_bias_en.gpio2_25 */
+	P1_04_gpio_input_pin: pinmux_P1_04_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE7) >; };			/* lcd_ac_bias_en.gpio2_25 */
+	P1_04_pruout_pin: pinmux_P1_04_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_ac_bias_en.pru1_out11 */
+	P1_04_pruin_pin: pinmux_P1_04_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08ec, PIN_INPUT | MUX_MODE6) >; };			/* lcd_ac_bias_en.pru1_in11 */
+
+	/* P1_05 (ZCZ ball T18)  usb1_vbus_in         */
+
+	/* P1_06 (ZCZ ball A16) spi0_cs0 */
+	P1_06_default_pin: pinmux_P1_06_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_cs0.spi0_cs0 */
+	P1_06_gpio_pin: pinmux_P1_06_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_cs0.gpio0_5 */
+	P1_06_gpio_pu_pin: pinmux_P1_06_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P1_06_gpio_pd_pin: pinmux_P1_06_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_cs0.gpio0_5 */
+	P1_06_gpio_input_pin: pinmux_P1_06_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_INPUT | MUX_MODE7) >; };			/* spi0_cs0.gpio0_5 */
+	P1_06_spi_cs_pin: pinmux_P1_06_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_cs0.spi0_cs0 */
+	P1_06_i2c_pin: pinmux_P1_06_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_cs0.i2c1_scl */
+	P1_06_pwm_pin: pinmux_P1_06_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_cs0.ehrpwm0_synci */
+	P1_06_pru_uart_pin: pinmux_P1_06_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x095c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_cs0.pr1_uart0_txd */
+
+	/* P1_07                VIN-USB */
+
+	/* P1_08 (ZCZ ball A17) spi0_sclk */
+	P1_08_default_pin: pinmux_P1_08_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_sclk.spi0_sclk */
+	P1_08_gpio_pin: pinmux_P1_08_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_sclk.gpio0_2 */
+	P1_08_gpio_pu_pin: pinmux_P1_08_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P1_08_gpio_pd_pin: pinmux_P1_08_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_sclk.gpio0_2 */
+	P1_08_gpio_input_pin: pinmux_P1_08_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_INPUT | MUX_MODE7) >; };			/* spi0_sclk.gpio0_2 */
+	P1_08_spi_sclk_pin: pinmux_P1_08_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_sclk.spi0_sclk */
+	P1_08_uart_pin: pinmux_P1_08_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_sclk.uart2_rxd */
+	P1_08_i2c_pin: pinmux_P1_08_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_sclk.i2c2_sda */
+	P1_08_pwm_pin: pinmux_P1_08_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_sclk.ehrpwm0a */
+	P1_08_pru_uart_pin: pinmux_P1_08_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0950, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_sclk.pr1_uart0_cts_n */
+
+	/* P1_09 (ZCZ ball R18)  USB1-DN         */
+
+	/* P1_10 (ZCZ ball B17) spi0_d0 */
+	P1_10_default_pin: pinmux_P1_10_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d0.spi0_d0 */
+	P1_10_gpio_pin: pinmux_P1_10_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d0.gpio0_3 */
+	P1_10_gpio_pu_pin: pinmux_P1_10_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P1_10_gpio_pd_pin: pinmux_P1_10_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d0.gpio0_3 */
+	P1_10_gpio_input_pin: pinmux_P1_10_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d0.gpio0_3 */
+	P1_10_spi_pin: pinmux_P1_10_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d0.spi0_d0 */
+	P1_10_uart_pin: pinmux_P1_10_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* spi0_d0.uart2_txd */
+	P1_10_i2c_pin: pinmux_P1_10_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d0.i2c2_scl */
+	P1_10_pwm_pin: pinmux_P1_10_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d0.ehrpwm0b */
+	P1_10_pru_uart_pin: pinmux_P1_10_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0954, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d0.pr1_uart0_rts_n */
+
+	/* P1_11 (ZCZ ball R17)  USB1-DP         */
+
+	/* P1_12 (ZCZ ball B16) spi0_d1 */
+	P1_12_default_pin: pinmux_P1_12_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d1.spi0_d1 */
+	P1_12_gpio_pin: pinmux_P1_12_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* spi0_d1.gpio0_4 */
+	P1_12_gpio_pu_pin: pinmux_P1_12_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P1_12_gpio_pd_pin: pinmux_P1_12_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* spi0_d1.gpio0_4 */
+	P1_12_gpio_input_pin: pinmux_P1_12_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_INPUT | MUX_MODE7) >; };			/* spi0_d1.gpio0_4 */
+	P1_12_spi_pin: pinmux_P1_12_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* spi0_d1.spi0_d1 */
+	P1_12_i2c_pin: pinmux_P1_12_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* spi0_d1.i2c1_sda */
+	P1_12_pwm_pin: pinmux_P1_12_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* spi0_d1.ehrpwm0_tripzone_input */
+	P1_12_pru_uart_pin: pinmux_P1_12_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0958, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* spi0_d1.pr1_uart0_rxd */
+
+	/* P1_13 (ZCZ ball P17)  USB1-ID         */
+
+	/* P1_14                VOUT-3.3V */
+
+	/* P1_15                GND */
+
+	/* P1_16                GND */
+
+	/* P1_17 (ZCZ ball A9)  VREFN         */
+
+	/* P1_18 (ZCZ ball B9)  VREFP         */
+
+	/* P1_19 (ZCZ ball B6)  AIN0         */
+
+	/* P1_20 (ZCZ ball D14) gpio0_20 */
+	P1_20_default_pin: pinmux_P1_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P1_20_gpio_pin: pinmux_P1_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* xdma_event_intr1.gpio0_20 */
+	P1_20_gpio_pu_pin: pinmux_P1_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P1_20_gpio_pd_pin: pinmux_P1_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr1.gpio0_20 */
+	P1_20_gpio_input_pin: pinmux_P1_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE7) >; };			/* xdma_event_intr1.gpio0_20 */
+	P1_20_pruin_pin: pinmux_P1_20_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b4, PIN_INPUT | MUX_MODE5) >; };			/* xdma_event_intr1.pru0_in16 */
+
+	/* P1_21 (ZCZ ball C7)  AIN1         */
+
+	/* P1_22                GND */
+
+	/* P1_23 (ZCZ ball B7)  AIN2         */
+
+	/* P1_24                VOUT-5V */
+
+	/* P1_25 (ZCZ ball A7)  AIN3         */
+
+	/* P1_26 (ZCZ ball D18) i2c2_sda */
+	P1_26_default_pin: pinmux_P1_26_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P1_26_gpio_pin: pinmux_P1_26_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_ctsn.gpio0_12 */
+	P1_26_gpio_pu_pin: pinmux_P1_26_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P1_26_gpio_pd_pin: pinmux_P1_26_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_ctsn.gpio0_12 */
+	P1_26_gpio_input_pin: pinmux_P1_26_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_INPUT | MUX_MODE7) >; };			/* uart1_ctsn.gpio0_12 */
+	P1_26_can_pin: pinmux_P1_26_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_ctsn.dcan0_tx */
+	P1_26_i2c_pin: pinmux_P1_26_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_ctsn.i2c2_sda */
+	P1_26_spi_cs_pin: pinmux_P1_26_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_ctsn.spi1_cs0 */
+	P1_26_pru_uart_pin: pinmux_P1_26_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0978, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_ctsn.pr1_uart0_cts_n */
+
+	/* P1_27 (ZCZ ball C8)  AIN4         */
+
+	/* P1_28 (ZCZ ball D17) i2c2_scl */
+	P1_28_default_pin: pinmux_P1_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P1_28_gpio_pin: pinmux_P1_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rtsn.gpio0_13 */
+	P1_28_gpio_pu_pin: pinmux_P1_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P1_28_gpio_pd_pin: pinmux_P1_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rtsn.gpio0_13 */
+	P1_28_gpio_input_pin: pinmux_P1_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rtsn.gpio0_13 */
+	P1_28_can_pin: pinmux_P1_28_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rtsn.dcan0_rx */
+	P1_28_i2c_pin: pinmux_P1_28_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rtsn.i2c2_scl */
+	P1_28_spi_cs_pin: pinmux_P1_28_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart1_rtsn.spi1_cs1 */
+	P1_28_pru_uart_pin: pinmux_P1_28_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x097c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rtsn.pr1_uart0_rts_n */
+
+	/* P1_29 (ZCZ ball A14) pru0_in7 */
+	P1_29_default_pin: pinmux_P1_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkx.pru0_in7 */
+	P1_29_gpio_pin: pinmux_P1_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkx.gpio3_21 */
+	P1_29_gpio_pu_pin: pinmux_P1_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P1_29_gpio_pd_pin: pinmux_P1_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkx.gpio3_21 */
+	P1_29_gpio_input_pin: pinmux_P1_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkx.gpio3_21 */
+	P1_29_qep_pin: pinmux_P1_29_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkx.eqep0_strobe */
+	P1_29_pruout_pin: pinmux_P1_29_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkx.pru0_out7 */
+	P1_29_pruin_pin: pinmux_P1_29_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09ac, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkx.pru0_in7 */
+
+	/* P1_30 (ZCZ ball E16) uart0_txd */
+	P1_30_default_pin: pinmux_P1_30_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart0_txd.uart0_txd */
+	P1_30_gpio_pin: pinmux_P1_30_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart0_txd.gpio1_11 */
+	P1_30_gpio_pu_pin: pinmux_P1_30_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart0_txd.gpio1_11 */
+	P1_30_gpio_pd_pin: pinmux_P1_30_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart0_txd.gpio1_11 */
+	P1_30_gpio_input_pin: pinmux_P1_30_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_INPUT | MUX_MODE7) >; };			/* uart0_txd.gpio1_11 */
+	P1_30_uart_pin: pinmux_P1_30_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart0_txd.uart0_txd */
+	P1_30_spi_cs_pin: pinmux_P1_30_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart0_txd.spi1_cs1 */
+	P1_30_can_pin: pinmux_P1_30_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart0_txd.dcan0_rx */
+	P1_30_i2c_pin: pinmux_P1_30_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart0_txd.i2c2_scl */
+	P1_30_pruout_pin: pinmux_P1_30_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* uart0_txd.pru1_out15 */
+	P1_30_pruin_pin: pinmux_P1_30_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0974, PIN_INPUT | MUX_MODE6) >; };			/* uart0_txd.pru1_in15 */
+
+	/* P1_31 (ZCZ ball B12) pru0_in4 */
+	P1_31_default_pin: pinmux_P1_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkr.pru0_in4 */
+	P1_31_gpio_pin: pinmux_P1_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkr.gpio3_18 */
+	P1_31_gpio_pu_pin: pinmux_P1_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P1_31_gpio_pd_pin: pinmux_P1_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkr.gpio3_18 */
+	P1_31_gpio_input_pin: pinmux_P1_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkr.gpio3_18 */
+	P1_31_qep_pin: pinmux_P1_31_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkr.eqep0a_in */
+	P1_31_pruout_pin: pinmux_P1_31_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkr.pru0_out4 */
+	P1_31_pruin_pin: pinmux_P1_31_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a0, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkr.pru0_in4 */
+
+	/* P1_32 (ZCZ ball E15) uart0_rxd */
+	P1_32_default_pin: pinmux_P1_32_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart0_rxd.uart0_rxd */
+	P1_32_gpio_pin: pinmux_P1_32_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart0_rxd.gpio1_10 */
+	P1_32_gpio_pu_pin: pinmux_P1_32_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart0_rxd.gpio1_10 */
+	P1_32_gpio_pd_pin: pinmux_P1_32_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart0_rxd.gpio1_10 */
+	P1_32_gpio_input_pin: pinmux_P1_32_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_INPUT | MUX_MODE7) >; };			/* uart0_rxd.gpio1_10 */
+	P1_32_uart_pin: pinmux_P1_32_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart0_rxd.uart0_rxd */
+	P1_32_spi_cs_pin: pinmux_P1_32_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart0_rxd.spi1_cs0 */
+	P1_32_can_pin: pinmux_P1_32_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart0_rxd.dcan0_tx */
+	P1_32_i2c_pin: pinmux_P1_32_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart0_rxd.i2c2_sda */
+	P1_32_pruout_pin: pinmux_P1_32_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* uart0_rxd.pru1_out14 */
+	P1_32_pruin_pin: pinmux_P1_32_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0970, PIN_INPUT | MUX_MODE6) >; };			/* uart0_rxd.pru1_in14 */
+
+	/* P1_33 (ZCZ ball B13) pru0_in1 */
+	P1_33_default_pin: pinmux_P1_33_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsx.pru0_in1 */
+	P1_33_gpio_pin: pinmux_P1_33_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsx.gpio3_15 */
+	P1_33_gpio_pu_pin: pinmux_P1_33_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P1_33_gpio_pd_pin: pinmux_P1_33_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsx.gpio3_15 */
+	P1_33_gpio_input_pin: pinmux_P1_33_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsx.gpio3_15 */
+	P1_33_pwm_pin: pinmux_P1_33_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsx.ehrpwm0b */
+	P1_33_spi_pin: pinmux_P1_33_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_fsx.spi1_d0 */
+	P1_33_pruout_pin: pinmux_P1_33_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsx.pru0_out1 */
+	P1_33_pruin_pin: pinmux_P1_33_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0994, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsx.pru0_in1 */
+
+	/* P1_34 (ZCZ ball T11) gpio0_26 */
+	P1_34_default_pin: pinmux_P1_34_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P1_34_gpio_pin: pinmux_P1_34_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad10.gpio0_26 */
+	P1_34_gpio_pu_pin: pinmux_P1_34_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P1_34_gpio_pd_pin: pinmux_P1_34_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad10.gpio0_26 */
+	P1_34_gpio_input_pin: pinmux_P1_34_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad10.gpio0_26 */
+	P1_34_pwm_pin: pinmux_P1_34_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0828, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad10.ehrpwm2_tripzone_input */
+
+	/* P1_35 (ZCZ ball V5) pru1_in10 */
+	P1_35_default_pin: pinmux_P1_35_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_pclk.pru1_in10 */
+	P1_35_gpio_pin: pinmux_P1_35_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_pclk.gpio2_24 */
+	P1_35_gpio_pu_pin: pinmux_P1_35_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P1_35_gpio_pd_pin: pinmux_P1_35_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_pclk.gpio2_24 */
+	P1_35_gpio_input_pin: pinmux_P1_35_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE7) >; };			/* lcd_pclk.gpio2_24 */
+	P1_35_pruout_pin: pinmux_P1_35_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_pclk.pru1_out10 */
+	P1_35_pruin_pin: pinmux_P1_35_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e8, PIN_INPUT | MUX_MODE6) >; };			/* lcd_pclk.pru1_in10 */
+
+	/* P1_36 (ZCZ ball A13) ehrpwm0a */
+	P1_36_default_pin: pinmux_P1_36_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkx.ehrpwm0a */
+	P1_36_gpio_pin: pinmux_P1_36_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_aclkx.gpio3_14 */
+	P1_36_gpio_pu_pin: pinmux_P1_36_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P1_36_gpio_pd_pin: pinmux_P1_36_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_aclkx.gpio3_14 */
+	P1_36_gpio_input_pin: pinmux_P1_36_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_aclkx.gpio3_14 */
+	P1_36_pwm_pin: pinmux_P1_36_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_aclkx.ehrpwm0a */
+	P1_36_spi_sclk_pin: pinmux_P1_36_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_aclkx.spi1_sclk */
+	P1_36_pruout_pin: pinmux_P1_36_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_aclkx.pru0_out0 */
+	P1_36_pruin_pin: pinmux_P1_36_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0990, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_aclkx.pru0_in0 */
+
+
+	/************************/
+	/* P2 Header */
+	/************************/
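+	/*
+	 * As with the P1 pads above, every usable P2 pad gets one pinctrl entry
+	 * per selectable mux mode; the trailing comment on each entry reads
+	 * <pad name>.<selected signal>.
+	 */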
+
+	/* P2_01 (ZCZ ball U14) ehrpwm1a */
+	P2_01_default_pin: pinmux_P2_01_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a2.ehrpwm1a */
+	P2_01_gpio_pin: pinmux_P2_01_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a2.gpio1_18 */
+	P2_01_gpio_pu_pin: pinmux_P2_01_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P2_01_gpio_pd_pin: pinmux_P2_01_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a2.gpio1_18 */
+	P2_01_gpio_input_pin: pinmux_P2_01_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a2.gpio1_18 */
+	P2_01_pwm_pin: pinmux_P2_01_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0848, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a2.ehrpwm1a */
+
+	/* P2_02 (ZCZ ball V17) gpio1_27 */
+	P2_02_default_pin: pinmux_P2_02_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x086c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a11.gpio1_27 */
+	P2_02_gpio_pin: pinmux_P2_02_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x086c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a11.gpio1_27 */
+	P2_02_gpio_pu_pin: pinmux_P2_02_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x086c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a11.gpio1_27 */
+	P2_02_gpio_pd_pin: pinmux_P2_02_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x086c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a11.gpio1_27 */
+	P2_02_gpio_input_pin: pinmux_P2_02_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x086c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a11.gpio1_27 */
+
+	/* P2_03 (ZCZ ball T10) gpio0_23 */
+	P2_03_default_pin: pinmux_P2_03_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P2_03_gpio_pin: pinmux_P2_03_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad9.gpio0_23 */
+	P2_03_gpio_pu_pin: pinmux_P2_03_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P2_03_gpio_pd_pin: pinmux_P2_03_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad9.gpio0_23 */
+	P2_03_gpio_input_pin: pinmux_P2_03_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad9.gpio0_23 */
+	P2_03_pwm_pin: pinmux_P2_03_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0824, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad9.ehrpwm2b */
+
+	/* P2_04 (ZCZ ball T16) gpio1_26 */
+	P2_04_default_pin: pinmux_P2_04_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0868, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a10.gpio1_26 */
+	P2_04_gpio_pin: pinmux_P2_04_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0868, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a10.gpio1_26 */
+	P2_04_gpio_pu_pin: pinmux_P2_04_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0868, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a10.gpio1_26 */
+	P2_04_gpio_pd_pin: pinmux_P2_04_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0868, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a10.gpio1_26 */
+	P2_04_gpio_input_pin: pinmux_P2_04_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0868, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a10.gpio1_26 */
+
+	/* P2_05 (ZCZ ball T17) uart4_rxd */
+	P2_05_default_pin: pinmux_P2_05_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wait0.uart4_rxd */
+	P2_05_gpio_pin: pinmux_P2_05_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wait0.gpio0_30 */
+	P2_05_gpio_pu_pin: pinmux_P2_05_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P2_05_gpio_pd_pin: pinmux_P2_05_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wait0.gpio0_30 */
+	P2_05_gpio_input_pin: pinmux_P2_05_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wait0.gpio0_30 */
+	P2_05_uart_pin: pinmux_P2_05_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0870, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wait0.uart4_rxd */
+
+	/* P2_06 (ZCZ ball U16) gpio1_25 */
+	P2_06_default_pin: pinmux_P2_06_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0864, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a9.gpio1_25 */
+	P2_06_gpio_pin: pinmux_P2_06_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0864, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a9.gpio1_25 */
+	P2_06_gpio_pu_pin: pinmux_P2_06_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0864, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a9.gpio1_25 */
+	P2_06_gpio_pd_pin: pinmux_P2_06_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0864, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a9.gpio1_25 */
+	P2_06_gpio_input_pin: pinmux_P2_06_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0864, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a9.gpio1_25 */
+
+	/* P2_07 (ZCZ ball U17) uart4_txd */
+	P2_07_default_pin: pinmux_P2_07_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wpn.uart4_txd */
+	P2_07_gpio_pin: pinmux_P2_07_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_wpn.gpio0_31 */
+	P2_07_gpio_pu_pin: pinmux_P2_07_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P2_07_gpio_pd_pin: pinmux_P2_07_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_wpn.gpio0_31 */
+	P2_07_gpio_input_pin: pinmux_P2_07_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_wpn.gpio0_31 */
+	P2_07_uart_pin: pinmux_P2_07_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0874, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_wpn.uart4_txd */
+
+	/* P2_08 (ZCZ ball U18) gpio1_28 */
+	P2_08_default_pin: pinmux_P2_08_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P2_08_gpio_pin: pinmux_P2_08_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_be1n.gpio1_28 */
+	P2_08_gpio_pu_pin: pinmux_P2_08_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P2_08_gpio_pd_pin: pinmux_P2_08_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_be1n.gpio1_28 */
+	P2_08_gpio_input_pin: pinmux_P2_08_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0878, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_be1n.gpio1_28 */
+
+	/* P2_09 (ZCZ ball D15) i2c1_scl */
+	P2_09_default_pin: pinmux_P2_09_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_txd.i2c1_scl */
+	P2_09_gpio_pin: pinmux_P2_09_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_txd.gpio0_15 */
+	P2_09_gpio_pu_pin: pinmux_P2_09_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P2_09_gpio_pd_pin: pinmux_P2_09_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_txd.gpio0_15 */
+	P2_09_gpio_input_pin: pinmux_P2_09_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE7) >; };			/* uart1_txd.gpio0_15 */
+	P2_09_uart_pin: pinmux_P2_09_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_txd.uart1_txd */
+	P2_09_can_pin: pinmux_P2_09_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart1_txd.dcan1_rx */
+	P2_09_i2c_pin: pinmux_P2_09_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_txd.i2c1_scl */
+	P2_09_pru_uart_pin: pinmux_P2_09_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_txd.pr1_uart0_txd */
+	P2_09_pruin_pin: pinmux_P2_09_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0984, PIN_INPUT | MUX_MODE6) >; };			/* uart1_txd.pru0_in16 */
+
+	/* P2_10 (ZCZ ball R14) gpio1_20 */
+	P2_10_default_pin: pinmux_P2_10_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a4.gpio1_20 */
+	P2_10_gpio_pin: pinmux_P2_10_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_a4.gpio1_20 */
+	P2_10_gpio_pu_pin: pinmux_P2_10_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a4.gpio1_20 */
+	P2_10_gpio_pd_pin: pinmux_P2_10_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_a4.gpio1_20 */
+	P2_10_gpio_input_pin: pinmux_P2_10_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_a4.gpio1_20 */
+	P2_10_qep_pin: pinmux_P2_10_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0850, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE6) >; };	/* gpmc_a4.eqep1a_in */
+
+	/* P2_11 (ZCZ ball D16) i2c1_sda */
+	P2_11_default_pin: pinmux_P2_11_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rxd.i2c1_sda */
+	P2_11_gpio_pin: pinmux_P2_11_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart1_rxd.gpio0_14 */
+	P2_11_gpio_pu_pin: pinmux_P2_11_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P2_11_gpio_pd_pin: pinmux_P2_11_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart1_rxd.gpio0_14 */
+	P2_11_gpio_input_pin: pinmux_P2_11_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE7) >; };			/* uart1_rxd.gpio0_14 */
+	P2_11_uart_pin: pinmux_P2_11_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE0) >; };	/* uart1_rxd.uart1_rxd */
+	P2_11_can_pin: pinmux_P2_11_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart1_rxd.dcan1_tx */
+	P2_11_i2c_pin: pinmux_P2_11_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart1_rxd.i2c1_sda */
+	P2_11_pru_uart_pin: pinmux_P2_11_pru_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart1_rxd.pr1_uart0_rxd */
+	P2_11_pruin_pin: pinmux_P2_11_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0980, PIN_INPUT | MUX_MODE6) >; };			/* uart1_rxd.pru1_in16 */
+
+	/* P2_12                POWER_BUTTON */
+
+	/* P2_13                VOUT-5V */
+
+	/* P2_14                BAT-VIN */
+
+	/* P2_15                GND */
+
+	/* P2_16                BAT-TEMP */
+
+	/* P2_17 (ZCZ ball V12) gpio2_1 */
+	P2_17_default_pin: pinmux_P2_17_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P2_17_gpio_pin: pinmux_P2_17_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_clk.gpio2_1 */
+	P2_17_gpio_pu_pin: pinmux_P2_17_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P2_17_gpio_pd_pin: pinmux_P2_17_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_clk.gpio2_1 */
+	P2_17_gpio_input_pin: pinmux_P2_17_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x088c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_clk.gpio2_1 */
+
+	/* P2_18 (ZCZ ball U13) gpio1_15 */
+	P2_18_default_pin: pinmux_P2_18_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P2_18_gpio_pin: pinmux_P2_18_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad15.gpio1_15 */
+	P2_18_gpio_pu_pin: pinmux_P2_18_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P2_18_gpio_pd_pin: pinmux_P2_18_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad15.gpio1_15 */
+	P2_18_gpio_input_pin: pinmux_P2_18_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad15.gpio1_15 */
+	P2_18_qep_pin: pinmux_P2_18_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad15.eqep2_strobe */
+	P2_18_pru_ecap_pin: pinmux_P2_18_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* gpmc_ad15.pr1_ecap0_ecap_capin_apwm_o */
+	P2_18_pruin_pin: pinmux_P2_18_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x083c, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad15.pru0_in15 */
+
+	/* P2_19 (ZCZ ball U12) gpio0_27 */
+	P2_19_default_pin: pinmux_P2_19_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P2_19_gpio_pin: pinmux_P2_19_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad11.gpio0_27 */
+	P2_19_gpio_pu_pin: pinmux_P2_19_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P2_19_gpio_pd_pin: pinmux_P2_19_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad11.gpio0_27 */
+	P2_19_gpio_input_pin: pinmux_P2_19_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad11.gpio0_27 */
+	P2_19_pwm_pin: pinmux_P2_19_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x082c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad11.ehrpwm0_synco */
+
+	/* P2_20 (ZCZ ball T13) gpio2_0 */
+	P2_20_default_pin: pinmux_P2_20_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0888, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn3.gpio2_0 */
+	P2_20_gpio_pin: pinmux_P2_20_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0888, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_csn3.gpio2_0 */
+	P2_20_gpio_pu_pin: pinmux_P2_20_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0888, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn3.gpio2_0 */
+	P2_20_gpio_pd_pin: pinmux_P2_20_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0888, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_csn3.gpio2_0 */
+	P2_20_gpio_input_pin: pinmux_P2_20_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0888, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_csn3.gpio2_0 */
+
+	/* P2_21                GND */
+
+	/* P2_22 (ZCZ ball V13) gpio1_14 */
+	P2_22_default_pin: pinmux_P2_22_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P2_22_gpio_pin: pinmux_P2_22_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad14.gpio1_14 */
+	P2_22_gpio_pu_pin: pinmux_P2_22_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P2_22_gpio_pd_pin: pinmux_P2_22_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad14.gpio1_14 */
+	P2_22_gpio_input_pin: pinmux_P2_22_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad14.gpio1_14 */
+	P2_22_qep_pin: pinmux_P2_22_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad14.eqep2_index */
+	P2_22_pruin_pin: pinmux_P2_22_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0838, PIN_INPUT | MUX_MODE6) >; };			/* gpmc_ad14.pru0_in14 */
+
+	/* P2_23                VOUT-3.3V */
+
+	/* P2_24 (ZCZ ball T12) gpio1_12 */
+	P2_24_default_pin: pinmux_P2_24_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P2_24_gpio_pin: pinmux_P2_24_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad12.gpio1_12 */
+	P2_24_gpio_pu_pin: pinmux_P2_24_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P2_24_gpio_pd_pin: pinmux_P2_24_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad12.gpio1_12 */
+	P2_24_gpio_input_pin: pinmux_P2_24_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad12.gpio1_12 */
+	P2_24_qep_pin: pinmux_P2_24_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad12.eqep2a_in */
+	P2_24_pruout_pin: pinmux_P2_24_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0830, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad12.pru0_out14 */
+
+	/* P2_25 (ZCZ ball E17) spi1_d1 */
+	P2_25_default_pin: pinmux_P2_25_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart0_rtsn.spi1_d1 */
+	P2_25_gpio_pin: pinmux_P2_25_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart0_rtsn.gpio1_9 */
+	P2_25_gpio_pu_pin: pinmux_P2_25_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart0_rtsn.gpio1_9 */
+	P2_25_gpio_pd_pin: pinmux_P2_25_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart0_rtsn.gpio1_9 */
+	P2_25_gpio_input_pin: pinmux_P2_25_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_INPUT | MUX_MODE7) >; };			/* uart0_rtsn.gpio1_9 */
+	P2_25_uart_pin: pinmux_P2_25_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart0_rtsn.uart4_txd */
+	P2_25_can_pin: pinmux_P2_25_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_INPUT_PULLUP | MUX_MODE2) >; };		/* uart0_rtsn.dcan1_rx */
+	P2_25_i2c_pin: pinmux_P2_25_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart0_rtsn.i2c1_scl */
+	P2_25_spi_pin: pinmux_P2_25_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart0_rtsn.spi1_d1 */
+	P2_25_spi_cs_pin: pinmux_P2_25_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x096c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE5) >; };	/* uart0_rtsn.spi1_cs0 */
+
+	/* P2_26                RESET# */
+
+	/* P2_27 (ZCZ ball E18) spi1_d0 */
+	P2_27_default_pin: pinmux_P2_27_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart0_ctsn.spi1_d0 */
+	P2_27_gpio_pin: pinmux_P2_27_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* uart0_ctsn.gpio1_8 */
+	P2_27_gpio_pu_pin: pinmux_P2_27_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* uart0_ctsn.gpio1_8 */
+	P2_27_gpio_pd_pin: pinmux_P2_27_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* uart0_ctsn.gpio1_8 */
+	P2_27_gpio_input_pin: pinmux_P2_27_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_INPUT | MUX_MODE7) >; };			/* uart0_ctsn.gpio1_8 */
+	P2_27_uart_pin: pinmux_P2_27_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* uart0_ctsn.uart4_rxd */
+	P2_27_can_pin: pinmux_P2_27_can_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | MUX_MODE2) >; };		/* uart0_ctsn.dcan1_tx */
+	P2_27_i2c_pin: pinmux_P2_27_i2c_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* uart0_ctsn.i2c1_sda */
+	P2_27_spi_pin: pinmux_P2_27_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0968, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* uart0_ctsn.spi1_d0 */
+
+	/* P2_28 (ZCZ ball D13) pru0_in6 */
+	P2_28_default_pin: pinmux_P2_28_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr1.pru0_in6 */
+	P2_28_gpio_pin: pinmux_P2_28_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_axr1.gpio3_20 */
+	P2_28_gpio_pu_pin: pinmux_P2_28_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P2_28_gpio_pd_pin: pinmux_P2_28_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr1.gpio3_20 */
+	P2_28_gpio_input_pin: pinmux_P2_28_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_axr1.gpio3_20 */
+	P2_28_qep_pin: pinmux_P2_28_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_axr1.eqep0_index */
+	P2_28_pruout_pin: pinmux_P2_28_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_axr1.pru0_out6 */
+	P2_28_pruin_pin: pinmux_P2_28_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a8, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr1.pru0_in6 */
+
+	/* P2_29 (ZCZ ball C18) spi1_sclk */
+	P2_29_default_pin: pinmux_P2_29_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* eCAP0_in_PWM0_out.spi1_sclk */
+	P2_29_gpio_pin: pinmux_P2_29_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* eCAP0_in_PWM0_out.gpio0_7 */
+	P2_29_gpio_pu_pin: pinmux_P2_29_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P2_29_gpio_pd_pin: pinmux_P2_29_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* eCAP0_in_PWM0_out.gpio0_7 */
+	P2_29_gpio_input_pin: pinmux_P2_29_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_INPUT | MUX_MODE7) >; };			/* eCAP0_in_PWM0_out.gpio0_7 */
+	P2_29_pwm_pin: pinmux_P2_29_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE0) >; };	/* eCAP0_in_PWM0_out.ecap0_in_pwm0_out */
+	P2_29_uart_pin: pinmux_P2_29_uart_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* eCAP0_in_PWM0_out.uart3_txd */
+	P2_29_spi_cs_pin: pinmux_P2_29_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE2) >; };	/* eCAP0_in_PWM0_out.spi1_cs1 */
+	P2_29_pru_ecap_pin: pinmux_P2_29_pru_ecap_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE3) >; };	/* eCAP0_in_PWM0_out.pr1_ecap0_ecap_capin_apwm_o */
+	P2_29_spi_sclk_pin: pinmux_P2_29_spi_sclk_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0964, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* eCAP0_in_PWM0_out.spi1_sclk */
+
+	/* P2_30 (ZCZ ball C12) pru0_in3 */
+	P2_30_default_pin: pinmux_P2_30_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkr.pru0_in3 */
+	P2_30_gpio_pin: pinmux_P2_30_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_ahclkr.gpio3_17 */
+	P2_30_gpio_pu_pin: pinmux_P2_30_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P2_30_gpio_pd_pin: pinmux_P2_30_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_ahclkr.gpio3_17 */
+	P2_30_gpio_input_pin: pinmux_P2_30_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_ahclkr.gpio3_17 */
+	P2_30_pwm_pin: pinmux_P2_30_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_ahclkr.ehrpwm0_synci */
+	P2_30_spi_cs_pin: pinmux_P2_30_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_ahclkr.spi1_cs0 */
+	P2_30_pruout_pin: pinmux_P2_30_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_ahclkr.pru0_out3 */
+	P2_30_pruin_pin: pinmux_P2_30_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x099c, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_ahclkr.pru0_in3 */
+
+	/* P2_31 (ZCZ ball A15) spi1_cs1 */
+	P2_31_default_pin: pinmux_P2_31_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* xdma_event_intr0.spi1_cs1 */
+	P2_31_gpio_pin: pinmux_P2_31_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* xdma_event_intr0.gpio0_19 */
+	P2_31_gpio_pu_pin: pinmux_P2_31_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr0.gpio0_19 */
+	P2_31_gpio_pd_pin: pinmux_P2_31_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* xdma_event_intr0.gpio0_19 */
+	P2_31_gpio_input_pin: pinmux_P2_31_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_INPUT | MUX_MODE7) >; };			/* xdma_event_intr0.gpio0_19 */
+	P2_31_spi_cs_pin: pinmux_P2_31_spi_cs_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* xdma_event_intr0.spi1_cs1 */
+	P2_31_pruin_pin: pinmux_P2_31_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09b0, PIN_INPUT | MUX_MODE5) >; };			/* xdma_event_intr0.pru1_in16 */
+
+	/* P2_32 (ZCZ ball D12) pru0_in2 */
+	P2_32_default_pin: pinmux_P2_32_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr0.pru0_in2 */
+	P2_32_gpio_pin: pinmux_P2_32_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_axr0.gpio3_16 */
+	P2_32_gpio_pu_pin: pinmux_P2_32_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr0.gpio3_16 */
+	P2_32_gpio_pd_pin: pinmux_P2_32_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_axr0.gpio3_16 */
+	P2_32_gpio_input_pin: pinmux_P2_32_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_axr0.gpio3_16 */
+	P2_32_pwm_pin: pinmux_P2_32_pwm_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_axr0.ehrpwm0_tripzone_input */
+	P2_32_spi_pin: pinmux_P2_32_spi_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE3) >; };	/* mcasp0_axr0.spi1_d1 */
+	P2_32_pruout_pin: pinmux_P2_32_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_axr0.pru0_out2 */
+	P2_32_pruin_pin: pinmux_P2_32_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0998, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_axr0.pru0_in2 */
+
+	/* P2_33 (ZCZ ball R12) gpio1_13 */
+	P2_33_default_pin: pinmux_P2_33_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P2_33_gpio_pin: pinmux_P2_33_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* gpmc_ad13.gpio1_13 */
+	P2_33_gpio_pu_pin: pinmux_P2_33_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P2_33_gpio_pd_pin: pinmux_P2_33_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* gpmc_ad13.gpio1_13 */
+	P2_33_gpio_input_pin: pinmux_P2_33_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_INPUT | MUX_MODE7) >; };			/* gpmc_ad13.gpio1_13 */
+	P2_33_qep_pin: pinmux_P2_33_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE4) >; };	/* gpmc_ad13.eqep2b_in */
+	P2_33_pruout_pin: pinmux_P2_33_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x0834, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE6) >; };	/* gpmc_ad13.pru0_out15 */
+
+	/* P2_34 (ZCZ ball C13) pru0_in5 */
+	P2_34_default_pin: pinmux_P2_34_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsr.pru0_in5 */
+	P2_34_gpio_pin: pinmux_P2_34_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* mcasp0_fsr.gpio3_19 */
+	P2_34_gpio_pu_pin: pinmux_P2_34_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P2_34_gpio_pd_pin: pinmux_P2_34_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* mcasp0_fsr.gpio3_19 */
+	P2_34_gpio_input_pin: pinmux_P2_34_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE7) >; };			/* mcasp0_fsr.gpio3_19 */
+	P2_34_qep_pin: pinmux_P2_34_qep_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE1) >; };	/* mcasp0_fsr.eqep0b_in */
+	P2_34_pruout_pin: pinmux_P2_34_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* mcasp0_fsr.pru0_out5 */
+	P2_34_pruin_pin: pinmux_P2_34_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x09a4, PIN_INPUT | MUX_MODE6) >; };			/* mcasp0_fsr.pru0_in5 */
+
+	/* P2_35 (ZCZ ball U5) gpio2_22 */
+	P2_35_default_pin: pinmux_P2_35_default_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_vsync.gpio2_22 */
+	P2_35_gpio_pin: pinmux_P2_35_gpio_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT | INPUT_EN | MUX_MODE7) >; };		/* lcd_vsync.gpio2_22 */
+	P2_35_gpio_pu_pin: pinmux_P2_35_gpio_pu_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLUP | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P2_35_gpio_pd_pin: pinmux_P2_35_gpio_pd_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE7) >; };	/* lcd_vsync.gpio2_22 */
+	P2_35_gpio_input_pin: pinmux_P2_35_gpio_input_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE7) >; };			/* lcd_vsync.gpio2_22 */
+	P2_35_pruout_pin: pinmux_P2_35_pruout_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_OUTPUT_PULLDOWN | INPUT_EN | MUX_MODE5) >; };	/* lcd_vsync.pru1_out8 */
+	P2_35_pruin_pin: pinmux_P2_35_pruin_pin { pinctrl-single,pins = <
+		AM33XX_IOPAD(0x08e0, PIN_INPUT | MUX_MODE6) >; };			/* lcd_vsync.pru1_in8 */
+
+	/* P2_36 (ZCZ ball C9)  AIN7         */
 };
 
 &epwmss0 {
@@ -140,7 +969,8 @@ &epwmss0 {
 &ehrpwm0 {
 	status = "okay";
 	pinctrl-names = "default";
-	pinctrl-0 = <&ehrpwm0_pins>;
+	//pinctrl-0 = <&ehrpwm0_pins>;
+	pinctrl-0 = <>;
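+	/* pinctrl is left empty so the pads stay free for the run-time
+	 * per-pin helpers defined under &ocp below */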
 };
 
 &epwmss1 {
@@ -150,7 +980,18 @@ &epwmss1 {
 &ehrpwm1 {
 	status = "okay";
 	pinctrl-names = "default";
-	pinctrl-0 = <&ehrpwm1_pins>;
+	//pinctrl-0 = <&ehrpwm1_pins>;
+	pinctrl-0 = <>;
+};
+
+&epwmss2 {
+	status = "okay";
+};
+
+&ehrpwm2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
 };
 
 &i2c0 {
@@ -160,9 +1001,18 @@ eeprom: eeprom@50 {
 	};
 };
 
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+};
+
 &i2c2 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&i2c2_pins>;
+	//pinctrl-0 = <&i2c2_pins>;
+	pinctrl-0 = <>;
 
 	status = "okay";
 	clock-frequency = <400000>;
@@ -193,14 +1043,30 @@ adc {
 
 &uart0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins>;
+	//pinctrl-0 = <&uart0_pins>;
+	pinctrl-0 = <>;
+
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
 
 	status = "okay";
 };
 
 &uart4 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&uart4_pins>;
+	//pinctrl-0 = <&uart4_pins>;
+	pinctrl-0 = <>;
 
 	status = "okay";
 };
@@ -212,3 +1078,1092 @@ &usb0 {
 &usb1 {
 	dr_mode = "host";
 };
+
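+/*
+ * Both SPI buses are exposed to userspace through spidev. The "symlink"
+ * property is a BeagleBone-specific extension (not in mainline spidev)
+ * intended to give each chip select a stable alias such as /dev/spi/0.0.
+ */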
+&spi0 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "okay";
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/0.0";
+		reg = <0>;
+		spi-max-frequency = <24000000>;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/0.1";
+		reg = <1>;
+		spi-max-frequency = <24000000>;
+		status = "disabled";
+	};
+};
+
+&spi1 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "okay";
+
+	channel@0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/1.0";
+		reg = <0>;
+		spi-max-frequency = <24000000>;
+	};
+
+	channel@1 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spidev";
+		symlink = "spi/1.1";
+		reg = <1>;
+		spi-max-frequency = <24000000>;
+	};
+};
+
+&dcan0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&dcan1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <>;
+};
+
+&ocp {
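+	/*
+	 * Each accessible header pin below is wrapped in a bone-pinmux-helper
+	 * node; its pinctrl-names list enumerates the mux states defined above,
+	 * and a userspace tool such as config-pin can switch the pad between
+	 * those states at run time through the helper's sysfs interface.
+	 */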
+	/************************/
+	/* P1 Header */
+	/************************/
+
+	/* P1_01                VIN-AC */
+
+	/* P1_02 (ZCZ ball R5) gpio_input */
+	P1_02_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P1_02_default_pin>;
+		pinctrl-1 = <&P1_02_gpio_pin>;
+		pinctrl-2 = <&P1_02_gpio_pu_pin>;
+		pinctrl-3 = <&P1_02_gpio_pd_pin>;
+		pinctrl-4 = <&P1_02_gpio_input_pin>;
+		pinctrl-5 = <&P1_02_pruout_pin>;
+		pinctrl-6 = <&P1_02_pruin_pin>;
+	};
+
+	/* P1_03 (ZCZ ball F15)  usb1_vbus_out         */
+
+	/* P1_04 (ZCZ ball R6) */
+	P1_04_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P1_04_default_pin>;
+		pinctrl-1 = <&P1_04_gpio_pin>;
+		pinctrl-2 = <&P1_04_gpio_pu_pin>;
+		pinctrl-3 = <&P1_04_gpio_pd_pin>;
+		pinctrl-4 = <&P1_04_gpio_input_pin>;
+		pinctrl-5 = <&P1_04_pruout_pin>;
+		pinctrl-6 = <&P1_04_pruin_pin>;
+	};
+
+	/* P1_05 (ZCZ ball T18)  usb1_vbus_in         */
+
+	/* P1_06 (ZCZ ball A16) spi_cs */
+	P1_06_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P1_06_default_pin>;
+		pinctrl-1 = <&P1_06_gpio_pin>;
+		pinctrl-2 = <&P1_06_gpio_pu_pin>;
+		pinctrl-3 = <&P1_06_gpio_pd_pin>;
+		pinctrl-4 = <&P1_06_gpio_input_pin>;
+		pinctrl-5 = <&P1_06_spi_cs_pin>;
+		pinctrl-6 = <&P1_06_i2c_pin>;
+		pinctrl-7 = <&P1_06_pwm_pin>;
+		pinctrl-8 = <&P1_06_pru_uart_pin>;
+	};
+
+	/* P1_07                VIN-USB */
+
+	/* P1_08 (ZCZ ball A17) spi_sclk */
+	P1_08_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P1_08_default_pin>;
+		pinctrl-1 = <&P1_08_gpio_pin>;
+		pinctrl-2 = <&P1_08_gpio_pu_pin>;
+		pinctrl-3 = <&P1_08_gpio_pd_pin>;
+		pinctrl-4 = <&P1_08_gpio_input_pin>;
+		pinctrl-5 = <&P1_08_spi_sclk_pin>;
+		pinctrl-6 = <&P1_08_uart_pin>;
+		pinctrl-7 = <&P1_08_i2c_pin>;
+		pinctrl-8 = <&P1_08_pwm_pin>;
+		pinctrl-9 = <&P1_08_pru_uart_pin>;
+	};
+
+	/* P1_09 (ZCZ ball R18)  USB1-DN         */
+
+	/* P1_10 (ZCZ ball B17) spi */
+	P1_10_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "uart", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P1_10_default_pin>;
+		pinctrl-1 = <&P1_10_gpio_pin>;
+		pinctrl-2 = <&P1_10_gpio_pu_pin>;
+		pinctrl-3 = <&P1_10_gpio_pd_pin>;
+		pinctrl-4 = <&P1_10_gpio_input_pin>;
+		pinctrl-5 = <&P1_10_spi_pin>;
+		pinctrl-6 = <&P1_10_uart_pin>;
+		pinctrl-7 = <&P1_10_i2c_pin>;
+		pinctrl-8 = <&P1_10_pwm_pin>;
+		pinctrl-9 = <&P1_10_pru_uart_pin>;
+	};
+
+	/* P1_11 (ZCZ ball R17)  USB1-DP         */
+
+	/* P1_12 (ZCZ ball B16) spi */
+	P1_12_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "i2c", "pwm", "pru_uart";
+		pinctrl-0 = <&P1_12_default_pin>;
+		pinctrl-1 = <&P1_12_gpio_pin>;
+		pinctrl-2 = <&P1_12_gpio_pu_pin>;
+		pinctrl-3 = <&P1_12_gpio_pd_pin>;
+		pinctrl-4 = <&P1_12_gpio_input_pin>;
+		pinctrl-5 = <&P1_12_spi_pin>;
+		pinctrl-6 = <&P1_12_i2c_pin>;
+		pinctrl-7 = <&P1_12_pwm_pin>;
+		pinctrl-8 = <&P1_12_pru_uart_pin>;
+	};
+
+	/* P1_13 (ZCZ ball P17)  USB1-ID         */
+
+	/* P1_14                VOUT-3.3V */
+
+	/* P1_15                GND */
+
+	/* P1_16                GND */
+
+	/* P1_17 (ZCZ ball A9)  VREFN         */
+
+	/* P1_18 (ZCZ ball B9)  VREFP         */
+
+	/* P1_19 (ZCZ ball B6)  AIN0         */
+
+	/* P1_20 (ZCZ ball D14) */
+	P1_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruin";
+		pinctrl-0 = <&P1_20_default_pin>;
+		pinctrl-1 = <&P1_20_gpio_pin>;
+		pinctrl-2 = <&P1_20_gpio_pu_pin>;
+		pinctrl-3 = <&P1_20_gpio_pd_pin>;
+		pinctrl-4 = <&P1_20_gpio_input_pin>;
+		pinctrl-5 = <&P1_20_pruin_pin>;
+	};
+
+	/* P1_21 (ZCZ ball C7)  AIN1         */
+
+	/* P1_22                GND */
+
+	/* P1_23 (ZCZ ball B7)  AIN2         */
+
+	/* P1_24                VOUT-5V */
+
+	/* P1_25 (ZCZ ball A7)  AIN3         */
+
+	/* P1_26 (ZCZ ball D18) i2c */
+	P1_26_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart";
+		pinctrl-0 = <&P1_26_default_pin>;
+		pinctrl-1 = <&P1_26_gpio_pin>;
+		pinctrl-2 = <&P1_26_gpio_pu_pin>;
+		pinctrl-3 = <&P1_26_gpio_pd_pin>;
+		pinctrl-4 = <&P1_26_gpio_input_pin>;
+		pinctrl-5 = <&P1_26_spi_cs_pin>;
+		pinctrl-6 = <&P1_26_can_pin>;
+		pinctrl-7 = <&P1_26_i2c_pin>;
+		pinctrl-8 = <&P1_26_pru_uart_pin>;
+	};
+
+	/* P1_27 (ZCZ ball C8)  AIN4         */
+
+	/* P1_28 (ZCZ ball D17) i2c */
+	P1_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "can", "i2c", "pru_uart";
+		pinctrl-0 = <&P1_28_default_pin>;
+		pinctrl-1 = <&P1_28_gpio_pin>;
+		pinctrl-2 = <&P1_28_gpio_pu_pin>;
+		pinctrl-3 = <&P1_28_gpio_pd_pin>;
+		pinctrl-4 = <&P1_28_gpio_input_pin>;
+		pinctrl-5 = <&P1_28_spi_cs_pin>;
+		pinctrl-6 = <&P1_28_can_pin>;
+		pinctrl-7 = <&P1_28_i2c_pin>;
+		pinctrl-8 = <&P1_28_pru_uart_pin>;
+	};
+
+	/* P1_29 (ZCZ ball A14) pruin */
+	P1_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P1_29_default_pin>;
+		pinctrl-1 = <&P1_29_gpio_pin>;
+		pinctrl-2 = <&P1_29_gpio_pu_pin>;
+		pinctrl-3 = <&P1_29_gpio_pd_pin>;
+		pinctrl-4 = <&P1_29_gpio_input_pin>;
+		pinctrl-5 = <&P1_29_qep_pin>;
+		pinctrl-6 = <&P1_29_pruout_pin>;
+		pinctrl-7 = <&P1_29_pruin_pin>;
+	};
+
+	/* P1_30 (ZCZ ball E16) uart */
+	P1_30_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "uart", "can", "i2c", "pruout", "pruin";
+		pinctrl-0 = <&P1_30_default_pin>;
+		pinctrl-1 = <&P1_30_gpio_pin>;
+		pinctrl-2 = <&P1_30_gpio_pu_pin>;
+		pinctrl-3 = <&P1_30_gpio_pd_pin>;
+		pinctrl-4 = <&P1_30_gpio_input_pin>;
+		pinctrl-5 = <&P1_30_spi_cs_pin>;
+		pinctrl-6 = <&P1_30_uart_pin>;
+		pinctrl-7 = <&P1_30_can_pin>;
+		pinctrl-8 = <&P1_30_i2c_pin>;
+		pinctrl-9 = <&P1_30_pruout_pin>;
+		pinctrl-10 = <&P1_30_pruin_pin>;
+	};
+
+	/* P1_31 (ZCZ ball B12) pruin */
+	P1_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P1_31_default_pin>;
+		pinctrl-1 = <&P1_31_gpio_pin>;
+		pinctrl-2 = <&P1_31_gpio_pu_pin>;
+		pinctrl-3 = <&P1_31_gpio_pd_pin>;
+		pinctrl-4 = <&P1_31_gpio_input_pin>;
+		pinctrl-5 = <&P1_31_qep_pin>;
+		pinctrl-6 = <&P1_31_pruout_pin>;
+		pinctrl-7 = <&P1_31_pruin_pin>;
+	};
+
+	/* P1_32 (ZCZ ball E15) uart */
+	P1_32_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "uart", "can", "i2c", "pruout", "pruin";
+		pinctrl-0 = <&P1_32_default_pin>;
+		pinctrl-1 = <&P1_32_gpio_pin>;
+		pinctrl-2 = <&P1_32_gpio_pu_pin>;
+		pinctrl-3 = <&P1_32_gpio_pd_pin>;
+		pinctrl-4 = <&P1_32_gpio_input_pin>;
+		pinctrl-5 = <&P1_32_spi_cs_pin>;
+		pinctrl-6 = <&P1_32_uart_pin>;
+		pinctrl-7 = <&P1_32_can_pin>;
+		pinctrl-8 = <&P1_32_i2c_pin>;
+		pinctrl-9 = <&P1_32_pruout_pin>;
+		pinctrl-10 = <&P1_32_pruin_pin>;
+	};
+
+	/* P1_33 (ZCZ ball B13) pruin */
+	P1_33_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P1_33_default_pin>;
+		pinctrl-1 = <&P1_33_gpio_pin>;
+		pinctrl-2 = <&P1_33_gpio_pu_pin>;
+		pinctrl-3 = <&P1_33_gpio_pd_pin>;
+		pinctrl-4 = <&P1_33_gpio_input_pin>;
+		pinctrl-5 = <&P1_33_spi_pin>;
+		pinctrl-6 = <&P1_33_pwm_pin>;
+		pinctrl-7 = <&P1_33_pruout_pin>;
+		pinctrl-8 = <&P1_33_pruin_pin>;
+	};
+
+	/* P1_34 (ZCZ ball T11) */
+	P1_34_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P1_34_default_pin>;
+		pinctrl-1 = <&P1_34_gpio_pin>;
+		pinctrl-2 = <&P1_34_gpio_pu_pin>;
+		pinctrl-3 = <&P1_34_gpio_pd_pin>;
+		pinctrl-4 = <&P1_34_gpio_input_pin>;
+		pinctrl-5 = <&P1_34_pwm_pin>;
+	};
+
+	/* P1_35 (ZCZ ball V5) pruin */
+	P1_35_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P1_35_default_pin>;
+		pinctrl-1 = <&P1_35_gpio_pin>;
+		pinctrl-2 = <&P1_35_gpio_pu_pin>;
+		pinctrl-3 = <&P1_35_gpio_pd_pin>;
+		pinctrl-4 = <&P1_35_gpio_input_pin>;
+		pinctrl-5 = <&P1_35_pruout_pin>;
+		pinctrl-6 = <&P1_35_pruin_pin>;
+	};
+
+	/* P1_36 (ZCZ ball A13) pwm */
+	P1_36_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_sclk", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P1_36_default_pin>;
+		pinctrl-1 = <&P1_36_gpio_pin>;
+		pinctrl-2 = <&P1_36_gpio_pu_pin>;
+		pinctrl-3 = <&P1_36_gpio_pd_pin>;
+		pinctrl-4 = <&P1_36_gpio_input_pin>;
+		pinctrl-5 = <&P1_36_spi_sclk_pin>;
+		pinctrl-6 = <&P1_36_pwm_pin>;
+		pinctrl-7 = <&P1_36_pruout_pin>;
+		pinctrl-8 = <&P1_36_pruin_pin>;
+	};
+
+
+	/************************/
+	/* P2 Header */
+	/************************/
+
+	/* P2_01 (ZCZ ball U14) pwm */
+	P2_01_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P2_01_default_pin>;
+		pinctrl-1 = <&P2_01_gpio_pin>;
+		pinctrl-2 = <&P2_01_gpio_pu_pin>;
+		pinctrl-3 = <&P2_01_gpio_pd_pin>;
+		pinctrl-4 = <&P2_01_gpio_input_pin>;
+		pinctrl-5 = <&P2_01_pwm_pin>;
+	};
+
+	/* P2_02 (ZCZ ball V17) */
+	P2_02_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_02_default_pin>;
+		pinctrl-1 = <&P2_02_gpio_pin>;
+		pinctrl-2 = <&P2_02_gpio_pu_pin>;
+		pinctrl-3 = <&P2_02_gpio_pd_pin>;
+		pinctrl-4 = <&P2_02_gpio_input_pin>;
+	};
+
+	/* P2_03 (ZCZ ball T10) */
+	P2_03_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P2_03_default_pin>;
+		pinctrl-1 = <&P2_03_gpio_pin>;
+		pinctrl-2 = <&P2_03_gpio_pu_pin>;
+		pinctrl-3 = <&P2_03_gpio_pd_pin>;
+		pinctrl-4 = <&P2_03_gpio_input_pin>;
+		pinctrl-5 = <&P2_03_pwm_pin>;
+	};
+
+	/* P2_04 (ZCZ ball T16) */
+	P2_04_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_04_default_pin>;
+		pinctrl-1 = <&P2_04_gpio_pin>;
+		pinctrl-2 = <&P2_04_gpio_pu_pin>;
+		pinctrl-3 = <&P2_04_gpio_pd_pin>;
+		pinctrl-4 = <&P2_04_gpio_input_pin>;
+	};
+
+	/* P2_05 (ZCZ ball T17) uart */
+	P2_05_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P2_05_default_pin>;
+		pinctrl-1 = <&P2_05_gpio_pin>;
+		pinctrl-2 = <&P2_05_gpio_pu_pin>;
+		pinctrl-3 = <&P2_05_gpio_pd_pin>;
+		pinctrl-4 = <&P2_05_gpio_input_pin>;
+		pinctrl-5 = <&P2_05_uart_pin>;
+	};
+
+	/* P2_06 (ZCZ ball U16) */
+	P2_06_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_06_default_pin>;
+		pinctrl-1 = <&P2_06_gpio_pin>;
+		pinctrl-2 = <&P2_06_gpio_pu_pin>;
+		pinctrl-3 = <&P2_06_gpio_pd_pin>;
+		pinctrl-4 = <&P2_06_gpio_input_pin>;
+	};
+
+	/* P2_07 (ZCZ ball U17) uart */
+	P2_07_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart";
+		pinctrl-0 = <&P2_07_default_pin>;
+		pinctrl-1 = <&P2_07_gpio_pin>;
+		pinctrl-2 = <&P2_07_gpio_pu_pin>;
+		pinctrl-3 = <&P2_07_gpio_pd_pin>;
+		pinctrl-4 = <&P2_07_gpio_input_pin>;
+		pinctrl-5 = <&P2_07_uart_pin>;
+	};
+
+	/* P2_08 (ZCZ ball U18) */
+	P2_08_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_08_default_pin>;
+		pinctrl-1 = <&P2_08_gpio_pin>;
+		pinctrl-2 = <&P2_08_gpio_pu_pin>;
+		pinctrl-3 = <&P2_08_gpio_pd_pin>;
+		pinctrl-4 = <&P2_08_gpio_input_pin>;
+	};
+
+	/* P2_09 (ZCZ ball D15) i2c */
+	P2_09_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P2_09_default_pin>;
+		pinctrl-1 = <&P2_09_gpio_pin>;
+		pinctrl-2 = <&P2_09_gpio_pu_pin>;
+		pinctrl-3 = <&P2_09_gpio_pd_pin>;
+		pinctrl-4 = <&P2_09_gpio_input_pin>;
+		pinctrl-5 = <&P2_09_uart_pin>;
+		pinctrl-6 = <&P2_09_can_pin>;
+		pinctrl-7 = <&P2_09_i2c_pin>;
+		pinctrl-8 = <&P2_09_pru_uart_pin>;
+		pinctrl-9 = <&P2_09_pruin_pin>;
+	};
+
+	/* P2_10 (ZCZ ball R14) */
+	P2_10_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep";
+		pinctrl-0 = <&P2_10_default_pin>;
+		pinctrl-1 = <&P2_10_gpio_pin>;
+		pinctrl-2 = <&P2_10_gpio_pu_pin>;
+		pinctrl-3 = <&P2_10_gpio_pd_pin>;
+		pinctrl-4 = <&P2_10_gpio_input_pin>;
+		pinctrl-5 = <&P2_10_qep_pin>;
+	};
+
+	/* P2_11 (ZCZ ball D16) i2c */
+	P2_11_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "uart", "can", "i2c", "pru_uart", "pruin";
+		pinctrl-0 = <&P2_11_default_pin>;
+		pinctrl-1 = <&P2_11_gpio_pin>;
+		pinctrl-2 = <&P2_11_gpio_pu_pin>;
+		pinctrl-3 = <&P2_11_gpio_pd_pin>;
+		pinctrl-4 = <&P2_11_gpio_input_pin>;
+		pinctrl-5 = <&P2_11_uart_pin>;
+		pinctrl-6 = <&P2_11_can_pin>;
+		pinctrl-7 = <&P2_11_i2c_pin>;
+		pinctrl-8 = <&P2_11_pru_uart_pin>;
+		pinctrl-9 = <&P2_11_pruin_pin>;
+	};
+
+	/* P2_12                POWER_BUTTON */
+
+	/* P2_13                VOUT-5V */
+
+	/* P2_14                BAT-VIN */
+
+	/* P2_15                GND */
+
+	/* P2_16                BAT-TEMP */
+
+	/* P2_17 (ZCZ ball V12) */
+	P2_17_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_17_default_pin>;
+		pinctrl-1 = <&P2_17_gpio_pin>;
+		pinctrl-2 = <&P2_17_gpio_pu_pin>;
+		pinctrl-3 = <&P2_17_gpio_pd_pin>;
+		pinctrl-4 = <&P2_17_gpio_input_pin>;
+	};
+
+	/* P2_18 (ZCZ ball U13) */
+	P2_18_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pru_ecap", "pruin";
+		pinctrl-0 = <&P2_18_default_pin>;
+		pinctrl-1 = <&P2_18_gpio_pin>;
+		pinctrl-2 = <&P2_18_gpio_pu_pin>;
+		pinctrl-3 = <&P2_18_gpio_pd_pin>;
+		pinctrl-4 = <&P2_18_gpio_input_pin>;
+		pinctrl-5 = <&P2_18_qep_pin>;
+		pinctrl-6 = <&P2_18_pru_ecap_pin>;
+		pinctrl-7 = <&P2_18_pruin_pin>;
+	};
+
+	/* P2_19 (ZCZ ball U12) */
+	P2_19_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pwm";
+		pinctrl-0 = <&P2_19_default_pin>;
+		pinctrl-1 = <&P2_19_gpio_pin>;
+		pinctrl-2 = <&P2_19_gpio_pu_pin>;
+		pinctrl-3 = <&P2_19_gpio_pd_pin>;
+		pinctrl-4 = <&P2_19_gpio_input_pin>;
+		pinctrl-5 = <&P2_19_pwm_pin>;
+	};
+
+	/* P2_20 (ZCZ ball T13) */
+	P2_20_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input";
+		pinctrl-0 = <&P2_20_default_pin>;
+		pinctrl-1 = <&P2_20_gpio_pin>;
+		pinctrl-2 = <&P2_20_gpio_pu_pin>;
+		pinctrl-3 = <&P2_20_gpio_pd_pin>;
+		pinctrl-4 = <&P2_20_gpio_input_pin>;
+	};
+
+	/* P2_21                GND */
+
+	/* P2_22 (ZCZ ball V13) */
+	P2_22_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruin";
+		pinctrl-0 = <&P2_22_default_pin>;
+		pinctrl-1 = <&P2_22_gpio_pin>;
+		pinctrl-2 = <&P2_22_gpio_pu_pin>;
+		pinctrl-3 = <&P2_22_gpio_pd_pin>;
+		pinctrl-4 = <&P2_22_gpio_input_pin>;
+		pinctrl-5 = <&P2_22_qep_pin>;
+		pinctrl-6 = <&P2_22_pruin_pin>;
+	};
+
+	/* P2_23                VOUT-3.3V */
+
+	/* P2_24 (ZCZ ball T12) */
+	P2_24_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P2_24_default_pin>;
+		pinctrl-1 = <&P2_24_gpio_pin>;
+		pinctrl-2 = <&P2_24_gpio_pu_pin>;
+		pinctrl-3 = <&P2_24_gpio_pd_pin>;
+		pinctrl-4 = <&P2_24_gpio_input_pin>;
+		pinctrl-5 = <&P2_24_qep_pin>;
+		pinctrl-6 = <&P2_24_pruout_pin>;
+	};
+
+	/* P2_25 (ZCZ ball E17) spi */
+	P2_25_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "spi_cs", "uart", "can", "i2c";
+		pinctrl-0 = <&P2_25_default_pin>;
+		pinctrl-1 = <&P2_25_gpio_pin>;
+		pinctrl-2 = <&P2_25_gpio_pu_pin>;
+		pinctrl-3 = <&P2_25_gpio_pd_pin>;
+		pinctrl-4 = <&P2_25_gpio_input_pin>;
+		pinctrl-5 = <&P2_25_spi_pin>;
+		pinctrl-6 = <&P2_25_spi_cs_pin>;
+		pinctrl-7 = <&P2_25_uart_pin>;
+		pinctrl-8 = <&P2_25_can_pin>;
+		pinctrl-9 = <&P2_25_i2c_pin>;
+	};
+
+	/* P2_26                RESET# */
+
+	/* P2_27 (ZCZ ball E18) spi */
+	P2_27_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "uart", "can", "i2c";
+		pinctrl-0 = <&P2_27_default_pin>;
+		pinctrl-1 = <&P2_27_gpio_pin>;
+		pinctrl-2 = <&P2_27_gpio_pu_pin>;
+		pinctrl-3 = <&P2_27_gpio_pd_pin>;
+		pinctrl-4 = <&P2_27_gpio_input_pin>;
+		pinctrl-5 = <&P2_27_spi_pin>;
+		pinctrl-6 = <&P2_27_uart_pin>;
+		pinctrl-7 = <&P2_27_can_pin>;
+		pinctrl-8 = <&P2_27_i2c_pin>;
+	};
+
+	/* P2_28 (ZCZ ball D13) pruin */
+	P2_28_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P2_28_default_pin>;
+		pinctrl-1 = <&P2_28_gpio_pin>;
+		pinctrl-2 = <&P2_28_gpio_pu_pin>;
+		pinctrl-3 = <&P2_28_gpio_pd_pin>;
+		pinctrl-4 = <&P2_28_gpio_input_pin>;
+		pinctrl-5 = <&P2_28_qep_pin>;
+		pinctrl-6 = <&P2_28_pruout_pin>;
+		pinctrl-7 = <&P2_28_pruin_pin>;
+	};
+
+	/* P2_29 (ZCZ ball C18) spi_sclk */
+	P2_29_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "spi_sclk", "uart", "pwm", "pru_ecap";
+		pinctrl-0 = <&P2_29_default_pin>;
+		pinctrl-1 = <&P2_29_gpio_pin>;
+		pinctrl-2 = <&P2_29_gpio_pu_pin>;
+		pinctrl-3 = <&P2_29_gpio_pd_pin>;
+		pinctrl-4 = <&P2_29_gpio_input_pin>;
+		pinctrl-5 = <&P2_29_spi_cs_pin>;
+		pinctrl-6 = <&P2_29_spi_sclk_pin>;
+		pinctrl-7 = <&P2_29_uart_pin>;
+		pinctrl-8 = <&P2_29_pwm_pin>;
+		pinctrl-9 = <&P2_29_pru_ecap_pin>;
+	};
+
+	/* P2_30 (ZCZ ball C12) pruin */
+	P2_30_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P2_30_default_pin>;
+		pinctrl-1 = <&P2_30_gpio_pin>;
+		pinctrl-2 = <&P2_30_gpio_pu_pin>;
+		pinctrl-3 = <&P2_30_gpio_pd_pin>;
+		pinctrl-4 = <&P2_30_gpio_input_pin>;
+		pinctrl-5 = <&P2_30_spi_cs_pin>;
+		pinctrl-6 = <&P2_30_pwm_pin>;
+		pinctrl-7 = <&P2_30_pruout_pin>;
+		pinctrl-8 = <&P2_30_pruin_pin>;
+	};
+
+	/* P2_31 (ZCZ ball A15) spi_cs */
+	P2_31_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi_cs", "pruin";
+		pinctrl-0 = <&P2_31_default_pin>;
+		pinctrl-1 = <&P2_31_gpio_pin>;
+		pinctrl-2 = <&P2_31_gpio_pu_pin>;
+		pinctrl-3 = <&P2_31_gpio_pd_pin>;
+		pinctrl-4 = <&P2_31_gpio_input_pin>;
+		pinctrl-5 = <&P2_31_spi_cs_pin>;
+		pinctrl-6 = <&P2_31_pruin_pin>;
+	};
+
+	/* P2_32 (ZCZ ball D12) pruin */
+	P2_32_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "spi", "pwm", "pruout", "pruin";
+		pinctrl-0 = <&P2_32_default_pin>;
+		pinctrl-1 = <&P2_32_gpio_pin>;
+		pinctrl-2 = <&P2_32_gpio_pu_pin>;
+		pinctrl-3 = <&P2_32_gpio_pd_pin>;
+		pinctrl-4 = <&P2_32_gpio_input_pin>;
+		pinctrl-5 = <&P2_32_spi_pin>;
+		pinctrl-6 = <&P2_32_pwm_pin>;
+		pinctrl-7 = <&P2_32_pruout_pin>;
+		pinctrl-8 = <&P2_32_pruin_pin>;
+	};
+
+	/* P2_33 (ZCZ ball R12) */
+	P2_33_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout";
+		pinctrl-0 = <&P2_33_default_pin>;
+		pinctrl-1 = <&P2_33_gpio_pin>;
+		pinctrl-2 = <&P2_33_gpio_pu_pin>;
+		pinctrl-3 = <&P2_33_gpio_pd_pin>;
+		pinctrl-4 = <&P2_33_gpio_input_pin>;
+		pinctrl-5 = <&P2_33_qep_pin>;
+		pinctrl-6 = <&P2_33_pruout_pin>;
+	};
+
+	/* P2_34 (ZCZ ball C13) pruin */
+	P2_34_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "qep", "pruout", "pruin";
+		pinctrl-0 = <&P2_34_default_pin>;
+		pinctrl-1 = <&P2_34_gpio_pin>;
+		pinctrl-2 = <&P2_34_gpio_pu_pin>;
+		pinctrl-3 = <&P2_34_gpio_pd_pin>;
+		pinctrl-4 = <&P2_34_gpio_input_pin>;
+		pinctrl-5 = <&P2_34_qep_pin>;
+		pinctrl-6 = <&P2_34_pruout_pin>;
+		pinctrl-7 = <&P2_34_pruin_pin>;
+	};
+
+	/* P2_35 (ZCZ ball U5) gpio_input */
+	P2_35_pinmux {
+		compatible = "bone-pinmux-helper";
+		status = "okay";
+		pinctrl-names = "default", "gpio", "gpio_pu", "gpio_pd", "gpio_input", "pruout", "pruin";
+		pinctrl-0 = <&P2_35_default_pin>;
+		pinctrl-1 = <&P2_35_gpio_pin>;
+		pinctrl-2 = <&P2_35_gpio_pu_pin>;
+		pinctrl-3 = <&P2_35_gpio_pd_pin>;
+		pinctrl-4 = <&P2_35_gpio_input_pin>;
+		pinctrl-5 = <&P2_35_pruout_pin>;
+		pinctrl-6 = <&P2_35_pruin_pin>;
+	};
+
+	/* P2_36 (ZCZ ball C9)  AIN7         */
+
+	cape-universal {
+		compatible = "gpio-of-helper";
+		status = "okay";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		P1_02 {
+			gpio-name = "P1_02";
+			gpio = <&gpio2 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_04 {
+			gpio-name = "P1_04";
+			gpio = <&gpio2 25 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_06 {
+			gpio-name = "P1_06";
+			gpio = <&gpio0 5 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_08 {
+			gpio-name = "P1_08";
+			gpio = <&gpio0 2 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_10 {
+			gpio-name = "P1_10";
+			gpio = <&gpio0 3 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_12 {
+			gpio-name = "P1_12";
+			gpio = <&gpio0 4 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_20 {
+			gpio-name = "P1_20";
+			gpio = <&gpio0 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_26 {
+			gpio-name = "P1_26";
+			gpio = <&gpio0 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_28 {
+			gpio-name = "P1_28";
+			gpio = <&gpio0 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_29 {
+			gpio-name = "P1_29";
+			gpio = <&gpio3 21 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_30 {
+			gpio-name = "P1_30";
+			gpio = <&gpio1 11 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_31 {
+			gpio-name = "P1_31";
+			gpio = <&gpio3 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_32 {
+			gpio-name = "P1_32";
+			gpio = <&gpio1 10 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_33 {
+			gpio-name = "P1_33";
+			gpio = <&gpio3 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_34 {
+			gpio-name = "P1_34";
+			gpio = <&gpio0 26 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_35 {
+			gpio-name = "P1_35";
+			gpio = <&gpio2 24 0>;
+			input;
+			dir-changeable;
+		};
+
+		P1_36 {
+			gpio-name = "P1_36";
+			gpio = <&gpio3 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_01 {
+			gpio-name = "P2_01";
+			gpio = <&gpio1 18 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_02 {
+			gpio-name = "P2_02";
+			gpio = <&gpio1 27 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_03 {
+			gpio-name = "P2_03";
+			gpio = <&gpio0 23 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_04 {
+			gpio-name = "P2_04";
+			gpio = <&gpio1 26 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_05 {
+			gpio-name = "P2_05";
+			gpio = <&gpio0 30 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_06 {
+			gpio-name = "P2_06";
+			gpio = <&gpio1 25 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_07 {
+			gpio-name = "P2_07";
+			gpio = <&gpio0 31 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_08 {
+			gpio-name = "P2_08";
+			gpio = <&gpio1 28 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_09 {
+			gpio-name = "P2_09";
+			gpio = <&gpio0 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_10 {
+			gpio-name = "P2_10";
+			gpio = <&gpio1 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_11 {
+			gpio-name = "P2_11";
+			gpio = <&gpio0 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_17 {
+			gpio-name = "P2_17";
+			gpio = <&gpio2 1 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_18 {
+			gpio-name = "P2_18";
+			gpio = <&gpio1 15 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_19 {
+			gpio-name = "P2_19";
+			gpio = <&gpio0 27 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_20 {
+			gpio-name = "P2_20";
+			gpio = <&gpio2 0 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_22 {
+			gpio-name = "P2_22";
+			gpio = <&gpio1 14 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_24 {
+			gpio-name = "P2_24";
+			gpio = <&gpio1 12 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_25 {
+			gpio-name = "P2_25";
+			gpio = <&gpio1 9 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_27 {
+			gpio-name = "P2_27";
+			gpio = <&gpio1 8 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_28 {
+			gpio-name = "P2_28";
+			gpio = <&gpio3 20 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_29 {
+			gpio-name = "P2_29";
+			gpio = <&gpio0 7 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_30 {
+			gpio-name = "P2_30";
+			gpio = <&gpio3 17 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_31 {
+			gpio-name = "P2_31";
+			gpio = <&gpio0 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_32 {
+			gpio-name = "P2_32";
+			gpio = <&gpio3 16 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_33 {
+			gpio-name = "P2_33";
+			gpio = <&gpio1 13 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_34 {
+			gpio-name = "P2_34";
+			gpio = <&gpio3 19 0>;
+			input;
+			dir-changeable;
+		};
+
+		P2_35 {
+			gpio-name = "P2_35";
+			gpio = <&gpio2 22 0>;
+			input;
+			dir-changeable;
+		};
+
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-sancloud-bbe.dts b/arch/arm/boot/dts/am335x-sancloud-bbe.dts
index e5fdb7abb0d..820ff38cdf0 100644
--- a/arch/arm/boot/dts/am335x-sancloud-bbe.dts
+++ b/arch/arm/boot/dts/am335x-sancloud-bbe.dts
@@ -12,6 +12,11 @@
 / {
 	model = "SanCloud BeagleBone Enhanced";
 	compatible = "sancloud,am335x-boneenhanced", "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
+
+	chosen {
+		base_dtb = "am335x-sancloud-bbe.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &am33xx_pinmux {
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 5ed7f3c58c0..46217e305c1 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -157,6 +157,39 @@ gpio0: gpio@0 {
 				#interrupt-cells = <2>;
 				reg = <0x0 0x1000>;
 				interrupts = <96>;
+				gpio-line-names =
+					"MDIO_DATA",	// 0
+					"MDIO_CLK",	// 1
+					"SPI0_SCLK",	// 2
+					"SPI0_D0",	// 3
+					"SPI0_D1",	// 4
+					"SPI0_CS0",	// 5
+					"SPI0_CS1",	// 6
+					"ECAP0_IN_PWM0_OUT",	// 7
+					"LCD_DATA12",	// 8
+					"LCD_DATA13",	// 9
+					"LCD_DATA14",	// 10
+					"LCD_DATA15",	// 11
+					"UART1_CTSN",	// 12
+					"UART1_RTSN",	// 13
+					"UART1_RXD",	// 14
+					"UART1_TXD",	// 15
+					"GMII1_TXD3",	// 16
+					"GMII1_TXD2",	// 17
+					"USB0_DRVVBUS",	// 18
+					"XDMA_EVENT_INTR0",	// 19
+					"XDMA_EVENT_INTR1",	// 20
+					"GMII1_TXD1",	// 21
+					"GPMC_AD8",	// 22
+					"GPMC_AD9",	// 23
+					"NC",	// 24
+					"NC",	// 25
+					"GPMC_AD10",	// 26
+					"GPMC_AD11",	// 27
+					"GMII1_TXD0",	// 28
+					"RMII1_REFCLK",	// 29
+					"GPMC_WAIT0",	// 30
+					"GPMC_WPN";	// 31
 			};
 		};
 
@@ -1304,6 +1337,39 @@ gpio1: gpio@0 {
 				#interrupt-cells = <2>;
 				reg = <0x0 0x1000>;
 				interrupts = <98>;
+				gpio-line-names =
+					"GPMC_AD0",	// 0
+					"GPMC_AD1",	// 1
+					"GPMC_AD2",	// 2
+					"GPMC_AD3",	// 3
+					"GPMC_AD4",	// 4
+					"GPMC_AD5",	// 5
+					"GPMC_AD6",	// 6
+					"GPMC_AD7",	// 7
+					"UART0_CTSN",	// 8
+					"UART0_RTSN",	// 9
+					"UART0_RXD",	// 10
+					"UART0_TXD",	// 11
+					"GPMC_AD12",	// 12
+					"GPMC_AD13",	// 13
+					"GPMC_AD14",	// 14
+					"GPMC_AD15",	// 15
+					"GPMC_A0",	// 16
+					"GPMC_A1",	// 17
+					"GPMC_A2",	// 18
+					"GPMC_A3",	// 19
+					"GPMC_A4",	// 20
+					"GPMC_A5",	// 21
+					"GPMC_A6",	// 22
+					"GPMC_A7",	// 23
+					"GPMC_A8",	// 24
+					"GPMC_A9",	// 25
+					"GPMC_A10",	// 26
+					"GPMC_A11",	// 27
+					"GPMC_BE1N",	// 28
+					"GPMC_CSN0",	// 29
+					"GPMC_CSN1",	// 30
+					"GPMC_CSN2";	// 31
 			};
 		};
 
@@ -1706,6 +1772,39 @@ gpio2: gpio@0 {
 				#interrupt-cells = <2>;
 				reg = <0x0 0x1000>;
 				interrupts = <32>;
+				gpio-line-names =
+					"GPMC_CSN3",	// 0
+					"GPMC_CLK",	// 1
+					"GPMC_ADVN_ALE",	// 2
+					"GPMC_OEN_REN",	// 3
+					"GPMC_WEN",	// 4
+					"GPMC_BE0N_CLE",	// 5
+					"LCD_DATA0",	// 6
+					"LCD_DATA1",	// 7
+					"LCD_DATA2",	// 8
+					"LCD_DATA3",	// 9
+					"LCD_DATA4",	// 10
+					"LCD_DATA5",	// 11
+					"LCD_DATA6",	// 12
+					"LCD_DATA7",	// 13
+					"LCD_DATA8",	// 14
+					"LCD_DATA9",	// 15
+					"LCD_DATA10",	// 16
+					"LCD_DATA11",	// 17
+					"GMII1_RXD3",	// 18
+					"GMII1_RXD2",	// 19
+					"GMII1_RXD1",	// 20
+					"GMII1_RXD0",	// 21
+					"LCD_VSYNC",	// 22
+					"LCD_HSYNC",	// 23
+					"LCD_PCLK",	// 24
+					"LCD_AC_BIAS_EN",	// 25
+					"MMC0_DAT3",	// 26
+					"MMC0_DAT2",	// 27
+					"MMC0_DAT1",	// 28
+					"MMC0_DAT0",	// 29
+					"MMC0_CLK",	// 30
+					"MMC0_CMD";	// 31
 			};
 		};
 
@@ -1739,6 +1838,39 @@ gpio3: gpio@0 {
 				#interrupt-cells = <2>;
 				reg = <0x0 0x1000>;
 				interrupts = <62>;
+				gpio-line-names =
+					"GMII1_COL",	// 0
+					"GMII1_CRS",	// 1
+					"GMII1_RXER",	// 2
+					"GMII1_TXEN",	// 3
+					"GMII1_RXDV",	// 4
+					"I2C0_SDA",	// 5
+					"I2C0_SCL",	// 6
+					"EMU0",	// 7
+					"EMU1",	// 8
+					"GMII1_TXCLK",	// 9
+					"GMII1_RXCLK",	// 10
+					"NC",	// 11
+					"NC",	// 12
+					"USB1_DRVVBUS",	// 13
+					"MCASP0_ACLKX",	// 14
+					"MCASP0_FSX",	// 15
+					"MCASP0_AXR0",	// 16
+					"MCASP0_AHCLKR",	// 17
+					"MCASP0_ACLKR",	// 18
+					"MCASP0_FSR",	// 19
+					"MCASP0_AXR1",	// 20
+					"MCASP0_AHCLKX",	// 21
+					"NC",	// 22
+					"NC",	// 23
+					"NC",	// 24
+					"NC",	// 25
+					"NC",	// 26
+					"NC",	// 27
+					"NC",	// 28
+					"NC",	// 29
+					"NC",	// 30
+					"NC";	// 31
 			};
 		};
 
@@ -1904,6 +2036,15 @@ ecap0: ecap@100 {
 					status = "disabled";
 				};
 
+				eqep0: counter@180 {
+					compatible = "ti,am3352-eqep";
+					reg = <0x180 0x80>;
+					clocks = <&l4ls_gclk>;
+					clock-names = "sysclkout";
+					interrupts = <79>;
+					status = "disabled";
+				};
+
 				ehrpwm0: pwm@200 {
 					compatible = "ti,am3352-ehrpwm",
 						     "ti,am33xx-ehrpwm";
@@ -1956,6 +2097,15 @@ ecap1: ecap@100 {
 					status = "disabled";
 				};
 
+				eqep1: counter@180 {
+					compatible = "ti,am3352-eqep";
+					reg = <0x180 0x80>;
+					clocks = <&l4ls_gclk>;
+					clock-names = "sysclkout";
+					interrupts = <88>;
+					status = "disabled";
+				};
+
 				ehrpwm1: pwm@200 {
 					compatible = "ti,am3352-ehrpwm",
 						     "ti,am33xx-ehrpwm";
@@ -2008,6 +2158,15 @@ ecap2: ecap@100 {
 					status = "disabled";
 				};
 
+				eqep2: counter@180 {
+					compatible = "ti,am3352-eqep";
+					reg = <0x180 0x80>;
+					clocks = <&l4ls_gclk>;
+					clock-names = "sysclkout";
+					interrupts = <89>;
+					status = "disabled";
+				};
+
 				ehrpwm2: pwm@200 {
 					compatible = "ti,am3352-ehrpwm",
 						     "ti,am33xx-ehrpwm";
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 16640e2a270..c6a0bca0b8e 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -172,7 +172,7 @@ mpu {
 	 * for the moment, just use a fake OCP bus entry to represent
 	 * the whole bus hierarchy.
 	 */
-	ocp {
+	ocp: ocp {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 565675354de..9cb1c3ff7b1 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -185,6 +185,10 @@ sound0_master: simple-audio-card,codec {
 	};
 };
 
+&bb2d {
+	status = "okay";
+};
+
 &i2c1 {
 	status = "okay";
 	clock-frequency = <400000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index 39d1c4ff574..da47c1389fd 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -7,6 +7,11 @@
 
 / {
 	model = "TI AM5728 BeagleBoard-X15 rev B1";
+
+	chosen {
+		base_dtb = "am57xx-beagle-x15-revb1.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &tpd12s015 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 4187a9729f9..131773f978b 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -7,6 +7,11 @@
 
 / {
 	model = "TI AM5728 BeagleBoard-X15 rev C";
+
+	chosen {
+		base_dtb = "am57xx-beagle-x15-revc.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &tpd12s015 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a5c24ed4d12..dbda396f92b 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -8,6 +8,11 @@
 / {
 	/* NOTE: This describes the "original" pre-production A2 revision */
 	model = "TI AM5728 BeagleBoard-X15";
+
+	chosen {
+		base_dtb = "am57xx-beagle-x15.dts";
+		base_dtb_timestamp = __TIMESTAMP__;
+	};
 };
 
 &tpd12s015 {
diff --git a/arch/arm/boot/dts/armada-370-smileplug.dts b/arch/arm/boot/dts/armada-370-smileplug.dts
new file mode 100644
index 00000000000..d01308ab848
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-smileplug.dts
@@ -0,0 +1,173 @@
+/*
+ * Device Tree file for Marvell SMILE Plug
+ *
+ * Kevin Mihelich <kevin@archlinuxarm.org>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-370.dtsi"
+
+/ {
+	model = "Marvell SMILE Plug";
+	compatible = "marvell,smileplug", "marvell,armada370", "marvell,armada-370-xp";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x20000000>; /* 512 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
+			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
+
+		pcie-controller {
+			status = "okay";
+
+			/* Internal mini-PCIe connector */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+
+			/* Connected on the PCB to a USB 3.0 XHCI controller */
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+
+		internal-regs {
+			serial@12000 {
+				status = "okay";
+			};
+			timer@20300 {
+				clock-frequency = <600000000>;
+				status = "okay";
+			};
+
+			gpio_leds {
+				compatible = "gpio-leds";
+				pinctrl-names = "default";
+				pinctrl-0 = <&smile_led_pins>;
+
+				red_eyes_led {
+					label = "smileplug:red:eyes";
+					gpios = <&gpio1 31 0>;
+					default-state = "off";
+				};
+
+				green_eyes_led {
+					label = "smileplug:green:eyes";
+					gpios = <&gpio2 0 0>;
+					linux,default-trigger = "default-on";
+				};
+
+				red_smile_led {
+					label = "smileplug:red:smile";
+					gpios = <&gpio1 15 0>;
+					default-state = "off";
+				};
+
+				green_smile_led {
+					label = "smileplug:green:smile";
+					gpios = <&gpio1 27 0>;
+					linux,default-trigger = "default-on";
+				};
+			};
+
+			mdio {
+				pinctrl-0 = <&mdio_pins>;
+				pinctrl-names = "default";
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+			ethernet@70000 {
+				pinctrl-0 = <&ge0_rgmii_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii-id";
+			};
+			ethernet@74000 {
+				pinctrl-0 = <&ge1_rgmii_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii-id";
+			};
+
+			mvsdio@d4000 {
+				pinctrl-0 = <&sdio_pins3>;
+				pinctrl-names = "default";
+				status = "okay";
+				/*
+				 * No CD or WP GPIOs: SDIO interface used for
+				 * Wifi/Bluetooth chip
+				 */
+				 broken-cd;
+			};
+
+			usb@50000 {
+				status = "okay";
+			};
+
+			usb@51000 {
+				status = "okay";
+			};
+
+			i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+				pca9505: pca9505@25 {
+					compatible = "nxp,pca9505";
+					gpio-controller;
+					#gpio-cells = <2>;
+					reg = <0x25>;
+				};
+			};
+
+			nand@d0000 {
+				status = "okay";
+				num-cs = <1>;
+				marvell,nand-keep-config;
+				marvell,nand-enable-arbiter;
+				nand-on-flash-bbt;
+
+				partition@0 {
+					label = "U-Boot";
+					reg = <0 0x400000>;
+				};
+				partition@400000 {
+					label = "Linux";
+					reg = <0x400000 0x400000>;
+				};
+				partition@800000 {
+					label = "Filesystem";
+					reg = <0x800000 0x3f800000>;
+				};
+			};
+		};
+	};
+};
+
+&pinctrl {
+	smile_led_pins: smile-led-pins {
+		marvell,pins = "mpp63", "mpp64", "mpp47", "mpp59";
+		marvell,function = "gpio";
+	};
+};
diff --git a/arch/arm/boot/dts/dove-d3plug.dts b/arch/arm/boot/dts/dove-d3plug.dts
index 826026c28f9..a4c9963e126 100644
--- a/arch/arm/boot/dts/dove-d3plug.dts
+++ b/arch/arm/boot/dts/dove-d3plug.dts
@@ -61,6 +61,13 @@ usb_power: regulator@1 {
 &uart0 { status = "okay"; };
 &sata0 { status = "okay"; };
 &i2c0 { status = "okay"; };
+&mdio { status = "okay"; };
+&eth { status = "okay"; };
+
+&ethphy {
+	compatible = "marvell,88e1310";
+	reg = <1>;
+};
 
 /* Samsung M8G2F eMMC */
 &sdio0 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 7191ee6a1b8..34994ca8781 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -410,6 +410,16 @@ dmm@4e000000 {
 			ti,hwmods = "dmm";
 		};
 
+		bb2d: bb2d@59000000 {
+			compatible = "ti,dra7-bb2d", "vivante,gc";
+			reg = <0x59000000 0x0700>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+			ti,hwmods = "bb2d";
+			clocks = <&dpll_core_h24x2_ck>;
+			clock-names = "fclk";
+			status = "disabled";
+		};
+
 		target-module@40d01000 {
 			compatible = "ti,sysc-omap2", "ti,sysc";
 			reg = <0x40d01000 0x4>,
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 73d6a71da88..aabf994ec07 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -439,7 +439,7 @@ buck1_reg: BUCK1 {
 
 			buck2_reg: BUCK2 {
 				regulator-name = "vdd_arm";
-				regulator-min-microvolt = <900000>;
+				regulator-min-microvolt = <1100000>;
 				regulator-max-microvolt = <1350000>;
 				regulator-always-on;
 				regulator-boot-on;
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index c63f371ede8..546d9d4e8ca 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -55,18 +55,6 @@ memory@10000000 {
 		reg = <0x10000000 0x40000000>;
 	};
 
-	reg_usbh1_vbus: regulator-usbhubreset {
-		compatible = "regulator-fixed";
-		regulator-name = "usbh1_vbus";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		enable-active-high;
-		startup-delay-us = <2>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usbh1_hubreset>;
-		gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
-	};
-
 	reg_usb_otg_vbus: regulator-usbotgvbus {
 		compatible = "regulator-fixed";
 		regulator-name = "usb_otg_vbus";
@@ -214,12 +202,18 @@ &uart2 {
 };
 
 &usbh1 {
-	vbus-supply = <&reg_usbh1_vbus>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usbh1>;
 	dr_mode = "host";
 	disable-over-current;
 	status = "okay";
+
+	usb2415host: hub@1 {
+		compatible = "usb424,2513";
+		reg = <1>;
+		reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
+		reset-duration-us = <3000>;
+	};
 };
 
 &usbotg {
@@ -482,11 +476,6 @@ pinctrl_usbh1: usbh1grp {
 			MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0
 			/* usbh1_b OC */
 			MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0
-		>;
-	};
-
-	pinctrl_usbh1_hubreset: usbh1hubresetgrp {
-		fsl,pins = <
 			MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0
 		>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index 828dd20cd27..a6cf7c92548 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -5,6 +5,8 @@
  * Author: Fabio Estevam <fabio.estevam@freescale.com>
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	aliases {
 		backlight = &backlight;
@@ -62,17 +64,6 @@ regulators {
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		reg_usb_h1_vbus: regulator@0 {
-			compatible = "regulator-fixed";
-			reg = <0>;
-			regulator-name = "usb_h1_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			enable-active-high;
-			startup-delay-us = <2>; /* USB2415 requires a POR of 1 us minimum */
-			gpio = <&gpio7 12 0>;
-		};
-
 		reg_panel: regulator@1 {
 			compatible = "regulator-fixed";
 			reg = <1>;
@@ -93,6 +84,17 @@ sound {
 		mux-int-port = <1>;
 		mux-ext-port = <6>;
 	};
+
+	udoo_ard: udoo_ard_manager {
+		compatible = "udoo,imx6q-udoo-ard";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_udooard>;
+		bossac-clk-gpio   = <&gpio6 3 0>;
+		bossac-dat-gpio   = <&gpio5 18 0>;
+		bossac-erase-gpio = <&gpio4 21 0>;
+		bossac-reset-gpio = <&gpio1 0 0>;
+		status = "okay";
+	};
 };
 
 &fec {
@@ -205,7 +207,7 @@ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA	0x1b0b1
 
 		pinctrl_usbh: usbhgrp {
 			fsl,pins = <
-				MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x80000000
+				MX6QDL_PAD_GPIO_17__GPIO7_IO12	0x1b0b0
 				MX6QDL_PAD_NANDF_CS2__CCM_CLKO2 0x130b0
 			>;
 		};
@@ -218,6 +220,15 @@ MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x17059
 			>;
 		};
 
+		pinctrl_udooard: udooardgrp {
+			fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT0__GPIO4_IO21       0x80000000
+			MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03       0x80000000
+			MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18      0x80000000
+			MX6QDL_PAD_GPIO_0__GPIO1_IO00           0x80000000
+			>;
+		};
+
 		pinctrl_usdhc3: usdhc3grp {
 			fsl,pins = <
 				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17059
@@ -290,9 +301,16 @@ &uart4 {
 &usbh1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usbh>;
-	vbus-supply = <&reg_usb_h1_vbus>;
-	clocks = <&clks IMX6QDL_CLK_CKO>;
 	status = "okay";
+
+	usb2415: hub@1 {
+		compatible = "usb424,2514";
+		reg = <1>;
+
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
+		reset-duration-us = <3000>;
+	};
 };
 
 &usbotg {
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
index e781a45785e..68daf5570ef 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
@@ -6,6 +6,24 @@
 
 #include "imx6qdl-wandboard.dtsi"
 
+/ {
+	rfkill {
+		compatible = "wand,imx6qdl-wandboard-rfkill";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		bluetooth-on = <&gpio3 13 0>;
+		bluetooth-wake = <&gpio3 14 0>;
+		bluetooth-host-wake = <&gpio3 15 0>;
+
+		wifi-ref-on = <&gpio2 29 0>;
+		wifi-rst-n = <&gpio5 2 0>;
+		wifi-reg-on = <&gpio1 26 0>;
+		wifi-host-wake = <&gpio1 29 0>;
+		wifi-wake = <&gpio1 30 0>;
+	};
+};
+
 &iomuxc {
 	pinctrl-0 = <&pinctrl_hog>;
 
@@ -31,6 +49,5 @@ MX6QDL_PAD_EIM_DA15__GPIO3_IO15		0x80000000	/* BT_HOST_WAKE */
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	non-removable;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revc1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revc1.dtsi
index 3874e74703f..be5445d3fe9 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revc1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revc1.dtsi
@@ -6,6 +6,24 @@
 
 #include "imx6qdl-wandboard.dtsi"
 
+/ {
+	rfkill {
+		compatible = "wand,imx6qdl-wandboard-rfkill";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		bluetooth-on = <&gpio5 21 0>;
+		bluetooth-wake = <&gpio5 30 0>;
+		bluetooth-host-wake = <&gpio5 20 0>;
+
+		wifi-ref-on = <&gpio5 31 0>; /* Wifi Power Enable */
+		wifi-rst-n = <&gpio6 0 0>; /* WIFI_ON reset */
+		wifi-reg-on = <&gpio1 26 0>; /* WL_REG_ON */
+		wifi-host-wake = <&gpio1 29 0>; /* WL_HOST_WAKE */
+		wifi-wake = <&gpio1 30 0>; /* WL_WAKE */
+	};
+};
+
 &iomuxc {
 	pinctrl-0 = <&pinctrl_hog>;
 
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
index 93909796885..d595f4e2358 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
@@ -7,6 +7,26 @@
 #include "imx6qdl-wandboard.dtsi"
 
 / {
+	rfkill {
+		compatible = "wand,imx6qdl-wandboard-rfkill";
+		pinctrl-names = "default";
+		pinctrl-0 = <>;
+
+		bluetooth-on = <&gpio5 30 0>;		/* BT_RST_N */
+		bluetooth-wake = <&gpio5 21 0>;		/* BT_WAKE */
+		bluetooth-host-wake = <&gpio5 20 0>;	/* BT_HOST_WAKE */
+
+		wifi-ref-on = <&gpio6 0 0>;		/* WIFI_ON: M4: CSI0_DAT14 */
+		wifi-reg-on = <&gpio1 26 0>;		/* WL_REG_ON: W22: ENET_RXD1 */
+		wifi-host-wake = <&gpio1 29 0>;		/* WL_HOST_WAKE: W20: ENET_TXD1 */
+
+		//HACK: use unpopulated pins for the missing rfkill pins; don't want to fix the driver...
+		//GPIO6_IO17 - SD3_DAT7
+		//GPIO6_IO18 - SD3_DAT6
+		wifi-rst-n = <&gpio6 17 0>;	/* NOT POPULATED */
+		wifi-wake = <&gpio6 18 0>;	/* NOT POPULATED */
+	};
+
 	reg_eth_phy: regulator-eth-phy {
 		compatible = "regulator-fixed";
 		regulator-name = "ETH_PHY";
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index aa8e3d96427..ab6c606cd8e 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -986,6 +986,8 @@ usbotg: usb@2184000 {
 
 			usbh1: usb@2184200 {
 				compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+				#address-cells = <1>;
+				#size-cells = <0>;
 				reg = <0x02184200 0x200>;
 				interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6QDL_CLK_USBOH3>;
@@ -1000,6 +1002,8 @@ usbh1: usb@2184200 {
 
 			usbh2: usb@2184400 {
 				compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+				#address-cells = <1>;
+				#size-cells = <0>;
 				reg = <0x02184400 0x200>;
 				interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6QDL_CLK_USBOH3>;
@@ -1015,6 +1019,8 @@ usbh2: usb@2184400 {
 
 			usbh3: usb@2184600 {
 				compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+				#address-cells = <1>;
+				#size-cells = <0>;
 				reg = <0x02184600 0x200>;
 				interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6QDL_CLK_USBOH3>;
diff --git a/arch/arm/boot/dts/imx6ul-usbarmory.dts b/arch/arm/boot/dts/imx6ul-usbarmory.dts
new file mode 100644
index 00000000000..f26344965ae
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-usbarmory.dts
@@ -0,0 +1,255 @@
+/*
+ * USB armory Mk II device tree file
+ * https://github.com/inversepath/usbarmory
+ *
+ * Copyright (C) 2019, F-Secure Corporation
+ * Andrej Rosano <andrej.rosano@f-secure.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6ul.dtsi"
+
+/ {
+	model = "F-Secure USB armory Mk II";
+	compatible = "inversepath,imx6ul-usbarmory-mkII", "fsl,imx6ul";
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_led>;
+
+		led-white {
+			label = "LED_WHITE";
+			gpios = <&gpio4 21 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		led-blue {
+			label = "LED_BLUE";
+			gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_sd1_vmmc: sd1_regulator {
+			compatible = "regulator-fixed";
+			regulator-name = "VSD_3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+		};
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_bluetooth>;
+	uart-has-rtscts;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	non-removable;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX	0x1b0b1
+			MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX	0x1b0b1
+		>;
+	};
+
+	pinctrl_bluetooth: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX	0x1b0b0 /* BT_UART_TX  */
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX	0x1b0b0 /* BT_UART_RX  */
+			MX6UL_PAD_UART1_CTS_B__UART1_DCE_CTS	0x1b0b0 /* BT_UART_CTS */
+			MX6UL_PAD_GPIO1_IO07__UART1_DCE_RTS	0x130b0 /* BT_UART_RTS */
+			MX6UL_PAD_UART3_TX_DATA__GPIO1_IO24	0x1f020 /* BT_UART_DSR */
+			MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25	0x10020 /* BT_UART_DTR */
+			MX6UL_PAD_GPIO1_IO04__GPIO1_IO04	0x1f020 /* BT_SWDCLK   */
+			MX6UL_PAD_GPIO1_IO05__GPIO1_IO05	0x1f020 /* BT_SWDIO    */
+			MX6UL_PAD_GPIO1_IO09__GPIO1_IO09	0x1f020 /* BT_RESET    */
+			MX6UL_PAD_UART3_RTS_B__GPIO1_IO27	0x1f020 /* BT_SWITCH_1 */
+			MX6UL_PAD_UART3_CTS_B__GPIO1_IO26	0x1f020 /* BT_SWITCH_2 */
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO02__I2C1_SCL	0x4001b8b0
+			MX6UL_PAD_GPIO1_IO03__I2C1_SDA	0x4001b8b0
+		>;
+	};
+
+	pinctrl_led: ledgrp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA06__GPIO4_IO27	0x1f020
+			MX6UL_PAD_CSI_DATA07__GPIO4_IO28	0x1f020
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x1f019
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x1f019
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x1f019
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x1f019
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x1f019
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x1f019
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK		0x10071
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD		0x17059
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0	0x17059
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1	0x17059
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2	0x17059
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3	0x17059
+			MX6UL_PAD_NAND_DATA04__USDHC2_DATA4	0x17059
+			MX6UL_PAD_NAND_DATA05__USDHC2_DATA5	0x17059
+			MX6UL_PAD_NAND_DATA06__USDHC2_DATA6	0x17059
+			MX6UL_PAD_NAND_DATA07__USDHC2_DATA7	0x17059
+		>;
+	};
+};
+
+&aips2 {
+	crypto: crypto@2140000 {
+		compatible = "fsl,imx6ul-caam", "fsl,sec-v4.0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2140000 0x3c000>;
+		ranges = <0 0x2140000 0x3c000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clks IMX6UL_CLK_CAAM_IPG>, <&clks IMX6UL_CLK_CAAM_ACLK>,
+			 <&clks IMX6UL_CLK_CAAM_MEM>;
+		clock-names = "ipg", "aclk", "mem";
+
+		sec_jr0: jr@1000 {
+			compatible = "fsl,sec-v4.0-job-ring";
+			reg = <0x1000 0x1000>;
+			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		sec_jr1: jr@2000 {
+			compatible = "fsl,sec-v4.0-job-ring";
+			reg = <0x2000 0x1000>;
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+		};
+		sec_jr2: jr@3000 {
+			compatible = "fsl,sec-v4.0-job-ring";
+			reg = <0x3000 0x1000>;
+			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+};
+
+/ {
+	soc {
+		caam_sm: caam-sm@00100000 {
+			compatible = "fsl,imx7d-caam-sm", "fsl,imx6q-caam-sm";
+			reg = <0x00100000 0x3fff>;
+		};
+
+		irq_sec_vio: caam_secvio {
+			compatible = "fsl,imx7d-caam-secvio", "fsl,imx6q-caam-secvio";
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+
+	caam_keyblob: caam-keyblob {
+		compatible = "fsl,sec-v4.0-keyblob";
+		status = "okay";
+	};
+};
+
+&usbotg1 {
+	dr_mode = "peripheral";
+	disable-over-current;
+	tpl-support;
+	status = "okay";
+};
+
+&usbotg2 {
+	dr_mode = "host";
+	disable-over-current;
+	tpl-support;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6ull-usbarmory.dts b/arch/arm/boot/dts/imx6ull-usbarmory.dts
new file mode 100644
index 00000000000..2d758a80526
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ull-usbarmory.dts
@@ -0,0 +1,256 @@
+/*
+ * USB armory Mk II device tree file
+ * https://github.com/inversepath/usbarmory
+ *
+ * Copyright (C) 2019, F-Secure Corporation
+ * Andrej Rosano <andrej.rosano@f-secure.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6ul.dtsi"
+
+/ {
+	model = "F-Secure USB armory Mk II";
+	compatible = "inversepath,imx6ull-usbarmory-mkII", "fsl,imx6ull";
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_led>;
+
+		led-white {
+			label = "LED_WHITE";
+			gpios = <&gpio4 21 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		led-blue {
+			label = "LED_BLUE";
+			gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_sd1_vmmc: sd1_regulator {
+			compatible = "regulator-fixed";
+			regulator-name = "VSD_3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+		};
+	};
+};
+
+/* Delete CAAM node in AIPS-2 (i.MX6UL specific) */
+/delete-node/ &crypto;
+
+&cpu0 {
+	operating-points = <
+		/* kHz	uV */
+		900000	1275000
+		792000	1225000
+		528000	1175000
+		396000	1025000
+		198000	950000
+	>;
+	fsl,soc-operating-points = <
+		/* KHz	uV */
+		900000	1250000
+		792000	1175000
+		528000	1175000
+		396000	1175000
+		198000	1175000
+	>;
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_bluetooth>;
+	uart-has-rtscts;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	non-removable;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX	0x1b0b1
+			MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX	0x1b0b1
+		>;
+	};
+
+	pinctrl_bluetooth: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX	0x1b0b0 /* BT_UART_TX  */
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX	0x1b0b0 /* BT_UART_RX  */
+			MX6UL_PAD_UART1_CTS_B__UART1_DCE_CTS	0x1b0b0 /* BT_UART_CTS */
+			MX6UL_PAD_GPIO1_IO07__UART1_DCE_RTS	0x130b0 /* BT_UART_RTS */
+			MX6UL_PAD_UART3_TX_DATA__GPIO1_IO24	0x1f020 /* BT_UART_DSR */
+			MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25	0x10020 /* BT_UART_DTR */
+			MX6UL_PAD_GPIO1_IO04__GPIO1_IO04	0x1f020 /* BT_SWDCLK   */
+			MX6UL_PAD_GPIO1_IO05__GPIO1_IO05	0x1f020 /* BT_SWDIO    */
+			MX6UL_PAD_GPIO1_IO09__GPIO1_IO09	0x1f020 /* BT_RESET    */
+			MX6UL_PAD_UART3_RTS_B__GPIO1_IO27	0x1f020 /* BT_SWITCH_1 */
+			MX6UL_PAD_UART3_CTS_B__GPIO1_IO26	0x1f020 /* BT_SWITCH_2 */
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO02__I2C1_SCL	0x4001b8b0
+			MX6UL_PAD_GPIO1_IO03__I2C1_SDA	0x4001b8b0
+		>;
+	};
+
+	pinctrl_led: ledgrp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA06__GPIO4_IO27	0x1f020
+			MX6UL_PAD_CSI_DATA07__GPIO4_IO28	0x1f020
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x1f019
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x1f019
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x1f019
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x1f019
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x1f019
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x1f019
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK		0x10071
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD		0x17059
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0	0x17059
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1	0x17059
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2	0x17059
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3	0x17059
+			MX6UL_PAD_NAND_DATA04__USDHC2_DATA4	0x17059
+			MX6UL_PAD_NAND_DATA05__USDHC2_DATA5	0x17059
+			MX6UL_PAD_NAND_DATA06__USDHC2_DATA6	0x17059
+			MX6UL_PAD_NAND_DATA07__USDHC2_DATA7	0x17059
+		>;
+	};
+};
+
+/ {
+	soc {
+		aips3: aips-bus@02200000 {
+			compatible = "fsl,aips-bus", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x02200000 0x100000>;
+			ranges;
+
+			dcp: dcp@02280000 {
+				compatible = "fsl,imx6ull-dcp", "fsl,imx6sl-dcp", "fsl,imx28-dcp";
+				reg = <0x02280000 0x4000>;
+				interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6ULL_CLK_DCP_CLK>;
+				clock-names = "dcp";
+			};
+
+			rngb: rngb@02284000 {
+				compatible = "fsl,imx6sl-rng", "fsl,imx25-rngb", "fsl,imx-rng", "imx-rng";
+				reg = <0x02284000 0x4000>;
+				interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>;
+			};
+		};
+	};
+};
+
+&usbotg1 {
+	dr_mode = "peripheral";
+	disable-over-current;
+	tpl-support;
+	status = "okay";
+};
+
+&usbotg2 {
+	dr_mode = "host";
+	disable-over-current;
+	tpl-support;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts
index 70bea95c06d..a50edba80c3 100644
--- a/arch/arm/boot/dts/imx7d-pico-pi.dts
+++ b/arch/arm/boot/dts/imx7d-pico-pi.dts
@@ -8,6 +8,10 @@ / {
 	model = "TechNexion PICO-IMX7D Board and PI baseboard";
 	compatible = "technexion,imx7d-pico-pi", "fsl,imx7d";
 
+	chosen {
+		stdout-path = "serial4:115200n8";
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 125ed933ca7..f23aca63bd5 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -152,6 +152,7 @@ tv_connector_in: endpoint {
 	};
 
 	etb@5401b000 {
+		status = "disabled";
 		compatible = "arm,coresight-etb10", "arm,primecell";
 		reg = <0x5401b000 0x1000>;
 
@@ -167,6 +168,7 @@ etb_in: endpoint {
 	};
 
 	etm@54010000 {
+		status = "disabled";
 		compatible = "arm,coresight-etm3x", "arm,primecell";
 		reg = <0x54010000 0x1000>;
 
@@ -214,6 +216,25 @@ OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)	/* uart3_tx_irtx.uart3_tx_irtx
 		>;
 	};
 
+	spi3_pins: pinmux_spi3_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x2128, PIN_INPUT | MUX_MODE1)	/* sdmmc2_clk.mcspi3_clk gpio_130 */
+			OMAP3_CORE1_IOPAD(0x212a, PIN_OUTPUT | MUX_MODE1)	/* sdmmc2_cmd.mcspi3_simo gpio_131 */
+			OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat0.mcspi3_somi gpio_132 */
+			OMAP3_CORE1_IOPAD(0x2130, PIN_OUTPUT | MUX_MODE1)	/* sdmmc2_dat2.mcspi3_cs1 gpio_134 */
+			OMAP3_CORE1_IOPAD(0x2132, PIN_OUTPUT | MUX_MODE1)	/* sdmmc2_dat3.mcspi3_cs0 gpio_135 */
+		>;
+	};
+
+	spi4_pins: pinmux_spi4_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT | MUX_MODE1)	/* mcbsp1_clkr.mcspi4_clk gpio_156 */
+			OMAP3_CORE1_IOPAD(0x2160, PIN_OUTPUT | MUX_MODE1)	/* mcbsp1_dx.mcspi4_simo gpio_158 */
+			OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE1)	/* mcbsp1_dr.mcspi4_somi gpio_159 */
+			OMAP3_CORE1_IOPAD(0x2166, PIN_OUTPUT | MUX_MODE1)	/* mcbsp1_fsx.mcspi4_cs0 gpio_161 */
+		>;
+	};
+
 	hsusb2_pins: pinmux_hsusb2_pins {
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x21d4, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* mcspi1_cs3.hsusb2_data2 */
@@ -291,7 +312,7 @@ codec {
 		};
 
 		twl_power: power {
-			compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
+			compatible = "ti,twl4030-power-reset";
 			ti,use_poweroff;
 		};
 	};
@@ -322,6 +343,36 @@ &mmc3 {
 	status = "disabled";
 };
 
+&mcspi3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi3_pins>;
+	status = "okay";
+
+	spidev0: spi@0 {
+		compatible = "spidev";
+		reg = <0>;
+		spi-max-frequency = <48000000>;
+	};
+
+	spidev1: spi@1 {
+		compatible = "spidev";
+		reg = <1>;
+		spi-max-frequency = <48000000>;
+	};
+};
+
+&mcspi4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi4_pins>;
+	status = "okay";
+
+	spidev2: spi@0 {
+		compatible = "spidev";
+		reg = <0>;
+		spi-max-frequency = <48000000>;
+	};
+};
+
 &twl_gpio {
 	ti,use-leds;
 	/* pullups: BIT(1) */
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 4ed3f93f584..e306307561b 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -139,6 +139,7 @@ tv_connector_in: endpoint {
 	};
 
 	etb@540000000 {
+		status = "disabled";
 		compatible = "arm,coresight-etb10", "arm,primecell";
 		reg = <0x5401b000 0x1000>;
 
@@ -154,6 +155,7 @@ etb_in: endpoint {
 	};
 
 	etm@54010000 {
+		status = "disabled";
 		compatible = "arm,coresight-etm3x", "arm,primecell";
 		reg = <0x54010000 0x1000>;
 
@@ -272,9 +274,18 @@ twl_audio: audio {
 			codec {
 			};
 		};
+
+		twl_power: power {
+			compatible = "ti,twl4030-power-reset";
+			ti,use_poweroff;
+		};
 	};
 };
 
+&i2c2 {
+	clock-frequency = <400000>;
+};
+
 #include "twl4030.dtsi"
 #include "twl4030_omap3.dtsi"
 
diff --git a/arch/arm/boot/dts/omap4-panda-a4.dts b/arch/arm/boot/dts/omap4-panda-a4.dts
index 64083075dd5..065f3db3f29 100644
--- a/arch/arm/boot/dts/omap4-panda-a4.dts
+++ b/arch/arm/boot/dts/omap4-panda-a4.dts
@@ -7,6 +7,18 @@
 #include "omap443x.dtsi"
 #include "omap4-panda-common.dtsi"
 
+&emif1 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
+
+&emif2 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
+
 /* Pandaboard Rev A4+ have external pullups on SCL & SDA */
 &dss_hdmi_pins {
 	pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 55ea8b6189a..fe11be5cb29 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -481,16 +481,6 @@ wlcore: wlcore@2 {
 	};
 };
 
-&emif1 {
-	cs1-used;
-	device-handle = <&elpida_ECB240ABACN>;
-};
-
-&emif2 {
-	cs1-used;
-	device-handle = <&elpida_ECB240ABACN>;
-};
-
 &mcbsp1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mcbsp1_pins>;
diff --git a/arch/arm/boot/dts/omap4-panda-es-b3.dts b/arch/arm/boot/dts/omap4-panda-es-b3.dts
new file mode 100644
index 00000000000..9dd307b5260
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-panda-es-b3.dts
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+/dts-v1/;
+
+#include "omap4460.dtsi"
+#include "omap4-panda-common.dtsi"
+
+/ {
+	model = "TI OMAP4 PandaBoard-ES";
+	compatible = "ti,omap4-panda-es", "ti,omap4-panda", "ti,omap4460", "ti,omap4430", "ti,omap4";
+};
+
+/* Audio routing is different between PandaBoard4430 and PandaBoardES */
+&sound {
+	ti,model = "PandaBoardES";
+
+	/* Audio routing */
+	ti,audio-routing =
+		"Headset Stereophone", "HSOL",
+		"Headset Stereophone", "HSOR",
+		"Ext Spk", "HFL",
+		"Ext Spk", "HFR",
+		"Line Out", "AUXL",
+		"Line Out", "AUXR",
+		"AFML", "Line In",
+		"AFMR", "Line In";
+};
+
+/* PandaboardES has external pullups on SCL & SDA */
+&dss_hdmi_pins {
+	pinctrl-single,pins = <
+		OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)		/* hdmi_cec.hdmi_cec */
+		OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0)		/* hdmi_scl.hdmi_scl */
+		OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0)		/* hdmi_sda.hdmi_sda */
+		>;
+};
+
+&omap4_pmx_core {
+	led_gpio_pins: gpio_led_pmx {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x0f6, PIN_OUTPUT | MUX_MODE3)	/* gpio_110 */
+		>;
+	};
+
+	button_pins: pinmux_button_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
+		>;
+	};
+};
+
+&led_wkgpio_pins {
+	pinctrl-single,pins = <
+		OMAP4_IOPAD(0x05c, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk8 */
+	>;
+};
+
+&leds {
+	pinctrl-0 = <
+		&led_gpio_pins
+		&led_wkgpio_pins
+	>;
+
+	heartbeat {
+		gpios = <&gpio4 14 GPIO_ACTIVE_HIGH>;
+	};
+	mmc {
+		gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&gpio_keys {
+	buttonS2 {
+		gpios = <&gpio4 17 GPIO_ACTIVE_LOW>; /* gpio_113 */
+	};
+};
+
+&gpio1_target {
+	ti,no-reset-on-init;
+};
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
index 9dd307b5260..ea4dce2f3a6 100644
--- a/arch/arm/boot/dts/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/omap4-panda-es.dts
@@ -12,6 +12,18 @@ / {
 	compatible = "ti,omap4-panda-es", "ti,omap4-panda", "ti,omap4460", "ti,omap4430", "ti,omap4";
 };
 
+&emif1 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
+
+&emif2 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
+
 /* Audio routing is different between PandaBoard4430 and PandaBoardES */
 &sound {
 	ti,model = "PandaBoardES";
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index fb2f47717b4..2ca852eadb8 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -11,3 +11,15 @@ / {
 	model = "TI OMAP4 PandaBoard";
 	compatible = "ti,omap4-panda", "ti,omap4430", "ti,omap4";
 };
+
+&emif1 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
+
+&emif2 {
+	cs1-used;
+	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 91480ac1f32..ded6a24cd45 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -521,11 +521,13 @@ wlcore: wlcore@2 {
 &emif1 {
 	cs1-used;
 	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
 };
 
 &emif2 {
 	cs1-used;
 	device-handle = <&elpida_ECB240ABACN>;
+	status = "ok";
 };
 
 &keypad {
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 763bdea8c82..393765eb55d 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -263,6 +263,7 @@ emif1: emif@4c000000 {
 			hw-caps-read-idle-ctrl;
 			hw-caps-ll-interface;
 			hw-caps-temp-alert;
+			status = "disabled";
 		};
 
 		emif2: emif@4d000000 {
@@ -275,6 +276,7 @@ emif2: emif@4d000000 {
 			hw-caps-read-idle-ctrl;
 			hw-caps-ll-interface;
 			hw-caps-temp-alert;
+			status = "disabled";
 		};
 
 		aes1_target: target-module@4b501000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 8c8dee6ea46..4f52a11b913 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -78,6 +78,7 @@ leds {
 		blue {
 			label = "cubietruck:blue:usr";
 			gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
 		};
 
 		orange {
@@ -93,6 +94,7 @@ white {
 		green {
 			label = "cubietruck:green:usr";
 			gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc0";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 9d588cfaa5c..c0094e0c4e0 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -80,6 +80,11 @@ v2v1: regulator-v2v1 {
 		regulator-always-on;
 	};
 
+	clk32kg: regulator-clk32kg {
+		compatible = "ti,twl6030-clk32kg";
+		regulator-always-on;
+	};
+
 	twl_usb_comparator: usb-comparator {
 		compatible = "ti,twl6030-usb";
 		interrupts = <4>, <10>;
diff --git a/arch/arm/include/uapi/asm/setup.h b/arch/arm/include/uapi/asm/setup.h
index 25ceda63b28..83578c54975 100644
--- a/arch/arm/include/uapi/asm/setup.h
+++ b/arch/arm/include/uapi/asm/setup.h
@@ -144,6 +144,18 @@ struct tag_memclk {
 	__u32 fmemclk;
 };
 
+/* Marvell uboot parameters */
+#define ATAG_MV_UBOOT          0x41000403
+struct tag_mv_uboot {
+	__u32 uboot_version;
+	__u32 tclk;
+	__u32 sysclk;
+	__u32 isUsbHost;
+	__u8  macAddr[4][6];
+	__u16 mtu[4];
+	__u32 nand_ecc;
+};
+
 struct tag {
 	struct tag_header hdr;
 	union {
@@ -166,6 +178,11 @@ struct tag {
 		 * DC21285 specific
 		 */
 		struct tag_memclk	memclk;
+
+		/*
+		 * Marvell specific
+		 */
+		struct tag_mv_uboot	mv_uboot;
 	} u;
 };
 
diff --git a/arch/arm/mach-imx/devices/Kconfig b/arch/arm/mach-imx/devices/Kconfig
index fdca73d117e..95e0e8ee5da 100644
--- a/arch/arm/mach-imx/devices/Kconfig
+++ b/arch/arm/mach-imx/devices/Kconfig
@@ -69,3 +69,9 @@ config IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
 config IMX_HAVE_PLATFORM_SPI_IMX
 	bool
+
+config WAND_RFKILL
+	tristate "Wandboard RF Kill support"
+	depends on SOC_IMX6Q
+	default m
+	select RFKILL
diff --git a/arch/arm/mach-imx/devices/Makefile b/arch/arm/mach-imx/devices/Makefile
index e44758aaa11..c179a550092 100644
--- a/arch/arm/mach-imx/devices/Makefile
+++ b/arch/arm/mach-imx/devices/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_W1) += platform-mxc_w1.o
 obj-$(CONFIG_IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX) += platform-sdhci-esdhc-imx.o
 obj-$(CONFIG_IMX_HAVE_PLATFORM_SPI_IMX) +=  platform-spi_imx.o
 obj-$(CONFIG_IMX_HAVE_PLATFORM_MX2_EMMA) += platform-mx2-emma.o
+obj-$(CONFIG_WAND_RFKILL) += wand-rfkill.o
diff --git a/arch/arm/mach-imx/devices/wand-rfkill.c b/arch/arm/mach-imx/devices/wand-rfkill.c
new file mode 100644
index 00000000000..da7ef9f4268
--- /dev/null
+++ b/arch/arm/mach-imx/devices/wand-rfkill.c
@@ -0,0 +1,290 @@
+/*
+ * arch/arm/mach-imx/devices/wand-rfkill.c
+ *
+ * Copyright (C) 2013 Vladimir Ermakov <vooon341@gmail.com>
+ *
+ * based on net/rfkill/rfkill-gpio.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+
+struct wand_rfkill_data {
+	struct rfkill *rfkill_dev;
+	int shutdown_gpio;
+	const char *shutdown_name;
+};
+
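+/*
+ * rfkill core callback: drive the shutdown GPIO low when the radio is
+ * blocked and high when it is unblocked.
+ */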
+static int wand_rfkill_set_block(void *data, bool blocked)
+{
+	struct wand_rfkill_data *rfkill = data;
+
+	pr_debug("wandboard-rfkill: set block %d\n", blocked);
+
+	if (blocked) {
+		if (gpio_is_valid(rfkill->shutdown_gpio))
+			gpio_direction_output(rfkill->shutdown_gpio, 0);
+	} else {
+		if (gpio_is_valid(rfkill->shutdown_gpio))
+			gpio_direction_output(rfkill->shutdown_gpio, 1);
+	}
+
+	return 0;
+}
+
+static const struct rfkill_ops wand_rfkill_ops = {
+	.set_block = wand_rfkill_set_block,
+};
+
+static int wand_rfkill_wifi_probe(struct device *dev,
+		struct device_node *np,
+		struct wand_rfkill_data *rfkill)
+{
+	int ret;
+	int wl_ref_on, wl_rst_n, wl_reg_on, wl_wake, wl_host_wake;
+
+	wl_ref_on = of_get_named_gpio(np, "wifi-ref-on", 0);
+	wl_rst_n = of_get_named_gpio(np, "wifi-rst-n", 0);
+	wl_reg_on = of_get_named_gpio(np, "wifi-reg-on", 0);
+	wl_wake = of_get_named_gpio(np, "wifi-wake", 0);
+	wl_host_wake = of_get_named_gpio(np, "wifi-host-wake", 0);
+
+	if (!gpio_is_valid(wl_rst_n) || !gpio_is_valid(wl_ref_on) ||
+			!gpio_is_valid(wl_reg_on) || !gpio_is_valid(wl_wake) ||
+			!gpio_is_valid(wl_host_wake)) {
+
+		dev_err(dev, "incorrect wifi gpios (%d %d %d %d %d)\n",
+				wl_rst_n, wl_ref_on, wl_reg_on, wl_wake, wl_host_wake);
+		return -EINVAL;
+	}
+
+	dev_info(dev, "initialize wifi chip\n");
+
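+	/*
+	 * Power-up sequence: hold WL_RST_N low for >10ms before releasing it,
+	 * then enable the reference clock and regulator and assert the wake
+	 * line; the host-wake line stays an input.
+	 */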
+	gpio_request(wl_rst_n, "wl_rst_n");
+	gpio_direction_output(wl_rst_n, 0);
+	msleep(11);
+	gpio_set_value(wl_rst_n, 1);
+
+	gpio_request(wl_ref_on, "wl_ref_on");
+	gpio_direction_output(wl_ref_on, 1);
+
+	gpio_request(wl_reg_on, "wl_reg_on");
+	gpio_direction_output(wl_reg_on, 1);
+
+	gpio_request(wl_wake, "wl_wake");
+	gpio_direction_output(wl_wake, 1);
+
+	gpio_request(wl_host_wake, "wl_host_wake");
+	gpio_direction_input(wl_host_wake);
+
+	rfkill->shutdown_name = "wifi_shutdown";
+	rfkill->shutdown_gpio = wl_wake;
+
+	rfkill->rfkill_dev = rfkill_alloc("wifi-rfkill", dev, RFKILL_TYPE_WLAN,
+			&wand_rfkill_ops, rfkill);
+	if (!rfkill->rfkill_dev) {
+		ret = -ENOMEM;
+		goto wifi_fail_free_gpio;
+	}
+
+	ret = rfkill_register(rfkill->rfkill_dev);
+	if (ret < 0)
+		goto wifi_fail_unregister;
+
+	dev_info(dev, "wifi-rfkill registered.\n");
+
+	return 0;
+
+wifi_fail_unregister:
+	rfkill_destroy(rfkill->rfkill_dev);
+wifi_fail_free_gpio:
+	if (gpio_is_valid(wl_rst_n))     gpio_free(wl_rst_n);
+	if (gpio_is_valid(wl_ref_on))    gpio_free(wl_ref_on);
+	if (gpio_is_valid(wl_reg_on))    gpio_free(wl_reg_on);
+	if (gpio_is_valid(wl_wake))      gpio_free(wl_wake);
+	if (gpio_is_valid(wl_host_wake)) gpio_free(wl_host_wake);
+
+	return ret;
+}
+
+static int wand_rfkill_bt_probe(struct device *dev,
+		struct device_node *np,
+		struct wand_rfkill_data *rfkill)
+{
+	int ret;
+	int bt_on, bt_wake, bt_host_wake;
+
+	bt_on = of_get_named_gpio(np, "bluetooth-on", 0);
+	bt_wake = of_get_named_gpio(np, "bluetooth-wake", 0);
+	bt_host_wake = of_get_named_gpio(np, "bluetooth-host-wake", 0);
+
+	if (!gpio_is_valid(bt_on) || !gpio_is_valid(bt_wake) ||
+			!gpio_is_valid(bt_host_wake)) {
+
+		dev_err(dev, "incorrect bt gpios (%d %d %d)\n",
+				bt_on, bt_wake, bt_host_wake);
+		return -EINVAL;
+	}
+
+	dev_info(dev, "initialize bluetooth chip\n");
+
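+	/*
+	 * Power-up sequence: hold BT_ON low for >10ms before releasing it,
+	 * then assert the wake line; the host-wake line stays an input.
+	 */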
+	gpio_request(bt_on, "bt_on");
+	gpio_direction_output(bt_on, 0);
+	msleep(11);
+	gpio_set_value(bt_on, 1);
+
+	gpio_request(bt_wake, "bt_wake");
+	gpio_direction_output(bt_wake, 1);
+
+	gpio_request(bt_host_wake, "bt_host_wake");
+	gpio_direction_input(bt_host_wake);
+
+	rfkill->shutdown_name = "bluetooth_shutdown";
+	rfkill->shutdown_gpio = bt_wake;
+
+	rfkill->rfkill_dev = rfkill_alloc("bluetooth-rfkill", dev, RFKILL_TYPE_BLUETOOTH,
+			&wand_rfkill_ops, rfkill);
+	if (!rfkill->rfkill_dev) {
+		ret = -ENOMEM;
+		goto bt_fail_free_gpio;
+	}
+
+	ret = rfkill_register(rfkill->rfkill_dev);
+	if (ret < 0)
+		goto bt_fail_unregister;
+
+	dev_info(dev, "bluetooth-rfkill registered.\n");
+
+	return 0;
+
+bt_fail_unregister:
+	rfkill_destroy(rfkill->rfkill_dev);
+bt_fail_free_gpio:
+	if (gpio_is_valid(bt_on))        gpio_free(bt_on);
+	if (gpio_is_valid(bt_wake))      gpio_free(bt_wake);
+	if (gpio_is_valid(bt_host_wake)) gpio_free(bt_host_wake);
+
+	return ret;
+}
+
+static int wand_rfkill_probe(struct platform_device *pdev)
+{
+	struct wand_rfkill_data *rfkill;
+	struct pinctrl *pinctrl;
+	int ret;
+
+	dev_info(&pdev->dev, "Wandboard rfkill initialization\n");
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "no device tree node\n");
+		return -ENODEV;
+	}
+
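+	/* two entries: index 0 is wifi, index 1 is bluetooth */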
+	rfkill = kzalloc(sizeof(*rfkill) * 2, GFP_KERNEL);
+	if (!rfkill)
+		return -ENOMEM;
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		ret = PTR_ERR(pinctrl);
+		dev_err(&pdev->dev, "failed to get default pinctrl: %d\n", ret);
+		goto fail_free_rfkill;
+	}
+
+	/* setup WiFi */
+	ret = wand_rfkill_wifi_probe(&pdev->dev, pdev->dev.of_node, &rfkill[0]);
+	if (ret < 0)
+		goto fail_free_rfkill;
+
+	/* setup bluetooth */
+	ret = wand_rfkill_bt_probe(&pdev->dev, pdev->dev.of_node, &rfkill[1]);
+	if (ret < 0)
+		goto fail_unregister_wifi;
+
+	platform_set_drvdata(pdev, rfkill);
+
+	return 0;
+
+fail_unregister_wifi:
+	if (rfkill[0].rfkill_dev) {
+		rfkill_unregister(rfkill[0].rfkill_dev);
+		rfkill_destroy(rfkill[0].rfkill_dev);
+	}
+
+	/* TODO free gpio */
+
+fail_free_rfkill:
+	kfree(rfkill);
+
+	return ret;
+}
+
+static int wand_rfkill_remove(struct platform_device *pdev)
+{
+	struct wand_rfkill_data *rfkill = platform_get_drvdata(pdev);
+
+	dev_info(&pdev->dev, "Module unloading\n");
+
+	if (!rfkill)
+		return 0;
+
+	/* WiFi */
+	if (gpio_is_valid(rfkill[0].shutdown_gpio))
+		gpio_free(rfkill[0].shutdown_gpio);
+
+	rfkill_unregister(rfkill[0].rfkill_dev);
+	rfkill_destroy(rfkill[0].rfkill_dev);
+
+	/* Bt */
+	if (gpio_is_valid(rfkill[1].shutdown_gpio))
+		gpio_free(rfkill[1].shutdown_gpio);
+
+	rfkill_unregister(rfkill[1].rfkill_dev);
+	rfkill_destroy(rfkill[1].rfkill_dev);
+
+	kfree(rfkill);
+
+	return 0;
+}
+
+static const struct of_device_id wand_rfkill_match[] = {
+	{ .compatible = "wand,imx6q-wandboard-rfkill", },
+	{ .compatible = "wand,imx6dl-wandboard-rfkill", },
+	{ .compatible = "wand,imx6qdl-wandboard-rfkill", },
+	{}
+};
+
+static struct platform_driver wand_rfkill_driver = {
+	.driver = {
+		.name = "wandboard-rfkill",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(wand_rfkill_match),
+	},
+	.probe = wand_rfkill_probe,
+	.remove = wand_rfkill_remove
+};
+
+module_platform_driver(wand_rfkill_driver);
+
+MODULE_AUTHOR("Vladimir Ermakov <vooon341@gmail.com>");
+MODULE_DESCRIPTION("Wandboard rfkill driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d513f46195..7cb8456280b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -942,6 +942,8 @@ config SCHED_SMT
 	  MultiThreading at a cost of slightly increased overhead in some
 	  places. If unsure say N here.
 
+source "kernel/Kconfig.MuQSS"
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 310957b988e..5777021fb97 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -880,6 +880,8 @@ config SCHED_SMT
 	  when dealing with POWER5 cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+source "kernel/Kconfig.MuQSS"
+
 config PPC_DENORMALISATION
 	bool "PowerPC denormalisation exception handling"
 	depends on PPC_BOOK3S_64
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index f18d5067cd0..d0a06fdcc92 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -54,7 +54,9 @@ static struct timer_list spuloadavg_timer;
 /*
  * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
  */
+#ifndef CONFIG_SCHED_MUQSS
 #define NORMAL_PRIO		120
+#endif
 
 /*
  * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2d3f963fd6f..3ab85854565 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1006,6 +1006,22 @@ config NR_CPUS
 config SCHED_SMT
 	def_bool y if SMP
 
+config SMT_NICE
+	bool "SMT (Hyperthreading) aware nice priority and policy support"
+	depends on SCHED_MUQSS && SCHED_SMT
+	default y
+	---help---
+	  Enabling Hyperthreading on Intel CPUs decreases the effectiveness
+	  of the use of 'nice' levels and different scheduling policies
+	  (e.g. realtime) due to sharing of CPU power between hyperthreads.
+	  SMT nice support makes each logical CPU aware of what is running on
+	  its hyperthread siblings, maintaining appropriate distribution of
+	  CPU time according to nice levels and scheduling policies at the expense
+	  of slightly increased overhead.
+
+	  If unsure say Y here.
+
 config SCHED_MC
 	def_bool y
 	prompt "Multi-core scheduler support"
@@ -1036,6 +1052,8 @@ config SCHED_MC_PRIO
 
 	  If unsure say Y here.
 
+source "kernel/Kconfig.MuQSS"
+
 config UP_LATE_INIT
 	def_bool y
 	depends on !SMP && X86_LOCAL_APIC
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index bc3a497c029..88d0ba2d249 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -123,6 +123,7 @@ config MPENTIUMM
 config MPENTIUM4
 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
 	depends on X86_32
+	select X86_P6_NOP
 	---help---
 	  Select this for Intel Pentium 4 chips.  This includes the
 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
@@ -155,9 +156,8 @@ config MPENTIUM4
 		-Paxville
 		-Dempsey
 
-
 config MK6
-	bool "K6/K6-II/K6-III"
+	bool "AMD K6/K6-II/K6-III"
 	depends on X86_32
 	---help---
 	  Select this for an AMD K6-family processor.  Enables use of
@@ -165,7 +165,7 @@ config MK6
 	  flags to GCC.
 
 config MK7
-	bool "Athlon/Duron/K7"
+	bool "AMD Athlon/Duron/K7"
 	depends on X86_32
 	---help---
 	  Select this for an AMD Athlon K7-family processor.  Enables use of
@@ -173,12 +173,90 @@ config MK7
 	  flags to GCC.
 
 config MK8
-	bool "Opteron/Athlon64/Hammer/K8"
+	bool "AMD Opteron/Athlon64/Hammer/K8"
 	---help---
 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
 	  Enables use of some extended instructions, and passes appropriate
 	  optimization flags to GCC.
 
+config MK8SSE3
+	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
+	---help---
+	  Select this for AMD Opteron or Athlon64 Hammer-family processors
+	  with SSE3 support.
+	  Enables use of some extended instructions, and passes appropriate
+	  optimization flags to GCC.
+
+config MK10
+	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
+	---help---
+	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
+	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
+	  Enables use of some extended instructions, and passes appropriate
+	  optimization flags to GCC.
+
+config MBARCELONA
+	bool "AMD Barcelona"
+	---help---
+	  Select this for AMD Family 10h Barcelona processors.
+
+	  Enables -march=barcelona
+
+config MBOBCAT
+	bool "AMD Bobcat"
+	---help---
+	  Select this for AMD Family 14h Bobcat processors.
+
+	  Enables -march=btver1
+
+config MJAGUAR
+	bool "AMD Jaguar"
+	---help---
+	  Select this for AMD Family 16h Jaguar processors.
+
+	  Enables -march=btver2
+
+config MBULLDOZER
+	bool "AMD Bulldozer"
+	---help---
+	  Select this for AMD Family 15h Bulldozer processors.
+
+	  Enables -march=bdver1
+
+config MPILEDRIVER
+	bool "AMD Piledriver"
+	---help---
+	  Select this for AMD Family 15h Piledriver processors.
+
+	  Enables -march=bdver2
+
+config MSTEAMROLLER
+	bool "AMD Steamroller"
+	---help---
+	  Select this for AMD Family 15h Steamroller processors.
+
+	  Enables -march=bdver3
+
+config MEXCAVATOR
+	bool "AMD Excavator"
+	---help---
+	  Select this for AMD Family 15h Excavator processors.
+
+	  Enables -march=bdver4
+
+config MPCK
+	bool "AMD Zen"
+	---help---
+	  Select this for AMD Family 17h Zen processors.
+
+	  Enables -march=znver1
+
+config MPCK2
+	bool "AMD Zen 2"
+	---help---
+	  Select this for AMD Family 17h Zen 2 processors.
+
+	  Enables -march=znver2
+
 config MCRUSOE
 	bool "Crusoe"
 	depends on X86_32
@@ -260,6 +338,7 @@ config MVIAC7
 
 config MPSC
 	bool "Intel P4 / older Netburst based Xeon"
+	select X86_P6_NOP
 	depends on X86_64
 	---help---
 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
@@ -269,8 +348,19 @@ config MPSC
 	  using the cpu family field
 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
 
+config MATOM
+	bool "Intel Atom"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+	  in-order pipelining architecture and thus can benefit from
+	  accordingly optimized code. Use a recent GCC with specific Atom
+	  support in order to fully benefit from selecting this option.
+
 config MCORE2
-	bool "Core 2/newer Xeon"
+	bool "Intel Core 2"
+	select X86_P6_NOP
 	---help---
 
 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
@@ -278,14 +368,151 @@ config MCORE2
 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
 	  (not a typo)
 
-config MATOM
-	bool "Intel Atom"
+	  Enables -march=core2
+
+config MNEHALEM
+	bool "Intel Nehalem"
+	select X86_P6_NOP
 	---help---
 
-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
-	  in-order pipelining architecture and thus can benefit from
-	  accordingly optimized code. Use a recent GCC with specific Atom
-	  support in order to fully benefit from selecting this option.
+	  Select this for 1st Gen Core processors in the Nehalem family.
+
+	  Enables -march=nehalem
+
+config MWESTMERE
+	bool "Intel Westmere"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for the Intel Westmere formerly Nehalem-C family.
+
+	  Enables -march=westmere
+
+config MSILVERMONT
+	bool "Intel Silvermont"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for the Intel Silvermont platform.
+
+	  Enables -march=silvermont
+
+config MGOLDMONT
+	bool "Intel Goldmont"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
+
+	  Enables -march=goldmont
+
+config MGOLDMONTPLUS
+	bool "Intel Goldmont Plus"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for the Intel Goldmont Plus platform including Gemini Lake.
+
+	  Enables -march=goldmont-plus
+
+config MSANDYBRIDGE
+	bool "Intel Sandy Bridge"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
+
+	  Enables -march=sandybridge
+
+config MIVYBRIDGE
+	bool "Intel Ivy Bridge"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
+
+	  Enables -march=ivybridge
+
+config MHASWELL
+	bool "Intel Haswell"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 4th Gen Core processors in the Haswell family.
+
+	  Enables -march=haswell
+
+config MBROADWELL
+	bool "Intel Broadwell"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 5th Gen Core processors in the Broadwell family.
+
+	  Enables -march=broadwell
+
+config MSKYLAKE
+	bool "Intel Skylake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 6th Gen Core processors in the Skylake family.
+
+	  Enables -march=skylake
+
+config MSKYLAKEX
+	bool "Intel Skylake X"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 6th Gen Core processors in the Skylake X family.
+
+	  Enables -march=skylake-avx512
+
+config MCANNONLAKE
+	bool "Intel Cannon Lake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 8th Gen Core processors in the Cannon Lake family.
+
+	  Enables -march=cannonlake
+
+config MICELAKE
+	bool "Intel Ice Lake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for 10th Gen Core processors in the Ice Lake family.
+
+	  Enables -march=icelake-client
+
+config MCASCADELAKE
+	bool "Intel Cascade Lake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for Xeon processors in the Cascade Lake family.
+
+	  Enables -march=cascadelake
+
+config MCOOPERLAKE
+	bool "Intel Cooper Lake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for Xeon processors in the Cooper Lake family.
+
+	  Enables -march=cooperlake
+
+config MTIGERLAKE
+	bool "Intel Tiger Lake"
+	select X86_P6_NOP
+	---help---
+
+	  Select this for third-generation 10 nm process processors in the Tiger Lake family.
+
+	  Enables -march=tigerlake
 
 config GENERIC_CPU
 	bool "Generic-x86-64"
@@ -294,6 +521,19 @@ config GENERIC_CPU
 	  Generic x86-64 CPU.
 	  Run equally well on all x86-64 CPUs.
 
+config MNATIVE
+	bool "Native optimizations autodetected by GCC"
+	---help---
+
+	  GCC 4.2 and above support -march=native, which automatically detects
+	  the optimum settings to use based on your processor. -march=native
+	  also detects and applies additional settings beyond -march specific
+	  to your CPU (e.g. -msse4). Unless you have a specific reason not to
+	  (e.g. distcc cross-compiling), you should probably be using
+	  -march=native rather than anything listed below.
+
+	  Enables -march=native
+
 endchoice
 
 config X86_GENERIC
@@ -318,7 +558,7 @@ config X86_INTERNODE_CACHE_SHIFT
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MPCK || MPCK2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
 	default "4" if MELAN || M486SX || M486 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
@@ -336,35 +576,36 @@ config X86_ALIGNMENT_16
 
 config X86_INTEL_USERCOPY
 	def_bool y
-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
 
 config X86_USE_PPRO_CHECKSUM
 	def_bool y
-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MATOM || MNATIVE
 
 config X86_USE_3DNOW
 	def_bool y
 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
 
-#
-# P6_NOPs are a relatively minor optimization that require a family >=
-# 6 processor, except that it is broken on certain VIA chips.
-# Furthermore, AMD chips prefer a totally different sequence of NOPs
-# (which work on all CPUs).  In addition, it looks like Virtual PC
-# does not understand them.
-#
-# As a result, disallow these if we're not compiling for X86_64 (these
-# NOPs do work on all x86-64 capable chips); the list of processors in
-# the right-hand clause are the cores that benefit from this optimization.
-#
 config X86_P6_NOP
-	def_bool y
-	depends on X86_64
-	depends on (MCORE2 || MPENTIUM4 || MPSC)
+	default n
+	bool "Support for P6_NOPs on Intel chips"
+	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
+	---help---
+	  P6_NOPs are a relatively minor optimization that requires a family >=
+	  6 processor, except that it is broken on certain VIA chips.
+	  Furthermore, AMD chips prefer a totally different sequence of NOPs
+	  (which work on all CPUs).  In addition, it looks like Virtual PC
+	  does not understand them.
+
+	  These NOPs do work on all x86-64 capable chips; the processors listed
+	  in the depends clause above are the cores that benefit from this
+	  optimization.
+
+	  Say Y if you have an Intel CPU newer than Pentium Pro, N otherwise.
 
 config X86_TSC
 	def_bool y
-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM) || X86_64
 
 config X86_CMPXCHG64
 	def_bool y
@@ -374,7 +615,7 @@ config X86_CMPXCHG64
 # generates cmov.
 config X86_CMOV
 	def_bool y
-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MPCK || MPCK2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
 
 config X86_MINIMUM_CPU_FAMILY
 	int
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b65ec63c7db..6330bc91f3d 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -119,13 +119,57 @@ else
 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
 
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
+        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
+        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
+        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
+        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
+        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
+        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
+        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
+        cflags-$(CONFIG_MPCK) += $(call cc-option,-march=znver1)
+        cflags-$(CONFIG_MPCK2) += $(call cc-option,-march=znver2)
         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
 
         cflags-$(CONFIG_MCORE2) += \
-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
+        cflags-$(CONFIG_MNEHALEM) += \
+                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
+        cflags-$(CONFIG_MWESTMERE) += \
+                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
+        cflags-$(CONFIG_MSILVERMONT) += \
+                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
+        cflags-$(CONFIG_MGOLDMONT) += \
+                $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
+        cflags-$(CONFIG_MGOLDMONTPLUS) += \
+                $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
+        cflags-$(CONFIG_MSANDYBRIDGE) += \
+                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
+        cflags-$(CONFIG_MIVYBRIDGE) += \
+                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
+        cflags-$(CONFIG_MHASWELL) += \
+                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
+        cflags-$(CONFIG_MBROADWELL) += \
+                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
+        cflags-$(CONFIG_MSKYLAKE) += \
+                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
+        cflags-$(CONFIG_MSKYLAKEX) += \
+                $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
+        cflags-$(CONFIG_MCANNONLAKE) += \
+                $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
+        cflags-$(CONFIG_MICELAKE) += \
+                $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
+        cflags-$(CONFIG_MCASCADELAKE) += \
+                $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
+        cflags-$(CONFIG_MCOOPERLAKE) += \
+                $(call cc-option,-march=cooperlake,$(call cc-option,-mtune=cooperlake))
+        cflags-$(CONFIG_MTIGERLAKE) += \
+                $(call cc-option,-march=tigerlake,$(call cc-option,-mtune=tigerlake))
+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
+                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
         KBUILD_CFLAGS += $(cflags-y)
 
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index cd305675988..0e8fd778a04 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
 # Please note, that patches that add -march=athlon-xp and friends are pointless.
 # They make zero difference whatsosever to performance at this time.
 cflags-$(CONFIG_MK7)		+= -march=athlon
+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
 cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
+cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
+cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
+cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
+cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
+cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
+cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
+cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
+cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
+cflags-$(CONFIG_MPCK)	+= $(call cc-option,-march=znver1,-march=athlon)
+cflags-$(CONFIG_MPCK2)	+= $(call cc-option,-march=znver2,-march=athlon)
 cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
 cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
 cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
@@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-fu
 cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)		+= -march=i686
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
+cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
+cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
+cflags-$(CONFIG_MGOLDMONT)	+= -march=i686 $(call tune,goldmont)
+cflags-$(CONFIG_MGOLDMONTPLUS)	+= -march=i686 $(call tune,goldmont-plus)
+cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
+cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
+cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
+cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
+cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
+cflags-$(CONFIG_MSKYLAKEX)	+= -march=i686 $(call tune,skylake-avx512)
+cflags-$(CONFIG_MCANNONLAKE)	+= -march=i686 $(call tune,cannonlake)
+cflags-$(CONFIG_MICELAKE)	+= -march=i686 $(call tune,icelake-client)
+cflags-$(CONFIG_MCASCADELAKE)	+= -march=i686 $(call tune,cascadelake)
+cflags-$(CONFIG_MCOOPERLAKE)	+= -march=i686 $(call tune,cooperlake)
+cflags-$(CONFIG_MTIGERLAKE)	+= -march=i686 $(call tune,tigerlake)
+cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
+	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
 
 # AMD Elan support
 cflags-$(CONFIG_MELAN)		+= -march=i486
diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
index 75884d2cdec..2cffe0cdf29 100644
--- a/arch/x86/include/asm/vermagic.h
+++ b/arch/x86/include/asm/vermagic.h
@@ -17,6 +17,40 @@
 #define MODULE_PROC_FAMILY "586MMX "
 #elif defined CONFIG_MCORE2
 #define MODULE_PROC_FAMILY "CORE2 "
+#elif defined CONFIG_MNATIVE
+#define MODULE_PROC_FAMILY "NATIVE "
+#elif defined CONFIG_MNEHALEM
+#define MODULE_PROC_FAMILY "NEHALEM "
+#elif defined CONFIG_MWESTMERE
+#define MODULE_PROC_FAMILY "WESTMERE "
+#elif defined CONFIG_MSILVERMONT
+#define MODULE_PROC_FAMILY "SILVERMONT "
+#elif defined CONFIG_MGOLDMONT
+#define MODULE_PROC_FAMILY "GOLDMONT "
+#elif defined CONFIG_MGOLDMONTPLUS
+#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
+#elif defined CONFIG_MSANDYBRIDGE
+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
+#elif defined CONFIG_MIVYBRIDGE
+#define MODULE_PROC_FAMILY "IVYBRIDGE "
+#elif defined CONFIG_MHASWELL
+#define MODULE_PROC_FAMILY "HASWELL "
+#elif defined CONFIG_MBROADWELL
+#define MODULE_PROC_FAMILY "BROADWELL "
+#elif defined CONFIG_MSKYLAKE
+#define MODULE_PROC_FAMILY "SKYLAKE "
+#elif defined CONFIG_MSKYLAKEX
+#define MODULE_PROC_FAMILY "SKYLAKEX "
+#elif defined CONFIG_MCANNONLAKE
+#define MODULE_PROC_FAMILY "CANNONLAKE "
+#elif defined CONFIG_MICELAKE
+#define MODULE_PROC_FAMILY "ICELAKE "
+#elif defined CONFIG_MCASCADELAKE
+#define MODULE_PROC_FAMILY "CASCADELAKE "
+#elif defined CONFIG_MCOOPERLAKE
+#define MODULE_PROC_FAMILY "COOPERLAKE "
+#elif defined CONFIG_MTIGERLAKE
+#define MODULE_PROC_FAMILY "TIGERLAKE "
 #elif defined CONFIG_MATOM
 #define MODULE_PROC_FAMILY "ATOM "
 #elif defined CONFIG_M686
@@ -35,6 +69,28 @@
 #define MODULE_PROC_FAMILY "K7 "
 #elif defined CONFIG_MK8
 #define MODULE_PROC_FAMILY "K8 "
+#elif defined CONFIG_MK8SSE3
+#define MODULE_PROC_FAMILY "K8SSE3 "
+#elif defined CONFIG_MK10
+#define MODULE_PROC_FAMILY "K10 "
+#elif defined CONFIG_MBARCELONA
+#define MODULE_PROC_FAMILY "BARCELONA "
+#elif defined CONFIG_MBOBCAT
+#define MODULE_PROC_FAMILY "BOBCAT "
+#elif defined CONFIG_MBULLDOZER
+#define MODULE_PROC_FAMILY "BULLDOZER "
+#elif defined CONFIG_MPILEDRIVER
+#define MODULE_PROC_FAMILY "PILEDRIVER "
+#elif defined CONFIG_MSTEAMROLLER
+#define MODULE_PROC_FAMILY "STEAMROLLER "
+#elif defined CONFIG_MJAGUAR
+#define MODULE_PROC_FAMILY "JAGUAR "
+#elif defined CONFIG_MEXCAVATOR
+#define MODULE_PROC_FAMILY "EXCAVATOR "
+#elif defined CONFIG_MPCK
+#define MODULE_PROC_FAMILY "PCK "
+#elif defined CONFIG_MPCK2
+#define MODULE_PROC_FAMILY "PCK2 "
 #elif defined CONFIG_MELAN
 #define MODULE_PROC_FAMILY "ELAN "
 #elif defined CONFIG_MCRUSOE
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 7df14133adc..f86f3d97197 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -9,6 +9,15 @@ config MQ_IOSCHED_DEADLINE
 	---help---
 	  MQ version of the deadline IO scheduler.
 
+config MQ_IOSCHED_DEADLINE_NODEFAULT
+	bool "Rename mq-deadline scheduler to mq-deadline-nodefault"
+	depends on MQ_IOSCHED_DEADLINE
+	default n
+	---help---
+	  This renames the mq-deadline scheduler to "mq-deadline-nodefault" and
+	  also drops its alias of "deadline". This can prevent existing
+	  userspace from forcing this scheduler over the kernel's choice.
+
 config MQ_IOSCHED_KYBER
 	tristate "Kyber I/O scheduler"
 	default y
diff --git a/block/elevator.c b/block/elevator.c
index 4eab3d70e88..0a61a787c71 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -628,10 +628,18 @@ static inline bool elv_support_iosched(struct request_queue *q)
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
+#ifndef CONFIG_PCK_INTERACTIVE
 	if (q->nr_hw_queues != 1)
 		return NULL;
+#endif
 
+#if defined(CONFIG_PCK_INTERACTIVE) && defined(CONFIG_IOSCHED_BFQ)
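+	/* with PCK_INTERACTIVE, prefer BFQ as the default whenever it is built in */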
+	return elevator_get(q, "bfq", false);
+#elif defined(CONFIG_MQ_IOSCHED_DEADLINE_NODEFAULT)
+	return elevator_get(q, "mq-deadline-nodefault", false);
+#else
 	return elevator_get(q, "mq-deadline", false);
+#endif
 }
 
 /*
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index b490f47fd55..fb161305c3b 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -792,8 +792,12 @@ static struct elevator_type mq_deadline = {
 	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
 #endif
 	.elevator_attrs = deadline_attrs,
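+	/* optionally rename the scheduler and drop the "deadline" alias */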
+#ifdef CONFIG_MQ_IOSCHED_DEADLINE_NODEFAULT
+	.elevator_name = "mq-deadline-nodefault",
+#else
 	.elevator_name = "mq-deadline",
 	.elevator_alias = "deadline",
+#endif
 	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
 	.elevator_owner = THIS_MODULE,
 };
diff --git a/drivers/Makefile b/drivers/Makefile
index c0cd1b9075e..a32a4651df2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -56,18 +56,14 @@ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
 obj-y				+= tty/
 obj-y				+= char/
 
+# put mmc early as many modern devices use eMMC/SD cards as rootfs storage
+obj-y				+= mmc/
+
 # iommu/ comes before gpu as gpu are using iommu controllers
 obj-y				+= iommu/
 
-# gpu/ comes after char for AGP vs DRM startup and after iommu
-obj-y				+= gpu/
-
 obj-$(CONFIG_CONNECTOR)		+= connector/
 
-# i810fb and intelfb depend on char/agp/
-obj-$(CONFIG_FB_I810)           += video/fbdev/i810/
-obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
-
 obj-$(CONFIG_PARPORT)		+= parport/
 obj-$(CONFIG_NVM)		+= lightnvm/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
@@ -80,6 +76,14 @@ obj-$(CONFIG_IDE)		+= ide/
 obj-y				+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
+obj-y				+= gpu/
+
+# i810fb and intelfb depend on char/agp/
+obj-$(CONFIG_FB_I810)           += video/fbdev/i810/
+obj-$(CONFIG_FB_INTEL)          += video/fbdev/intelfb/
+
 obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
@@ -128,7 +132,6 @@ obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_PM_OPP)		+= opp/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
-obj-y				+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-$(CONFIG_NEW_LEDS)		+= leds/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 418bb462125..3c644124410 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -753,6 +753,24 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	return error;
 }
 
+/*
+ * for AUFS
+ * no get/put for file.
+ */
+struct file *loop_backing_file(struct super_block *sb)
+{
+	struct file *ret;
+	struct loop_device *l;
+
+	ret = NULL;
+	if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
+		l = sb->s_bdev->bd_disk->private_data;
+		ret = l->lo_backing_file;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(loop_backing_file);
+
 /* loop sysfs attributes */
 
 static ssize_t loop_attr_show(struct device *dev, char *page,
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 4c297f69171..5bc4f1be261 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
 			if (swim_readbit(base, MOTOR_ON))
 				break;
 			current->state = TASK_INTERRUPTIBLE;
-			schedule_timeout(1);
+			schedule_min_hrtimeout();
 		}
 	} else if (action == OFF) {
 		swim_action(base, MOTOR_OFF);
@@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
 		if (!swim_readbit(base, DISK_IN))
 			break;
 		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 	}
 	swim_select(base, RELAX);
 }
@@ -371,7 +371,7 @@ static inline int swim_step(struct swim __iomem *base)
 	for (wait = 0; wait < HZ; wait++) {
 
 		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 
 		swim_select(base, RELAX);
 		if (!swim_readbit(base, STEP))
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9afd220cd82..265a2dd089c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3544,7 +3544,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
 	/* Current message first, to preserve order */
 	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
 		/* Wait for the message to clear out. */
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 	}
 
 	/* No need for locks, the interface is down. */
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 2704470e021..49504b7f3aa 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1295,7 +1295,7 @@ static void shutdown_ssif(void *send_info)
 
 	/* make sure the driver is not looking for flags any more. */
 	while (ssif_info->ssif_state != SSIF_NORMAL)
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 
 	ssif_info->stopping = true;
 	del_timer_sync(&ssif_info->watch_timer);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 82a4d37ddec..d5ac94d6e46 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,10 +18,22 @@
 #include "cpufreq_ondemand.h"
 
 /* On-demand governor macros */
+#if defined(CONFIG_PCK_INTERACTIVE) && defined(CONFIG_SCHED_MUQSS)
+/*
+ * MuQSS is less likely to keep a single threaded task on the same core, so
+ * have on-demand run at max speed from a much lower core utilization.
+ */
+#define DEF_FREQUENCY_UP_THRESHOLD		(40)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(45)
+#define DEF_SAMPLING_DOWN_FACTOR		(5)
+#elif defined(CONFIG_PCK_INTERACTIVE)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
+#define DEF_SAMPLING_DOWN_FACTOR		(5)
+#else
+#define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
+#endif
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
-#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
 #define MIN_FREQUENCY_UP_THRESHOLD		(1)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d3429b2058..8bf9e0adf26 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2824,6 +2824,8 @@ static int __init intel_pstate_setup(char *str)
 		pr_info("HWP disabled\n");
 		no_hwp = 1;
 	}
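+	/* "enable" clears no_load (set by "intel_pstate=disable") */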
+	if (!strcmp(str, "enable"))
+		no_load = 0;
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1b96169d84f..49029a422a0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -69,6 +69,20 @@ config GPIO_SYSFS
 	  ioctl() operations instead. The character device is always
 	  available.
 
+config GPIO_OF_HELPER
+	bool "GPIO OF helper device (EXPERIMENTAL)"
+	depends on OF_GPIO
+	help
+	  Say Y here to add a GPIO OF helper driver.
+
+	  Allows you to specify, via the device tree, a GPIO helper
+	  that provides simple export of GPIO functionality to
+	  user-space.
+
+	  Features include value set/get, direction control,
+	  interrupt/value change poll support, event counting
+	  and others.
+
 config GPIO_GENERIC
 	depends on HAS_IOMEM # Only for IOMEM drivers
 	tristate
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2cfc21a97f..4000a559990 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIOLIB)		+= gpiolib-devprop.o
 obj-$(CONFIG_OF_GPIO)		+= gpiolib-of.o
 obj-$(CONFIG_GPIO_SYSFS)	+= gpiolib-sysfs.o
 obj-$(CONFIG_GPIO_ACPI)		+= gpiolib-acpi.o
+obj-$(CONFIG_GPIO_OF_HELPER)	+= gpio-of-helper.o
 
 # Device drivers. Generally keep list sorted alphabetically
 obj-$(CONFIG_GPIO_GENERIC)	+= gpio-generic.o
diff --git a/drivers/gpio/gpio-of-helper.c b/drivers/gpio/gpio-of-helper.c
new file mode 100644
index 00000000000..83f362f7da5
--- /dev/null
+++ b/drivers/gpio/gpio-of-helper.c
@@ -0,0 +1,435 @@
+/*
+ * GPIO OF based helper
+ *
+ * A simple DT based driver to provide access to GPIO functionality
+ * to user-space via sysfs.
+ *
+ * Copyright (C) 2013 Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/idr.h>
+
+/* fwd decl. */
+struct gpio_of_helper_info;
+
+enum gpio_type {
+	GPIO_TYPE_INPUT = 0,
+	GPIO_TYPE_OUTPUT = 1,
+};
+
+struct gpio_of_entry {
+	int id;
+	struct gpio_of_helper_info *info;
+	struct device_node *node;
+	enum gpio_type type;
+	int gpio;
+	int irq;
+	const char *name;
+	atomic64_t counter;
+	unsigned int count_flags;
+#define COUNT_RISING_EDGE	(1 << 0)
+#define COUNT_FALLING_EDGE	(1 << 1)
+};
+
+struct gpio_of_helper_info {
+	struct platform_device *pdev;
+	struct idr idr;
+};
+
+static const struct of_device_id gpio_of_helper_of_match[] = {
+	{
+		.compatible = "gpio-of-helper",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, gpio_of_helper_of_match);
+
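+/*
+ * "status" sysfs attribute: one line per managed GPIO giving its id, name,
+ * GPIO number, direction and (for inputs) the edge counter value.
+ */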
+static ssize_t gpio_of_helper_show_status(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct gpio_of_helper_info *info = platform_get_drvdata(pdev);
+	struct gpio_of_entry *entry;
+	char *p, *e;
+	int id, n;
+
+	p = buf;
+	e = p + PAGE_SIZE;
+	n = 0;
+	idr_for_each_entry(&info->idr, entry, id) {
+		switch (entry->type) {
+		case GPIO_TYPE_INPUT:
+			n = snprintf(p, e - p, "%2d %-24s %3d %-3s %llu\n",
+				entry->id, entry->name, entry->gpio, "IN",
+				(unsigned long long)
+					atomic64_read(&entry->counter));
+			break;
+		case GPIO_TYPE_OUTPUT:
+			n = snprintf(p, e - p, "%2d %-24s %3d %-3s\n",
+				entry->id, entry->name, entry->gpio, "OUT");
+			break;
+		}
+		p += n;
+	}
+
+	return p - buf;
+}
+
+static DEVICE_ATTR(status, S_IRUGO,
+		gpio_of_helper_show_status, NULL);
+
+static irqreturn_t gpio_of_helper_handler(int irq, void *ptr)
+{
+	struct gpio_of_entry *entry = ptr;
+
+	/* caution - low speed interfaces only! */
+	atomic64_inc(&entry->counter);
+
+	return IRQ_HANDLED;
+}
+
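+/*
+ * Parse one child node: determine input/output type, request the GPIO with
+ * flags derived from the DT properties, optionally install an edge-counting
+ * interrupt handler, and add the entry to the device IDR.
+ */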
+static struct gpio_of_entry *
+gpio_of_entry_create(struct gpio_of_helper_info *info,
+		struct device_node *node)
+{
+	struct platform_device *pdev = info->pdev;
+	struct device *dev = &pdev->dev;
+	struct gpio_of_entry *entry;
+	int err, gpio, irq;
+	unsigned int req_flags, count_flags, irq_flags;
+	enum gpio_type type;
+	enum of_gpio_flags gpio_flags;
+	const char *name;
+
+	/* get the type of the node first */
+	if (of_property_read_bool(node, "input"))
+		type = GPIO_TYPE_INPUT;
+	else if (of_property_read_bool(node, "output")
+			|| of_property_read_bool(node, "init-low")
+			|| of_property_read_bool(node, "init-high"))
+		type = GPIO_TYPE_OUTPUT;
+	else {
+		dev_err(dev, "Not a valid GPIO node type\n");
+		err = -EINVAL;
+		goto err_bad_node;
+	}
+
+	/* get the name */
+	if (of_property_read_string(node, "line-name", &name))
+		if (of_property_read_string(node, "gpio-name", &name))
+			name = node->name;
+
+	err = of_get_named_gpio_flags(node, "gpio", 0, &gpio_flags);
+	if (IS_ERR_VALUE(err)) {
+		dev_err(dev, "Failed to get gpio property of '%s'\n", name);
+		goto err_bad_node;
+	}
+	gpio = err;
+
+	req_flags = 0;
+	count_flags = 0;
+
+	/* set the request flags */
+	switch (type) {
+		case GPIO_TYPE_INPUT:
+			req_flags = GPIOF_DIR_IN | GPIOF_EXPORT;
+			if (of_property_read_bool(node, "count-falling-edge"))
+				count_flags |= COUNT_FALLING_EDGE;
+			if (of_property_read_bool(node, "count-rising-edge"))
+				count_flags |= COUNT_RISING_EDGE;
+			break;
+		case GPIO_TYPE_OUTPUT:
+			req_flags = GPIOF_DIR_OUT | GPIOF_EXPORT;
+			if (of_property_read_bool(node, "init-high"))
+				req_flags |= GPIOF_OUT_INIT_HIGH;
+			else if (of_property_read_bool(node, "init-low"))
+				req_flags |= GPIOF_OUT_INIT_LOW;
+			break;
+	}
+	if (of_property_read_bool(node, "dir-changeable"))
+		req_flags |= GPIOF_EXPORT_CHANGEABLE;
+	if (gpio_flags & OF_GPIO_ACTIVE_LOW)
+		req_flags |= GPIOF_ACTIVE_LOW;
+	if (gpio_flags & OF_GPIO_SINGLE_ENDED) {
+		if (gpio_flags & OF_GPIO_ACTIVE_LOW)
+			req_flags |= GPIOF_OPEN_DRAIN;
+		else
+			req_flags |= GPIOF_OPEN_SOURCE;
+	}
+
+	/* request the gpio */
+	err = devm_gpio_request_one(dev, gpio, req_flags, name);
+	if (err != 0) {
+		dev_err(dev, "Failed to request gpio '%s'\n", name);
+		goto err_bad_node;
+	}
+
+	irq = -1;
+	irq_flags = 0;
+
+	/* counter mode requested - need an interrupt */
+	if (count_flags != 0) {
+		irq = gpio_to_irq(gpio);
+		if (IS_ERR_VALUE(irq)) {
+			dev_err(dev, "Failed to request gpio '%s'\n", name);
+			goto err_bad_node;
+		}
+
+		if (count_flags & COUNT_RISING_EDGE)
+			irq_flags |= IRQF_TRIGGER_RISING;
+		if (count_flags & COUNT_FALLING_EDGE)
+			irq_flags |= IRQF_TRIGGER_FALLING;
+	}
+
+	idr_preload(GFP_KERNEL);
+
+	entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		dev_err(dev, "Failed to allocate gpio entry of '%s'\n", name);
+		err = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	entry->id = -1;
+	entry->info = info;
+	entry->node = of_node_get(node);	/* get node reference */
+	entry->type = type;
+	entry->gpio = gpio;
+	entry->irq = irq;
+	entry->name = name;
+
+	/* interrupt enable is last thing done */
+	if (irq >= 0) {
+		atomic64_set(&entry->counter, 0);
+		entry->count_flags = count_flags;
+		err = devm_request_irq(dev, irq, gpio_of_helper_handler,
+				irq_flags, name, entry);
+		if (err != 0) {
+			dev_err(dev, "Failed to request irq of '%s'\n", name);
+			goto err_no_irq;
+		}
+	}
+
+	/* all done; insert */
+	err = idr_alloc(&info->idr, entry, 0, 0, GFP_NOWAIT);
+	if (err >= 0)
+		entry->id = err;
+
+	idr_preload_end();
+
+	if (err < 0) {
+		dev_err(dev, "Failed to idr_alloc '%s'\n", name);
+		goto err_fail_idr;
+	}
+
+	dev_dbg(dev, "Allocated GPIO id=%d name='%s'\n", entry->id, name);
+
+	return entry;
+
+err_fail_idr:
+	/* nothing to do */
+err_no_irq:
+	/* release node ref */
+	of_node_put(node);
+	/* nothing else needs to be done, devres handles it */
+err_no_mem:
+err_bad_node:
+	return ERR_PTR(err);
+}
+
+static int gpio_of_entry_destroy(struct gpio_of_entry *entry)
+{
+	struct gpio_of_helper_info *info = entry->info;
+	struct platform_device *pdev = info->pdev;
+	struct device *dev = &pdev->dev;
+
+	dev_dbg(dev, "Destroying GPIO id=%d\n", entry->id);
+
+	/* remove from the IDR */
+	idr_remove(&info->idr, entry->id);
+
+	/* remove node ref */
+	of_node_put(entry->node);
+
+	/* free gpio */
+	devm_gpio_free(dev, entry->gpio);
+
+	/* free irq */
+	if (entry->irq >= 0)
+		devm_free_irq(dev, entry->irq, entry);
+
+	/* and free */
+	devm_kfree(dev, entry);
+
+	return 0;
+}
+
+static int gpio_of_helper_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gpio_of_helper_info *info;
+	struct gpio_of_entry *entry;
+	struct device_node *pnode = pdev->dev.of_node;
+	struct device_node *cnode;
+	struct pinctrl *pinctrl;
+	int err;
+
+	/* we only support OF */
+	if (pnode == NULL) {
+		dev_err(&pdev->dev, "No platform of_node!\n");
+		return -ENODEV;
+	}
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		/* special handling for probe defer */
+		if (PTR_ERR(pinctrl) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		dev_warn(&pdev->dev,
+			"pins are not configured from the driver\n");
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (info == NULL) {
+		dev_err(&pdev->dev, "Failed to allocate info\n");
+		err = -ENOMEM;
+		goto err_no_mem;
+	}
+	platform_set_drvdata(pdev, info);
+	info->pdev = pdev;
+
+	idr_init(&info->idr);
+
+	err = device_create_file(dev, &dev_attr_status);
+	if (err != 0) {
+		dev_err(dev, "Failed to create status sysfs attribute\n");
+		goto err_no_sysfs;
+	}
+
+	for_each_child_of_node(pnode, cnode) {
+
+		entry = gpio_of_entry_create(info, cnode);
+		if (IS_ERR_OR_NULL(entry)) {
+			dev_err(dev, "Failed to create gpio entry\n");
+			err = PTR_ERR(entry);
+			goto err_fail_entry;
+		}
+	}
+
+	dev_info(&pdev->dev, "ready\n");
+
+	return 0;
+err_fail_entry:
+	device_remove_file(&pdev->dev, &dev_attr_status);
+err_no_sysfs:
+err_no_mem:
+	return err;
+}
+
+static int gpio_of_helper_remove(struct platform_device *pdev)
+{
+	struct gpio_of_helper_info *info = platform_get_drvdata(pdev);
+	struct gpio_of_entry *entry;
+	int id;
+
+	dev_info(&pdev->dev, "removing\n");
+
+	device_remove_file(&pdev->dev, &dev_attr_status);
+
+	id = 0;
+	idr_for_each_entry(&info->idr, entry, id) {
+		/* destroy each and every one */
+		gpio_of_entry_destroy(entry);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+//#ifdef CONFIG_PM_RUNTIME
+static int gpio_of_helper_runtime_suspend(struct device *dev)
+{
+	/* place holder */
+	return 0;
+}
+
+static int gpio_of_helper_runtime_resume(struct device *dev)
+{
+	/* place holder */
+	return 0;
+}
+//#endif /* CONFIG_PM_RUNTIME */
+
+static struct dev_pm_ops gpio_of_helper_pm_ops = {
+	SET_RUNTIME_PM_OPS(gpio_of_helper_runtime_suspend,
+			   gpio_of_helper_runtime_resume, NULL)
+};
+#define GPIO_OF_HELPER_PM_OPS (&gpio_of_helper_pm_ops)
+#else
+#define GPIO_OF_HELPER_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver gpio_of_helper_driver = {
+	.probe		= gpio_of_helper_probe,
+	.remove		= gpio_of_helper_remove,
+	.driver = {
+		.name		= "gpio-of-helper",
+		.owner		= THIS_MODULE,
+		.pm		= GPIO_OF_HELPER_PM_OPS,
+		.of_match_table	= gpio_of_helper_of_match,
+	},
+};
+
+module_platform_driver(gpio_of_helper_driver);
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@antoniou-consulting.com>");
+MODULE_DESCRIPTION("GPIO OF Helper driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-of-helper");
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 23e3d335cd5..21fad1d3f4a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -37,10 +37,10 @@ static DEFINE_MUTEX(sysfs_lock);
 /*
  * /sys/class/gpio/gpioN... only for GPIOs that are exported
  *   /direction
- *      * MAY BE OMITTED if kernel won't allow direction changes
  *      * is read/write as "in" or "out"
  *      * may also be written as "high" or "low", initializing
  *        output value as specified ("out" implies "low")
+ *      * read-only if kernel won't allow direction changes
  *   /value
  *      * always readable, subject to hardware behavior
  *      * may be writable, as zero/nonzero
@@ -53,6 +53,8 @@ static DEFINE_MUTEX(sysfs_lock);
  *      * is read/write as zero/nonzero
  *      * also affects existing and subsequent "falling" and "rising"
  *        /edge configuration
+ *   /label
+ *      * descriptor label
  */
 
 static ssize_t direction_show(struct device *dev,
@@ -83,7 +85,9 @@ static ssize_t direction_store(struct device *dev,
 
 	mutex_lock(&data->mutex);
 
-	if (sysfs_streq(buf, "high"))
+	if (!data->direction_can_change)
+		status = -EPERM;
+	else if (sysfs_streq(buf, "high"))
 		status = gpiod_direction_output_raw(desc, 1);
 	else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
 		status = gpiod_direction_output_raw(desc, 0);
@@ -362,6 +366,23 @@ static ssize_t active_low_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(active_low);
 
+static ssize_t label_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct gpiod_data *data = dev_get_drvdata(dev);
+	struct gpio_desc *desc = data->desc;
+	ssize_t			status;
+
+	mutex_lock(&data->mutex);
+
+	status = sprintf(buf, "%s\n", desc->label);
+
+	mutex_unlock(&data->mutex);
+
+	return status;
+}
+static DEVICE_ATTR_RO(label);
+
 static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
 			       int n)
 {
@@ -373,12 +394,15 @@ static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
 
 	if (attr == &dev_attr_direction.attr) {
 		if (!show_direction)
-			mode = 0;
+			mode &= 0444;
 	} else if (attr == &dev_attr_edge.attr) {
 		if (gpiod_to_irq(desc) < 0)
 			mode = 0;
 		if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
 			mode = 0;
+	} else if (attr == &dev_attr_value.attr) {
+		if (!show_direction && !test_bit(FLAG_IS_OUT, &desc->flags))
+			mode &= 0444;
 	}
 
 	return mode;
@@ -389,6 +413,7 @@ static struct attribute *gpio_attrs[] = {
 	&dev_attr_edge.attr,
 	&dev_attr_value.attr,
 	&dev_attr_active_low.attr,
+	&dev_attr_label.attr,
 	NULL,
 };
 
@@ -402,6 +427,10 @@ static const struct attribute_group *gpio_groups[] = {
 	NULL
 };
 
+/* bwlegh, a second device in the same file... get out of my namespace! */
+#define dev_attr_label dev_attr_chip_label
+#define label_show chip_label_show
+
 /*
  * /sys/class/gpio/gpiochipN/
  *   /base ... matching gpio_chip.base (N)
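Taken together, these gpiolib-sysfs hunks make /sys/class/gpio/gpioN/direction read-only (writes fail with -EPERM) whenever the kernel forbids direction changes, restrict /value to read-only for such input-only lines, and expose the descriptor label through a new read-only /label attribute. The sketch below only illustrates the resulting user-space view; the GPIO number is hypothetical and the line must already have been exported.

/* Illustrative only: gpio60 is a placeholder and must already be exported. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	fd = open("/sys/class/gpio/gpio60/label", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("label: %s", buf);	/* label_show() appends '\n' */
		}
		close(fd);
	}

	fd = open("/sys/class/gpio/gpio60/direction", O_WRONLY);
	if (fd >= 0) {
		/* With this patch, a fixed-direction line rejects the write. */
		if (write(fd, "out", 3) < 0 && errno == EPERM)
			fprintf(stderr, "direction change not allowed\n");
		close(fd);
	}
	return 0;
}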
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c14f0784274..235a5778870 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2974,10 +2974,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
 	if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
 		desc_set_label(desc, label ? : "?");
 		ret = 0;
-	} else {
-		kfree_const(label);
-		ret = -EBUSY;
-		goto done;
+//	} else {
+//		kfree_const(label);
+//		ret = -EBUSY;
+//		goto done;
 	}
 
 	if (gc->request) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 6941689085e..ec5a24e9540 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
 			DRM_ERROR("SVGA device lockup.\n");
 			break;
 		}
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 		if (interruptible && signal_pending(current)) {
 			ret = -ERESTARTSYS;
 			break;
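This and the following hunks repeatedly swap jiffy-granularity sleeps for the high-resolution helpers added elsewhere in this series: schedule_timeout(1) becomes schedule_min_hrtimeout(), and schedule_timeout*(msecs_to_jiffies(ms)) becomes schedule_msec_hrtimeout*(ms), which takes milliseconds directly. Below is a minimal sketch of the conversion pattern, assuming those helpers are available in the patched tree; the surrounding function and its polling condition are made up.

/* Hypothetical polling loop; only the sleep calls follow this series. */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>

static int example_wait_ready(bool (*ready)(void))
{
	int retries = 100;

	while (!ready()) {
		if (--retries < 0)
			return -ETIMEDOUT;
		/* was: schedule_timeout(1), i.e. one jiffy (1-10 ms depending on HZ) */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_min_hrtimeout();
	}
	return 0;
}

The schedule_msec_hrtimeout_interruptible()/_uninterruptible() variants are used the same way wherever a fixed millisecond delay was previously passed through msecs_to_jiffies().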
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 75f3efee21a..09b1932ce85 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -203,7 +203,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 			break;
 		}
 		if (lazy)
-			schedule_timeout(1);
+			schedule_min_hrtimeout();
 		else if ((++count & 0x0F) == 0) {
 			/**
 			 * FIXME: Use schedule_hr_timeout here for
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 267eac00a3f..352af68c6cd 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -225,7 +225,7 @@ static ssize_t power1_average_show(struct device *dev,
 		prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
 	}
 
-	leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
+	leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
 	if (leftover)
 		return 0;
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2ddca08f8a7..72647850f08 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -217,6 +217,15 @@ config I2C_CHT_WC
 	  combined with a FUSB302 Type-C port-controller as such it is advised
 	  to also select CONFIG_TYPEC_FUSB302=m.
 
+config I2C_NCT6775
+	tristate "Nuvoton NCT6775 and compatible SMBus controller"
+	help
+	  If you say yes to this option, support will be included for the
+	  Nuvoton NCT6775 and compatible SMBus controllers.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-nct6775.
+
 config I2C_NFORCE2
 	tristate "Nvidia nForce2, nForce3 and nForce4"
 	depends on PCI
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 25d60889713..3c2a9b237ac 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_I2C_CHT_WC)	+= i2c-cht-wc.o
 obj-$(CONFIG_I2C_I801)		+= i2c-i801.o
 obj-$(CONFIG_I2C_ISCH)		+= i2c-isch.o
 obj-$(CONFIG_I2C_ISMT)		+= i2c-ismt.o
+obj-$(CONFIG_I2C_NCT6775)	+= i2c-nct6775.o
 obj-$(CONFIG_I2C_NFORCE2)	+= i2c-nforce2.o
 obj-$(CONFIG_I2C_NFORCE2_S4985)	+= i2c-nforce2-s4985.o
 obj-$(CONFIG_I2C_NVIDIA_GPU)	+= i2c-nvidia-gpu.o
diff --git a/drivers/i2c/busses/i2c-nct6775.c b/drivers/i2c/busses/i2c-nct6775.c
new file mode 100644
index 00000000000..0462f095204
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nct6775.c
@@ -0,0 +1,647 @@
+/*
+ * i2c-nct6775 - Driver for the SMBus master functionality of
+ *	       Nuvoton NCT677x Super-I/O chips
+ *
+ * Copyright (C) 2019  Adam Honse <calcprogrammer1@gmail.com>
+ *
+ * Derived from nct6775 hwmon driver
+ * Copyright (C) 2012  Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/nospec.h>
+
+#define DRVNAME "i2c-nct6775"
+
+/* Nuvoton SMBus address offsets */
+#define SMBHSTDAT       (0 + nuvoton_nct6793d_smba)
+#define SMBBLKSZ        (1 + nuvoton_nct6793d_smba)
+#define SMBHSTCMD       (2 + nuvoton_nct6793d_smba)
+#define SMBHSTIDX       (3 + nuvoton_nct6793d_smba)  //Index field is the Command field on other controllers
+#define SMBHSTCTL       (4 + nuvoton_nct6793d_smba)
+#define SMBHSTADD       (5 + nuvoton_nct6793d_smba)
+#define SMBHSTERR       (9 + nuvoton_nct6793d_smba)
+#define SMBHSTSTS       (0xE + nuvoton_nct6793d_smba)
+
+/* Command register */
+#define NCT6793D_READ_BYTE      0
+#define NCT6793D_READ_WORD      1
+#define NCT6793D_READ_BLOCK     2
+#define NCT6793D_BLOCK_WRITE_READ_PROC_CALL 3
+#define NCT6793D_PROC_CALL      4
+#define NCT6793D_WRITE_BYTE     8
+#define NCT6793D_WRITE_WORD     9
+#define NCT6793D_WRITE_BLOCK    10
+
+/* Control register */
+#define NCT6793D_MANUAL_START   128
+#define NCT6793D_SOFT_RESET     64
+
+/* Error register */
+#define NCT6793D_NO_ACK         32
+
+/* Status register */
+#define NCT6793D_FIFO_EMPTY     1
+#define NCT6793D_FIFO_FULL      2
+#define NCT6793D_MANUAL_ACTIVE  4
+
+#define NCT6775_LD_SMBUS		0x0B
+
+/* Other settings */
+#define MAX_RETRIES		400
+
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
+	     nct6795, nct6796, nct6798 };
+
+struct nct6775_sio_data {
+	int sioreg;
+	enum kinds kind;
+};
+
+/* used to set data->name = nct6775_device_names[data->sio_kind] */
+static const char * const nct6775_device_names[] = {
+	"nct6106",
+	"nct6775",
+	"nct6776",
+	"nct6779",
+	"nct6791",
+	"nct6792",
+	"nct6793",
+	"nct6795",
+	"nct6796",
+	"nct6798",
+};
+
+static const char * const nct6775_sio_names[] __initconst = {
+	"NCT6106D",
+	"NCT6775F",
+	"NCT6776D/F",
+	"NCT6779D",
+	"NCT6791D",
+	"NCT6792D",
+	"NCT6793D",
+	"NCT6795D",
+	"NCT6796D",
+	"NCT6798D",
+};
+
+#define SIO_REG_LDSEL		0x07	/* Logical device select */
+#define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
+#define SIO_REG_SMBA		0x62	/* SMBus base address register */
+
+#define SIO_NCT6106_ID		0xc450
+#define SIO_NCT6775_ID		0xb470
+#define SIO_NCT6776_ID		0xc330
+#define SIO_NCT6779_ID		0xc560
+#define SIO_NCT6791_ID		0xc800
+#define SIO_NCT6792_ID		0xc910
+#define SIO_NCT6793_ID		0xd120
+#define SIO_NCT6795_ID		0xd350
+#define SIO_NCT6796_ID		0xd420
+#define SIO_NCT6798_ID		0xd428
+#define SIO_ID_MASK			0xFFF0
+
+static inline void
+superio_outb(int ioreg, int reg, int val)
+{
+	outb(reg, ioreg);
+	outb(val, ioreg + 1);
+}
+
+static inline int
+superio_inb(int ioreg, int reg)
+{
+	outb(reg, ioreg);
+	return inb(ioreg + 1);
+}
+
+static inline void
+superio_select(int ioreg, int ld)
+{
+	outb(SIO_REG_LDSEL, ioreg);
+	outb(ld, ioreg + 1);
+}
+
+static inline int
+superio_enter(int ioreg)
+{
+	/*
+	 * Try to reserve <ioreg> and <ioreg + 1> for exclusive access.
+	 */
+	if (!request_muxed_region(ioreg, 2, DRVNAME))
+		return -EBUSY;
+
+	outb(0x87, ioreg);
+	outb(0x87, ioreg);
+
+	return 0;
+}
+
+static inline void
+superio_exit(int ioreg)
+{
+	outb(0xaa, ioreg);
+	outb(0x02, ioreg);
+	outb(0x02, ioreg + 1);
+	release_region(ioreg, 2);
+}
+
+/*
+ * ISA constants
+ */
+
+#define IOREGION_ALIGNMENT	(~7)
+#define IOREGION_LENGTH		2
+#define ADDR_REG_OFFSET		0
+#define DATA_REG_OFFSET		1
+
+#define NCT6775_REG_BANK	0x4E
+#define NCT6775_REG_CONFIG	0x40
+
+static struct i2c_adapter *nct6775_adapter;
+
+struct i2c_nct6775_adapdata {
+	unsigned short smba;
+};
+
+/* Return negative errno on error. */
+static s32 nct6775_access(struct i2c_adapter * adap, u16 addr,
+		 unsigned short flags, char read_write,
+		 u8 command, int size, union i2c_smbus_data * data)
+{
+	struct i2c_nct6775_adapdata *adapdata = i2c_get_adapdata(adap);
+	unsigned short nuvoton_nct6793d_smba = adapdata->smba;
+	int i, len, cnt;
+	union i2c_smbus_data tmp_data;
+	int timeout = 0;
+
+	tmp_data.word = 0;
+	cnt = 0;
+	len = 0;
+
+	outb_p(NCT6793D_SOFT_RESET, SMBHSTCTL);
+
+	switch (size) {
+		case I2C_SMBUS_QUICK:
+			outb_p((addr << 1) | read_write,
+			       SMBHSTADD);
+			break;
+		case I2C_SMBUS_BYTE_DATA:
+			tmp_data.byte = data->byte;
+			/* fall through */
+		case I2C_SMBUS_BYTE:
+			outb_p((addr << 1) | read_write,
+			       SMBHSTADD);
+			outb_p(command, SMBHSTIDX);
+			if (read_write == I2C_SMBUS_WRITE) {
+				outb_p(tmp_data.byte, SMBHSTDAT);
+				outb_p(NCT6793D_WRITE_BYTE, SMBHSTCMD);
+			}
+			else {
+				outb_p(NCT6793D_READ_BYTE, SMBHSTCMD);
+			}
+			break;
+		case I2C_SMBUS_WORD_DATA:
+			outb_p((addr << 1) | read_write,
+			       SMBHSTADD);
+			outb_p(command, SMBHSTIDX);
+			if (read_write == I2C_SMBUS_WRITE) {
+				outb_p(data->word & 0xff, SMBHSTDAT);
+				outb_p((data->word & 0xff00) >> 8, SMBHSTDAT);
+				outb_p(NCT6793D_WRITE_WORD, SMBHSTCMD);
+			}
+			else {
+				outb_p(NCT6793D_READ_WORD, SMBHSTCMD);
+			}
+			break;
+		case I2C_SMBUS_BLOCK_DATA:
+			outb_p((addr << 1) | read_write,
+			       SMBHSTADD);
+			outb_p(command, SMBHSTIDX);
+			if (read_write == I2C_SMBUS_WRITE) {
+				len = data->block[0];
+				if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
+					return -EINVAL;
+				outb_p(len, SMBBLKSZ);
+
+				cnt = 1;
+				if (len >= 4) {
+					for (i = cnt; i <= 4; i++) {
+						outb_p(data->block[i], SMBHSTDAT);
+					}
+
+					len -= 4;
+					cnt += 4;
+				}
+				else {
+					for (i = cnt; i <= len; i++ ) {
+						outb_p(data->block[i], SMBHSTDAT);
+					}
+
+					len = 0;
+				}
+
+				outb_p(NCT6793D_WRITE_BLOCK, SMBHSTCMD);
+			}
+			else {
+				return -ENOTSUPP;
+			}
+			break;
+		default:
+			dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+			return -EOPNOTSUPP;
+	}
+
+	outb_p(NCT6793D_MANUAL_START, SMBHSTCTL);
+
+	while ((size == I2C_SMBUS_BLOCK_DATA) && (len > 0)) {
+		if (read_write == I2C_SMBUS_WRITE) {
+			timeout = 0;
+			while ((inb_p(SMBHSTSTS) & NCT6793D_FIFO_EMPTY) == 0)
+			{
+				if(timeout > MAX_RETRIES)
+				{
+					return -ETIMEDOUT;
+				}
+				usleep_range(250, 500);
+				timeout++;
+			}
+
+			//Load more bytes into FIFO
+			if (len >= 4) {
+				for (i = cnt; i <= (cnt + 4); i++) {
+					outb_p(data->block[i], SMBHSTDAT);
+				}
+
+				len -= 4;
+				cnt += 4;
+			}
+			else {
+				for (i = cnt; i <= (cnt + len); i++) {
+					outb_p(data->block[i], SMBHSTDAT);
+				}
+
+				len = 0;
+			}
+		}
+		else {
+			return -ENOTSUPP;
+		}
+		
+	}
+
+	//wait for manual mode to complete
+	timeout = 0;
+	while ((inb_p(SMBHSTSTS) & NCT6793D_MANUAL_ACTIVE) != 0)
+	{
+		if(timeout > MAX_RETRIES)
+		{
+			return -ETIMEDOUT;
+		}
+		usleep_range(250, 500);
+		timeout++;
+	}
+
+	if ((inb_p(SMBHSTERR) & NCT6793D_NO_ACK) != 0) {    	
+		return -ENXIO;
+	}
+	else if ((read_write == I2C_SMBUS_WRITE) || (size == I2C_SMBUS_QUICK)) {
+		return 0;
+	}
+
+	switch (size) {
+		case I2C_SMBUS_QUICK:
+		case I2C_SMBUS_BYTE_DATA:
+			data->byte = inb_p(SMBHSTDAT);
+			break;
+		case I2C_SMBUS_WORD_DATA:
+			data->word = inb_p(SMBHSTDAT) + (inb_p(SMBHSTDAT) << 8);
+			break;
+	}
+	return 0;
+}
+
+static u32 nct6775_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+	    I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+	    I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm smbus_algorithm = {
+	.smbus_xfer	= nct6775_access,
+	.functionality	= nct6775_func,
+};
+
+static int nct6775_add_adapter(unsigned short smba, const char *name, struct i2c_adapter **padap)
+{
+	struct i2c_adapter *adap;
+	struct i2c_nct6775_adapdata *adapdata;
+	int retval;
+
+	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+	if (adap == NULL) {
+		return -ENOMEM;
+	}
+
+	adap->owner = THIS_MODULE;
+	adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+	adap->algo = &smbus_algorithm;
+
+	adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
+	if (adapdata == NULL) {
+		kfree(adap);
+		return -ENOMEM;
+	}
+
+	adapdata->smba = smba;
+
+	snprintf(adap->name, sizeof(adap->name),
+		"SMBus NCT67xx adapter%s at %04x", name, smba);
+
+	i2c_set_adapdata(adap, adapdata);
+
+	retval = i2c_add_adapter(adap);
+	if (retval) {
+		kfree(adapdata);
+		kfree(adap);
+		return retval;
+	}
+
+	*padap = adap;
+	return 0;
+}
+
+static void nct6775_remove_adapter(struct i2c_adapter *adap)
+{
+	struct i2c_nct6775_adapdata *adapdata = i2c_get_adapdata(adap);
+
+	if (adapdata->smba) {
+		i2c_del_adapter(adap);
+		kfree(adapdata);
+		kfree(adap);
+	}
+}
+
+//static SIMPLE_DEV_PM_OPS(nct6775_dev_pm_ops, nct6775_suspend, nct6775_resume);
+
+/*
+ * when Super-I/O functions move to a separate file, the Super-I/O
+ * bus will manage the lifetime of the device and this module will only keep
+ * track of the nct6775 driver. But since we use platform_device_alloc(), we
+ * must keep track of the device
+ */
+static struct platform_device *pdev[2];
+
+static int nct6775_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
+				 DRVNAME))
+		return -EBUSY;
+
+	switch (sio_data->kind) {
+	case nct6791:
+	case nct6792:
+	case nct6793:
+	case nct6795:
+	case nct6796:
+	case nct6798:
+		nct6775_add_adapter(res->start, "", &nct6775_adapter);
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	return 0;
+}
+/*
+static void nct6791_enable_io_mapping(int sioaddr)
+{
+	int val;
+
+	val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+	if (val & 0x10) {
+		pr_info("Enabling hardware monitor logical device mappings.\n");
+		superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+			     val & ~0x10);
+	}
+}*/
+
+static struct platform_driver i2c_nct6775_driver = {
+	.driver = {
+		.name	= DRVNAME,
+//		.pm	= &nct6775_dev_pm_ops,
+	},
+	.probe		= nct6775_probe,
+};
+
+static void __exit i2c_nct6775_exit(void)
+{
+	int i;
+
+	if(nct6775_adapter)
+		nct6775_remove_adapter(nct6775_adapter);
+
+	for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+		if (pdev[i])
+			platform_device_unregister(pdev[i]);
+	}
+	platform_driver_unregister(&i2c_nct6775_driver);
+}
+
+/* nct6775_find() looks for a supported NCT67xx chip in the Super-I/O config space */
+static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
+{
+	u16 val;
+	int err;
+	int addr;
+
+	err = superio_enter(sioaddr);
+	if (err)
+		return err;
+
+	val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8) |
+		superio_inb(sioaddr, SIO_REG_DEVID + 1);
+
+	switch (val & SIO_ID_MASK) {
+	case SIO_NCT6106_ID:
+		sio_data->kind = nct6106;
+		break;
+	case SIO_NCT6775_ID:
+		sio_data->kind = nct6775;
+		break;
+	case SIO_NCT6776_ID:
+		sio_data->kind = nct6776;
+		break;
+	case SIO_NCT6779_ID:
+		sio_data->kind = nct6779;
+		break;
+	case SIO_NCT6791_ID:
+		sio_data->kind = nct6791;
+		break;
+	case SIO_NCT6792_ID:
+		sio_data->kind = nct6792;
+		break;
+	case SIO_NCT6793_ID:
+		sio_data->kind = nct6793;
+		break;
+	case SIO_NCT6795_ID:
+		sio_data->kind = nct6795;
+		break;
+	case SIO_NCT6796_ID:
+		sio_data->kind = nct6796;
+		break;
+	case SIO_NCT6798_ID:
+		sio_data->kind = nct6798;
+		break;
+	default:
+		if (val != 0xffff)
+			pr_debug("unsupported chip ID: 0x%04x\n", val);
+		superio_exit(sioaddr);
+		return -ENODEV;
+	}
+
+	/* We have a known chip, find the SMBus I/O address */
+	superio_select(sioaddr, NCT6775_LD_SMBUS);
+	val = (superio_inb(sioaddr, SIO_REG_SMBA) << 8)
+	    | superio_inb(sioaddr, SIO_REG_SMBA + 1);
+	addr = val & IOREGION_ALIGNMENT;
+	if (addr == 0) {
+		pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
+		superio_exit(sioaddr);
+		return -ENODEV;
+	}
+
+	//if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
+	//    sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
+	//    sio_data->kind == nct6796)
+	//	nct6791_enable_io_mapping(sioaddr);
+
+	superio_exit(sioaddr);
+	pr_info("Found %s or compatible chip at %#x:%#x\n",
+		nct6775_sio_names[sio_data->kind], sioaddr, addr);
+	sio_data->sioreg = sioaddr;
+
+	return addr;
+}
+
+static int __init i2c_nct6775_init(void)
+{
+	int i, err;
+	bool found = false;
+	int address;
+	struct resource res;
+	struct nct6775_sio_data sio_data;
+	int sioaddr[2] = { 0x2e, 0x4e };
+
+	err = platform_driver_register(&i2c_nct6775_driver);
+	if (err)
+		return err;
+
+	/*
+	 * initialize sio_data->kind and sio_data->sioreg.
+	 *
+	 * when Super-I/O functions move to a separate file, the Super-I/O
+	 * driver will probe 0x2e and 0x4e and auto-detect the presence of a
+	 * nct6775 hardware monitor, and call probe()
+	 */
+	for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+		address = nct6775_find(sioaddr[i], &sio_data);
+		if (address <= 0)
+			continue;
+
+		found = true;
+
+		pdev[i] = platform_device_alloc(DRVNAME, address);
+		if (!pdev[i]) {
+			err = -ENOMEM;
+			goto exit_device_unregister;
+		}
+
+		err = platform_device_add_data(pdev[i], &sio_data,
+					       sizeof(struct nct6775_sio_data));
+		if (err)
+			goto exit_device_put;
+
+		memset(&res, 0, sizeof(res));
+		res.name = DRVNAME;
+		res.start = address;
+		res.end = address + IOREGION_LENGTH - 1;
+		res.flags = IORESOURCE_IO;
+
+		err = acpi_check_resource_conflict(&res);
+		if (err) {
+			platform_device_put(pdev[i]);
+			pdev[i] = NULL;
+			continue;
+		}
+
+		err = platform_device_add_resources(pdev[i], &res, 1);
+		if (err)
+			goto exit_device_put;
+
+		/* platform_device_add calls probe() */
+		err = platform_device_add(pdev[i]);
+		if (err)
+			goto exit_device_put;
+	}
+	if (!found) {
+		err = -ENODEV;
+		goto exit_unregister;
+	}
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(pdev[i]);
+exit_device_unregister:
+	while (--i >= 0) {
+		if (pdev[i])
+			platform_device_unregister(pdev[i]);
+	}
+exit_unregister:
+	platform_driver_unregister(&i2c_nct6775_driver);
+	return err;
+}
+
+MODULE_AUTHOR("Adam Honse <calcprogrammer1@gmail.com>");
+MODULE_DESCRIPTION("SMBus driver for NCT6775F and compatible chips");
+MODULE_LICENSE("GPL");
+
+module_init(i2c_nct6775_init);
+module_exit(i2c_nct6775_exit);
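Once probed, the adapter shows up as an ordinary /dev/i2c-N node; nct6775_func() advertises quick, byte, byte-data, word-data and block-data transfers (block reads return -ENOTSUPP in nct6775_access()). Below is a user-space sketch of a byte-data read through the standard i2c-dev ioctl interface; the bus number and slave address are placeholders, not values defined by this driver.

/* Sketch: the bus number (/dev/i2c-2) and slave address (0x2f) are placeholders. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_READ,
		.command = 0x00,		/* register offset to read */
		.size = I2C_SMBUS_BYTE_DATA,
		.data = &data,
	};
	int fd = open("/dev/i2c-2", O_RDWR);

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x2f) < 0)
		return 1;
	if (ioctl(fd, I2C_SMBUS, &args) < 0)
		return 1;
	printf("reg 0x00 = 0x%02x\n", data.byte);
	close(fd);
	return 0;
}

The i2cdetect and i2cget tools from i2c-tools exercise the same ioctls, so they can be used to verify the adapter as well.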
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 69740a4ff1d..b70738fbe72 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -467,11 +467,11 @@ static int piix4_transaction(struct i2c_adapter *piix4_adapter)
 	if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */
 		usleep_range(2000, 2100);
 	else
-		usleep_range(250, 500);
+		usleep_range(25, 50);
 
 	while ((++timeout < MAX_TIMEOUT) &&
 	       ((temp = inb_p(SMBHSTSTS)) & 0x01))
-		usleep_range(250, 500);
+		usleep_range(25, 50);
 
 	/* If the SMBus is still busy, we give up */
 	if (timeout == MAX_TIMEOUT) {
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index d8c40a83097..b8d48a09207 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -273,7 +273,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
 	 * TODO: Make sure that we wait at least required delay but why we
 	 * have to extend it one tick more?
 	 */
-	schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
+	schedule_msec_hrtimeout_interruptible(delay + 2);
 }
 
 static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index f011447c44f..e0c6deacca1 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -36,7 +36,7 @@ struct tps6521x_data {
 static const struct tps6521x_data tps65217_data = {
 	.reg_status = TPS65217_REG_STATUS,
 	.pb_mask = TPS65217_STATUS_PB,
-	.name = "tps65217_pwrbutton",
+	.name = "tps65217_pwr_but",
 };
 
 static const struct tps6521x_data tps65218_data = {
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index c0d5c241335..740956f9431 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -17,6 +17,7 @@
 
 #define AR1021_MAX_X	4095
 #define AR1021_MAX_Y	4095
+#define AR1021_MAX_PRESSURE 255
 
 #define AR1021_CMD	0x55
 
@@ -26,8 +27,29 @@ struct ar1021_i2c {
 	struct i2c_client *client;
 	struct input_dev *input;
 	u8 data[AR1021_TOUCH_PKG_SIZE];
+	bool invert_x;
+	bool invert_y;
+	bool swap_xy;
 };
 
+static bool ar1021_get_prop_u32(struct device *dev,
+				     const char *property,
+				     unsigned int default_value,
+				     unsigned int *value)
+{
+	u32 val;
+	int error;
+
+	error = device_property_read_u32(dev, property, &val);
+	if (error) {
+		*value = default_value;
+		return false;
+	}
+
+	*value = val;
+	return true;
+}
+
 static irqreturn_t ar1021_i2c_irq(int irq, void *dev_id)
 {
 	struct ar1021_i2c *ar1021 = dev_id;
@@ -49,9 +71,22 @@ static irqreturn_t ar1021_i2c_irq(int irq, void *dev_id)
 	x = ((data[2] & 0x1f) << 7) | (data[1] & 0x7f);
 	y = ((data[4] & 0x1f) << 7) | (data[3] & 0x7f);
 
-	input_report_abs(input, ABS_X, x);
-	input_report_abs(input, ABS_Y, y);
+	if (ar1021->invert_x)
+		x = AR1021_MAX_X - x;
+
+	if (ar1021->invert_y)
+		y = AR1021_MAX_Y - y;
+
+	if (ar1021->swap_xy) {
+		input_report_abs(input, ABS_X, y);
+		input_report_abs(input, ABS_Y, x);
+	} else {
+		input_report_abs(input, ABS_X, x);
+		input_report_abs(input, ABS_Y, y);
+	}
+
 	input_report_key(input, BTN_TOUCH, button);
+	input_report_abs(input, ABS_PRESSURE, AR1021_MAX_PRESSURE);
 	input_sync(input);
 
 out:
@@ -93,6 +128,8 @@ static int ar1021_i2c_probe(struct i2c_client *client,
 	struct ar1021_i2c *ar1021;
 	struct input_dev *input;
 	int error;
+	unsigned int offset_x, offset_y;
+	bool data_present;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		dev_err(&client->dev, "i2c_check_functionality error\n");
@@ -116,10 +153,44 @@ static int ar1021_i2c_probe(struct i2c_client *client,
 	input->open = ar1021_i2c_open;
 	input->close = ar1021_i2c_close;
 
+	ar1021->invert_x = device_property_read_bool(&client->dev, "touchscreen-inverted-x");
+	ar1021->invert_y = device_property_read_bool(&client->dev, "touchscreen-inverted-y");
+	ar1021->swap_xy = device_property_read_bool(&client->dev, "touchscreen-swapped-x-y");
+
+	data_present = ar1021_get_prop_u32(&client->dev,
+						"touchscreen-offset-x",
+						0,
+						&offset_x);
+
+	if (data_present)
+		dev_info(&client->dev, "touchscreen-offset-x: %d\n", offset_x);
+
+	data_present = ar1021_get_prop_u32(&client->dev,
+						"touchscreen-offset-y",
+						0,
+						&offset_y);
+
+	if (data_present)
+		dev_info(&client->dev, "touchscreen-offset-y: %d\n", offset_y);
+
 	__set_bit(INPUT_PROP_DIRECT, input->propbit);
-	input_set_capability(input, EV_KEY, BTN_TOUCH);
-	input_set_abs_params(input, ABS_X, 0, AR1021_MAX_X, 0, 0);
-	input_set_abs_params(input, ABS_Y, 0, AR1021_MAX_Y, 0, 0);
+	//input_set_capability(input, EV_KEY, BTN_TOUCH);
+
+	input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	if(ar1021->swap_xy)
+	{
+		input_set_abs_params(input, ABS_X, 0, AR1021_MAX_Y, 0, 0);
+		input_set_abs_params(input, ABS_Y, 0, AR1021_MAX_X, 0, 0);
+	}
+	else
+	{
+		input_set_abs_params(input, ABS_X, offset_x, AR1021_MAX_X-offset_x, 0, 0);
+		input_set_abs_params(input, ABS_Y, offset_y, AR1021_MAX_Y-offset_y, 0, 0);
+	}
+
+	input_set_abs_params(input, ABS_PRESSURE, 0, AR1021_MAX_PRESSURE, 0, 0);
 
 	input_set_drvdata(input, ar1021);
 
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 92cb9774da1..0fd401da6d1 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2142,7 +2142,7 @@ static int mxt_initialize(struct mxt_data *data)
 	if (error)
 		return error;
 
-	error = reject_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
+	error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
 					&client->dev, GFP_KERNEL, data,
 					mxt_config_cb);
 	if (error) {
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 83e685557a1..dd358bff4a5 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -34,6 +34,7 @@
 #define ADCFSM_STEPID		0x10
 #define SEQ_SETTLE		275
 #define MAX_12BIT		((1 << 12) - 1)
+#define PRESSURE_MAX		1000
 
 #define TSC_IRQENB_MASK		(IRQENB_FIFO0THRES | IRQENB_EOS | IRQENB_HW_PEN)
 
@@ -234,6 +235,7 @@ static void titsc_read_coordinates(struct titsc *ts_dev,
 	for (i = 0; i < creads; i++) {
 		xvals[i] = titsc_readl(ts_dev, REG_FIFO0);
 		xvals[i] &= 0xfff;
+		pr_debug("i %d xval %d yval %d z1 %d z2 %d\n", i, xvals[i], yvals[i], *z1, *z2);
 	}
 
 	/*
@@ -312,13 +314,13 @@ static irqreturn_t titsc_irq(int irq, void *dev)
 			 * Resistance(touch) = x plate resistance *
 			 * x postion/4096 * ((z2 / z1) - 1)
 			 */
-			z = z1 - z2;
+			z = z2 - z1;
 			z *= x;
 			z *= ts_dev->x_plate_resistance;
-			z /= z2;
+			z /= z1;
 			z = (z + 2047) >> 12;
-
-			if (z <= MAX_12BIT) {
+			pr_debug("x %d y %d z1 %d z2 %d z %d\n", x, y, z1, z2, z);
+			if (z <= PRESSURE_MAX) {
 				input_report_abs(input_dev, ABS_X, x);
 				input_report_abs(input_dev, ABS_Y, y);
 				input_report_abs(input_dev, ABS_PRESSURE, z);
@@ -459,6 +461,7 @@ static int titsc_probe(struct platform_device *pdev)
 	input_dev->name = "ti-tsc";
 	input_dev->dev.parent = &pdev->dev;
 
+	__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
 	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
 	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
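The pressure rework above keeps the comment's formula, Rtouch = x_plate_resistance * (x / 4096) * (z2 / z1 - 1), but evaluates it in integer math and rejects samples above PRESSURE_MAX (1000) instead of MAX_12BIT. A small numeric check of that integer path follows; the plate resistance and ADC readings are invented example values.

#include <stdio.h>

int main(void)
{
	unsigned int x = 2000, z1 = 400, z2 = 500;	/* example ADC readings */
	unsigned int rx_plate = 200;			/* example x-plate resistance, ohms */
	unsigned int z;

	/* mirrors the integer math in titsc_irq(): z = Rx * x * (z2 - z1) / z1, scaled by 4096 */
	z = z2 - z1;
	z *= x;
	z *= rx_plate;
	z /= z1;
	z = (z + 2047) >> 12;

	printf("pressure value %u (%s PRESSURE_MAX=1000)\n",
	       z, z <= 1000 ? "accepted, <=" : "rejected, >");
	return 0;
}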
 
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 39530d43590..a7caf2eb577 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -170,7 +170,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
 			break;
 		dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
 		       dev, addr);
-		schedule_timeout_interruptible(msecs_to_jiffies(10));
+		schedule_msec_hrtimeout_interruptible((10));
 	}
 	if (err == 3) {
 		dev_warn(&client->dev, "resetting chip, sound will go off.\n");
@@ -211,7 +211,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
 			break;
 		dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
 		       dev, addr);
-		schedule_timeout_interruptible(msecs_to_jiffies(10));
+		schedule_msec_hrtimeout_interruptible((10));
 	}
 	if (err == 3) {
 		dev_warn(&client->dev, "resetting chip, sound will go off.\n");
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index cf7cfda9410..f63e1748954 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -81,11 +81,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
 
 	/* Assert */
 	gpio_update(cx, mask, ~active_lo);
-	schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
+	schedule_msec_hrtimeout_uninterruptible((assert_msecs));
 
 	/* Deassert */
 	gpio_update(cx, mask, ~active_hi);
-	schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
+	schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
 }
 
 /*
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index 856e7ab7f33..766a2625133 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -105,7 +105,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
 	curout = (curout & ~0xF) | 1;
 	write_reg(curout, IVTV_REG_GPIO_OUT);
 	/* We could use something else for smaller time */
-	schedule_timeout_interruptible(msecs_to_jiffies(1));
+	schedule_msec_hrtimeout_interruptible((1));
 	curout |= 2;
 	write_reg(curout, IVTV_REG_GPIO_OUT);
 	curdir &= ~0x80;
@@ -125,11 +125,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
 	curout = read_reg(IVTV_REG_GPIO_OUT);
 	curout &= ~(1 << itv->card->xceive_pin);
 	write_reg(curout, IVTV_REG_GPIO_OUT);
-	schedule_timeout_interruptible(msecs_to_jiffies(1));
+	schedule_msec_hrtimeout_interruptible((1));
 
 	curout |= 1 << itv->card->xceive_pin;
 	write_reg(curout, IVTV_REG_GPIO_OUT);
-	schedule_timeout_interruptible(msecs_to_jiffies(1));
+	schedule_msec_hrtimeout_interruptible((1));
 	return 0;
 }
 
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 137853944e4..76830892f37 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -1137,7 +1137,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
 				TASK_UNINTERRUPTIBLE);
 		if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
 			break;
-		schedule_timeout(msecs_to_jiffies(25));
+		schedule_msec_hrtimeout((25));
 	}
 	finish_wait(&itv->vsync_waitq, &wait);
 	mutex_lock(&itv->serialize_lock);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f04ee84bab5..c4469b4b8f9 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
 			while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
 				time_before(jiffies,
 					    then + msecs_to_jiffies(2000))) {
-				schedule_timeout(msecs_to_jiffies(10));
+				schedule_msec_hrtimeout((10));
 			}
 
 			/* To convert jiffies to ms, we must multiply by 1000
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index cb0437b4c33..163fffc0e1d 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -366,7 +366,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
 			retval = -ENODATA;
 			break;
 		}
-		if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+		if (schedule_msec_hrtimeout_interruptible((10))) {
 			retval = -ERESTARTSYS;
 			break;
 		}
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
index fb9de7bbcd1..e53cf45e7f3 100644
--- a/drivers/media/radio/radio-tea5777.c
+++ b/drivers/media/radio/radio-tea5777.c
@@ -235,7 +235,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
 	}
 
 	if (wait) {
-		if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
+		if (schedule_msec_hrtimeout_interruptible((wait)))
 			return -ERESTARTSYS;
 	}
 
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index b0303cf0038..0925b506514 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -401,7 +401,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
 	for (;;) {
 		if (time_after(jiffies, timeout))
 			break;
-		if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+		if (schedule_msec_hrtimeout_interruptible((10))) {
 			/* some signal arrived, stop search */
 			tea->val &= ~TEA575X_BIT_SEARCH;
 			snd_tea575x_set_freq(tea);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index b690796d24d..448b13da62b 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
 			break;
 		/* yield to other processes */
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 	}
 
 	return UCB_ADC_DAT(val);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8..a8e8ae5a336 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,13 @@ config PVPANIC
 	  a paravirtualized device provided by QEMU; it lets a virtual machine
 	  (guest) communicate panic events to the host.
 
+config UDOO_ARD
+	tristate "UDOO-Arduino erase/reset Driver"
+	default y
+	help
+	  This driver is used to erase and reset the Arduino board via commands
+	  sent over the USB-to-serial connection.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -475,6 +482,7 @@ source "drivers/misc/mei/Kconfig"
 source "drivers/misc/vmw_vmci/Kconfig"
 source "drivers/misc/mic/Kconfig"
 source "drivers/misc/genwqe/Kconfig"
+source "drivers/misc/cape/Kconfig"
 source "drivers/misc/echo/Kconfig"
 source "drivers/misc/cxl/Kconfig"
 source "drivers/misc/ocxl/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d83..b35666d25fa 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -48,9 +48,11 @@ obj-$(CONFIG_SRAM)		+= sram.o
 obj-$(CONFIG_SRAM_EXEC)		+= sram-exec.o
 obj-y				+= mic/
 obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-y				+= cape/
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
+obj-$(CONFIG_UDOO_ARD)		+= udoo_ard.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)	+= pci_endpoint_test.o
 obj-$(CONFIG_OCXL)		+= ocxl/
 obj-y				+= cardreader/
diff --git a/drivers/misc/cape/Kconfig b/drivers/misc/cape/Kconfig
new file mode 100644
index 00000000000..a2ef85e0415
--- /dev/null
+++ b/drivers/misc/cape/Kconfig
@@ -0,0 +1,5 @@
+#
+# Capes
+#
+
+source "drivers/misc/cape/beaglebone/Kconfig"
diff --git a/drivers/misc/cape/Makefile b/drivers/misc/cape/Makefile
new file mode 100644
index 00000000000..7c4eb96ab29
--- /dev/null
+++ b/drivers/misc/cape/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for cape like devices
+#
+
+obj-y				+= beaglebone/
diff --git a/drivers/misc/cape/beaglebone/Kconfig b/drivers/misc/cape/beaglebone/Kconfig
new file mode 100644
index 00000000000..eeb67828fd8
--- /dev/null
+++ b/drivers/misc/cape/beaglebone/Kconfig
@@ -0,0 +1,10 @@
+#
+# Beaglebone capes
+#
+
+config BEAGLEBONE_PINMUX_HELPER
+	tristate "Beaglebone Pinmux Helper"
+	depends on ARCH_OMAP2PLUS && OF
+	default n
+	help
+	  Say Y here to include support for the pinmux helper.
diff --git a/drivers/misc/cape/beaglebone/Makefile b/drivers/misc/cape/beaglebone/Makefile
new file mode 100644
index 00000000000..7f4617a01e2
--- /dev/null
+++ b/drivers/misc/cape/beaglebone/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for beaglebone capes
+#
+
+obj-$(CONFIG_BEAGLEBONE_PINMUX_HELPER)	+= bone-pinmux-helper.o
diff --git a/drivers/misc/cape/beaglebone/bone-pinmux-helper.c b/drivers/misc/cape/beaglebone/bone-pinmux-helper.c
new file mode 100644
index 00000000000..57703d8bcac
--- /dev/null
+++ b/drivers/misc/cape/beaglebone/bone-pinmux-helper.c
@@ -0,0 +1,242 @@
+/*
+ * Pinmux helper driver
+ *
+ * Copyright (C) 2013 Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+
+static const struct of_device_id bone_pinmux_helper_of_match[] = {
+	{
+		.compatible = "bone-pinmux-helper",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bone_pinmux_helper_of_match);
+
+struct pinmux_helper_data {
+	struct pinctrl *pinctrl;
+	char *selected_state_name;
+};
+
+static ssize_t pinmux_helper_show_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pinmux_helper_data *data = platform_get_drvdata(pdev);
+	const char *name;
+
+	name = data->selected_state_name;
+	if (name == NULL || strlen(name) == 0)
+		name = "none";
+	return sprintf(buf, "%s\n", name);
+}
+
+static ssize_t pinmux_helper_store_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pinmux_helper_data *data = platform_get_drvdata(pdev);
+	struct pinctrl_state *state;
+	char *state_name;
+	char *s;
+	int err;
+
+	/* duplicate (as a null terminated string) */
+	state_name = kmalloc(count + 1, GFP_KERNEL);
+	if (state_name == NULL)
+		return -ENOMEM;
+	memcpy(state_name, buf, count);
+	state_name[count] = '\0';
+
+	/* and chop off newline */
+	s = strchr(state_name, '\n');
+	if (s != NULL)
+		*s = '\0';
+
+	/* try to select default state at first (if it exists) */
+	state = pinctrl_lookup_state(data->pinctrl, state_name);
+	if (!IS_ERR(state)) {
+		err = pinctrl_select_state(data->pinctrl, state);
+		if (err != 0)
+			dev_err(dev, "Failed to select state %s\n",
+					state_name);
+	} else {
+		dev_err(dev, "Failed to find state %s\n", state_name);
+		err = PTR_ERR_OR_ZERO(state);
+	}
+
+	if (err == 0) {
+		kfree(data->selected_state_name);
+		data->selected_state_name = state_name;
+	}
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(state, S_IWUSR | S_IRUGO,
+		   pinmux_helper_show_state, pinmux_helper_store_state);
+
+static struct attribute *pinmux_helper_attributes[] = {
+	&dev_attr_state.attr,
+	NULL
+};
+
+static const struct attribute_group pinmux_helper_attr_group = {
+	.attrs = pinmux_helper_attributes,
+};
+
+static int bone_pinmux_helper_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pinmux_helper_data *data;
+	struct pinctrl_state *state;
+	char *state_name;
+	const char *mode_name;
+	int mode_len;
+	int err;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(dev, "Failed to allocate data\n");
+		err = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	state_name = kmalloc(strlen(PINCTRL_STATE_DEFAULT) + 1,
+			GFP_KERNEL);
+	if (state_name == NULL) {
+		dev_err(dev, "Failed to allocate state name\n");
+		err = -ENOMEM;
+		goto err_no_state_mem;
+	}
+	data->selected_state_name = state_name;
+	strcpy(data->selected_state_name, PINCTRL_STATE_DEFAULT);
+
+	platform_set_drvdata(pdev, data);
+
+	data->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(data->pinctrl)) {
+		dev_err(dev, "Failed to get pinctrl\n");
+		err = PTR_ERR_OR_ZERO(data->pinctrl);
+		goto err_no_pinctrl;
+	}
+
+	/* See if an initial mode is specified in the device tree */
+	mode_name = of_get_property(dev->of_node, "mode", &mode_len);
+
+	err = -1;
+	if (mode_name != NULL ) {
+		state_name = kmalloc(mode_len + 1, GFP_KERNEL);
+		if (state_name == NULL) {
+			dev_err(dev, "Failed to allocate state name\n");
+			err = -ENOMEM;
+			goto err_no_mode_mem;
+		}
+		strncpy(state_name, mode_name, mode_len);
+
+		/* try to select requested mode */
+		state = pinctrl_lookup_state(data->pinctrl, state_name);
+		if (!IS_ERR(state)) {
+			err = pinctrl_select_state(data->pinctrl, state);
+			if (err != 0) {
+				dev_warn(dev, "Unable to select requested mode %s\n", state_name);
+				kfree(state_name);
+			} else {
+				kfree(data->selected_state_name);
+				data->selected_state_name = state_name;
+				dev_notice(dev, "Set initial pinmux mode to %s\n", state_name);
+			}
+		}
+	}
+
+	/* try to select default state if mode_name failed */
+	if ( err != 0) {
+		state = pinctrl_lookup_state(data->pinctrl,
+				data->selected_state_name);
+		if (!IS_ERR(state)) {
+			err = pinctrl_select_state(data->pinctrl, state);
+			if (err != 0) {
+				dev_err(dev, "Failed to select default state\n");
+				goto err_no_state;
+			}
+		} else {
+			data->selected_state_name = NULL;
+		}
+	}
+
+	/* Register sysfs hooks */
+	err = sysfs_create_group(&dev->kobj, &pinmux_helper_attr_group);
+	if (err) {
+		dev_err(dev, "Failed to create sysfs group\n");
+		goto err_no_sysfs;
+	}
+
+	return 0;
+
+err_no_sysfs:
+err_no_state:
+err_no_mode_mem:
+	devm_pinctrl_put(data->pinctrl);
+err_no_pinctrl:
+	devm_kfree(dev, data->selected_state_name);
+err_no_state_mem:
+	devm_kfree(dev, data);
+err_no_mem:
+	return err;
+}
+
+static int bone_pinmux_helper_remove(struct platform_device *pdev)
+{
+	struct pinmux_helper_data *data = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+
+	sysfs_remove_group(&dev->kobj, &pinmux_helper_attr_group);
+	kfree(data->selected_state_name);
+	devm_pinctrl_put(data->pinctrl);
+	devm_kfree(dev, data);
+
+	return 0;
+}
+
+struct platform_driver bone_pinmux_helper_driver = {
+	.probe		= bone_pinmux_helper_probe,
+	.remove		= bone_pinmux_helper_remove,
+	.driver = {
+		.name		= "bone-pinmux-helper",
+		.owner		= THIS_MODULE,
+		.of_match_table	= bone_pinmux_helper_of_match,
+	},
+};
+
+module_platform_driver(bone_pinmux_helper_driver);
+
+MODULE_AUTHOR("Pantelis Antoniou");
+MODULE_DESCRIPTION("Beaglebone pinmux helper driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bone-pinmux-helper");
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 8e6607fc8a6..b9ab770bbdb 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
 
 	atomic_inc(&ch->n_on_msg_allocate_wq);
 	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
-	ret = schedule_timeout(1);
+	ret = schedule_min_hrtimeout();
 	finish_wait(&ch->msg_allocate_wq, &wait);
 	atomic_dec(&ch->n_on_msg_allocate_wq);
 
diff --git a/drivers/misc/udoo_ard.c b/drivers/misc/udoo_ard.c
new file mode 100755
index 00000000000..2210738e09c
--- /dev/null
+++ b/drivers/misc/udoo_ard.c
@@ -0,0 +1,417 @@
+/*
+ * udoo_ard.c
+ * UDOO quad/dual Arduino flash erase / CPU resetter
+ *
+ * Copyright (C) 2013-2015 Aidilab srl
+ * Author: UDOO Team <social@udoo.org>
+ * Author: Giuseppe Pagano <giuseppe.pagano@seco.com>
+ * Author: Francesco Montefoschi <francesco.monte@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+
+#define DRIVER_NAME              "udoo_ard"
+#define PINCTRL_DEFAULT          "default"
+#define AUTH_TOKEN               0x5A5A
+#define MAX_MSEC_SINCE_LAST_IRQ  400
+#define GRAY_TIME_BETWEEN_RESET  10000 // window during which no new erase/reset code is accepted
+
+static struct workqueue_struct *erase_reset_wq;
+typedef struct {
+    struct work_struct erase_reset_work;
+    struct pinctrl *pinctrl;
+    struct pinctrl_state *pins_default;
+    int    step;
+    int    cmdcode;
+    int    erase_reset_lock;
+    int    gpio_bossac_clk;
+    int    gpio_bossac_dat;
+    int    gpio_ard_erase;
+    int    gpio_ard_reset;
+    unsigned long    last_int_time_in_ns;
+    unsigned long    last_int_time_in_sec;
+} erase_reset_work_t;
+
+erase_reset_work_t *work;
+static u32 origTX, origRX; // original UART4 TX/RX pad control registers
+static int major; // for /dev/udoo_ard
+static struct class *udoo_class;
+
+static struct platform_device_id udoo_ard_devtype[] = {
+    {
+        /* keep it for coldfire */
+        .name = DRIVER_NAME,
+        .driver_data = 0,
+    }, {
+        /* sentinel */
+    }
+};
+MODULE_DEVICE_TABLE(platform, udoo_ard_devtype);
+
+static const struct of_device_id udoo_ard_dt_ids[] = {
+    { .compatible = "udoo,imx6q-udoo-ard", .data = &udoo_ard_devtype[0], },
+    { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, udoo_ard_dt_ids);
+
+static void disable_serial(void)
+{
+    u32 addrTX;
+    void __iomem *_addrTX;
+
+    printk("[bossac] Disable UART4 serial port.\n");
+
+    addrTX = 0x20E01F8;
+    _addrTX = ioremap(addrTX, 8);
+
+    origTX = __raw_readl(_addrTX);
+    origRX = __raw_readl(_addrTX + 0x4);
+
+    __raw_writel(0x15, _addrTX);
+    __raw_writel(0x15, _addrTX + 0x4);
+
+    iounmap(_addrTX);
+}
+
+static void enable_serial(void)
+{
+    u32 addrTX;
+    void __iomem *_addrTX;
+
+    printk("[bossac] Enable UART4 serial port.\n");
+
+    addrTX = 0x20E01F8;
+    _addrTX = ioremap(addrTX, 8);
+
+    __raw_writel(origTX, _addrTX);
+    __raw_writel(origRX, _addrTX + 0x4);
+
+    iounmap(_addrTX);
+}
+
+static void erase_reset(void)
+{
+    printk("[bossac] UDOO ERASE and RESET on Sam3x started.\n");
+
+    gpio_direction_input(work->gpio_ard_erase);
+    gpio_set_value(work->gpio_ard_reset, 1);
+    msleep(1);
+
+    gpio_direction_output(work->gpio_ard_erase, 1);
+    msleep(300);
+    gpio_direction_input(work->gpio_ard_erase);
+
+    msleep(10);
+    gpio_set_value(work->gpio_ard_reset, 0);
+
+    msleep(80);
+    gpio_set_value(work->gpio_ard_reset, 1);
+
+    printk("[bossac] UDOO ERASE and RESET on Sam3x EXECUTED.\n");
+}
+
+static void shutdown_sam3x(void)
+{
+    printk("[bossac] RESET on Sam3x.\n");
+
+    gpio_set_value(work->gpio_ard_reset, 0);
+}
+
+static void erase_reset_wq_function( struct work_struct *work2)
+{
+    disable_serial();
+    erase_reset();
+    msleep(GRAY_TIME_BETWEEN_RESET);
+
+    work->erase_reset_lock = 0;
+}
+
+/*
+ * Called every time the gpio_bossac_clk signal toggles.
+ * If the auth token (16 bit) is found, we look for the command code (4 bit).
+ * The code 0x0F is sent by Bossac to trigger an erase/reset (to achieve this,
+ * erase_reset_wq is scheduled). Before starting to program the flash, we disable
+ * the UART4 serial port, otherwise there is too much noise on the serial lines
+ * (the programming port and the UART4 port are connected together, see the hw schematics).
+ * When Bossac finishes flashing/verifying, the code 0x00 is sent, which
+ * re-enables the UART4 port.
+ */
+static irqreturn_t udoo_bossac_req(int irq, void *dev_id)
+{
+    int retval, auth_bit, expected_bit, msec_since_last_irq;
+    u64 nowsec;
+    unsigned long rem_nsec;
+    erase_reset_work_t *erase_reset_work;
+
+    auth_bit = 0;
+    if (gpio_get_value(work->gpio_bossac_dat) != 0x0) {
+        auth_bit = 1;
+    }
+
+    erase_reset_work = (erase_reset_work_t *)work;
+
+    nowsec = local_clock();
+    rem_nsec = do_div(nowsec, 1000000000) ;
+    msec_since_last_irq = (((unsigned long)nowsec * 1000) + rem_nsec/1000000 ) - (((unsigned long)erase_reset_work->last_int_time_in_sec * 1000) + erase_reset_work->last_int_time_in_ns/1000000);
+
+    if (msec_since_last_irq > MAX_MSEC_SINCE_LAST_IRQ) {
+        erase_reset_work->step = 0;
+#ifdef DEBUG
+        printk("[bossac] Reset authentication timeout!\n");
+#endif
+    }
+
+#ifdef DEBUG
+    printk("[bossac] STEP %d -> 0x%d \n", erase_reset_work->step, auth_bit);
+#endif
+    erase_reset_work->last_int_time_in_ns = rem_nsec;
+    erase_reset_work->last_int_time_in_sec = nowsec;
+
+    if ( erase_reset_work->step < 16 ) {  // Authenticating received token bit.
+        expected_bit = (( AUTH_TOKEN >> erase_reset_work->step ) & 0x01 );
+        if ( auth_bit == expected_bit ) {
+            erase_reset_work->step = erase_reset_work->step + 1;
+        } else {
+            erase_reset_work->step = 0;
+        }
+    } else { // Passed all authentication step. Receiving command code.
+        erase_reset_work->cmdcode = erase_reset_work->cmdcode | (auth_bit << (erase_reset_work->step - 16));
+        erase_reset_work->step = erase_reset_work->step + 1;
+    }
+
+#ifdef DEBUG
+    printk("erase_reset_work->erase_reset_lock = %d \n", erase_reset_work->erase_reset_lock);
+#endif
+    if ( erase_reset_work->step == 20 ) {  // Passed authentication and code acquiring step.
+#ifdef DEBUG
+        printk("[bossac] Received code = 0x%04x \n", erase_reset_work->cmdcode);
+#endif
+        if (erase_reset_work->cmdcode == 0xF) {
+            if (erase_reset_work->erase_reset_lock == 0) {
+		erase_reset_work->erase_reset_lock = 1;
+		retval = queue_work( erase_reset_wq, (struct work_struct *)work );
+            } else {
+#ifdef DEBUG
+                printk("Erase and reset operation already in progress. Do nothing.\n");
+#endif
+            }
+        } else {
+            enable_serial();
+        }
+        erase_reset_work->step = 0;
+        erase_reset_work->cmdcode = 0;
+    }
+
+    return IRQ_HANDLED;
+}
+
+/*
+ * Takes control of clock, data, erase, reset GPIOs.
+ */
+static int gpio_setup(void)
+{
+    int ret;
+
+    ret = gpio_request(work->gpio_bossac_clk, "BOSSA_CLK");
+    if (ret) {
+        printk(KERN_ERR "request BOSSA_CLK IRQ\n");
+        return -1;
+    } else {
+        gpio_direction_input(work->gpio_bossac_clk);
+    }
+
+    ret = gpio_request(work->gpio_bossac_dat, "BOSSA_DAT");
+    if (ret) {
+        printk(KERN_ERR "request BOSSA_DAT IRQ\n");
+        return -1;
+    } else {
+        gpio_direction_input(work->gpio_bossac_dat);
+    }
+
+    ret = gpio_request(work->gpio_ard_erase, "BOSSAC");
+    if (ret) {
+        printk(KERN_ERR "request GPIO FOR ARDUINO ERASE\n");
+        return -1;
+    } else {
+        gpio_direction_input(work->gpio_ard_erase);
+    }
+
+    ret = gpio_request(work->gpio_ard_reset, "BOSSAC");
+    if (ret) {
+        printk(KERN_ERR "request GPIO FOR ARDUINO RESET\n");
+        return -1;
+    } else {
+        gpio_direction_output(work->gpio_ard_reset, 1);
+    }
+
+    return 0;
+}
+
+static ssize_t device_write(struct file *filp, const char *buff, size_t len, loff_t *off)
+{
+    char msg[10];
+    long res;
+
+    if (len >= sizeof(msg))
+		return -EINVAL;
+
+
+	res = copy_from_user(msg, buff, len);
+    if (res) {
+        return -EFAULT;
+    }
+	msg[len] = '\0';
+
+    if (strcmp(msg, "erase")==0) {
+        erase_reset();
+    } else if (strcmp(msg, "shutdown")==0) {
+        shutdown_sam3x();
+    } else if (strcmp(msg, "uartoff")==0) {
+        disable_serial();
+    } else if (strcmp(msg, "uarton")==0) {
+        enable_serial();
+    } else {
+        printk("[bossac] udoo_ard invalid operation! %s\n", msg);
+    }
+
+	return len;
+}
+
+static struct file_operations fops = {
+    .write = device_write,
+};
+
+/*
+ * If a fdt udoo_ard entry is found, we register an IRQ on bossac clock line
+ * and we create /dev/udoo_ard
+ */
+static int udoo_ard_probe(struct platform_device *pdev)
+{
+    int retval;
+    struct device *temp_class;
+    struct platform_device *bdev;
+    struct device_node *np;
+
+    bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+    np = pdev->dev.of_node;
+
+    if (!np)
+            return -ENODEV;
+
+    work = (erase_reset_work_t *)kmalloc(sizeof(erase_reset_work_t), GFP_KERNEL);
+    if (work) {
+	    work->gpio_ard_reset = of_get_named_gpio(np, "bossac-reset-gpio", 0);
+	    work->gpio_ard_erase = of_get_named_gpio(np, "bossac-erase-gpio", 0);
+	    work->gpio_bossac_clk = of_get_named_gpio(np, "bossac-clk-gpio", 0);
+	    work->gpio_bossac_dat = of_get_named_gpio(np, "bossac-dat-gpio", 0);
+	    work->pinctrl = devm_pinctrl_get(&pdev->dev);
+        work->pins_default = pinctrl_lookup_state(work->pinctrl, PINCTRL_DEFAULT);
+    } else {
+	    printk("[bossac] Failed to allocate data structure.");
+	    return -ENOMEM;
+    }
+
+    pinctrl_select_state(work->pinctrl, work->pins_default);
+    gpio_setup();
+
+    printk("[bossac] Registering IRQ %d for BOSSAC Arduino erase/reset operation\n", gpio_to_irq(work->gpio_bossac_clk));
+    retval = request_irq(gpio_to_irq(work->gpio_bossac_clk), udoo_bossac_req, IRQF_TRIGGER_FALLING, "UDOO", bdev);
+
+    major = register_chrdev(major, "udoo_ard", &fops);
+    if (major < 0) {
+		printk(KERN_ERR "[bossac] Cannot get major for UDOO Ard\n");
+		return -EBUSY;
+	}
+
+    udoo_class = class_create(THIS_MODULE, "udoo_ard");
+	if (IS_ERR(udoo_class)) {
+		return PTR_ERR(udoo_class);
+	}
+
+	temp_class = device_create(udoo_class, NULL, MKDEV(major, 0), NULL, "udoo_ard");
+	if (IS_ERR(temp_class)) {
+		return PTR_ERR(temp_class);
+	}
+
+    printk("[bossac] Created device file /dev/udoo_ard\n");
+
+    erase_reset_wq = create_workqueue("erase_reset_queue");
+    if (erase_reset_wq) {
+
+        /* Queue some work (item 1) */
+        if (work) {
+            INIT_WORK( (struct work_struct *)work, erase_reset_wq_function );
+            work->step = 1;
+            work->cmdcode = 0;
+            work->last_int_time_in_ns = 0;
+            work->last_int_time_in_sec = 0;
+            work->erase_reset_lock = 0;
+            //  retval = queue_work( erase_reset_wq, (struct work_struct *)work );
+        }
+    }
+    return  0;
+}
+
+static int udoo_ard_remove(struct platform_device *pdev)
+{
+    printk("[bossac] Unloading UDOO ard driver.\n");
+    free_irq(gpio_to_irq(work->gpio_bossac_clk), NULL);
+
+    gpio_free(work->gpio_ard_reset);
+    gpio_free(work->gpio_ard_erase);
+    gpio_free(work->gpio_bossac_clk);
+    gpio_free(work->gpio_bossac_dat);
+
+    device_destroy(udoo_class, MKDEV(major, 0));
+    class_destroy(udoo_class);
+    unregister_chrdev(major, "udoo_ard");
+
+    return 0;
+}
+
+static struct platform_driver udoo_ard_driver = {
+    .driver = {
+        .name   = DRIVER_NAME,
+        .owner  = THIS_MODULE,
+        .of_match_table = udoo_ard_dt_ids,
+    },
+    .id_table = udoo_ard_devtype,
+    .probe  = udoo_ard_probe,
+    .remove = udoo_ard_remove,
+};
+
+module_platform_driver(udoo_ard_driver);
+
+MODULE_ALIAS("platform:"DRIVER_NAME);
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 203b6171260..c26211c7637 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -90,7 +90,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
 	tmout_index = fls(tmout - 1) - 12;
 	if (tmout_index < 0)
 		tmout_index = 0;
-	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
+//	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)	// by steven: force the timeout to its maximum value
 		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;
 
 	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
@@ -613,6 +613,8 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
 		if (m > MVSD_BASE_DIV_MAX)
 			m = MVSD_BASE_DIV_MAX;
+		if (ios->clock == 50000000)	// by steven
+			m=1;
 		mvsd_write(MVSD_CLK_DIV, m);
 		host->clock = ios->clock;
 		host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index bbb2575d472..63775714422 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -939,7 +939,7 @@ static void cfhsi_wake_down(struct work_struct *work)
 			break;
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 		retry--;
 	}
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index d2539c95adb..0c2f31a03ce 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -242,7 +242,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
 	} else {
 		/* the PCAN-USB needs time to init */
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
+		schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
 	}
 
 	return err;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 702fdc393da..e201a20e2e1 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -92,6 +92,10 @@ struct davinci_mdio_data {
 	u32		clk_div;
 };
 
+#if IS_ENABLED(CONFIG_OF)
+static void davinci_mdio_update_dt_from_phymask(u32 phy_mask);
+#endif
+
 static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
 {
 	u32 mdio_in, div, mdio_out_khz, access_time;
@@ -159,6 +163,12 @@ static int davinci_mdio_reset(struct mii_bus *bus)
 		/* restrict mdio bus to live phys only */
 		dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
 		phy_mask = ~phy_mask;
+
+		#if IS_ENABLED(CONFIG_OF)
+		if (of_machine_is_compatible("ti,am335x-bone"))
+			davinci_mdio_update_dt_from_phymask(phy_mask);
+		#endif
+
 	} else {
 		/* desperately scan all phys */
 		dev_warn(data->dev, "no live phy, scanning all\n");
@@ -474,6 +484,93 @@ static int davinci_mdio_runtime_resume(struct device *dev)
 	davinci_mdio_enable(data);
 	return 0;
 }
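+
+/*
+ * Rewrite the "phy_id" property of each cpsw slave node so it matches the
+ * PHY address(es) actually detected on the MDIO bus (phy_mask); only used
+ * on "ti,am335x-bone" compatible boards.
+ */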
+static void davinci_mdio_update_dt_from_phymask(u32 phy_mask)
+{
+	int i, len, skip;
+	u32 addr;
+	__be32 *old_phy_p, *phy_id_p;
+	struct property *phy_id_property = NULL;
+	struct device_node *node_p, *slave_p;
+
+	addr = 0;
+
+	for (i = 0; i < PHY_MAX_ADDR; i++) {
+		if ((phy_mask & (1 << i)) == 0) {
+			addr = (u32)i;
+			break;
+		}
+	}
+
+	for_each_compatible_node(node_p, NULL, "ti,cpsw") {
+		for_each_node_by_name(slave_p, "slave") {
+
+#if IS_ENABLED(CONFIG_OF_OVERLAY)
+			skip = 1;
+			/* Hack: the overlay fixup "slave" doesn't have phy-mode... */
+			old_phy_p = (__be32 *) of_get_property(slave_p, "phy-mode", &len);
+
+			if (len != (sizeof(__be32 *) * 1))
+			{
+				skip = 0;
+			}
+
+			if (skip) {
+#endif
+
+			old_phy_p = (__be32 *) of_get_property(slave_p, "phy_id", &len);
+
+			if (len != (sizeof(__be32 *) * 2))
+				goto err_out;
+
+			if (old_phy_p) {
+
+				phy_id_property = kzalloc(sizeof(*phy_id_property), GFP_KERNEL);
+
+				if (!phy_id_property)
+					goto err_out;
+
+				phy_id_property->length = len;
+				phy_id_property->name = kstrdup("phy_id", GFP_KERNEL);
+				phy_id_property->value = kzalloc(len, GFP_KERNEL);
+
+				if (!phy_id_property->name)
+					goto err_out;
+
+				if (!phy_id_property->value)
+					goto err_out;
+
+				memcpy(phy_id_property->value, old_phy_p, len);
+
+				phy_id_p = (__be32 *) phy_id_property->value + 1;
+
+				*phy_id_p = cpu_to_be32(addr);
+
+				of_update_property(slave_p, phy_id_property);
+				pr_info("davinci_mdio: dt: updated phy_id[%d] from phy_mask[%x]\n", addr, phy_mask);
+
+				++addr;
+			}
+#if IS_ENABLED(CONFIG_OF_OVERLAY)
+		}
+#endif
+		}
+	}
+
+	return;
+
+err_out:
+
+	if (phy_id_property) {
+		kfree(phy_id_property->name);
+		kfree(phy_id_property->value);
+		kfree(phy_id_property);
+	}
+}
 #endif
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index eccbf4cd714..03d285f022b 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2670,7 +2670,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
 	while (!skb_queue_empty(&dev->rxq) &&
 	       !skb_queue_empty(&dev->txq) &&
 	       !skb_queue_empty(&dev->done)) {
-		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+		schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		netif_dbg(dev, ifdown, dev->net,
 			  "waited for %d urb completions\n", temp);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bb4ccbda031..84e40f5aea9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -48,6 +48,7 @@
 #define SUSPEND_SUSPEND3		(0x08)
 #define SUSPEND_ALLMODES		(SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
 					 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define MAC_ADDR_LEN                    (6)
 
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
@@ -70,6 +71,10 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
+static char *macaddr = ":";
+module_param(macaddr, charp, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+
 static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
 					    u32 *data, int in_pm)
 {
@@ -899,8 +904,59 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
 }
 
+/* Check the macaddr module parameter for a MAC address */
+static int smsc95xx_is_macaddr_param(struct usbnet *dev, u8 *dev_mac)
+{
+	int i, j, got_num, num;
+	u8 mtbl[MAC_ADDR_LEN];
+
+	if (macaddr[0] == ':')
+		return 0;
+
+	i = 0;
+	j = 0;
+	num = 0;
+	got_num = 0;
+	while (j < MAC_ADDR_LEN) {
+		if (macaddr[i] && macaddr[i] != ':') {
+			got_num++;
+			if ('0' <= macaddr[i] && macaddr[i] <= '9')
+				num = num * 16 + macaddr[i] - '0';
+			else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
+				num = num * 16 + 10 + macaddr[i] - 'A';
+			else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
+				num = num * 16 + 10 + macaddr[i] - 'a';
+			else
+				break;
+			i++;
+		} else if (got_num == 2) {
+			mtbl[j++] = (u8)num;
+			num = 0;
+			got_num = 0;
+			i++;
+		} else {
+			break;
+		}
+	}
+
+	if (j == MAC_ADDR_LEN) {
+		netif_dbg(dev, ifup, dev->net,
+			  "Overriding MAC address with: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			  mtbl[0], mtbl[1], mtbl[2], mtbl[3], mtbl[4], mtbl[5]);
+		for (i = 0; i < MAC_ADDR_LEN; i++)
+			dev_mac[i] = mtbl[i];
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
 	const u8 *mac_addr;
+
+	/* check the macaddr module parameter first */
+	if (smsc95xx_is_macaddr_param(dev, dev->net->dev_addr))
+		return;
 
 	/* maybe the boot loader passed the MAC address in devicetree */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 5ec97def351..9e2bf55bbcc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -767,7 +767,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
 	spin_lock_irqsave(&q->lock, flags);
 	while (!skb_queue_empty(q)) {
 		spin_unlock_irqrestore(&q->lock, flags);
-		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+		schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		spin_lock_irqsave(&q->lock, flags);
 	}
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index a5bf16c4f49..b239e067bba 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -37,3 +37,14 @@ config BRCMDBG
 	select WANT_DEV_COREDUMP if BRCMFMAC
 	---help---
 	  Selecting this enables additional code for debug purposes.
+
+config BRCMFMAC_PCIE_BARWIN_SZ
+	bool "Custom PCIE BAR window size support for FullMAC driver"
+	depends on BRCMFMAC
+	depends on PCI
+	default n
+	---help---
+	  If you say Y here, the FMAC driver will use a custom PCIE BAR
+	  window size. Say Y to allow developers to use a custom PCIE
+	  BAR window size when the host PCIE IP supports less than a
+	  4MB BAR window.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 2c95a08a587..705130cf27d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -87,6 +87,8 @@ struct brcmf_proto_bcdc_header {
 					 * plus any space that might be needed
 					 * for bus alignment padding.
 					 */
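+/* extra headroom added to the maximum control-message buffer size */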
+#define ROUND_UP_MARGIN 2048
+
 struct brcmf_bcdc {
 	u16 reqid;
 	u8 bus_header[BUS_HEADER_LEN];
@@ -471,7 +473,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
 
 	drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
 	drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
-			sizeof(struct brcmf_proto_bcdc_dcmd);
+			sizeof(struct brcmf_proto_bcdc_dcmd) + ROUND_UP_MARGIN;
 	return 0;
 
 fail:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index b684a5b6d90..87f115547cb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -36,6 +36,7 @@
 #include "sdio.h"
 #include "core.h"
 #include "common.h"
+#include "cfg80211.h"
 
 #define SDIOH_API_ACCESS_RETRY_LIMIT	2
 
@@ -43,7 +44,8 @@
 
 #define SDIO_FUNC1_BLOCKSIZE		64
 #define SDIO_FUNC2_BLOCKSIZE		512
-#define SDIO_4359_FUNC2_BLOCKSIZE	256
+#define SDIO_4373_FUNC2_BLOCKSIZE	256
+#define SDIO_435X_FUNC2_BLOCKSIZE	256
 /* Maximum milliseconds to wait for F2 to come up */
 #define SDIO_WAIT_F2RDY	3000
 
@@ -120,7 +122,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 			brcmf_err("enable_irq_wake failed %d\n", ret);
 			return ret;
 		}
-		disable_irq_wake(pdata->oob_irq_nr);
+		sdiodev->irq_wake = true;
 
 		sdio_claim_host(sdiodev->func1);
 
@@ -179,6 +181,10 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 		sdio_release_host(sdiodev->func1);
 
 		sdiodev->oob_irq_requested = false;
+		if (sdiodev->irq_wake) {
+			disable_irq_wake(pdata->oob_irq_nr);
+			sdiodev->irq_wake = false;
+		}
 		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
 		sdiodev->irq_en = false;
 		sdiodev->oob_irq_requested = false;
@@ -862,7 +868,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
 	sdiodev->state = BRCMF_SDIOD_DOWN;
 	if (sdiodev->bus) {
@@ -897,7 +903,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
 	host->caps |= MMC_CAP_NONREMOVABLE;
 }
 
-static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
 	int ret = 0;
 	unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
@@ -910,13 +916,28 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 		sdio_release_host(sdiodev->func1);
 		goto out;
 	}
-	if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
-		f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
+
+	switch (sdiodev->func2->device) {
+	case SDIO_DEVICE_ID_CYPRESS_4373:
+		f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
+		break;
+	case SDIO_DEVICE_ID_BROADCOM_4359:
+	case SDIO_DEVICE_ID_CYPRESS_89359:
+	case SDIO_DEVICE_ID_BROADCOM_4354:
+	case SDIO_DEVICE_ID_BROADCOM_4356:
+		f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
+		break;
+	default:
+		break;
+	}
+
 	ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
 	if (ret) {
 		brcmf_err("Failed to set F2 blocksize\n");
 		sdio_release_host(sdiodev->func1);
 		goto out;
+	} else {
+		brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
 	}
 
 	/* increase F2 timeout */
@@ -1041,6 +1062,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 	dev_set_drvdata(&func->dev, bus_if);
 	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
 	sdiodev->dev = &sdiodev->func1->dev;
+	dev_set_drvdata(&sdiodev->func2->dev, bus_if);
 
 	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
 
@@ -1057,6 +1079,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 fail:
 	dev_set_drvdata(&func->dev, NULL);
 	dev_set_drvdata(&sdiodev->func1->dev, NULL);
+	dev_set_drvdata(&sdiodev->func2->dev, NULL);
 	kfree(sdiodev);
 	kfree(bus_if);
 	return err;
@@ -1110,45 +1133,42 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
 	struct sdio_func *func;
 	struct brcmf_bus *bus_if;
 	struct brcmf_sdio_dev *sdiodev;
-	mmc_pm_flag_t pm_caps, sdio_flags;
-	int ret = 0;
+	mmc_pm_flag_t sdio_flags;
+	struct brcmf_cfg80211_info *config;
+	int retry = BRCMF_PM_WAIT_MAXRETRY;
 
 	func = container_of(dev, struct sdio_func, dev);
+	bus_if = dev_get_drvdata(dev);
+	config = bus_if->drvr->config;
+
 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+
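+	/* wait for cfg80211 suspend handling to finish before suspending the bus */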
+	while (retry &&
+	       config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING) {
+		usleep_range(10000, 20000);
+		retry--;
+	}
+	if (!retry && config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING)
+		brcmf_err("timed out wait for cfg80211 suspended\n");
+
 	if (func->num != 1)
 		return 0;
 
-
-	bus_if = dev_get_drvdata(dev);
 	sdiodev = bus_if->bus_priv.sdio;
 
-	pm_caps = sdio_get_host_pm_caps(func);
+	brcmf_sdiod_freezer_on(sdiodev);
+	brcmf_sdio_wd_timer(sdiodev->bus, 0);
 
-	if (pm_caps & MMC_PM_KEEP_POWER) {
-		/* preserve card power during suspend */
-		brcmf_sdiod_freezer_on(sdiodev);
-		brcmf_sdio_wd_timer(sdiodev->bus, 0);
-
-		sdio_flags = MMC_PM_KEEP_POWER;
-		if (sdiodev->wowl_enabled) {
-			if (sdiodev->settings->bus.sdio.oob_irq_supported)
-				enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
-			else
-				sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
-		}
-
-		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
-			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
-
-	} else {
-		/* power will be cut so remove device, probe again in resume */
-		brcmf_sdiod_intr_unregister(sdiodev);
-		ret = brcmf_sdiod_remove(sdiodev);
-		if (ret)
-			brcmf_err("Failed to remove device on suspend\n");
+	sdio_flags = MMC_PM_KEEP_POWER;
+	if (sdiodev->wowl_enabled) {
+		if (sdiodev->settings->bus.sdio.oob_irq_supported)
+			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+		else
+			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
 	}
-
-	return ret;
+	if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
+		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+	return 0;
 }
 
 static int brcmf_ops_sdio_resume(struct device *dev)
@@ -1156,27 +1176,13 @@ static int brcmf_ops_sdio_resume(struct device *dev)
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
-	mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
-	int ret = 0;
 
 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
 	if (func->num != 2)
 		return 0;
 
-	if (!(pm_caps & MMC_PM_KEEP_POWER)) {
-		/* bus was powered off and device removed, probe again */
-		ret = brcmf_sdiod_probe(sdiodev);
-		if (ret)
-			brcmf_err("Failed to probe device on resume\n");
-	} else {
-		if (sdiodev->wowl_enabled &&
-		    sdiodev->settings->bus.sdio.oob_irq_supported)
-			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
-
-		brcmf_sdiod_freezer_off(sdiodev);
-	}
-
-	return ret;
+	brcmf_sdiod_freezer_off(sdiodev);
+	return 0;
 }
 
 static const struct dev_pm_ops brcmf_sdio_pm_ops = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 623c0168da7..e62c34849b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -255,7 +255,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
 
 int brcmf_alloc(struct device *dev, struct brcmf_mp_device *settings);
 /* Indication from bus module regarding presence/insertion of dongle. */
-int brcmf_attach(struct device *dev);
+int brcmf_attach(struct device *dev, bool start_bus);
 /* Indication from bus module regarding removal/absence of dongle */
 void brcmf_detach(struct device *dev);
 void brcmf_free(struct device *dev);
@@ -271,6 +271,7 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
 
 s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
 void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
+int brcmf_fwlog_attach(struct device *dev);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
 void brcmf_sdio_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index bacd762cdf3..256604b1527 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -11,7 +11,6 @@
 #include <linux/vmalloc.h>
 #include <net/cfg80211.h>
 #include <net/netlink.h>
-#include <uapi/linux/if_arp.h>
 
 #include <brcmu_utils.h>
 #include <defs.h>
@@ -23,6 +22,7 @@
 #include "p2p.h"
 #include "btcoex.h"
 #include "pno.h"
+#include "fwsignal.h"
 #include "cfg80211.h"
 #include "feature.h"
 #include "fwil.h"
@@ -55,6 +55,7 @@
 #define RSN_AKM_PSK			2	/* Pre-shared Key */
 #define RSN_AKM_SHA256_1X		5	/* SHA256, 802.1X */
 #define RSN_AKM_SHA256_PSK		6	/* SHA256, Pre-shared Key */
+#define RSN_AKM_SAE			8	/* SAE */
 #define RSN_CAP_LEN			2	/* Length of RSN capabilities */
 #define RSN_CAP_PTK_REPLAY_CNTR_MASK	(BIT(2) | BIT(3))
 #define RSN_CAP_MFPR_MASK		BIT(6)
@@ -229,6 +230,9 @@ struct parsed_vndr_ies {
 	struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
+#define WLC_E_IF_ROLE_STA		0	/* Infra STA */
+#define WLC_E_IF_ROLE_AP		1	/* Access Point */
+
 static u8 nl80211_band_to_fwil(enum nl80211_band band)
 {
 	switch (band) {
@@ -620,82 +624,6 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
 	return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
 }
 
-/**
- * brcmf_mon_add_vif() - create monitor mode virtual interface
- *
- * @wiphy: wiphy device of new interface.
- * @name: name of the new interface.
- */
-static struct wireless_dev *brcmf_mon_add_vif(struct wiphy *wiphy,
-					      const char *name)
-{
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_cfg80211_vif *vif;
-	struct net_device *ndev;
-	struct brcmf_if *ifp;
-	int err;
-
-	if (cfg->pub->mon_if) {
-		err = -EEXIST;
-		goto err_out;
-	}
-
-	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_MONITOR);
-	if (IS_ERR(vif)) {
-		err = PTR_ERR(vif);
-		goto err_out;
-	}
-
-	ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, ether_setup);
-	if (!ndev) {
-		err = -ENOMEM;
-		goto err_free_vif;
-	}
-	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
-	ndev->ieee80211_ptr = &vif->wdev;
-	ndev->needs_free_netdev = true;
-	ndev->priv_destructor = brcmf_cfg80211_free_netdev;
-	SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
-
-	ifp = netdev_priv(ndev);
-	ifp->vif = vif;
-	ifp->ndev = ndev;
-	ifp->drvr = cfg->pub;
-
-	vif->ifp = ifp;
-	vif->wdev.netdev = ndev;
-
-	err = brcmf_net_mon_attach(ifp);
-	if (err) {
-		brcmf_err("Failed to attach %s device\n", ndev->name);
-		free_netdev(ndev);
-		goto err_free_vif;
-	}
-
-	cfg->pub->mon_if = ifp;
-
-	return &vif->wdev;
-
-err_free_vif:
-	brcmf_free_vif(vif);
-err_out:
-	return ERR_PTR(err);
-}
-
-static int brcmf_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
-{
-	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct net_device *ndev = wdev->netdev;
-
-	ndev->netdev_ops->ndo_stop(ndev);
-
-	brcmf_net_detach(ndev, true);
-
-	cfg->pub->mon_if = NULL;
-
-	return 0;
-}
-
 static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
 						     const char *name,
 						     unsigned char name_assign_type,
@@ -718,10 +646,9 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_MESH_POINT:
 		return ERR_PTR(-EOPNOTSUPP);
-	case NL80211_IFTYPE_MONITOR:
-		return brcmf_mon_add_vif(wiphy, name);
 	case NL80211_IFTYPE_AP:
 		wdev = brcmf_ap_add_vif(wiphy, name, params);
 		break;
@@ -904,10 +831,9 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_MESH_POINT:
 		return -EOPNOTSUPP;
-	case NL80211_IFTYPE_MONITOR:
-		return brcmf_mon_del_vif(wiphy, wdev);
 	case NL80211_IFTYPE_AP:
 		return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
 	case NL80211_IFTYPE_P2P_CLIENT:
@@ -1207,11 +1133,6 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 	if (err)
 		goto scan_out;
 
-	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
-				    request->ie, request->ie_len);
-	if (err)
-		goto scan_out;
-
 	err = brcmf_do_escan(vif->ifp, request);
 	if (err)
 		goto scan_out;
@@ -1386,7 +1307,8 @@ static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
 	return err;
 }
 
-static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
+static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
+			    bool locally_generated)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
 	struct brcmf_pub *drvr = cfg->pub;
@@ -1408,7 +1330,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
 		if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) ||
 		    (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
 			cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
-					      true, GFP_KERNEL);
+					      locally_generated, GFP_KERNEL);
 	}
 	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
 	clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
@@ -1587,7 +1509,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 		return 0;
 	}
 
-	brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING);
+	brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING, true);
 	brcmf_net_setcarrier(ifp, false);
 
 	brcmf_dbg(TRACE, "Exit\n");
@@ -1748,6 +1670,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 	struct brcmf_pub *drvr = ifp->drvr;
 	s32 val;
 	s32 err;
+	s32 okc_enable;
 	const struct brcmf_tlv *rsn_ie;
 	const u8 *ie;
 	u32 ie_len;
@@ -1758,6 +1681,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 
 	profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
 	profile->is_ft = false;
+	profile->is_okc = false;
 
 	if (!sme->crypto.n_akm_suites)
 		return 0;
@@ -1831,8 +1755,17 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 		}
 	}
 
-	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X)
+	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X) {
 		brcmf_dbg(INFO, "using 1X offload\n");
+		err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "okc_enable",
+					       &okc_enable);
+		if (err) {
+			bphy_err(drvr, "get okc_enable failed (%d)\n", err);
+		} else {
+			brcmf_dbg(INFO, "get okc_enable (%d)\n", okc_enable);
+			profile->is_okc = okc_enable;
+		}
+	}
 
 	if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
 		goto skip_mfp_config;
@@ -2467,6 +2400,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 	if (!ext_key)
 		key->flags = BRCMF_PRIMARY_KEY;
 
+	if (params->seq && params->seq_len == 6) {
+		/* rx iv */
+		u8 *ivptr;
+
+		ivptr = (u8 *)params->seq;
+		key->rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+			(ivptr[3] << 8) | ivptr[2];
+		key->rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+		key->iv_initialized = true;
+	}
+
 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_WEP40:
 		key->algo = CRYPTO_ALGO_WEP1;
@@ -2914,7 +2858,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
 		goto done;
 	}
 
-	pm = enabled ? PM_FAST : PM_OFF;
+	pm = enabled ? ifp->drvr->settings->default_pm : PM_OFF;
 	/* Do not enable the power save after assoc if it is a p2p interface */
 	if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
 		brcmf_dbg(INFO, "Do not enable power save for P2P clients\n");
@@ -3783,10 +3727,24 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct net_device *ndev = cfg_to_ndev(cfg);
 	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_bus *bus_if = drvr->bus_if;
+	struct brcmf_cfg80211_info *config = drvr->config;
+	int retry = BRCMF_PM_WAIT_MAXRETRY;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
+	config->pm_state = BRCMF_CFG80211_PM_STATE_RESUMING;
+
 	if (cfg->wowl.active) {
+		/* wait for the bus to come back up */
+		while (retry && bus_if->state != BRCMF_BUS_UP) {
+			usleep_range(10000, 20000);
+			retry--;
+		}
+		if (!retry && bus_if->state != BRCMF_BUS_UP)
+			brcmf_err("timed out wait for bus resume\n");
+
 		brcmf_report_wowl_wakeind(wiphy, ifp);
 		brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0);
 		brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0);
@@ -3802,7 +3760,12 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
 					    brcmf_notify_sched_scan_results);
 			cfg->wowl.nd_enabled = false;
 		}
+
+		/* disable packet filters */
+		brcmf_pktfilter_enable(ifp->ndev, false);
+
 	}
+	config->pm_state = BRCMF_CFG80211_PM_STATE_RESUMED;
 	return 0;
 }
 
@@ -3860,6 +3823,9 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
 	brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
 	brcmf_bus_wowl_config(cfg->pub->bus_if, true);
 	cfg->wowl.active = true;
+
+	/* enable packet filters */
+	brcmf_pktfilter_enable(ifp->ndev, true);
 }
 
 static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
@@ -3869,9 +3835,12 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
 	struct net_device *ndev = cfg_to_ndev(cfg);
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_cfg80211_info *config = ifp->drvr->config;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
+	config->pm_state = BRCMF_CFG80211_PM_STATE_SUSPENDING;
+
 	/* if the primary net_device is not READY there is nothing
 	 * we can do but pray resume goes smoothly.
 	 */
@@ -3886,7 +3855,8 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
 	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
 		brcmf_abort_scanning(cfg);
 
-	if (wowl == NULL) {
+	if (!wowl || !test_bit(BRCMF_VIF_STATUS_CONNECTED,
+			       &ifp->vif->sme_state)) {
 		brcmf_bus_wowl_config(cfg->pub->bus_if, false);
 		list_for_each_entry(vif, &cfg->vif_list, list) {
 			if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
@@ -3895,7 +3865,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
 			 * disassociate from AP to save power while system is
 			 * in suspended state
 			 */
-			brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED);
+			brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED, true);
 			/* Make sure WPA_Supplicant receives all the event
 			 * generated due to DISASSOC call to the fw to keep
 			 * the state fw and WPA_Supplicant state consistent
@@ -3906,14 +3876,19 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
 		brcmf_set_mpc(ifp, 1);
 
 	} else {
-		/* Configure WOWL paramaters */
-		brcmf_configure_wowl(cfg, ifp, wowl);
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
+			/* Configure WOWL parameters */
+			brcmf_configure_wowl(cfg, ifp, wowl);
 	}
 
 exit:
-	brcmf_dbg(TRACE, "Exit\n");
+	/* set cfg80211 pm state to cfg80211 suspended state */
+	config->pm_state = BRCMF_CFG80211_PM_STATE_SUSPENDED;
+
 	/* clear any scanning activity */
 	cfg->scan_status = 0;
+
+	brcmf_dbg(TRACE, "Exit\n");
 	return 0;
 }
 
@@ -4224,6 +4199,10 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
 			brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n");
 			wpa_auth |= WPA2_AUTH_1X_SHA256;
 			break;
+		case RSN_AKM_SAE:
+			brcmf_dbg(TRACE, "RSN_AKM_SAE\n");
+			wpa_auth |= WPA3_AUTH_SAE_PSK;
+			break;
 		default:
 			bphy_err(drvr, "Invalid key mgmt info\n");
 		}
@@ -4241,11 +4220,12 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
 				brcmf_dbg(TRACE, "MFP Required\n");
 				mfp = BRCMF_MFP_REQUIRED;
 				/* Firmware only supports mfp required in
-				 * combination with WPA2_AUTH_PSK_SHA256 or
-				 * WPA2_AUTH_1X_SHA256.
+				 * combination with WPA2_AUTH_PSK_SHA256,
+				 * WPA2_AUTH_1X_SHA256, or WPA3_AUTH_SAE_PSK.
 				 */
 				if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 |
-						  WPA2_AUTH_1X_SHA256))) {
+						  WPA2_AUTH_1X_SHA256 |
+						  WPA3_AUTH_SAE_PSK))) {
 					err = -EINVAL;
 					goto exit;
 				}
@@ -4448,6 +4428,11 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
 		mgmt_ie_len = &saved_ie->assoc_req_ie_len;
 		mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
 		break;
+	case BRCMF_VNDR_IE_ASSOCRSP_FLAG:
+		mgmt_ie_buf = saved_ie->assoc_res_ie;
+		mgmt_ie_len = &saved_ie->assoc_res_ie_len;
+		mgmt_ie_buf_len = sizeof(saved_ie->assoc_res_ie);
+		break;
 	default:
 		err = -EPERM;
 		bphy_err(drvr, "not suitable type\n");
@@ -4594,6 +4579,57 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
 	else
 		brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
 
+	/* Set Assoc Response IEs to FW */
+	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG,
+				    beacon->assocresp_ies,
+				    beacon->assocresp_ies_len);
+	if (err)
+		brcmf_err("Set Assoc Resp IE Failed\n");
+	else
+		brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc Resp\n");
+
+	return err;
+}
+
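+/* Configure AP/GO security from the WPA/RSN IE(s) found in the beacon tail;
+ * fall back to open security when neither IE is present.
+ */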
+static s32
+brcmf_parse_configure_security(struct brcmf_if *ifp,
+			       struct cfg80211_ap_settings *settings,
+			       enum nl80211_iftype dev_role)
+{
+	const struct brcmf_tlv *rsn_ie;
+	const struct brcmf_vs_tlv *wpa_ie;
+	s32 err = 0;
+
+	/* find the RSN_IE */
+	rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+				  settings->beacon.tail_len, WLAN_EID_RSN);
+
+	/* find the WPA_IE */
+	wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+				  settings->beacon.tail_len);
+
+	if (wpa_ie || rsn_ie) {
+		brcmf_dbg(TRACE, "WPA(2) IE is found\n");
+		if (wpa_ie) {
+			/* WPA IE */
+			err = brcmf_configure_wpaie(ifp, wpa_ie, false);
+			if (err < 0)
+				return err;
+		} else {
+			struct brcmf_vs_tlv *tmp_ie;
+
+			tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
+
+			/* RSN IE */
+			err = brcmf_configure_wpaie(ifp, tmp_ie, true);
+			if (err < 0)
+				return err;
+		}
+	} else {
+		brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
+		brcmf_configure_opensecurity(ifp);
+	}
+
 	return err;
 }
 
@@ -4605,12 +4641,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = cfg->pub;
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct cfg80211_crypto_settings *crypto = &settings->crypto;
 	const struct brcmf_tlv *ssid_ie;
 	const struct brcmf_tlv *country_ie;
 	struct brcmf_ssid_le ssid_le;
 	s32 err = -EPERM;
-	const struct brcmf_tlv *rsn_ie;
-	const struct brcmf_vs_tlv *wpa_ie;
 	struct brcmf_join_params join_params;
 	enum nl80211_iftype dev_role;
 	struct brcmf_fil_bss_enable_le bss_enable;
@@ -4628,6 +4664,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 		  settings->inactivity_timeout);
 	dev_role = ifp->vif->wdev.iftype;
 	mbss = ifp->vif->mbss;
+	brcmf_dbg(TRACE, "mbss %s\n", mbss ? "enabled" : "disabled");
 
 	/* store current 11d setting */
 	if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
@@ -4664,36 +4701,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 		brcmf_configure_arp_nd_offload(ifp, false);
 	}
 
-	/* find the RSN_IE */
-	rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
-				  settings->beacon.tail_len, WLAN_EID_RSN);
-
-	/* find the WPA_IE */
-	wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
-				  settings->beacon.tail_len);
-
-	if ((wpa_ie != NULL || rsn_ie != NULL)) {
-		brcmf_dbg(TRACE, "WPA(2) IE is found\n");
-		if (wpa_ie != NULL) {
-			/* WPA IE */
-			err = brcmf_configure_wpaie(ifp, wpa_ie, false);
-			if (err < 0)
-				goto exit;
-		} else {
-			struct brcmf_vs_tlv *tmp_ie;
-
-			tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
-
-			/* RSN IE */
-			err = brcmf_configure_wpaie(ifp, tmp_ie, true);
-			if (err < 0)
-				goto exit;
-		}
-	} else {
-		brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
-		brcmf_configure_opensecurity(ifp);
-	}
-
 	/* Parameters shared by all radio interfaces */
 	if (!mbss) {
 		if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
@@ -4724,9 +4731,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 			}
 		}
 
-		if ((dev_role == NL80211_IFTYPE_AP) &&
-		    ((ifp->ifidx == 0) ||
-		     !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+		if (dev_role == NL80211_IFTYPE_AP && ifp->ifidx == 0) {
 			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
 			if (err < 0) {
 				bphy_err(drvr, "BRCMF_C_DOWN error %d\n",
@@ -4746,7 +4751,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 		err = -EINVAL;
 		goto exit;
 	}
-
+	ifp->isap = false;
 	/* Interface specific setup */
 	if (dev_role == NL80211_IFTYPE_AP) {
 		if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
@@ -4774,6 +4779,33 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 			bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err);
 			goto exit;
 		}
+
+		if (crypto->psk) {
+			brcmf_dbg(INFO, "using PSK offload\n");
+			profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK);
+			err = brcmf_set_pmk(ifp, crypto->psk,
+					    BRCMF_WSEC_MAX_PSK_LEN);
+			if (err < 0)
+				goto exit;
+		}
+		if (crypto->sae_pwd) {
+			brcmf_dbg(INFO, "using SAE offload\n");
+			profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
+			err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
+						     crypto->sae_pwd_len);
+			if (err < 0)
+				goto exit;
+		}
+		if (profile->use_fwauth == 0)
+			profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+
+		err = brcmf_parse_configure_security(ifp, settings,
+						     NL80211_IFTYPE_AP);
+		if (err < 0) {
+			bphy_err(drvr, "brcmf_parse_configure_security error\n");
+			goto exit;
+		}
+
 		/* On DOWN the firmware removes the WEP keys, reconfigure
 		 * them if they were set.
 		 */
@@ -4790,14 +4822,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 			goto exit;
 		}
 
-		if (settings->hidden_ssid) {
-			err = brcmf_fil_iovar_int_set(ifp, "closednet", 1);
-			if (err) {
-				bphy_err(drvr, "closednet error (%d)\n", err);
-				goto exit;
-			}
+		err = brcmf_fil_iovar_int_set(ifp, "closednet",
+					      settings->hidden_ssid);
+		if (err) {
+			bphy_err(drvr, "%s closednet error (%d)\n",
+				 settings->hidden_ssid ?
+				 "enabled" : "disabled",
+				 err);
+			goto exit;
 		}
-
+		ifp->isap = true;
 		brcmf_dbg(TRACE, "AP mode configuration complete\n");
 	} else if (dev_role == NL80211_IFTYPE_P2P_GO) {
 		err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
@@ -4806,6 +4840,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 				 chanspec, err);
 			goto exit;
 		}
+
+		err = brcmf_parse_configure_security(ifp, settings,
+						     NL80211_IFTYPE_P2P_GO);
+		if (err < 0) {
+			brcmf_err("brcmf_parse_configure_security error\n");
+			goto exit;
+		}
+
 		err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
 						sizeof(ssid_le));
 		if (err < 0) {
@@ -4821,6 +4863,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 			goto exit;
 		}
 
+		ifp->isap = true;
 		brcmf_dbg(TRACE, "GO mode configuration complete\n");
 	} else {
 		WARN_ON(1);
@@ -4834,6 +4877,9 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 	if ((err) && (!mbss)) {
 		brcmf_set_mpc(ifp, 1);
 		brcmf_configure_arp_nd_offload(ifp, true);
+	} else {
+		cfg->num_softap++;
+		brcmf_dbg(TRACE, "Num of SoftAP %u\n", cfg->num_softap);
 	}
 	return err;
 }
@@ -4843,9 +4889,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_pub *drvr = cfg->pub;
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	s32 err;
 	struct brcmf_fil_bss_enable_le bss_enable;
 	struct brcmf_join_params join_params;
+	s32 apsta = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -4854,6 +4902,35 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 		/* first to make sure they get processed by fw. */
 		msleep(400);
 
+		cfg->num_softap--;
+
+		/* Clear bss configuration and SSID */
+		bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx);
+		bss_enable.enable = cpu_to_le32(0);
+		err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+					       sizeof(bss_enable));
+		if (err < 0)
+			brcmf_err("bss_enable config failed %d\n", err);
+
+		memset(&join_params, 0, sizeof(join_params));
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+					     &join_params, sizeof(join_params));
+		if (err < 0)
+			bphy_err(drvr, "SET SSID error (%d)\n", err);
+
+		if (cfg->num_softap) {
+			brcmf_dbg(TRACE, "Num of SoftAP %u\n", cfg->num_softap);
+			return 0;
+		}
+
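+		/* clear any PSK/SAE credentials that were offloaded to firmware */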
+		if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+			if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
+				brcmf_set_pmk(ifp, NULL, 0);
+			if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
+				brcmf_set_sae_password(ifp, NULL, 0);
+			profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+		}
+
 		if (ifp->vif->mbss) {
 			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
 			return err;
@@ -4863,17 +4940,18 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 		if (ifp->bsscfgidx == 0)
 			brcmf_fil_iovar_int_set(ifp, "closednet", 0);
 
-		memset(&join_params, 0, sizeof(join_params));
-		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
-					     &join_params, sizeof(join_params));
-		if (err < 0)
-			bphy_err(drvr, "SET SSID error (%d)\n", err);
-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+		err = brcmf_fil_iovar_int_get(ifp, "apsta", &apsta);
 		if (err < 0)
-			bphy_err(drvr, "BRCMF_C_DOWN error %d\n", err);
-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
-		if (err < 0)
-			bphy_err(drvr, "setting AP mode failed %d\n", err);
+			brcmf_err("wl apsta failed (%d)\n", err);
+
+		if (!apsta) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+			if (err < 0)
+				bphy_err(drvr, "BRCMF_C_DOWN error %d\n", err);
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+			if (err < 0)
+				bphy_err(drvr, "Set AP mode error %d\n", err);
+		}
 		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
 			brcmf_fil_iovar_int_set(ifp, "mbss", 0);
 		brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
@@ -5348,17 +5426,27 @@ static int brcmf_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
 				  const struct cfg80211_pmk_conf *conf)
 {
 	struct brcmf_if *ifp;
+	struct brcmf_pub *drvr;
+	int ret;
 
 	brcmf_dbg(TRACE, "enter\n");
 
 	/* expect using firmware supplicant for 1X */
 	ifp = netdev_priv(dev);
+	drvr = ifp->drvr;
 	if (WARN_ON(ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_1X))
 		return -EINVAL;
 
 	if (conf->pmk_len > BRCMF_WSEC_MAX_PSK_LEN)
 		return -ERANGE;
 
+	if (ifp->vif->profile.is_okc) {
+		ret = brcmf_fil_iovar_data_set(ifp, "okc_info_pmk", conf->pmk,
+					       conf->pmk_len);
+		if (ret < 0)
+			bphy_err(drvr, "okc_info_pmk iovar failed: ret=%d\n", ret);
+	}
+
 	return brcmf_set_pmk(ifp, conf->pmk, conf->pmk_len);
 }
 
@@ -5375,6 +5463,37 @@ static int brcmf_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
 	return brcmf_set_pmk(ifp, NULL, 0);
 }
 
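+/* .change_bss handler: push the requested ap_isolate setting to firmware and
+ * note whether the fmac driver has to handle packet forwarding itself.
+ */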
+static int
+brcmf_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
+			  struct bss_parameters *params)
+{
+	struct brcmf_if *ifp;
+	int ret = 0;
+	u32 ap_isolate, val;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	ifp = netdev_priv(dev);
+	if (params->ap_isolate >= 0) {
+		ap_isolate = (u32)params->ap_isolate;
+		ret = brcmf_fil_iovar_int_set(ifp, "ap_isolate", ap_isolate);
+		if (ret < 0)
+			brcmf_err("ap_isolate iovar failed: ret=%d\n", ret);
+	}
+
+	/* Read ap_isolate back from firmware to determine whether the fmac
+	 * driver has to handle packet forwarding itself.
+	 */
+	if (brcmf_fil_iovar_int_get(ifp, "ap_isolate", &val) == 0) {
+		ifp->fmac_pkt_fwd_en =
+			(params->ap_isolate == 0) && (val == 1);
+	} else {
+		brcmf_err("get ap_isolate iovar failed\n");
+		ifp->fmac_pkt_fwd_en = false;
+	}
+
+	return ret;
+}
+
 static struct cfg80211_ops brcmf_cfg80211_ops = {
 	.add_virtual_intf = brcmf_cfg80211_add_iface,
 	.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -5420,6 +5539,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
 	.update_connect_params = brcmf_cfg80211_update_conn_params,
 	.set_pmk = brcmf_cfg80211_set_pmk,
 	.del_pmk = brcmf_cfg80211_del_pmk,
+	.change_bss = brcmf_cfg80211_change_bss,
 };
 
 struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
@@ -5484,8 +5604,10 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
 	ifp = netdev_priv(ndev);
 	vif = ifp->vif;
 
-	if (vif)
+	if (vif) {
 		brcmf_free_vif(vif);
+		ifp->vif = NULL;
+	}
 }
 
 static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
@@ -5571,12 +5693,153 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
 	conn_info->resp_ie_len = 0;
 }
 
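+/* Map an 802.1d priority (0-7) to an 8-level precedence, using the AC
+ * priorities learned from the AP's WMM/EDCF parameters when available.
+ */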
+u8 brcmf_map_prio_to_prec(void *config, u8 prio)
+{
+	struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
+
+	if (!cfg)
+		return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
+		       (prio ^ 2) : prio;
+
+	/* For ACs with the ACM flag set to 1, convert their 4-level priority
+	 * to an 8-level precedence, which is the same as BE's
+	 */
+	if (prio > PRIO_8021D_EE &&
+	    cfg->ac_priority[prio] == cfg->ac_priority[PRIO_8021D_BE])
+		return cfg->ac_priority[prio] * 2;
+
+	/* Conversion of 4-level priority to 8-level precedence */
+	if (prio == PRIO_8021D_BE || prio == PRIO_8021D_BK ||
+	    prio == PRIO_8021D_CL || prio == PRIO_8021D_VO)
+		return cfg->ac_priority[prio] * 2;
+	else
+		return cfg->ac_priority[prio] * 2 + 1;
+}
+
+u8 brcmf_map_prio_to_aci(void *config, u8 prio)
+{
+	/* Prio here refers to the 802.1d priority in range of 0 to 7.
+	 * ACI here refers to the WLAN AC Index in range of 0 to 3.
+	 * This function will return ACI corresponding to input prio.
+	 */
+	struct brcmf_cfg80211_info *cfg = (struct brcmf_cfg80211_info *)config;
+
+	if (cfg)
+		return cfg->ac_priority[prio];
+
+	return prio;
+}
+
+static void brcmf_init_wmm_prio(u8 *priority)
+{
+	/* Initialize AC priority array to default
+	 * 802.1d priority as per following table:
+	 * 802.1d prio 0,3 maps to BE
+	 * 802.1d prio 1,2 maps to BK
+	 * 802.1d prio 4,5 maps to VI
+	 * 802.1d prio 6,7 maps to VO
+	 */
+	priority[0] = BRCMF_FWS_FIFO_AC_BE;
+	priority[3] = BRCMF_FWS_FIFO_AC_BE;
+	priority[1] = BRCMF_FWS_FIFO_AC_BK;
+	priority[2] = BRCMF_FWS_FIFO_AC_BK;
+	priority[4] = BRCMF_FWS_FIFO_AC_VI;
+	priority[5] = BRCMF_FWS_FIFO_AC_VI;
+	priority[6] = BRCMF_FWS_FIFO_AC_VO;
+	priority[7] = BRCMF_FWS_FIFO_AC_VO;
+}
+
+static void
+brcmf_wifi_prioritize_acparams(const struct brcmf_cfg80211_edcf_acparam *acp,
+			       u8 *priority)
+{
+	u8 aci;
+	u8 aifsn;
+	u8 ecwmin;
+	u8 ecwmax;
+	u8 acm;
+	u8 ranking_basis[EDCF_AC_COUNT];
+	u8 aci_prio[EDCF_AC_COUNT]; /* AC_BE, AC_BK, AC_VI, AC_VO */
+	u8 index;
+
+	for (aci = 0; aci < EDCF_AC_COUNT; aci++, acp++) {
+		aifsn  = acp->ACI & EDCF_AIFSN_MASK;
+		acm = (acp->ACI & EDCF_ACM_MASK) ? 1 : 0;
+		ecwmin = acp->ECW & EDCF_ECWMIN_MASK;
+		ecwmax = (acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT;
+		brcmf_dbg(CONN, "ACI %d aifsn %d acm %d ecwmin %d ecwmax %d\n",
+			  aci, aifsn, acm, ecwmin, ecwmax);
+		/* Default AC_VO will be the lowest ranking value */
+		ranking_basis[aci] = aifsn + ecwmin + ecwmax;
+		/* Initialise priority starting at 0 (AC_BE) */
+		aci_prio[aci] = 0;
+
+		/* If ACM is set, STA can't use this AC as per 802.11.
+		 * Change the ranking to BE
+		 */
+		if (aci != AC_BE && aci != AC_BK && acm == 1)
+			ranking_basis[aci] = ranking_basis[AC_BE];
+	}
+
+	/* Ranking method which works for AC priority
+	 * swapping when values for cwmin, cwmax and aifsn are varied
+	 * Compare each aci_prio against each other aci_prio
+	 */
+	for (aci = 0; aci < EDCF_AC_COUNT; aci++) {
+		for (index = 0; index < EDCF_AC_COUNT; index++) {
+			if (index != aci) {
+				/* Smaller ranking value has higher priority,
+				 * so increment priority for each ACI which has
+				 * a higher ranking value
+				 */
+				if (ranking_basis[aci] < ranking_basis[index])
+					aci_prio[aci]++;
+			}
+		}
+	}
+
+	/* By now, aci_prio[] will be in range of 0 to 3.
+	 * Use ACI prio to get the new priority value for
+	 * each 802.1d traffic type, in this range.
+	 */
+	if (!(aci_prio[AC_BE] == aci_prio[AC_BK] &&
+	      aci_prio[AC_BK] == aci_prio[AC_VI] &&
+	      aci_prio[AC_VI] == aci_prio[AC_VO])) {
+
+		/* 802.1d 0,3 maps to BE */
+		priority[0] = aci_prio[AC_BE];
+		priority[3] = aci_prio[AC_BE];
+
+		/* 802.1d 1,2 maps to BK */
+		priority[1] = aci_prio[AC_BK];
+		priority[2] = aci_prio[AC_BK];
+
+		/* 802.1d 4,5 maps to VI */
+		priority[4] = aci_prio[AC_VI];
+		priority[5] = aci_prio[AC_VI];
+
+		/* 802.1d 6,7 maps to VO */
+		priority[6] = aci_prio[AC_VO];
+		priority[7] = aci_prio[AC_VO];
+
+	} else {
+		/* Initialize to default priority */
+		brcmf_init_wmm_prio(priority);
+	}
+
+	brcmf_dbg(CONN, "Adj prio BE 0->%d, BK 1->%d, BK 2->%d, BE 3->%d\n",
+		  priority[0], priority[1], priority[2], priority[3]);
+
+	brcmf_dbg(CONN, "Adj prio VI 4->%d, VI 5->%d, VO 6->%d, VO 7->%d\n",
+		  priority[4], priority[5], priority[6], priority[7]);
+}
+
 static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 			       struct brcmf_if *ifp)
 {
 	struct brcmf_pub *drvr = cfg->pub;
 	struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
 	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+	struct brcmf_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT];
 	u32 req_len;
 	u32 resp_len;
 	s32 err = 0;
@@ -5625,6 +5888,17 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 			    GFP_KERNEL);
 		if (!conn_info->resp_ie)
 			conn_info->resp_ie_len = 0;
+
+		err = brcmf_fil_iovar_data_get(ifp, "wme_ac_sta",
+					       edcf_acparam_info,
+					       sizeof(edcf_acparam_info));
+		if (err) {
+			brcmf_err("could not get wme_ac_sta (%d)\n", err);
+			return err;
+		}
+
+		brcmf_wifi_prioritize_acparams(edcf_acparam_info,
+					       cfg->ac_priority);
 	} else {
 		conn_info->resp_ie_len = 0;
 		conn_info->resp_ie = NULL;
@@ -5635,6 +5909,47 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 	return err;
 }
 
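+/* Return true when the given (re)association request IEs contain an RSN IE
+ * with a non-zero PMKID count.
+ */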
+static bool
+brcmf_has_pmkid(const u8 *parse, u32 len)
+{
+	const struct brcmf_tlv *rsn_ie;
+	const u8 *ie;
+	u32 ie_len;
+	u32 offset;
+	u16 count;
+
+	rsn_ie = brcmf_parse_tlvs(parse, len, WLAN_EID_RSN);
+	if (!rsn_ie)
+		goto done;
+	ie = (const u8 *)rsn_ie;
+	ie_len = rsn_ie->len + TLV_HDR_LEN;
+	/* Skip group data cipher suite */
+	offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN;
+	if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+		goto done;
+	/* Skip pairwise cipher suite(s) */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+	if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+		goto done;
+	/* Skip auth key management suite(s) */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+	if (offset + RSN_CAP_LEN >= ie_len)
+		goto done;
+	/* Skip rsn capabilities */
+	offset += RSN_CAP_LEN;
+	if (offset + RSN_PMKID_COUNT_LEN > ie_len)
+		goto done;
+	/* Extract PMKID count */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	if (count)
+		return true;
+
+done:
+	return false;
+}
+
 static s32
 brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 		       struct net_device *ndev,
@@ -5695,14 +6010,14 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 	roam_info.resp_ie = conn_info->resp_ie;
 	roam_info.resp_ie_len = conn_info->resp_ie_len;
 
+	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X &&
+	    (brcmf_has_pmkid(roam_info.req_ie, roam_info.req_ie_len) ||
+	     profile->is_ft || profile->is_okc))
+		roam_info.authorized = true;
+
 	cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
 	brcmf_dbg(CONN, "Report roaming result\n");
 
-	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
-		cfg80211_port_authorized(ndev, profile->bssid, GFP_KERNEL);
-		brcmf_dbg(CONN, "Report port authorized\n");
-	}
-
 	set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
@@ -5737,6 +6052,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
 		conn_params.req_ie_len = conn_info->req_ie_len;
 		conn_params.resp_ie = conn_info->resp_ie;
 		conn_params.resp_ie_len = conn_info->resp_ie_len;
+
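+		/* firmware 1X offload with a cached PMKID: report the port as already authorized */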
+		if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X &&
+		    brcmf_has_pmkid(conn_params.req_ie, conn_params.req_ie_len))
+			conn_params.authorized = true;
+
 		cfg80211_connect_done(ndev, &conn_params, GFP_KERNEL);
 		brcmf_dbg(CONN, "Report connect result - connection %s\n",
 			  completed ? "succeeded" : "failed");
@@ -5809,6 +6129,14 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
 	}
 
 	if (brcmf_is_apmode(ifp->vif)) {
+		if (e->event_code == BRCMF_E_ASSOC_IND ||
+		    e->event_code == BRCMF_E_REASSOC_IND) {
+			brcmf_findadd_sta(ifp, e->addr);
+		} else if ((e->event_code == BRCMF_E_DISASSOC_IND) ||
+				(e->event_code == BRCMF_E_DEAUTH_IND) ||
+				(e->event_code == BRCMF_E_DEAUTH)) {
+			brcmf_del_sta(ifp, e->addr);
+		}
 		err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
 	} else if (brcmf_is_linkup(ifp->vif, e)) {
 		brcmf_dbg(CONN, "Linkup\n");
@@ -5826,10 +6154,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
 		brcmf_net_setcarrier(ifp, true);
 	} else if (brcmf_is_linkdown(e)) {
 		brcmf_dbg(CONN, "Linkdown\n");
-		if (!brcmf_is_ibssmode(ifp->vif)) {
+		if (!brcmf_is_ibssmode(ifp->vif) &&
+		    test_bit(BRCMF_VIF_STATUS_CONNECTED,
+			     &ifp->vif->sme_state)) {
+			if (memcmp(profile->bssid, e->addr, ETH_ALEN))
+				return err;
+
 			brcmf_bss_connect_done(cfg, ndev, e, false);
 			brcmf_link_down(ifp->vif,
-					brcmf_map_fw_linkdown_reason(e));
+					brcmf_map_fw_linkdown_reason(e),
+					e->event_code &
+					(BRCMF_E_DEAUTH_IND |
+					BRCMF_E_DISASSOC_IND)
+					? false : true);
 			brcmf_init_prof(ndev_to_prof(ndev));
 			if (ndev != cfg_to_ndev(cfg))
 				complete(&cfg->vif_disabled);
@@ -5892,6 +6229,9 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
 	struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
 	struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
 	struct brcmf_cfg80211_vif *vif;
+	enum nl80211_iftype iftype = NL80211_IFTYPE_UNSPECIFIED;
+	bool vif_pend = false;
+	int err;
 
 	brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfgidx %u\n",
 		  ifevent->action, ifevent->flags, ifevent->ifidx,
@@ -5904,9 +6244,28 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
 	switch (ifevent->action) {
 	case BRCMF_E_IF_ADD:
 		/* waiting process may have timed out */
-		if (!cfg->vif_event.vif) {
+		if (!vif) {
+			/* handle IF_ADD event from firmware */
 			spin_unlock(&event->vif_event_lock);
-			return -EBADF;
+			vif_pend = true;
+			if (ifevent->role == WLC_E_IF_ROLE_STA)
+				iftype = NL80211_IFTYPE_STATION;
+			else if (ifevent->role == WLC_E_IF_ROLE_AP)
+				iftype = NL80211_IFTYPE_AP;
+			else
+				vif_pend = false;
+
+			if (vif_pend) {
+				vif = brcmf_alloc_vif(cfg, iftype);
+				if (IS_ERR(vif)) {
+					brcmf_err("Role:%d failed to alloc vif\n",
+						  ifevent->role);
+					return PTR_ERR(vif);
+				}
+			} else {
+				brcmf_err("Invalid Role:%d\n", ifevent->role);
+				return -EBADF;
+			}
 		}
 
 		ifp->vif = vif;
@@ -5916,6 +6275,18 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
 			ifp->ndev->ieee80211_ptr = &vif->wdev;
 			SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
 		}
+
+		if (vif_pend) {
+			err = brcmf_net_attach(ifp, false);
+			if (err) {
+				brcmf_err("netdevice register failed with err:%d\n",
+					  err);
+				brcmf_free_vif(vif);
+				free_netdev(ifp->ndev);
+			}
+			return err;
+		}
+
 		spin_unlock(&event->vif_event_lock);
 		wake_up(&event->vif_wq);
 		return 0;
@@ -6041,6 +6412,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
 	mutex_init(&cfg->usr_sync);
 	brcmf_init_escan(cfg);
 	brcmf_init_conf(cfg->conf);
+	brcmf_init_wmm_prio(cfg->ac_priority);
 	init_completion(&cfg->vif_disabled);
 	return err;
 }
@@ -6093,17 +6465,19 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
 	roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
 				     (void *)roamtrigger, sizeof(roamtrigger));
-	if (err)
+	if (err) {
 		bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
+		goto roam_setup_done;
+	}
 
 	roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
 	roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
 				     (void *)roam_delta, sizeof(roam_delta));
-	if (err)
+	if (err) {
 		bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err);
-
-	return 0;
+		goto roam_setup_done;
+	}
 
 roam_setup_done:
 	return err;
@@ -6602,7 +6976,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
  *	#AP <= 4, matching BI, channels = 1, 4 total
  *
  * no p2p and rsdb:
- *	#STA <= 2, #AP <= 2, channels = 2, 4 total
+ *	#STA <= 1, #AP <= 2, channels = 2, 3 total
  *
  * p2p, no mchan, and mbss:
  *
@@ -6617,8 +6991,14 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
  *	#AP <= 4, matching BI, channels = 1, 4 total
  *
  * p2p, rsdb, and no mbss:
- *	#STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
  *	 channels = 2, 4 total
+ *
+ * p2p, rsdb, mbss
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ *	 channels = 2, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
  */
 static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
@@ -6626,14 +7006,12 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 	struct ieee80211_iface_limit *c0_limits = NULL;
 	struct ieee80211_iface_limit *p2p_limits = NULL;
 	struct ieee80211_iface_limit *mbss_limits = NULL;
-	bool mon_flag, mbss, p2p, rsdb, mchan;
-	int i, c, n_combos, n_limits;
+	bool mbss, p2p, rsdb;
+	int i, c, n_combos;
 
-	mon_flag = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FLAG);
 	mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
 	p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
 	rsdb = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB);
-	mchan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN);
 
 	n_combos = 1 + !!(p2p && !rsdb) + !!mbss;
 	combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
@@ -6643,45 +7021,59 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				 BIT(NL80211_IFTYPE_ADHOC) |
 				 BIT(NL80211_IFTYPE_AP);
-	if (mon_flag)
-		wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
-	if (p2p)
-		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
-					  BIT(NL80211_IFTYPE_P2P_GO) |
-					  BIT(NL80211_IFTYPE_P2P_DEVICE);
 
 	c = 0;
 	i = 0;
-	n_limits = 1 + mon_flag + (p2p ? 2 : 0) + (rsdb || !p2p);
-	c0_limits = kcalloc(n_limits, sizeof(*c0_limits), GFP_KERNEL);
+	if (p2p && rsdb)
+		c0_limits = kcalloc(4, sizeof(*c0_limits), GFP_KERNEL);
+	else if (p2p)
+		c0_limits = kcalloc(3, sizeof(*c0_limits), GFP_KERNEL);
+	else
+		c0_limits = kcalloc(2, sizeof(*c0_limits), GFP_KERNEL);
 	if (!c0_limits)
 		goto err;
-
-	combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
-	c0_limits[i].max = 1 + rsdb;
-	c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
-	if (mon_flag) {
+	if (p2p && rsdb) {
+		combo[c].num_different_channels = 2;
+		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					  BIT(NL80211_IFTYPE_P2P_GO) |
+					  BIT(NL80211_IFTYPE_P2P_DEVICE);
 		c0_limits[i].max = 1;
-		c0_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
-	}
-	if (p2p) {
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
 		c0_limits[i].max = 1;
 		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
-		c0_limits[i].max = 1 + rsdb;
+		c0_limits[i].max = 2;
 		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
 				       BIT(NL80211_IFTYPE_P2P_GO);
-	}
-	if (p2p && rsdb) {
 		c0_limits[i].max = 2;
 		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
-		combo[c].max_interfaces = 5;
+		combo[c].max_interfaces = 4;
 	} else if (p2p) {
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+			combo[c].num_different_channels = 2;
+		else
+			combo[c].num_different_channels = 1;
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					  BIT(NL80211_IFTYPE_P2P_GO) |
+					  BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				       BIT(NL80211_IFTYPE_P2P_GO);
 		combo[c].max_interfaces = i;
 	} else if (rsdb) {
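+		/* RSDB without P2P: 1 STA and up to 2 AP interfaces,
+		 * 3 concurrent in total across 2 channels.
+		 */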
+		combo[c].num_different_channels = 2;
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
 		c0_limits[i].max = 2;
 		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
 		combo[c].max_interfaces = 3;
 	} else {
+		combo[c].num_different_channels = 1;
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
 		c0_limits[i].max = 1;
 		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
 		combo[c].max_interfaces = i;
@@ -6712,20 +7104,14 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 	if (mbss) {
 		c++;
 		i = 0;
-		n_limits = 1 + mon_flag;
-		mbss_limits = kcalloc(n_limits, sizeof(*mbss_limits),
-				      GFP_KERNEL);
+		mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
 		if (!mbss_limits)
 			goto err;
 		mbss_limits[i].max = 4;
 		mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP);
-		if (mon_flag) {
-			mbss_limits[i].max = 1;
-			mbss_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR);
-		}
 		combo[c].beacon_int_infra_match = true;
 		combo[c].num_different_channels = 1;
-		combo[c].max_interfaces = 4 + mon_flag;
+		combo[c].max_interfaces = 4;
 		combo[c].n_limits = i;
 		combo[c].limits = mbss_limits;
 	}
@@ -6758,6 +7144,7 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_pub *drvr = cfg->pub;
 	struct wiphy_wowlan_support *wowl;
+	struct cfg80211_wowlan *brcmf_wowlan_config = NULL;
 
 	wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support),
 		       GFP_KERNEL);
@@ -6780,12 +7167,34 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
 	}
 
 	wiphy->wowlan = wowl;
+
+	/* Set up a default wowlan_config to report to the kernel */
+	brcmf_wowlan_config = kzalloc(sizeof(*brcmf_wowlan_config),
+				      GFP_KERNEL);
+	if (brcmf_wowlan_config) {
+		brcmf_wowlan_config->any = false;
+		brcmf_wowlan_config->disconnect = true;
+		brcmf_wowlan_config->eap_identity_req = true;
+		brcmf_wowlan_config->four_way_handshake = true;
+		brcmf_wowlan_config->rfkill_release = false;
+		brcmf_wowlan_config->patterns = NULL;
+		brcmf_wowlan_config->n_patterns = 0;
+		brcmf_wowlan_config->tcp = NULL;
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
+			brcmf_wowlan_config->gtk_rekey_failure = true;
+		else
+			brcmf_wowlan_config->gtk_rekey_failure = false;
+	} else {
+		brcmf_err("Cannot allocate memory for brcmf_wowlan_config\n");
+	}
+	wiphy->wowlan_config = brcmf_wowlan_config;
 #endif
 }
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	const struct ieee80211_iface_combination *combo;
 	struct ieee80211_supported_band *band;
 	u16 max_interfaces = 0;
@@ -6847,6 +7256,13 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 			wiphy_ext_feature_set(wiphy,
 					      NL80211_EXT_FEATURE_SAE_OFFLOAD);
 	}
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWAUTH)) {
+		wiphy_ext_feature_set(wiphy,
+				      NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK);
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+			wiphy_ext_feature_set(wiphy,
+					      NL80211_EXT_FEATURE_SAE_OFFLOAD);
+	}
 	wiphy->mgmt_stypes = brcmf_txrx_stypes;
 	wiphy->max_remain_on_channel_duration = 5000;
 	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
@@ -6856,9 +7272,12 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 	/* vendor commands/events support */
 	wiphy->vendor_commands = brcmf_vendor_cmds;
 	wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
+	wiphy->vendor_events = brcmf_vendor_events;
+	wiphy->n_vendor_events = BRCMF_VNDR_EVTS_LAST;
+	brcmf_fweh_register(cfg->pub, BRCMF_E_PHY_TEMP,
+			    brcmf_wiphy_phy_temp_evt_handler);
 
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
-		brcmf_wiphy_wowl_params(wiphy, ifp);
+	brcmf_wiphy_wowl_params(wiphy, ifp);
 	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
 				     sizeof(bandlist));
 	if (err) {
@@ -6921,6 +7340,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
 	struct wireless_dev *wdev;
 	struct brcmf_if *ifp;
 	s32 power_mode;
+	s32 eap_restrict;
 	s32 err = 0;
 
 	if (cfg->dongle_up)
@@ -6935,7 +7355,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
 
 	brcmf_dongle_scantime(ifp);
 
-	power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
+	power_mode = cfg->pwr_save ? ifp->drvr->settings->default_pm : PM_OFF;
 	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode);
 	if (err)
 		goto default_conf_out;
@@ -6945,6 +7365,14 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
 	err = brcmf_dongle_roam(ifp);
 	if (err)
 		goto default_conf_out;
+
+	eap_restrict = ifp->drvr->settings->eap_restrict;
+	if (eap_restrict) {
+		err = brcmf_fil_iovar_int_set(ifp, "eap_restrict",
+					      eap_restrict);
+		if (err)
+			brcmf_info("eap_restrict error (%d)\n", err);
+	}
 	err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
 					  NULL);
 	if (err)
@@ -6981,7 +7409,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
 	 * from AP to save power
 	 */
 	if (check_vif_up(ifp->vif)) {
-		brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED);
+		brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED, true);
 
 		/* Make sure WPA_Supplicant receives all the event
 		   generated due to DISASSOC call to the fw to keep
@@ -7225,6 +7653,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
 
 	cfg->wiphy = wiphy;
 	cfg->pub = drvr;
+	cfg->pm_state = BRCMF_CFG80211_PM_STATE_RESUMED;
+	cfg->num_softap = 0;
 	init_vif_event(&cfg->vif_event);
 	INIT_LIST_HEAD(&cfg->vif_list);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 6ce48f6275a..bd4ea5e38dd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -23,6 +23,23 @@
 #define WL_ROAM_TRIGGER_LEVEL		-75
 #define WL_ROAM_DELTA			20
 
+/* WME Access Category Indices (ACIs) */
+#define AC_BE			0	/* Best Effort */
+#define AC_BK			1	/* Background */
+#define AC_VI			2	/* Video */
+#define AC_VO			3	/* Voice */
+#define EDCF_AC_COUNT		4
+#define MAX_8021D_PRIO		8
+
+#define EDCF_ACI_MASK			0x60
+#define EDCF_ACI_SHIFT			5
+#define EDCF_ACM_MASK			0x10
+#define EDCF_ECWMIN_MASK		0x0f
+#define EDCF_ECWMAX_SHIFT		4
+#define EDCF_AIFSN_MASK			0x0f
+#define EDCF_AIFSN_MAX			15
+#define EDCF_ECWMAX_MASK		0xf0
+
 /* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocing over 64K can be
  * problematic on some systems and should be avoided.
  */
@@ -75,6 +92,15 @@
 
 #define BRCMF_VIF_EVENT_TIMEOUT		msecs_to_jiffies(1500)
 
+#define BRCMF_PM_WAIT_MAXRETRY			100
+
+/* cfg80211 wowlan definitions */
+#define WL_WOWLAN_MAX_PATTERNS			8
+#define WL_WOWLAN_MIN_PATTERN_LEN		1
+#define WL_WOWLAN_MAX_PATTERN_LEN		255
+#define WL_WOWLAN_PKT_FILTER_ID_FIRST	201
+#define WL_WOWLAN_PKT_FILTER_ID_LAST	(WL_WOWLAN_PKT_FILTER_ID_FIRST + \
+					WL_WOWLAN_MAX_PATTERNS - 1)
 /**
  * enum brcmf_scan_status - scan engine status
  *
@@ -111,6 +137,19 @@ enum brcmf_profile_fwsup {
 	BRCMF_PROFILE_FWSUP_SAE
 };
 
+/**
+ * enum brcmf_profile_fwauth - firmware authenticator profile
+ *
+ * @BRCMF_PROFILE_FWAUTH_NONE: no firmware authenticator
+ * @BRCMF_PROFILE_FWAUTH_PSK: authenticator for WPA/WPA2-PSK
+ * @BRCMF_PROFILE_FWAUTH_SAE: authenticator for SAE
+ */
+enum brcmf_profile_fwauth {
+	BRCMF_PROFILE_FWAUTH_NONE,
+	BRCMF_PROFILE_FWAUTH_PSK,
+	BRCMF_PROFILE_FWAUTH_SAE
+};
+
 /**
  * struct brcmf_cfg80211_profile - profile information.
  *
@@ -123,7 +162,9 @@ struct brcmf_cfg80211_profile {
 	struct brcmf_cfg80211_security sec;
 	struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
 	enum brcmf_profile_fwsup use_fwsup;
+	u16 use_fwauth;
 	bool is_ft;
+	bool is_okc;
 };
 
 /**
@@ -147,25 +188,36 @@ enum brcmf_vif_status {
 	BRCMF_VIF_STATUS_ASSOC_SUCCESS,
 };
 
+enum brcmf_cfg80211_pm_state {
+	BRCMF_CFG80211_PM_STATE_RESUMED,
+	BRCMF_CFG80211_PM_STATE_RESUMING,
+	BRCMF_CFG80211_PM_STATE_SUSPENDED,
+	BRCMF_CFG80211_PM_STATE_SUSPENDING,
+};
+
 /**
  * struct vif_saved_ie - holds saved IEs for a virtual interface.
  *
  * @probe_req_ie: IE info for probe request.
  * @probe_res_ie: IE info for probe response.
  * @beacon_ie: IE info for beacon frame.
+ * @assoc_res_ie: IE info for association response frame.
  * @probe_req_ie_len: IE info length for probe request.
  * @probe_res_ie_len: IE info length for probe response.
  * @beacon_ie_len: IE info length for beacon frame.
+ * @assoc_res_ie_len: IE info length for association response frame.
  */
 struct vif_saved_ie {
 	u8  probe_req_ie[IE_MAX_LEN];
 	u8  probe_res_ie[IE_MAX_LEN];
 	u8  beacon_ie[IE_MAX_LEN];
 	u8  assoc_req_ie[IE_MAX_LEN];
+	u8  assoc_res_ie[IE_MAX_LEN];
 	u32 probe_req_ie_len;
 	u32 probe_res_ie_len;
 	u32 beacon_ie_len;
 	u32 assoc_req_ie_len;
+	u32 assoc_res_ie_len;
 };
 
 /**
@@ -205,6 +257,12 @@ struct brcmf_cfg80211_assoc_ielen_le {
 	__le32 resp_len;
 };
 
+struct brcmf_cfg80211_edcf_acparam {
+	u8 ACI;
+	u8 ECW;
+	u16 TXOP;        /* stored in network order (ls octet first) */
+};
+
 /* dongle escan state */
 enum wl_escan_state {
 	WL_ESCAN_STATE_IDLE,
@@ -323,6 +381,9 @@ struct brcmf_cfg80211_info {
 	struct brcmf_assoclist_le assoclist;
 	struct brcmf_cfg80211_wowl wowl;
 	struct brcmf_pno_info *pno;
+	u8 ac_priority[MAX_8021D_PRIO];
+	u8 pm_state;
+	u8 num_softap;
 };
 
 /**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a3a25708969..8285045a962 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -213,6 +213,24 @@ struct sbsocramregs {
 #define	ARMCR4_BSZ_MASK		0x3f
 #define	ARMCR4_BSZ_MULT		8192
 
+/* Minimum PMU resource mask for 43012C0 */
+#define CY_43012_PMU_MIN_RES_MASK       0xF8BFE77
+
+/* PMU STATUS mask for 43012C0 */
+#define CY_43012_PMU_STATUS_MASK        0x1AC
+
+/* PMU CONTROL EXT mask for 43012C0 */
+#define CY_43012_PMU_CONTROL_EXT_MASK   0x11
+
+/* PMU Watchdog Counter Tick value for 43012C0 */
+#define CY_43012_PMU_WATCHDOG_TICK_VAL  0x04
+
+/* PMU Watchdog Counter Tick value for 4373 */
+#define CY_4373_PMU_WATCHDOG_TICK_VAL  0x04
+
+/* Minimum PMU resource mask for 4373 */
+#define CY_4373_PMU_MIN_RES_MASK       0xFCAFF7F
+
 struct brcmf_core_priv {
 	struct brcmf_core pub;
 	u32 wrapbase;
@@ -712,6 +730,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
 	case BRCM_CC_43569_CHIP_ID:
 	case BRCM_CC_43570_CHIP_ID:
 	case BRCM_CC_4358_CHIP_ID:
+	case BRCM_CC_4359_CHIP_ID:
 	case BRCM_CC_43602_CHIP_ID:
 	case BRCM_CC_4371_CHIP_ID:
 		return 0x180000;
@@ -721,11 +740,10 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
 	case BRCM_CC_4366_CHIP_ID:
 	case BRCM_CC_43664_CHIP_ID:
 		return 0x200000;
-	case BRCM_CC_4359_CHIP_ID:
-		return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
-	case BRCM_CC_4364_CHIP_ID:
 	case CY_CC_4373_CHIP_ID:
 		return 0x160000;
+	case CY_CC_89459_CHIP_ID:
+		return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
 	default:
 		brcmf_err("unknown chip: %s\n", ci->pub.name);
 		break;
@@ -815,6 +833,7 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
 {
 	u8 desc;
 	u32 val, szdesc;
+	u8 mpnum = 0;
 	u8 stype, sztype, wraptype;
 
 	*regbase = 0;
@@ -822,6 +841,7 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
 
 	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
 	if (desc == DMP_DESC_MASTER_PORT) {
+		mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
 		wraptype = DMP_SLAVE_TYPE_MWRAP;
 	} else if (desc == DMP_DESC_ADDRESS) {
 		/* revert erom address */
@@ -889,7 +909,7 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
 	u8 desc_type = 0;
 	u32 val;
 	u16 id;
-	u8 nmw, nsw, rev;
+	u8 nmp, nsp, nmw, nsw, rev;
 	u32 base, wrap;
 	int err;
 
@@ -915,6 +935,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
 			return -EFAULT;
 
 		/* only look at cores with master port(s) */
+		nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+		nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
 		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
 		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
 		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
@@ -1203,6 +1225,14 @@ struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
 	return cc;
 }
 
+struct brcmf_core *brcmf_chip_get_gci(struct brcmf_chip *pub)
+{
+	struct brcmf_core *gci;
+
+	gci = brcmf_chip_get_core(pub, BCMA_CORE_GCI);
+	return gci;
+}
+
 bool brcmf_chip_iscoreup(struct brcmf_core *pub)
 {
 	struct brcmf_core_priv *core;
@@ -1405,6 +1435,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
 		reg = chip->ops->read32(chip->ctx, addr);
 		return reg != 0;
 	case CY_CC_4373_CHIP_ID:
+	case CY_CC_89459_CHIP_ID:
 		/* explicitly check SR engine enable bit */
 		addr = CORE_CC_REG(base, sr_control0);
 		reg = chip->ops->read32(chip->ctx, addr);
@@ -1427,3 +1458,151 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
 			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
 	}
 }
+
+void brcmf_chip_reset_pmu_regs(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	u32 addr;
+	u32 base;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	base = brcmf_chip_get_pmu(pub)->base;
+
+	switch (pub->chip) {
+	case CY_CC_43012_CHIP_ID:
+		/* SW scratch */
+		addr = CORE_CC_REG(base, swscratch);
+		chip->ops->write32(chip->ctx, addr, 0);
+
+		/* PMU status */
+		addr = CORE_CC_REG(base, pmustatus);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_STATUS_MASK);
+
+		/* PMU control ext */
+		addr = CORE_CC_REG(base, pmucontrol_ext);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_CONTROL_EXT_MASK);
+		break;
+
+	default:
+		brcmf_err("Unsupported chip id\n");
+		break;
+	}
+}
+
+void brcmf_chip_set_default_min_res_mask(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	u32 addr;
+	u32 base;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	base = brcmf_chip_get_pmu(pub)->base;
+	switch (pub->chip) {
+	case CY_CC_43012_CHIP_ID:
+		addr = CORE_CC_REG(base, min_res_mask);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_MIN_RES_MASK);
+		break;
+
+	default:
+		brcmf_err("Unsupported chip id\n");
+		break;
+	}
+}
+
+void brcmf_chip_ulp_reset_lhl_regs(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	u32 base;
+	u32 addr;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	base = brcmf_chip_get_gci(pub)->base;
+
+	/* LHL Top Level Power Sequence Control */
+	addr = CORE_GCI_REG(base, lhl_top_pwrseq_ctl_adr);
+	chip->ops->write32(chip->ctx, addr, 0);
+
+	/* GPIO Interrupt Enable0 */
+	addr = CORE_GCI_REG(base, gpio_int_en_port_adr[0]);
+	chip->ops->write32(chip->ctx, addr, 0);
+
+	/* GPIO Interrupt Status0 */
+	addr = CORE_GCI_REG(base, gpio_int_st_port_adr[0]);
+	chip->ops->write32(chip->ctx, addr, ~0);
+
+	/* WL ARM Timer0 Interrupt Mask */
+	addr = CORE_GCI_REG(base, lhl_wl_armtim0_intrp_adr);
+	chip->ops->write32(chip->ctx, addr, 0);
+
+	/* WL ARM Timer0 Interrupt Status */
+	addr = CORE_GCI_REG(base, lhl_wl_armtim0_st_adr);
+	chip->ops->write32(chip->ctx, addr, ~0);
+
+	/* WL ARM Timer */
+	addr = CORE_GCI_REG(base, lhl_wl_armtim0_adr);
+	chip->ops->write32(chip->ctx, addr, 0);
+
+	/* WL MAC Timer0 Interrupt Mask */
+	addr = CORE_GCI_REG(base, lhl_wl_mactim0_intrp_adr);
+	chip->ops->write32(chip->ctx, addr, 0);
+
+	/* WL MAC Timer0 Interrupt Status */
+	addr = CORE_GCI_REG(base, lhl_wl_mactim0_st_adr);
+	chip->ops->write32(chip->ctx, addr, ~0);
+
+	/* WL MAC TimerInt0 */
+	addr = CORE_GCI_REG(base, lhl_wl_mactim_int0_adr);
+	chip->ops->write32(chip->ctx, addr, 0x0);
+}
+
+void brcmf_chip_reset_watchdog(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	u32 base;
+	u32 addr;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	base = brcmf_chip_get_pmu(pub)->base;
+
+	switch (pub->chip) {
+	case CY_CC_43012_CHIP_ID:
+		addr = CORE_CC_REG(base, min_res_mask);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_MIN_RES_MASK);
+		/* Watchdog res mask */
+		addr = CORE_CC_REG(base, watchdog_res_mask);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_MIN_RES_MASK);
+		/* PMU watchdog */
+		addr = CORE_CC_REG(base, pmuwatchdog);
+		chip->ops->write32(chip->ctx, addr,
+			CY_43012_PMU_WATCHDOG_TICK_VAL);
+		break;
+	case CY_CC_4373_CHIP_ID:
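+		/* Same register sequence as the 43012 case, using the
+		 * 4373 values and followed by a 100 ms delay.
+		 */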
+		addr = CORE_CC_REG(base, min_res_mask);
+		chip->ops->write32(chip->ctx, addr,
+			CY_4373_PMU_MIN_RES_MASK);
+		addr = CORE_CC_REG(base, watchdog_res_mask);
+		chip->ops->write32(chip->ctx, addr,
+			CY_4373_PMU_MIN_RES_MASK);
+		addr = CORE_CC_REG(base, pmuwatchdog);
+		chip->ops->write32(chip->ctx, addr,
+			CY_4373_PMU_WATCHDOG_TICK_VAL);
+		mdelay(100);
+		break;
+	default:
+		break;
+	}
+}
+
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index 8fa38658e72..216e5540a13 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -8,7 +8,10 @@
 #include <linux/types.h>
 
 #define CORE_CC_REG(base, field) \
-		(base + offsetof(struct chipcregs, field))
+		((base) + offsetof(struct chipcregs, field))
+
+#define CORE_GCI_REG(base, field) \
+		((base) + offsetof(struct chipgciregs, field))
 
 /**
  * struct brcmf_chip - chip level information.
@@ -85,5 +88,9 @@ void brcmf_chip_set_passive(struct brcmf_chip *ci);
 bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
 char *brcmf_chip_name(u32 chipid, u32 chiprev, char *buf, uint len);
+void brcmf_chip_reset_watchdog(struct brcmf_chip *pub);
+void brcmf_chip_ulp_reset_lhl_regs(struct brcmf_chip *pub);
+void brcmf_chip_reset_pmu_regs(struct brcmf_chip *pub);
+void brcmf_chip_set_default_min_res_mask(struct brcmf_chip *pub);
 
 #endif /* BRCMF_AXIDMP_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 1fc9cfcdd24..22a4aa38c0e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -20,6 +20,9 @@
 #include "of.h"
 #include "firmware.h"
 #include "chip.h"
+#include "defs.h"
+#include "fweh.h"
+#include <brcm_hw_ids.h>
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -67,6 +70,18 @@ static int brcmf_iapp_enable;
 module_param_named(iapp, brcmf_iapp_enable, int, 0);
 MODULE_PARM_DESC(iapp, "Enable partial support for the obsoleted Inter-Access Point Protocol");
 
+static int brcmf_eap_restrict;
+module_param_named(eap_restrict, brcmf_eap_restrict, int, 0400);
+MODULE_PARM_DESC(eap_restrict, "Block non-802.1X frames until auth finished");
+
+static int brcmf_sdio_wq_highpri;
+module_param_named(sdio_wq_highpri, brcmf_sdio_wq_highpri, int, 0);
+MODULE_PARM_DESC(sdio_wq_highpri, "SDIO workqueue is set to high priority");
+
+static int brcmf_max_pm;
+module_param_named(max_pm, brcmf_max_pm, int, 0);
+MODULE_PARM_DESC(max_pm, "Use max power management mode by default");
+
 #ifdef DEBUG
 /* always succeed brcmf_bus_started() */
 static int brcmf_ignore_probe_fail;
@@ -201,8 +216,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 	char *clmver;
 	char *ptr;
 	s32 err;
+	struct eventmsgs_ext *eventmask_msg = NULL;
+	u8 msglen;
 
-	/* retreive mac address */
+	/* retrieve mac address */
 	err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
 				       sizeof(ifp->mac_addr));
 	if (err < 0) {
@@ -292,6 +309,11 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 		brcmf_dbg(INFO, "CLM version = %s\n", clmver);
 	}
 
+	/* set apsta */
+	err = brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+	if (err)
+		brcmf_info("failed setting apsta, %d\n", err);
+
 	/* set mpc */
 	err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
 	if (err) {
@@ -316,6 +338,41 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 		goto done;
 	}
 
+	/* Enable event_msg_ext specific to 43012 chip */
+	if (bus->chip == CY_CC_43012_CHIP_ID) {
+		/* Program event_msg_ext to support event larger than 128 */
+		msglen = (roundup(BRCMF_E_LAST, NBBY) / NBBY) +
+				  EVENTMSGS_EXT_STRUCT_SIZE;
+		/* Allocate buffer for eventmask_msg */
+		eventmask_msg = kzalloc(msglen, GFP_KERNEL);
+		if (!eventmask_msg) {
+			err = -ENOMEM;
+			goto done;
+		}
+
+		/* Read the current programmed event_msgs_ext */
+		eventmask_msg->ver = EVENTMSGS_VER;
+		eventmask_msg->len = roundup(BRCMF_E_LAST, NBBY) / NBBY;
+		err = brcmf_fil_iovar_data_get(ifp, "event_msgs_ext",
+					       eventmask_msg,
+					       msglen);
+
+		/* Enable ULP event */
+		brcmf_dbg(EVENT, "enable event ULP\n");
+		setbit(eventmask_msg->mask, BRCMF_E_ULP);
+
+		/* Write updated Event mask */
+		eventmask_msg->ver = EVENTMSGS_VER;
+		eventmask_msg->command = EVENTMSGS_SET_MASK;
+		eventmask_msg->len = (roundup(BRCMF_E_LAST, NBBY) / NBBY);
+
+		err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext",
+					       eventmask_msg, msglen);
+		if (err) {
+			brcmf_err("Set event_msgs_ext error (%d)\n", err);
+			goto done;
+		}
+	}
 	/* Setup default scan channel time */
 	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
 				    BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
@@ -336,6 +393,18 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 
 	/* Enable tx beamforming, errors can be ignored (not supported) */
 	(void)brcmf_fil_iovar_int_set(ifp, "txbf", 1);
+
+	/* add unicast packet filter */
+	err = brcmf_pktfilter_add_remove(ifp->ndev,
+					 BRCMF_UNICAST_FILTER_NUM, true);
+	if (err == -BRCMF_FW_UNSUPPORTED) {
+		/* FW not support can be ignored */
+		err = 0;
+		goto done;
+	} else if (err) {
+		bphy_err(drvr, "Add unicast filter error (%d)\n", err);
+	}
+
 done:
 	return err;
 }
@@ -407,12 +476,15 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
 	if (!settings)
 		return NULL;
 
-	/* start by using the module paramaters */
+	/* start by using the module parameters */
 	settings->p2p_enable = !!brcmf_p2p_enable;
 	settings->feature_disable = brcmf_feature_disable;
 	settings->fcmode = brcmf_fcmode;
 	settings->roamoff = !!brcmf_roamoff;
 	settings->iapp = !!brcmf_iapp_enable;
+	settings->eap_restrict = !!brcmf_eap_restrict;
+	settings->sdio_wq_highpri = !!brcmf_sdio_wq_highpri;
+	settings->default_pm = !!brcmf_max_pm ? PM_MAX : PM_FAST;
 #ifdef DEBUG
 	settings->ignore_probe_fail = !!brcmf_ignore_probe_fail;
 #endif
@@ -492,7 +564,7 @@ static int __init brcmfmac_module_init(void)
 	if (err == -ENODEV)
 		brcmf_dbg(INFO, "No platform data available.\n");
 
-	/* Initialize global module paramaters */
+	/* Initialize global module parameters */
 	brcmf_mp_attach();
 
 	/* Continue the initialization by registering the different busses */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 144cf4570bc..c0bf5867af5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -37,6 +37,9 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
  * @feature_disable: Feature_disable bitmask.
  * @fcmode: FWS flow control.
  * @roamoff: Firmware roaming off?
+ * @eap_restrict: Don't allow data tx/rx until 802.1X auth succeeds.
+ * @sdio_wq_highpri: Submit SDIO workqueue tasks at high priority.
+ * @default_pm: default power management (PM) mode.
  * @ignore_probe_fail: Ignore probe failure.
  * @country_codes: If available, pointer to struct for translating country codes
 * @bus: Bus specific platform data. Only SDIO at the moment.
@@ -47,6 +50,9 @@ struct brcmf_mp_device {
 	int		fcmode;
 	bool		roamoff;
 	bool		iapp;
+	bool		eap_restrict;
+	bool		sdio_wq_highpri;
+	int		default_pm;
 	bool		ignore_probe_fail;
 	struct brcmfmac_pd_cc *country_codes;
 	const char	*board_type;
@@ -72,4 +78,8 @@ static inline void
 brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
 #endif
 
+u8 brcmf_map_prio_to_prec(void *cfg, u8 prio);
+
+u8 brcmf_map_prio_to_aci(void *cfg, u8 prio);
+
 #endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 436f501be93..fe083f8e95f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -62,6 +62,14 @@ struct wlc_d11rxhdr {
 	s8 rxpwr[4];
 } __packed;
 
+#define BRCMF_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define BRCMF_IF_STA_LIST_LOCK(ifp, flags) \
+	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define BRCMF_IF_STA_LIST_UNLOCK(ifp, flags) \
+	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+
+#define BRCMF_STA_NULL ((struct brcmf_sta *)NULL)
+
 char *brcmf_ifname(struct brcmf_if *ifp)
 {
 	if (!ifp)
@@ -191,7 +199,6 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
 	if (err < 0)
 		bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
 			 err);
-	brcmf_configure_arp_nd_offload(ifp, !cmd_value);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -352,6 +359,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 	if ((skb->priority == 0) || (skb->priority > 7))
 		skb->priority = cfg80211_classify8021d(skb, NULL);
 
+	/* set pacing shift for packet aggregation */
+	sk_pacing_shift_update(skb->sk, 8);
+
 	ret = brcmf_proto_tx_queue_data(drvr, ifp->ifidx, skb);
 	if (ret < 0)
 		brcmf_txfinalize(ifp, skb, false);
@@ -536,6 +546,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
 	struct ethhdr *eh;
 	u16 type;
 
+	if (!ifp) {
+		brcmu_pkt_buf_free_skb(txp);
+		return;
+	}
+
 	eh = (struct ethhdr *)(txp->data);
 	type = ntohs(eh->h_proto);
 
@@ -579,9 +594,6 @@ static int brcmf_netdev_stop(struct net_device *ndev)
 
 	brcmf_cfg80211_down(ndev);
 
-	if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
-		brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
-
 	brcmf_net_setcarrier(ifp, false);
 
 	return 0;
@@ -661,8 +673,6 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
 		goto fail;
 	}
 
-	netif_carrier_off(ndev);
-
 	ndev->priv_destructor = brcmf_cfg80211_free_netdev;
 	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
 	return 0;
@@ -673,7 +683,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
 	return -EBADE;
 }
 
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
+static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
 {
 	if (ndev->reg_state == NETREG_REGISTERED) {
 		if (rtnl_locked)
@@ -686,81 +696,6 @@ void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
 	}
 }
 
-static int brcmf_net_mon_open(struct net_device *ndev)
-{
-	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
-	u32 monitor;
-	int err;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_MONITOR, &monitor);
-	if (err) {
-		bphy_err(drvr, "BRCMF_C_GET_MONITOR error (%d)\n", err);
-		return err;
-	} else if (monitor) {
-		bphy_err(drvr, "Monitor mode is already enabled\n");
-		return -EEXIST;
-	}
-
-	monitor = 3;
-	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
-	if (err)
-		bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
-
-	return err;
-}
-
-static int brcmf_net_mon_stop(struct net_device *ndev)
-{
-	struct brcmf_if *ifp = netdev_priv(ndev);
-	struct brcmf_pub *drvr = ifp->drvr;
-	u32 monitor;
-	int err;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	monitor = 0;
-	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_MONITOR, monitor);
-	if (err)
-		bphy_err(drvr, "BRCMF_C_SET_MONITOR error (%d)\n", err);
-
-	return err;
-}
-
-static netdev_tx_t brcmf_net_mon_start_xmit(struct sk_buff *skb,
-					    struct net_device *ndev)
-{
-	dev_kfree_skb_any(skb);
-
-	return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops brcmf_netdev_ops_mon = {
-	.ndo_open = brcmf_net_mon_open,
-	.ndo_stop = brcmf_net_mon_stop,
-	.ndo_start_xmit = brcmf_net_mon_start_xmit,
-};
-
-int brcmf_net_mon_attach(struct brcmf_if *ifp)
-{
-	struct brcmf_pub *drvr = ifp->drvr;
-	struct net_device *ndev;
-	int err;
-
-	brcmf_dbg(TRACE, "Enter\n");
-
-	ndev = ifp->ndev;
-	ndev->netdev_ops = &brcmf_netdev_ops_mon;
-
-	err = register_netdevice(ndev);
-	if (err)
-		bphy_err(drvr, "Failed to register %s device\n", ndev->name);
-
-	return err;
-}
-
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
 {
 	struct net_device *ndev;
@@ -894,7 +829,9 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
 
 	init_waitqueue_head(&ifp->pend_8021x_wait);
 	spin_lock_init(&ifp->netif_stop_lock);
-
+	BRCMF_IF_STA_LIST_LOCK_INIT(ifp);
+	/* Initialize STA info list */
+	INIT_LIST_HEAD(&ifp->sta_list);
 	if (mac_addr != NULL)
 		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
 
@@ -1126,6 +1063,15 @@ static int brcmf_inet6addr_changed(struct notifier_block *nb,
 }
 #endif
 
+
+int brcmf_fwlog_attach(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	return brcmf_debug_fwlog_init(drvr);
+}
+
 static int brcmf_revinfo_read(struct seq_file *s, void *data)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
@@ -1315,7 +1261,7 @@ int brcmf_alloc(struct device *dev, struct brcmf_mp_device *settings)
 	return 0;
 }
 
-int brcmf_attach(struct device *dev)
+int brcmf_attach(struct device *dev, bool start_bus)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
@@ -1346,10 +1292,13 @@ int brcmf_attach(struct device *dev)
 	/* attach firmware event handler */
 	brcmf_fweh_attach(drvr);
 
-	ret = brcmf_bus_started(drvr, drvr->ops);
-	if (ret != 0) {
-		bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
-		goto fail;
+	if (start_bus) {
+		ret = brcmf_bus_started(drvr, drvr->ops);
+		if (ret != 0) {
+			bphy_err(drvr, "dongle is not responding: err=%d\n",
+				 ret);
+			goto fail;
+		}
 	}
 
 	return 0;
@@ -1482,8 +1431,10 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
 				 !brcmf_get_pend_8021x_cnt(ifp),
 				 MAX_WAIT_FOR_8021X_TX);
 
-	if (!err)
+	if (!err) {
 		bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n");
+		atomic_set(&ifp->pend_8021x_cnt, 0);
+	}
 
 	return !err;
 }
@@ -1552,3 +1503,268 @@ void __exit brcmf_core_exit(void)
 #endif
 }
 
+int
+brcmf_pktfilter_add_remove(struct net_device *ndev, int filter_num, bool add)
+{
+	struct brcmf_if *ifp =  netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_pkt_filter_le *pkt_filter;
+	int filter_fixed_len = offsetof(struct brcmf_pkt_filter_le, u);
+	int pattern_fixed_len = offsetof(struct brcmf_pkt_filter_pattern_le,
+				  mask_and_pattern);
+	u16 mask_and_pattern[MAX_PKTFILTER_PATTERN_SIZE];
+	int buflen = 0;
+	int ret = 0;
+
+	brcmf_dbg(INFO, "%s packet filter number %d\n",
+		  (add ? "add" : "remove"), filter_num);
+
+	pkt_filter = kzalloc(sizeof(*pkt_filter) +
+			(MAX_PKTFILTER_PATTERN_SIZE * 2), GFP_ATOMIC);
+	if (!pkt_filter)
+		return -ENOMEM;
+
+	switch (filter_num) {
+	case BRCMF_UNICAST_FILTER_NUM:
+		pkt_filter->id = 100;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 1;
+		mask_and_pattern[0] = 0x0001;
+		break;
+	case BRCMF_BROADCAST_FILTER_NUM:
+		//filter_pattern = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+		pkt_filter->id = 101;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 6;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0xFFFF;
+		mask_and_pattern[2] = 0xFFFF;
+		mask_and_pattern[3] = 0xFFFF;
+		mask_and_pattern[4] = 0xFFFF;
+		mask_and_pattern[5] = 0xFFFF;
+		break;
+	case BRCMF_MULTICAST4_FILTER_NUM:
+		//filter_pattern = "102 0 0 0 0xFFFFFF 0x01005E";
+		pkt_filter->id = 102;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 3;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0x01FF;
+		mask_and_pattern[2] = 0x5E00;
+		break;
+	case BRCMF_MULTICAST6_FILTER_NUM:
+		//filter_pattern = "103 0 0 0 0xFFFF 0x3333";
+		pkt_filter->id = 103;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 2;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0x3333;
+		break;
+	case BRCMF_MDNS_FILTER_NUM:
+		//filter_pattern = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+		pkt_filter->id = 104;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 6;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0xFFFF;
+		mask_and_pattern[2] = 0xFFFF;
+		mask_and_pattern[3] = 0x0001;
+		mask_and_pattern[4] = 0x005E;
+		mask_and_pattern[5] = 0xFB00;
+		break;
+	case BRCMF_ARP_FILTER_NUM:
+		//filter_pattern = "105 0 0 12 0xFFFF 0x0806";
+		pkt_filter->id = 105;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 12;
+		pkt_filter->u.pattern.size_bytes = 2;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0x0608;
+		break;
+	case BRCMF_BROADCAST_ARP_FILTER_NUM:
+		//filter_pattern = "106 0 0 0
+		//0xFFFFFFFFFFFF0000000000000806
+		//0xFFFFFFFFFFFF0000000000000806";
+		pkt_filter->id = 106;
+		pkt_filter->type = 0;
+		pkt_filter->negate_match = 0;
+		pkt_filter->u.pattern.offset = 0;
+		pkt_filter->u.pattern.size_bytes = 14;
+		mask_and_pattern[0] = 0xFFFF;
+		mask_and_pattern[1] = 0xFFFF;
+		mask_and_pattern[2] = 0xFFFF;
+		mask_and_pattern[3] = 0x0000;
+		mask_and_pattern[4] = 0x0000;
+		mask_and_pattern[5] = 0x0000;
+		mask_and_pattern[6] = 0x0608;
+		mask_and_pattern[7] = 0xFFFF;
+		mask_and_pattern[8] = 0xFFFF;
+		mask_and_pattern[9] = 0xFFFF;
+		mask_and_pattern[10] = 0x0000;
+		mask_and_pattern[11] = 0x0000;
+		mask_and_pattern[12] = 0x0000;
+		mask_and_pattern[13] = 0x0608;
+		break;
+	default:
+		ret = -EINVAL;
+		goto failed;
+	}
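+	/* The firmware expects the byte mask followed by the match
+	 * pattern, each size_bytes long, hence the doubled lengths
+	 * below.
+	 */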
+	memcpy(pkt_filter->u.pattern.mask_and_pattern, mask_and_pattern,
+	       pkt_filter->u.pattern.size_bytes * 2);
+	buflen = filter_fixed_len + pattern_fixed_len +
+		  pkt_filter->u.pattern.size_bytes * 2;
+
+	if (add) {
+		/* Add filter */
+		ifp->fwil_fwerr = true;
+		ret = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add",
+					       pkt_filter, buflen);
+		ifp->fwil_fwerr = false;
+		if (ret)
+			goto failed;
+		drvr->pkt_filter[filter_num].id = pkt_filter->id;
+		drvr->pkt_filter[filter_num].enable  = 0;
+
+	} else {
+		/* Delete filter */
+		ret = brcmf_fil_iovar_int_set(ifp, "pkt_filter_delete",
+					      pkt_filter->id);
+		if (ret == -ENOENT)
+			ret = 0;
+		if (ret)
+			goto failed;
+
+		drvr->pkt_filter[filter_num].id = 0;
+		drvr->pkt_filter[filter_num].enable  = 0;
+	}
+failed:
+	if (ret)
+		brcmf_err("%s packet filter failed, ret=%d\n",
+			  (add ? "add" : "remove"), ret);
+
+	kfree(pkt_filter);
+	return ret;
+}
+
+int brcmf_pktfilter_enable(struct net_device *ndev, bool enable)
+{
+	struct brcmf_if *ifp =  netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	int ret = 0;
+	int idx = 0;
+
+	for (idx = 0; idx < MAX_PKT_FILTER_COUNT; ++idx) {
+		if (drvr->pkt_filter[idx].id != 0) {
+			drvr->pkt_filter[idx].enable = enable;
+			ret = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable",
+						       &drvr->pkt_filter[idx],
+				sizeof(struct brcmf_pkt_filter_enable_le));
+			if (ret) {
+				brcmf_err("%s packet filter id(%d) failed, ret=%d\n",
+					  (enable ? "enable" : "disable"),
+					  drvr->pkt_filter[idx].id, ret);
+			}
+		}
+	}
+	return ret;
+}
+
+/** Find STA with MAC address ea in an interface's STA list. */
+struct brcmf_sta *
+brcmf_find_sta(struct brcmf_if *ifp, const u8 *ea)
+{
+	struct brcmf_sta  *sta;
+	unsigned long flags;
+
+	BRCMF_IF_STA_LIST_LOCK(ifp, flags);
+	list_for_each_entry(sta, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETH_ALEN)) {
+			brcmf_dbg(INFO, "Found STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x in sta list\n",
+				  sta->ea.octet[0], sta->ea.octet[1],
+				  sta->ea.octet[2], sta->ea.octet[3],
+				  sta->ea.octet[4], sta->ea.octet[5]);
+			BRCMF_IF_STA_LIST_UNLOCK(ifp, flags);
+			return sta;
+		}
+	}
+	BRCMF_IF_STA_LIST_UNLOCK(ifp, flags);
+
+	return BRCMF_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+struct brcmf_sta *
+brcmf_add_sta(struct brcmf_if *ifp, const u8 *ea)
+{
+	struct brcmf_sta *sta;
+	unsigned long flags;
+
+	sta =  kzalloc(sizeof(*sta), GFP_KERNEL);
+	if (sta == BRCMF_STA_NULL) {
+		brcmf_err("Alloc failed\n");
+		return BRCMF_STA_NULL;
+	}
+	memcpy(sta->ea.octet, ea, ETH_ALEN);
+	brcmf_dbg(INFO, "Add STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x into sta list\n",
+		  sta->ea.octet[0], sta->ea.octet[1],
+		  sta->ea.octet[2], sta->ea.octet[3],
+		  sta->ea.octet[4], sta->ea.octet[5]);
+
+	/* link the sta and the dhd interface */
+	sta->ifp = ifp;
+	INIT_LIST_HEAD(&sta->list);
+
+	BRCMF_IF_STA_LIST_LOCK(ifp, flags);
+
+	list_add_tail(&sta->list, &ifp->sta_list);
+
+	BRCMF_IF_STA_LIST_UNLOCK(ifp, flags);
+	return sta;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+brcmf_del_sta(struct brcmf_if *ifp, const u8 *ea)
+{
+	struct brcmf_sta *sta, *next;
+	unsigned long flags;
+
+	BRCMF_IF_STA_LIST_LOCK(ifp, flags);
+	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+		if (!memcmp(sta->ea.octet, ea, ETH_ALEN)) {
+			brcmf_dbg(INFO, "del STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x from sta list\n",
+				  ea[0], ea[1], ea[2], ea[3],
+				  ea[4], ea[5]);
+			list_del(&sta->list);
+			kfree(sta);
+		}
+	}
+
+	BRCMF_IF_STA_LIST_UNLOCK(ifp, flags);
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+struct brcmf_sta*
+brcmf_findadd_sta(struct brcmf_if *ifp, const u8 *ea)
+{
+	struct brcmf_sta *sta = NULL;
+
+	sta = brcmf_find_sta(ifp, ea);
+
+	if (!sta) {
+		/* Add entry */
+		sta = brcmf_add_sta(ifp, ea);
+	}
+	return sta;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 33b2ab3b54b..29c931b1f97 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -12,6 +12,7 @@
 
 #include <net/cfg80211.h>
 #include "fweh.h"
+#include "fwil_types.h"
 
 #define TOE_TX_CSUM_OL		0x00000001
 #define TOE_RX_CSUM_OL		0x00000002
@@ -136,6 +137,8 @@ struct brcmf_pub {
 	struct work_struct bus_reset;
 
 	u8 clmver[BRCMF_DCMD_SMLEN];
+	struct brcmf_pkt_filter_enable_le pkt_filter[MAX_PKT_FILTER_COUNT];
+
 };
 
 /* forward declarations */
@@ -185,6 +188,7 @@ struct brcmf_if {
 	struct brcmf_fws_mac_descriptor *fws_desc;
 	int ifidx;
 	s32 bsscfgidx;
+	bool isap;
 	u8 mac_addr[ETH_ALEN];
 	u8 netif_stop;
 	spinlock_t netif_stop_lock;
@@ -193,6 +197,20 @@ struct brcmf_if {
 	struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
 	u8 ipv6addr_idx;
 	bool fwil_fwerr;
+	struct list_head sta_list;              /* list of associated stations */
+	spinlock_t sta_list_lock;
+	bool fmac_pkt_fwd_en;
+};
+
+struct ether_addr {
+	u8 octet[ETH_ALEN];
+};
+
+/** Per-STA params. A list of brcmf_sta objects is managed in brcmf_if */
+struct brcmf_sta {
+	void *ifp;             /* associated brcmf_if */
+	struct ether_addr ea;   /* station's ethernet mac address */
+	struct list_head list;  /* link into brcmf_if::sta_list */
 };
 
 int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
@@ -210,10 +228,13 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
-int brcmf_net_mon_attach(struct brcmf_if *ifp);
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
 int __init brcmf_core_init(void);
 void __exit brcmf_core_exit(void);
-
+int brcmf_pktfilter_add_remove(struct net_device *ndev, int filter_num,
+			       bool add);
+int brcmf_pktfilter_enable(struct net_device *ndev, bool enable);
+void brcmf_del_sta(struct brcmf_if *ifp, const u8 *ea);
+struct brcmf_sta *brcmf_find_sta(struct brcmf_if *ifp, const u8 *ea);
+struct brcmf_sta *brcmf_findadd_sta(struct brcmf_if *ifp, const u8 *ea);
 #endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index 120515fe825..26efe50160c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -14,6 +14,82 @@
 #include "fweh.h"
 #include "debug.h"
 
+static int
+brcmf_debug_msgtrace_seqchk(u32 *prev, u32 cur)
+{
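+	/* Accept a wrap-around (0xFFFFFFFF -> 0) or the next sequence
+	 * number; report duplicate, lost and out-of-order traces.
+	 */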
+	if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) {
+		goto done;
+	} else if (cur == *prev) {
+		brcmf_dbg(FWCON, "duplicate trace\n");
+		return -1;
+	} else if (cur > *prev) {
+		brcmf_dbg(FWCON, "lost %d packets\n", cur - *prev);
+	} else {
+		brcmf_dbg(FWCON, "seq out of order, host %d, dongle %d\n",
+			  *prev, cur);
+	}
+done:
+	*prev = cur;
+	return 0;
+}
+
+static int
+brcmf_debug_msg_parser(void *event_data)
+{
+	int err = 0;
+	struct msgtrace_hdr *hdr;
+	char *data, *s;
+	static u32 seqnum_prev;
+
+	hdr = (struct msgtrace_hdr *)event_data;
+	data = (char *)event_data + MSGTRACE_HDRLEN;
+
+	/* There are 2 bytes available at the end of data */
+	data[ntohs(hdr->len)] = '\0';
+
+	if (ntohl(hdr->discarded_bytes) || ntohl(hdr->discarded_printf)) {
+		brcmf_dbg(FWCON, "Discarded_bytes %d discarded_printf %d\n",
+			  ntohl(hdr->discarded_bytes),
+			  ntohl(hdr->discarded_printf));
+	}
+
+	err = brcmf_debug_msgtrace_seqchk(&seqnum_prev, ntohl(hdr->seqnum));
+	if (err)
+		return err;
+
+	while (*data != '\0' && (s = strstr(data, "\n")) != NULL) {
+		*s = '\0';
+		brcmf_dbg(FWCON, "CONSOLE: %s\n", data);
+		data = s + 1;
+	}
+	if (*data)
+		brcmf_dbg(FWCON, "CONSOLE: %s", data);
+
+	return err;
+}
+
+static int
+brcmf_debug_trace_parser(struct brcmf_if *ifp,
+			 const struct brcmf_event_msg *evtmsg,
+			 void *event_data)
+{
+	int err = 0;
+	struct msgtrace_hdr *hdr;
+
+	hdr = (struct msgtrace_hdr *)event_data;
+	if (hdr->version != MSGTRACE_VERSION) {
+		brcmf_dbg(FWCON, "trace version mismatch host %d dngl %d\n",
+			  MSGTRACE_VERSION, hdr->version);
+		err = -EPROTO;
+		return err;
+	}
+
+	if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
+		err = brcmf_debug_msg_parser(event_data);
+
+	return err;
+}
+
 int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
 			       size_t len)
 {
@@ -42,6 +118,13 @@ int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
 	return 0;
 }
 
+
+int brcmf_debug_fwlog_init(struct brcmf_pub *drvr)
+{
+	return brcmf_fweh_register(drvr, BRCMF_E_TRACE,
+				brcmf_debug_trace_parser);
+}
+
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
 {
 	return drvr->wiphy->debugfsdir;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 9b221b509ad..f67f54c30d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -29,6 +29,7 @@
 #define BRCMF_MSGBUF_VAL	0x00040000
 #define BRCMF_PCIE_VAL		0x00080000
 #define BRCMF_FWCON_VAL		0x00100000
+#define BRCMF_ULP_VAL		0x00200000
 
 /* set default print format */
 #undef pr_fmt
@@ -103,6 +104,10 @@ do {								\
 
 #endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
+#define MSGTRACE_VERSION 1
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+
 #define brcmf_dbg_hex_dump(test, data, len, fmt, ...)			\
 do {									\
 	trace_brcmf_hexdump((void *)data, len);				\
@@ -120,6 +125,7 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
 			    int (*read_fn)(struct seq_file *seq, void *data));
 int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
 			       size_t len);
+int brcmf_debug_fwlog_init(struct brcmf_pub *drvr);
 #else
 static inline struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
 {
@@ -137,6 +143,25 @@ int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
 {
 	return 0;
 }
+
+static inline
+int brcmf_debug_fwlog_init(struct brcmf_pub *drvr)
+{
+	return 0;
+}
 #endif
 
+/* Message trace header */
+struct msgtrace_hdr {
+	u8	version;
+	u8	trace_type;
+	u16	len;    /* Len of the trace */
+	u32	seqnum; /* Sequence number of message */
+	/* Number of discarded bytes because of trace overflow  */
+	u32	discarded_bytes;
+	/* Number of discarded printf because of trace overflow */
+	u32	discarded_printf;
+};
+
+#define MSGTRACE_HDRLEN		sizeof(struct msgtrace_hdr)
 #endif /* BRCMFMAC_DEBUG_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 47496b871e9..52b2ce6d0f5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -16,7 +16,6 @@
 #include "feature.h"
 #include "common.h"
 
-#define BRCMF_FW_UNSUPPORTED	23
 
 /*
  * expand feature list to array of feature strings.
@@ -38,10 +37,10 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
 	{ BRCMF_FEAT_MCHAN, "mchan" },
 	{ BRCMF_FEAT_P2P, "p2p" },
 	{ BRCMF_FEAT_MONITOR, "monitor" },
-	{ BRCMF_FEAT_MONITOR_FLAG, "rtap" },
 	{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
 	{ BRCMF_FEAT_DOT11H, "802.11h" },
 	{ BRCMF_FEAT_SAE, "sae" },
+	{ BRCMF_FEAT_FWAUTH, "idauth" },
 };
 
 #ifdef DEBUG
@@ -285,14 +284,13 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 	if (!err)
 		ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
 
-	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
-
 	if (drvr->settings->feature_disable) {
 		brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
 			  ifp->drvr->feat_flags,
 			  drvr->settings->feature_disable);
 		ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
 	}
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
 
 	brcmf_feat_firmware_overrides(drvr);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index cda3fc1bab7..56d991cae1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -23,11 +23,11 @@
  * GSCAN: enhanced scan offload feature.
  * FWSUP: Firmware supplicant.
  * MONITOR: firmware can pass monitor packets to host.
- * MONITOR_FLAG: firmware flags monitor packets.
  * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
  * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
  * DOT11H: firmware supports 802.11h
  * SAE: simultaneous authentication of equals
+ * FWAUTH: Firmware authenticator
  */
 #define BRCMF_FEAT_LIST \
 	BRCMF_FEAT_DEF(MBSS) \
@@ -45,11 +45,11 @@
 	BRCMF_FEAT_DEF(GSCAN) \
 	BRCMF_FEAT_DEF(FWSUP) \
 	BRCMF_FEAT_DEF(MONITOR) \
-	BRCMF_FEAT_DEF(MONITOR_FLAG) \
 	BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
 	BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
 	BRCMF_FEAT_DEF(DOT11H) \
-	BRCMF_FEAT_DEF(SAE)
+	BRCMF_FEAT_DEF(SAE) \
+	BRCMF_FEAT_DEF(FWAUTH)
 
 /*
  * Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index fe2f42cee59..7964d09480a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -59,7 +59,7 @@ struct brcmf_fw_request {
 	u16 bus_nr;
 	u32 n_items;
 	const char *board_type;
-	struct brcmf_fw_item items[];
+	struct brcmf_fw_item items[0];
 };
 
 struct brcmf_fw_name {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 8e9d067bdfe..e1127d7e086 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -26,10 +26,10 @@
 #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
 
 static const u8 brcmf_flowring_prio2fifo[] = {
-	1,
-	0,
 	0,
 	1,
+	1,
+	0,
 	2,
 	2,
 	3,
@@ -419,7 +419,6 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
 				flowid = flow->hash[i].flowid;
 				if (flow->rings[flowid]->status != RING_OPEN)
 					continue;
-				flow->rings[flowid]->status = RING_CLOSING;
 				brcmf_msgbuf_delete_flowring(drvr, flowid);
 			}
 		}
@@ -458,10 +457,8 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
 		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
 		    (hash[i].ifidx == ifidx)) {
 			flowid = flow->hash[i].flowid;
-			if (flow->rings[flowid]->status == RING_OPEN) {
-				flow->rings[flowid]->status = RING_CLOSING;
+			if (flow->rings[flowid]->status == RING_OPEN)
 				brcmf_msgbuf_delete_flowring(drvr, flowid);
-			}
 		}
 	}
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index a5cced2c89a..79c8a858b6d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -32,7 +32,7 @@ struct brcmf_fweh_queue_item {
 	u8 ifaddr[ETH_ALEN];
 	struct brcmf_event_msg_be emsg;
 	u32 datalen;
-	u8 data[];
+	u8 data[0];
 };
 
 /**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index a82f51bc1e6..e740c2f00d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -90,7 +90,9 @@ struct brcmf_cfg80211_info;
 	BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
 	BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
-	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+	BRCMF_ENUM_DEF(PHY_TEMP, 111) \
+	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+	BRCMF_ENUM_DEF(ULP, 146)
 
 #define BRCMF_ENUM_DEF(id, val) \
 	BRCMF_E_##id = (val),
@@ -102,7 +104,7 @@ enum brcmf_fweh_event_code {
 	 * minimum length check in device firmware so it is
 	 * hard-coded here.
 	 */
-	BRCMF_E_LAST = 139
+	BRCMF_E_LAST = 147
 };
 #undef BRCMF_ENUM_DEF
 
@@ -283,6 +285,28 @@ struct brcmf_if_event {
 	u8 role;
 };
 
+enum event_msgs_ext_command {
+	EVENTMSGS_NONE		=	0,
+	EVENTMSGS_SET_BIT	=	1,
+	EVENTMSGS_RESET_BIT	=	2,
+	EVENTMSGS_SET_MASK	=	3
+};
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE	offsetof(struct eventmsgs_ext, mask[0])
+
+/* len - for SET, the mask size passed from the application to the firmware;
+ *       for GET, the actual firmware mask size.
+ * maxgetsize - only used for GET; indicates the maximum mask size the
+ *              application can read from the firmware.
+ */
+struct eventmsgs_ext {
+	u8	ver;
+	u8	command;
+	u8	len;
+	u8	maxgetsize;
+	u8	mask[1];
+};
+
 typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
 				    const struct brcmf_event_msg *evtmsg,
 				    void *data);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index ae4cf437290..f2e00a3c19e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -49,8 +49,6 @@
 #define BRCMF_C_GET_PM				85
 #define BRCMF_C_SET_PM				86
 #define BRCMF_C_GET_REVINFO			98
-#define BRCMF_C_GET_MONITOR			107
-#define BRCMF_C_SET_MONITOR			108
 #define BRCMF_C_GET_CURR_RATESET		114
 #define BRCMF_C_GET_AP				117
 #define BRCMF_C_SET_AP				118
@@ -79,6 +77,8 @@
 #define BRCMF_C_SET_VAR				263
 #define BRCMF_C_SET_WSEC_PMK			268
 
+#define BRCMF_FW_UNSUPPORTED			23
+
 s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
 s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
 s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index de0ef1b545c..b7c16dc605c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -19,7 +19,7 @@
 #define BRCMF_ARP_OL_PEER_AUTO_REPLY	0x00000008
 
 #define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
-#define BRCMF_BSS_RSSI_ON_CHANNEL	0x0002
+#define BRCMF_BSS_RSSI_ON_CHANNEL	0x0004
 
 #define BRCMF_STA_BRCM			0x00000001	/* Running a Broadcom driver */
 #define BRCMF_STA_WME			0x00000002	/* WMM association */
@@ -135,9 +135,22 @@
 /* Link Down indication in WoWL mode: */
 #define BRCMF_WOWL_LINKDOWN		(1 << 31)
 
-#define BRCMF_WOWL_MAXPATTERNS		8
+#define BRCMF_WOWL_MAXPATTERNS		16
 #define BRCMF_WOWL_MAXPATTERNSIZE	128
 
+enum {
+	BRCMF_UNICAST_FILTER_NUM = 0,
+	BRCMF_BROADCAST_FILTER_NUM,
+	BRCMF_MULTICAST4_FILTER_NUM,
+	BRCMF_MULTICAST6_FILTER_NUM,
+	BRCMF_MDNS_FILTER_NUM,
+	BRCMF_ARP_FILTER_NUM,
+	BRCMF_BROADCAST_ARP_FILTER_NUM,
+	MAX_PKT_FILTER_COUNT
+};
+
+#define MAX_PKTFILTER_PATTERN_SIZE		16
+
 #define BRCMF_COUNTRY_BUF_SZ		4
 #define BRCMF_ANT_MAX			4
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 8cc52935fd4..23575351763 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -311,28 +311,6 @@ struct brcmf_skbuff_cb {
 /* How long to defer borrowing in jiffies */
 #define BRCMF_FWS_BORROW_DEFER_PERIOD		(HZ / 10)
 
-/**
- * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
- *
- * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
- * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
- * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
- * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
- * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
- * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
- * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
- * @BRCMF_FWS_FIFO_COUNT: number of fifos.
- */
-enum brcmf_fws_fifo {
-	BRCMF_FWS_FIFO_FIRST,
-	BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
-	BRCMF_FWS_FIFO_AC_BE,
-	BRCMF_FWS_FIFO_AC_VI,
-	BRCMF_FWS_FIFO_AC_VO,
-	BRCMF_FWS_FIFO_BCMC,
-	BRCMF_FWS_FIFO_ATIM,
-	BRCMF_FWS_FIFO_COUNT
-};
 
 /**
  * enum brcmf_fws_txstatus - txstatus flag values.
@@ -345,6 +323,10 @@ enum brcmf_fws_fifo {
  *	firmware suppress the packet as device is already in PS mode.
  * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
  *	firmware tossed the packet.
+ * @BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK:
+ *	firmware tossed the packet after retries.
+ * @BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED:
+ *	firmware previously reported the packet as suppressed in error;
+ *	it was actually acked.
  * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
  *	host tossed the packet.
  */
@@ -353,6 +335,8 @@ enum brcmf_fws_txstatus {
 	BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
 	BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
 	BRCMF_FWS_TXSTATUS_FW_TOSSED,
+	BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK,
+	BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED,
 	BRCMF_FWS_TXSTATUS_HOST_TOSSED
 };
 
@@ -405,6 +389,7 @@ struct brcmf_fws_mac_descriptor {
 };
 
 #define BRCMF_FWS_HANGER_MAXITEMS	3072
+#define BRCMF_BORROW_RATIO			3
 
 /**
  * enum brcmf_fws_hanger_item_state - state of hanger item.
@@ -501,7 +486,8 @@ struct brcmf_fws_info {
 	u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
 	int fifo_credit[BRCMF_FWS_FIFO_COUNT];
 	int init_fifo_credit[BRCMF_FWS_FIFO_COUNT];
-	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
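+	/* credits borrowed, indexed as [borrower ac][lender ac] */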
+	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1]
+		[BRCMF_FWS_FIFO_AC_VO + 1];
 	int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
 	u32 fifo_credit_map;
 	u32 fifo_delay_map;
@@ -643,6 +629,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
 static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
 				int ifidx)
 {
+	struct brcmf_fws_hanger_item *hi;
 	bool (*matchfn)(struct sk_buff *, void *) = NULL;
 	struct sk_buff *skb;
 	int prec;
@@ -654,6 +641,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
 		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
 		while (skb) {
 			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+			hi = &fws->hanger.items[hslot];
+			WARN_ON(skb != hi->pkt);
+			hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
 			brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
 						true);
 			brcmu_pkt_buf_free_skb(skb);
@@ -908,7 +898,7 @@ static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
 	wlh += wlh[1] + 2;
 
 	if (entry->send_tim_signal) {
-		entry->send_tim_signal = false;
+		entry->send_tim_signal = 0;
 		wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
 		wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
 		wlh[2] = entry->mac_handle;
@@ -1209,11 +1199,11 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
 
 	fws->fifo_credit_map |= 1 << fifo;
 
-	if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-	    (fws->credits_borrowed[0])) {
+	if (fifo > BRCMF_FWS_FIFO_AC_BK &&
+	    fifo <= BRCMF_FWS_FIFO_AC_VO) {
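+		/* First hand back any credits this AC borrowed to their
+		 * lenders, then top up its own pool with what remains.
+		 */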
 		for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
 		     lender_ac--) {
-			borrowed = &fws->credits_borrowed[lender_ac];
+			borrowed = &fws->credits_borrowed[fifo][lender_ac];
 			if (*borrowed) {
 				fws->fifo_credit_map |= (1 << lender_ac);
 				fifo_credit = &fws->fifo_credit[lender_ac];
@@ -1230,7 +1220,10 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
 		}
 	}
 
-	fws->fifo_credit[fifo] += credits;
+	if (credits)
+		fws->fifo_credit[fifo] += credits;
+
 	if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo])
 		fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo];
 
@@ -1473,6 +1466,10 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
 		remove_from_hanger = false;
 	} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
 		fws->stats.txs_tossed += compcnt;
+	else if (flags == BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK)
+		fws->stats.txs_discard += compcnt;
+	else if (flags == BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED)
+		fws->stats.txs_discard += compcnt;
 	else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
 		fws->stats.txs_host_tossed += compcnt;
 	else
@@ -1628,6 +1625,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
 			fws->fifo_credit_map |= 1 << i;
 		else
 			fws->fifo_credit_map &= ~(1 << i);
+
 		WARN_ONCE(fws->fifo_credit[i] < 0,
 			  "fifo_credit[%d] is negative(%d)\n", i,
 			  fws->fifo_credit[i]);
@@ -1865,6 +1863,9 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 
 	WARN_ON(siglen > skb->len);
 
+	if (siglen > skb->len)
+		siglen = skb->len;
+
 	if (!siglen)
 		return;
 	/* if flow control disabled, skip to packet data and leave */
@@ -2027,27 +2028,31 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
 	}
 }
 
-static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
+static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws,
+				   int highest_lender_ac, int borrower_ac,
+				   bool borrow_all)
 {
-	int lender_ac;
+	int lender_ac, borrow_limit = 0;
 
-	if (time_after(fws->borrow_defer_timestamp, jiffies)) {
-		fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
-		return -ENAVAIL;
-	}
+	for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+
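+		/* Unless borrowing everything, leave the lender at least
+		 * 1/BRCMF_BORROW_RATIO of its initial credits.
+		 */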
+		if (!borrow_all)
+			borrow_limit =
+			  fws->init_fifo_credit[lender_ac] / BRCMF_BORROW_RATIO;
+		else
+			borrow_limit = 0;
 
-	for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
-		if (fws->fifo_credit[lender_ac] > 0) {
-			fws->credits_borrowed[lender_ac]++;
+		if (fws->fifo_credit[lender_ac] > borrow_limit) {
+			fws->credits_borrowed[borrower_ac][lender_ac]++;
 			fws->fifo_credit[lender_ac]--;
 			if (fws->fifo_credit[lender_ac] == 0)
 				fws->fifo_credit_map &= ~(1 << lender_ac);
-			fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
+			fws->fifo_credit_map |= (1 << borrower_ac);
 			brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
 			return 0;
 		}
 	}
-	fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
+	fws->fifo_credit_map &= ~(1 << borrower_ac);
 	return -ENAVAIL;
 }
 
@@ -2130,8 +2135,10 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
 	skcb->if_flags = 0;
 	skcb->state = BRCMF_FWS_SKBSTATE_NEW;
 	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
+
+	/* mapping from 802.1d priority to firmware fifo index */
 	if (!multicast)
-		fifo = brcmf_fws_prio2fifo[skb->priority];
+		fifo = brcmf_map_prio_to_aci(drvr->config, skb->priority);
 
 	brcmf_fws_lock(fws);
 	if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
@@ -2191,6 +2198,7 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
 
 	brcmf_fws_lock(fws);
 	ifp->fws_desc = NULL;
+	brcmf_fws_macdesc_cleanup(fws, entry, ifp->ifidx);
 	brcmf_dbg(TRACE, "deleting %s\n", entry->name);
 	brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
 				  ifp->ifidx);
@@ -2236,9 +2244,10 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
 			}
 			continue;
 		}
-		while ((fws->fifo_credit[fifo] > 0) ||
+
+		while ((fws->fifo_credit[fifo]) ||
 		       ((!fws->bcmc_credit_check) &&
-			(fifo == BRCMF_FWS_FIFO_BCMC))) {
+				(fifo == BRCMF_FWS_FIFO_BCMC))) {
 			skb = brcmf_fws_deq(fws, fifo);
 			if (!skb)
 				break;
@@ -2248,10 +2257,14 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
 			if (fws->bus_flow_blocked)
 				break;
 		}
-		if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-		    (fws->fifo_credit[fifo] <= 0) &&
-		    (!fws->bus_flow_blocked)) {
-			while (brcmf_fws_borrow_credit(fws) == 0) {
+
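+		/* This AC ran out of credits: borrow from the lower
+		 * priority ACs, one credit per dequeued packet.
+		 */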
+		if (fifo >= BRCMF_FWS_FIFO_AC_BE &&
+		    fifo <= BRCMF_FWS_FIFO_AC_VO &&
+		    fws->fifo_credit[fifo] == 0 &&
+		    !fws->bus_flow_blocked) {
+			while (brcmf_fws_borrow_credit(fws,
+						       fifo - 1, fifo,
+						       true) == 0) {
 				skb = brcmf_fws_deq(fws, fifo);
 				if (!skb) {
 					brcmf_fws_return_credits(fws, fifo, 1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b486d578ec9..b16a9d1c050 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -6,6 +6,29 @@
 #ifndef FWSIGNAL_H_
 #define FWSIGNAL_H_
 
+/**
+ * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
+ *
+ * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
+ * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
+ * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
+ * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
+ * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
+ * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
+ * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
+ * @BRCMF_FWS_FIFO_COUNT: number of fifos.
+ */
+enum brcmf_fws_fifo {
+	BRCMF_FWS_FIFO_FIRST,
+	BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
+	BRCMF_FWS_FIFO_AC_BE,
+	BRCMF_FWS_FIFO_AC_VI,
+	BRCMF_FWS_FIFO_AC_VO,
+	BRCMF_FWS_FIFO_BCMC,
+	BRCMF_FWS_FIFO_ATIM,
+	BRCMF_FWS_FIFO_COUNT
+};
+
 struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
 void brcmf_fws_detach(struct brcmf_fws_info *fws);
 void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 8bb4f1fa790..373afdcf970 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -54,6 +54,7 @@
 #define BRCMF_IOCTL_REQ_PKTID			0xFFFE
 
 #define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
+#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE		8192
 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8
@@ -70,6 +71,7 @@
 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
 #define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48
 
+#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES		10
 
 struct msgbuf_common_hdr {
 	u8				msgtype;
@@ -365,7 +367,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
 	struct brcmf_msgbuf_pktid *pktid;
 	struct sk_buff *skb;
 
-	if (idx >= pktids->array_size) {
+	if (idx < 0 || idx >= pktids->array_size) {
 		brcmf_err("Invalid packet id %d (max %d)\n", idx,
 			  pktids->array_size);
 		return NULL;
@@ -805,8 +807,12 @@ static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
 	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
-		if (flowid == BRCMF_FLOWRING_INVALID_ID)
+		if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 			return -ENOMEM;
+		} else {
+			brcmf_flowring_enqueue(flow, flowid, skb);
+			return 0;
+		}
 	}
 	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
 	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
@@ -1028,7 +1034,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
 		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
 		memset(rx_bufpost, 0, sizeof(*rx_bufpost));
 
-		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);
 
 		if (skb == NULL) {
 			bphy_err(drvr, "Failed to alloc SKB\n");
@@ -1139,7 +1145,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 {
 	struct brcmf_pub *drvr = msgbuf->drvr;
 	struct msgbuf_rx_complete *rx_complete;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *cpskb = NULL;
+	struct ethhdr *eh;
 	u16 data_offset;
 	u16 buflen;
 	u16 flags;
@@ -1188,6 +1195,34 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 		return;
 	}
 
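+	/* In AP mode with packet forwarding enabled, loop unicast frames
+	 * destined for an associated STA back to the firmware for
+	 * transmission; multicast frames are copied so they are both
+	 * forwarded and passed up the local stack.
+	 */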
+	if (ifp->isap && ifp->fmac_pkt_fwd_en) {
+		eh = (struct ethhdr *)(skb->data);
+		skb_set_network_header(skb, sizeof(struct ethhdr));
+		skb->protocol = eh->h_proto;
+		skb->priority = cfg80211_classify8021d(skb, NULL);
+		if (is_unicast_ether_addr(eh->h_dest)) {
+			if (brcmf_find_sta(ifp, eh->h_dest)) {
+				 /* determine the priority */
+				if (skb->priority == 0 || skb->priority > 7) {
+					skb->priority =
+						cfg80211_classify8021d(skb,
+								       NULL);
+				}
+				brcmf_proto_tx_queue_data(ifp->drvr,
+							  ifp->ifidx, skb);
+				return;
+			}
+		} else {
+			cpskb = pskb_copy(skb, GFP_ATOMIC);
+			if (cpskb) {
+				brcmf_proto_tx_queue_data(ifp->drvr,
+							  ifp->ifidx,
+							  cpskb);
+			} else {
+				brcmf_err("Unable to do skb copy\n");
+			}
+		}
+	}
 	skb->protocol = eth_type_trans(skb, ifp->ndev);
 	brcmf_netif_rx(ifp, skb);
 }
@@ -1394,9 +1429,27 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 	struct msgbuf_tx_flowring_delete_req *delete;
 	struct brcmf_commonring *commonring;
+	struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
+	struct brcmf_flowring *flow = msgbuf->flow;
 	void *ret_ptr;
 	u8 ifidx;
 	int err;
+	int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;
+
+	/* make sure it is not in txflow */
+	brcmf_commonring_lock(commonring_del);
+	flow->rings[flowid]->status = RING_CLOSING;
+	brcmf_commonring_unlock(commonring_del);
+
+	/* wait for commonring txflow finished */
+	while (retry && atomic_read(&commonring_del->outstanding_tx)) {
+		usleep_range(5000, 10000);
+		retry--;
+	}
+	if (!retry) {
+		brcmf_err("timed out waiting for txstatus\n");
+		atomic_set(&commonring_del->outstanding_tx, 0);
+	}
 
 	/* no need to submit if firmware can not be reached */
 	if (drvr->bus_if->state != BRCMF_BUS_UP) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index b886b56a5e5..e4f3493a5d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -20,7 +20,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
 	struct property *prop;
 	int irq;
 	u32 irqf;
-	u32 val;
+	u32 val32;
+	u16 val16;
 
 	/* Set board-type to the first string of the machine compatible prop */
 	root = of_find_node_by_path("/");
@@ -34,8 +35,15 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
 	    !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
 		return;
 
-	if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
-		sdio->drive_strength = val;
+	if (of_property_read_u32(np, "brcm,drive-strength", &val32) == 0)
+		sdio->drive_strength = val32;
+
+	sdio->broken_sg_support = of_property_read_bool(np,
+			"brcm,broken_sg_support");
+	if (of_property_read_u16(np, "brcm,sd_head_align", &val16) == 0)
+		sdio->sd_head_align = val16;
+	if (of_property_read_u16(np, "brcm,sd_sgentry_align", &val16) == 0)
+		sdio->sd_sgentry_align = val16;
 
 	/* make sure there are interrupts defined in the node */
 	if (!of_find_property(np, "interrupts", NULL))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 1f5deea5a28..8eb6d4f74b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -17,6 +17,7 @@
 #include "fwil_types.h"
 #include "p2p.h"
 #include "cfg80211.h"
+#include "feature.h"
 
 /* parameters used for p2p escan */
 #define P2PAPI_SCAN_NPROBES 1
@@ -59,12 +60,13 @@
 #define P2P_AF_MIN_DWELL_TIME		100
 #define P2P_AF_MED_DWELL_TIME		400
 #define P2P_AF_LONG_DWELL_TIME		1000
-#define P2P_AF_TX_MAX_RETRY		1
+#define P2P_AF_TX_MAX_RETRY		5
 #define P2P_AF_MAX_WAIT_TIME		msecs_to_jiffies(2000)
 #define P2P_INVALID_CHANNEL		-1
 #define P2P_CHANNEL_SYNC_RETRY		5
 #define P2P_AF_FRM_SCAN_MAX_WAIT	msecs_to_jiffies(450)
 #define P2P_DEFAULT_SLEEP_TIME_VSDB	200
+#define P2P_AF_RETRY_DELAY_TIME		40
 
 /* WiFi P2P Public Action Frame OUI Subtypes */
 #define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
@@ -92,6 +94,9 @@
 #define P2PSD_ACTION_ID_GAS_CRESP	0x0d	/* GAS Comback Response AF */
 
 #define BRCMF_P2P_DISABLE_TIMEOUT	msecs_to_jiffies(500)
+
+/* Mask for retry counter of custom dwell time */
+#define CUSTOM_RETRY_MASK 0xff000000
 /**
  * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
  *
@@ -457,10 +462,21 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
  */
 static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
 {
+	struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
 	bool random_addr = false;
+	bool local_admin = false;
 
-	if (!dev_addr || is_zero_ether_addr(dev_addr))
-		random_addr = true;
+	if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+		/* If the primary interface address is already locally
+		 * administered, create a new random address.
+		 */
+		if (pri_ifp->mac_addr[0] & 0x02) {
+			random_addr = true;
+		} else {
+			dev_addr = pri_ifp->mac_addr;
+			local_admin = true;
+		}
+	}
 
 	/* Generate the P2P Device Address obtaining a random ethernet
 	 * address with the locally administered bit set.
@@ -470,13 +486,20 @@ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
 	else
 		memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
 
+	if (local_admin)
+		p2p->dev_addr[0] |= 0x02;
+
 	/* Generate the P2P Interface Address.  If the discovery and connection
 	 * BSSCFGs need to simultaneously co-exist, then this address must be
 	 * different from the P2P Device Address, but also locally administered.
 	 */
-	memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
-	p2p->int_addr[0] |= 0x02;
-	p2p->int_addr[4] ^= 0x80;
+	memcpy(p2p->conn_int_addr, p2p->dev_addr, ETH_ALEN);
+	p2p->conn_int_addr[0] |= 0x02;
+	p2p->conn_int_addr[4] ^= 0x80;
+
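+	/* The second P2P connection interface gets its own locally
+	 * administered address by flipping a different bit pattern.
+	 */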
+	memcpy(p2p->conn2_int_addr, p2p->dev_addr, ETH_ALEN);
+	p2p->conn2_int_addr[0] |= 0x02;
+	p2p->conn2_int_addr[4] ^= 0x90;
 }
 
 /**
@@ -872,7 +895,7 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_p2p_info *p2p = &cfg->p2p;
-	int err;
+	int err = 0;
 
 	if (brcmf_p2p_scan_is_p2p_request(request)) {
 		/* find my listen channel */
@@ -895,7 +918,9 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
 		/* override .run_escan() callback. */
 		cfg->escan_info.run = brcmf_p2p_run_escan;
 	}
-	return 0;
+	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+				    request->ie, request->ie_len);
+	return err;
 }
 
 
@@ -1244,6 +1269,30 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
 	return true;
 }
 
+/**
+ * brcmf_p2p_abort_action_frame() - abort action frame.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static s32 brcmf_p2p_abort_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_cfg80211_vif *vif;
+	s32 err;
+	s32 int_val = 1;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe_abort", &int_val,
+					sizeof(s32));
+	if (err)
+		brcmf_err(" aborting action frame has failed (%d)\n", err);
+
+	return err;
+}
+
 /**
  * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
  *
@@ -1255,6 +1304,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
 {
 	struct brcmf_p2p_info *p2p = &cfg->p2p;
 	struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+	s32 err;
 
 	if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
 	    (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1263,8 +1313,13 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
 		/* if channel is not zero, "actfame" uses off channel scan.
 		 * So abort scan for off channel completion.
 		 */
-		if (p2p->af_sent_channel)
-			brcmf_notify_escan_complete(cfg, ifp, true, true);
+		if (p2p->af_sent_channel) {
+			/* abort actframe using actframe_abort or abort scan */
+			err = brcmf_p2p_abort_action_frame(cfg);
+			if (err)
+				brcmf_notify_escan_complete(cfg, ifp, true,
+							    true);
+		}
 	} else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
 			    &p2p->status)) {
 		brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
@@ -1491,6 +1546,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
 {
 	struct brcmf_pub *drvr = p2p->cfg->pub;
 	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_p2p_action_frame *p2p_act_frame;
 	s32 err = 0;
 	s32 timeout = 0;
 
@@ -1500,7 +1556,13 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
 	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
 
-	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	/* check if it is a p2p_presence response */
+	p2p_act_frame = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
+	if (p2p_act_frame->subtype == P2P_AF_PRESENCE_RSP)
+		vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+	else
+		vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
 	err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
 					sizeof(*af_params));
 	if (err) {
@@ -1640,6 +1702,17 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
 	return err;
 }
 
+static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell,
+					   unsigned long dwell_jiffies)
+{
+	if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+	    (jiffies_to_msecs(jiffies - dwell_jiffies) >
+	    (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+		brcmf_err("Action frame TX retry time over dwell time!\n");
+		return true;
+	}
+	return false;
+}
+
 /**
  * brcmf_p2p_send_action_frame() - send action frame .
  *
@@ -1664,6 +1737,10 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
 	s32 tx_retry;
 	s32 extra_listen_time;
 	uint delta_ms;
+	unsigned long dwell_jiffies = 0;
+	bool dwell_overflow = false;
+
+	s32 requested_dwell = af_params->dwell_time;
 
 	action_frame = &af_params->action_frame;
 	action_frame_len = le16_to_cpu(action_frame->len);
@@ -1775,12 +1852,21 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
 		/* update channel */
 		af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
 	}
+	dwell_jiffies = jiffies;
+	dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+							dwell_jiffies);
 
 	tx_retry = 0;
 	while (!p2p->block_gon_req_tx &&
-	       (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+	       (!ack) && (tx_retry < P2P_AF_TX_MAX_RETRY) &&
+		!dwell_overflow) {
+		if (af_params->channel)
+			msleep(P2P_AF_RETRY_DELAY_TIME);
+
 		ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
 		tx_retry++;
+		dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
+								dwell_jiffies);
 	}
 	if (ack == false) {
 		bphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
@@ -1994,7 +2080,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
 
 	if_request.type = cpu_to_le16((u16)if_type);
 	if_request.chspec = cpu_to_le16(chanspec);
-	memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+	memcpy(if_request.addr, p2p->conn_int_addr, sizeof(if_request.addr));
 
 	brcmf_cfg80211_arm_vif_event(cfg, vif);
 	err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
@@ -2092,8 +2178,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
 	/* firmware requires unique mac address for p2pdev interface */
 	if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
 		bphy_err(drvr, "discovery vif must be different from primary interface\n");
-		err = -EINVAL;
-		goto fail;
+		return ERR_PTR(-EINVAL);
 	}
 
 	brcmf_p2p_generate_bss_mac(p2p, addr);
@@ -2149,6 +2234,27 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
 	return ERR_PTR(err);
 }
 
+int brcmf_p2p_get_conn_idx(struct brcmf_cfg80211_info *cfg)
+{
+	int i;
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+
+	if (!ifp)
+		return -ENODEV;
+
+	for (i = P2PAPI_BSSCFG_CONNECTION; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (!cfg->p2p.bss_idx[i].vif) {
+			if (i == P2PAPI_BSSCFG_CONNECTION2 &&
+			    !(brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) {
+				brcmf_err("Multi p2p not supported");
+				return -EIO;
+			}
+			return i;
+		}
+	}
+	return -EIO;
+}
+
 /**
  * brcmf_p2p_add_vif() - create a new P2P virtual interface.
  *
@@ -2168,7 +2274,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 	struct brcmf_pub *drvr = cfg->pub;
 	struct brcmf_cfg80211_vif *vif;
 	enum brcmf_fil_p2p_if_types iftype;
-	int err;
+	int err = 0;
+	int connidx;
+	u8 *p2p_intf_addr;
 
 	if (brcmf_cfg80211_vif_event_armed(cfg))
 		return ERR_PTR(-EBUSY);
@@ -2194,9 +2302,21 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 		return (struct wireless_dev *)vif;
 	brcmf_cfg80211_arm_vif_event(cfg, vif);
 
-	err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
-				       iftype);
+	connidx = brcmf_p2p_get_conn_idx(cfg);
+
+	if (connidx == P2PAPI_BSSCFG_CONNECTION)
+		p2p_intf_addr = cfg->p2p.conn_int_addr;
+	else if (connidx == P2PAPI_BSSCFG_CONNECTION2)
+		p2p_intf_addr = cfg->p2p.conn2_int_addr;
+	else
+		err = -EINVAL;
+
+	if (!err)
+		err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp,
+						p2p_intf_addr, iftype);
+
 	if (err) {
+		brcmf_err("request p2p interface failed\n");
 		brcmf_cfg80211_arm_vif_event(cfg, NULL);
 		goto fail;
 	}
@@ -2228,7 +2348,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 		goto fail;
 	}
 
-	cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+	cfg->p2p.bss_idx[connidx].vif = vif;
 	/* Disable firmware roaming for P2P interface  */
 	brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
 	if (iftype == BRCMF_FIL_P2P_IF_GO) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 64ab9b6a677..d2ecee565bf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -14,13 +14,15 @@ struct brcmf_cfg80211_info;
  *
  * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
  * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
- * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's 1st P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION2: maps to driver's 2nd P2P connection bsscfg.
  * @P2PAPI_BSSCFG_MAX: used for range checking.
  */
 enum p2p_bss_type {
 	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
 	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
-	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* driver's 1st P2P connection bsscfg */
+	P2PAPI_BSSCFG_CONNECTION2, /* driver's 2nd P2P connection bsscfg */
 	P2PAPI_BSSCFG_MAX
 };
 
@@ -119,7 +121,8 @@ struct brcmf_p2p_info {
 	struct brcmf_cfg80211_info *cfg;
 	unsigned long status;
 	u8 dev_addr[ETH_ALEN];
-	u8 int_addr[ETH_ALEN];
+	u8 conn_int_addr[ETH_ALEN];
+	u8 conn2_int_addr[ETH_ALEN];
 	struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
 	struct timer_list listen_timer;
 	u8 listen_channel;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 41645ebe7e2..0f2d1f5f60e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -38,6 +38,7 @@
 #include "chip.h"
 #include "core.h"
 #include "common.h"
+#include "cfg80211.h"
 
 
 enum brcmf_pcie_state {
@@ -52,12 +53,13 @@ BRCMF_FW_DEF(4356, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(43570, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4358, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4359, "/*(DEBLOBBED)*/");
-BRCMF_FW_DEF(4364, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4365B, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4365C, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4366B, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4366C, "/*(DEBLOBBED)*/");
 BRCMF_FW_DEF(4371, "/*(DEBLOBBED)*/");
+BRCMF_FW_DEF(4355, "/*(DEBLOBBED)*/");
+BRCMF_FW_DEF(54591, "/*(DEBLOBBED)*/");
 
 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 	BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
@@ -71,13 +73,14 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
 	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
 	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
-	BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
 	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
 	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+	BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
+	BRCMF_FW_ENTRY(CY_CC_54591_CHIP_ID, 0xFFFFFFFF, 54591),
 };
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT		5000 /* msec */
@@ -271,6 +274,9 @@ struct brcmf_pciedev_info {
 	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 			  u16 value);
 	struct brcmf_mp_device *settings;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	ulong bar1_size;
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 };
 
 struct brcmf_pcie_ringbuf {
@@ -342,6 +348,10 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 static struct brcmf_fw_request *
 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
 
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+DEFINE_RAW_SPINLOCK(pcie_lock);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
+
 static u32
 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
 {
@@ -365,8 +375,24 @@ static u8
 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
 	void __iomem *address = devinfo->tcm + mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
+	u8 value;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
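+	/* Offsets beyond the mapped BAR1 window are reached by sliding
+	 * the window forward by bar1_size and shifting the access address
+	 * back into the mapped range; the window is restored afterwards.
+	 */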
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
+	value = ioread8(address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
 
+	return value;
+#else
 	return (ioread8(address));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -374,8 +400,24 @@ static u16
 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
 	void __iomem *address = devinfo->tcm + mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	u16 value;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
+	value = ioread16(address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
 
+	return value;
+#else
 	return (ioread16(address));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -384,8 +426,22 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 		       u16 value)
 {
 	void __iomem *address = devinfo->tcm + mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
 
 	iowrite16(value, address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#else
+	iowrite16(value, address);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -412,8 +468,24 @@ static u32
 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
 	void __iomem *address = devinfo->tcm + mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	u32 value;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
+	value = ioread32(address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
 
+	return value;
+#else
 	return (ioread32(address));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -422,17 +494,47 @@ brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 		       u32 value)
 {
 	void __iomem *address = devinfo->tcm + mem_offset;
-
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
 	iowrite32(value, address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#else
+	iowrite32(value, address);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
 static u32
 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
-	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+	void __iomem *address = devinfo->tcm + devinfo->ci->rambase
+		+ mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	u32 value;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
+	value = ioread32(address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
 
-	return (ioread32(addr));
+	return value;
+#else
+	return (ioread32(address));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -440,9 +542,23 @@ static void
 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 		       u32 value)
 {
-	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+	void __iomem *address = devinfo->tcm + devinfo->ci->rambase
+		+ mem_offset;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
 
-	iowrite32(value, addr);
+	raw_spin_lock_irqsave(&pcie_lock, flags);
+	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
+		pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN,
+				       devinfo->bar1_size);
+		address = address - devinfo->bar1_size;
+	}
+	iowrite32(value, address);
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+	raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#else
+	iowrite32(value, address);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -454,12 +570,30 @@ brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 	__le32 *src32;
 	__le16 *src16;
 	u8 *src8;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 
 	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
 		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
 			src8 = (u8 *)srcaddr;
 			while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_lock_irqsave(&pcie_lock, flags);
+				if ((address - devinfo->tcm) >=
+				    devinfo->bar1_size) {
+					pci_write_config_dword
+						(devinfo->pdev,
+						 BCMA_PCI_BAR1_WIN,
+						 devinfo->bar1_size);
+					address = address -
+						devinfo->bar1_size;
+				}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				iowrite8(*src8, address);
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				address++;
 				src8++;
 				len--;
@@ -468,7 +602,22 @@ brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 			len = len / 2;
 			src16 = (__le16 *)srcaddr;
 			while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_lock_irqsave(&pcie_lock, flags);
+				if ((address - devinfo->tcm) >=
+					devinfo->bar1_size) {
+					pci_write_config_dword
+						(devinfo->pdev,
+						BCMA_PCI_BAR1_WIN,
+						devinfo->bar1_size);
+					address = address -
+						devinfo->bar1_size;
+				}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				iowrite16(le16_to_cpu(*src16), address);
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				address += 2;
 				src16++;
 				len--;
@@ -478,12 +627,29 @@ brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 		len = len / 4;
 		src32 = (__le32 *)srcaddr;
 		while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+			raw_spin_lock_irqsave(&pcie_lock, flags);
+			if ((address - devinfo->tcm) >=
+			    devinfo->bar1_size) {
+				pci_write_config_dword
+					(devinfo->pdev,
+					 BCMA_PCI_BAR1_WIN,
+					 devinfo->bar1_size);
+				address = address - devinfo->bar1_size;
+			}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 			iowrite32(le32_to_cpu(*src32), address);
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+			raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 			address += 4;
 			src32++;
 			len--;
 		}
 	}
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -495,12 +661,30 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 	__le32 *dst32;
 	__le16 *dst16;
 	u8 *dst8;
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	unsigned long flags;
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 
 	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
 		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
 			dst8 = (u8 *)dstaddr;
 			while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_lock_irqsave(&pcie_lock, flags);
+				if ((address - devinfo->tcm) >=
+				    devinfo->bar1_size) {
+					pci_write_config_dword
+						(devinfo->pdev,
+						BCMA_PCI_BAR1_WIN,
+						devinfo->bar1_size);
+					address = address -
+						devinfo->bar1_size;
+				}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				*dst8 = ioread8(address);
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				address++;
 				dst8++;
 				len--;
@@ -509,7 +693,22 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 			len = len / 2;
 			dst16 = (__le16 *)dstaddr;
 			while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_lock_irqsave(&pcie_lock, flags);
+				if ((address - devinfo->tcm) >=
+				    devinfo->bar1_size) {
+					pci_write_config_dword
+						(devinfo->pdev,
+						BCMA_PCI_BAR1_WIN,
+						devinfo->bar1_size);
+					address = address -
+						devinfo->bar1_size;
+				}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				*dst16 = cpu_to_le16(ioread16(address));
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+				raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 				address += 2;
 				dst16++;
 				len--;
@@ -519,12 +718,29 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 		len = len / 4;
 		dst32 = (__le32 *)dstaddr;
 		while (len) {
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+			raw_spin_lock_irqsave(&pcie_lock, flags);
+			if ((address - devinfo->tcm) >=
+			    devinfo->bar1_size) {
+				pci_write_config_dword
+					(devinfo->pdev,
+					BCMA_PCI_BAR1_WIN,
+					devinfo->bar1_size);
+				address = address - devinfo->bar1_size;
+			}
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 			*dst32 = cpu_to_le32(ioread32(address));
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+			raw_spin_unlock_irqrestore(&pcie_lock, flags);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 			address += 4;
 			dst32++;
 			len--;
 		}
 	}
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	pci_write_config_dword(devinfo->pdev, BCMA_PCI_BAR1_WIN, 0x0);
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 }
 
 
@@ -568,6 +784,9 @@ static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
 			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
 			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
 			     BRCMF_PCIE_CFGREG_MSI_DATA,
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+			     BCMA_PCI_BAR1_WIN,
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
 			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
 			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
@@ -1026,6 +1245,8 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
 			       address & 0xffffffff);
 	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
 
+	memset(ring, 0, size);
+
 	return (ring);
 }
 
@@ -1136,9 +1357,14 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
 	u16 max_flowrings;
 	u16 max_submissionrings;
 	u16 max_completionrings;
-
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->shared.ring_info_addr,
+				  &ringinfo, sizeof(ringinfo));
+#else
 	memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
 		      sizeof(ringinfo));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
+
 	if (devinfo->shared.version >= 6) {
 		max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
 		max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
@@ -1207,8 +1433,14 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
 		ringinfo.d2h_r_idx_hostaddr.high_addr =
 			cpu_to_le32(address >> 32);
 
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+		brcmf_pcie_copy_mem_todev(devinfo,
+					  devinfo->shared.ring_info_addr,
+					  &ringinfo, sizeof(ringinfo));
+#else
 		memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
 			    &ringinfo, sizeof(ringinfo));
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 		brcmf_dbg(PCIE, "Using host memory indices\n");
 	}
 
@@ -1403,12 +1635,21 @@ static
 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
 	struct brcmf_fw_request *fwreq;
 	struct brcmf_fw_name fwnames[] = {
 		{ ext, fw_name },
 	};
+	u32 chip;
+
+	if (devinfo->ci->chip == CY_CC_89459_CHIP_ID &&
+	    devinfo->pdev->device == CY_PCIE_54591_DEVICE_ID)
+		chip = CY_CC_54591_CHIP_ID;
+	else
+		chip = bus_if->chip;
 
-	fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
+	fwreq = brcmf_fw_alloc_request(chip, bus_if->chiprev,
 				       brcmf_pcie_fwnames,
 				       ARRAY_SIZE(brcmf_pcie_fwnames),
 				       fwnames, ARRAY_SIZE(fwnames));
@@ -1647,6 +1888,9 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
 
 	devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
 	devinfo->tcm = ioremap(bar1_addr, bar1_size);
+#ifdef CONFIG_BRCMFMAC_PCIE_BARWIN_SZ
+	devinfo->bar1_size = bar1_size;
+#endif /* CONFIG_BRCMFMAC_PCIE_BARWIN_SZ */
 
 	if (!devinfo->regs || !devinfo->tcm) {
 		brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
@@ -1827,7 +2071,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 	brcmf_pcie_intr_enable(devinfo);
 	brcmf_pcie_hostready(devinfo);
 
-	ret = brcmf_attach(&devinfo->pdev->dev);
+	ret = brcmf_attach(&devinfo->pdev->dev, true);
 	if (ret)
 		goto fail;
 
@@ -1847,8 +2091,14 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
 		{ ".bin", devinfo->fw_name },
 		{ ".txt", devinfo->nvram_name },
 	};
+	u32 chip;
 
-	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
+	if (devinfo->ci->chip == CY_CC_89459_CHIP_ID &&
+	    devinfo->pdev->device == CY_PCIE_54591_DEVICE_ID)
+		chip = CY_CC_54591_CHIP_ID;
+	else
+		chip = devinfo->ci->chip;
+	fwreq = brcmf_fw_alloc_request(chip, devinfo->ci->chiprev,
 				       brcmf_pcie_fwnames,
 				       ARRAY_SIZE(brcmf_pcie_fwnames),
 				       fwnames, ARRAY_SIZE(fwnames));
@@ -2011,11 +2261,22 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev)
 {
 	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_bus *bus;
+	struct brcmf_cfg80211_info *config;
+	int retry = BRCMF_PM_WAIT_MAXRETRY;
 
 	brcmf_dbg(PCIE, "Enter\n");
 
 	bus = dev_get_drvdata(dev);
 	devinfo = bus->bus_priv.pcie->devinfo;
+	config = bus->drvr->config;
+
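+	/* Give cfg80211 suspend handling time to finish before taking
+	 * the bus down.
+	 */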
+	while (retry &&
+	       config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING) {
+		usleep_range(10000, 20000);
+		retry--;
+	}
+	if (!retry && config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING)
+		brcmf_err(bus, "timed out wait for cfg80211 suspended\n");
 
 	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
 
@@ -2101,13 +2362,13 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_RAW_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
-	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
@@ -2116,6 +2377,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(CY_PCIE_54591_DEVICE_ID),
 	{ /* end: all zeroes */ }
 };
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index fabfbb0b40b..14e530601ef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -57,10 +57,6 @@ static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
 
 	mutex_lock(&pi->req_lock);
 
-	/* Nothing to do if we have no requests */
-	if (pi->n_reqs == 0)
-		goto done;
-
 	/* find request */
 	for (i = 0; i < pi->n_reqs; i++) {
 		if (pi->reqs[i]->reqid == reqid)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 2508fe1391c..0764c1c051e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -16,6 +16,7 @@
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
 #include <linux/semaphore.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
@@ -34,17 +35,30 @@
 #include "core.h"
 #include "common.h"
 #include "bcdc.h"
+#include "fwil.h"
 
 #define DCMD_RESP_TIMEOUT	msecs_to_jiffies(2500)
 #define CTL_DONE_TIMEOUT	msecs_to_jiffies(2500)
+#define ULP_HUDI_PROC_DONE_TIME	msecs_to_jiffies(2500)
 
 /* watermark expressed in number of words */
 #define DEFAULT_F2_WATERMARK    0x8
 #define CY_4373_F2_WATERMARK    0x40
 #define CY_43012_F2_WATERMARK    0x60
-#define CY_4359_F2_WATERMARK	0x40
-#define CY_4359_F1_MESBUSYCTRL	(CY_4359_F2_WATERMARK | SBSDIO_MESBUSYCTRL_ENAB)
-
+#define CY_43012_MES_WATERMARK  0x50
+#define CY_43012_MESBUSYCTRL    (CY_43012_MES_WATERMARK | \
+				 SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_4339_F2_WATERMARK    48
+#define CY_4339_MES_WATERMARK	80
+#define CY_4339_MESBUSYCTRL	(CY_4339_MES_WATERMARK | \
+				 SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_43455_F2_WATERMARK	0x60
+#define CY_43455_MES_WATERMARK	0x50
+#define CY_43455_MESBUSYCTRL	(CY_43455_MES_WATERMARK | \
+				 SBSDIO_MESBUSYCTRL_ENAB)
+#define CY_435X_F2_WATERMARK	0x40
+#define CY_435X_F1_MESBUSYCTRL	(CY_435X_F2_WATERMARK | \
+				 SBSDIO_MESBUSYCTRL_ENAB)
 #ifdef DEBUG
 
 #define BRCMF_TRAP_INFO_SIZE	80
@@ -313,16 +327,16 @@ struct rte_console {
 
 #define KSO_WAIT_US 50
 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
-#define BRCMF_SDIO_MAX_ACCESS_ERRORS	5
+#define BRCMF_SDIO_MAX_ACCESS_ERRORS	20
 
-/*
- * Conversion of 802.1D priority to precedence level
- */
-static uint prio2prec(u32 prio)
-{
-	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
-	       (prio^2) : prio;
-}
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
+					 struct brcmf_fw_request *fwreq);
+static struct brcmf_fw_request *
+	brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus);
+static int brcmf_sdio_f2_ready(struct brcmf_sdio *bus);
+static int brcmf_ulp_event_notify(struct brcmf_if *ifp,
+				  const struct brcmf_event_msg *evtmsg,
+				  void *data);
 
 #ifdef DEBUG
 /* Device console log buffer state */
@@ -644,6 +658,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
 	BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
 };
 
+#define TXCTL_CREDITS	2
+
 static void pkt_align(struct sk_buff *p, int len, int align)
 {
 	uint datalign;
@@ -654,8 +670,17 @@ static void pkt_align(struct sk_buff *p, int len, int align)
 	__skb_trim(p, len);
 }
 
-/* To check if there's window offered */
+/* To check if there's window offered
+ * Reserve TXCTL_CREDITS credits for txctl
+ */
 static bool data_ok(struct brcmf_sdio *bus)
+{
+	return (u8)(bus->tx_max - bus->tx_seq) > TXCTL_CREDITS &&
+	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
+}
+
+/* To check if there's window offered */
+static bool txctl_ok(struct brcmf_sdio *bus)
 {
 	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
 	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -1072,7 +1097,7 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
 }
 #endif /* DEBUG */
 
-static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus, u32 *hmbd)
 {
 	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
 	struct brcmf_core *core = bus->sdio_core;
@@ -1161,6 +1186,9 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
 		brcmf_err("Unknown mailbox data content: 0x%02x\n",
 			  hmb_data);
+	/* Populate hmb_data for the caller so it can check for DS1 later */
+	if (hmbd)
+		*hmbd = hmb_data;
 
 	return intstatus;
 }
@@ -1938,10 +1966,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
 					       BRCMF_SDIO_FT_NORMAL)) {
 				rd->len = 0;
-				brcmf_sdio_rxfail(bus, true, true);
-				sdio_release_host(bus->sdiodev->func1);
 				brcmu_pkt_buf_free_skb(pkt);
-				continue;
 			}
 			bus->sdcnt.rx_readahead_cnt++;
 			if (rd->len != roundup(rd_new.len, 16)) {
@@ -2547,6 +2572,182 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 	return ret;
 }
 
+/* Retrieve ULP-related details from the dongle, mostly SHM
+ * offsets/values that vary between firmware branches.
+ */
+static void
+brcmf_sdio_ulp_preinit(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_if *ifp = bus_if->drvr->iflist[0];
+
+	brcmf_dbg(ULP, "Enter\n");
+
+	/* Query ulp_sdioctrl iovar to get the ULP related SHM offsets */
+	brcmf_fil_iovar_data_get(ifp, "ulp_sdioctrl",
+				 &sdiodev->fmac_ulp.ulp_shm_offset,
+				 sizeof(sdiodev->fmac_ulp.ulp_shm_offset));
+
+	sdiodev->ulp = false;
+
+	brcmf_dbg(ULP, "m_ulp_ctrl_sdio[%x] m_ulp_wakeevt_ind [%x]\n",
+		  M_DS1_CTRL_SDIO(sdiodev->fmac_ulp),
+		  M_WAKEEVENT_IND(sdiodev->fmac_ulp));
+	brcmf_dbg(ULP, "m_ulp_wakeind [%x]\n",
+		  M_ULP_WAKE_IND(sdiodev->fmac_ulp));
+}
+
+/* Reinitialize the ARM core, which is powered off in DS1 mode */
+static int
+brcmf_sdio_ulp_reinit_fw(struct brcmf_sdio *bus)
+{
+	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
+	struct brcmf_fw_request *fwreq;
+	int err = 0;
+
+	/* After the firmware re-download the tx/rx sequence numbers are
+	 * reset on the FMAC side; tx_max starts at 4 and is later updated
+	 * by the firmware.
+	 */
+	bus->tx_seq = 0;
+	bus->rx_seq = 0;
+	bus->tx_max = 4;
+
+	fwreq = brcmf_sdio_prepare_fw_request(bus);
+	if (!fwreq)
+		return -ENOMEM;
+
+	err = brcmf_fw_get_firmwares(sdiodev->dev, fwreq,
+				     brcmf_sdio_firmware_callback);
+	if (err != 0) {
+		brcmf_err("async firmware request failed: %d\n", err);
+		kfree(fwreq);
+	}
+
+	return err;
+}
+
+/* Check if device is in DS1 mode and handshake with ULP UCODE */
+static bool
+brcmf_sdio_ulp_pre_redownload_check(struct brcmf_sdio *bus, u32 hmb_data)
+{
+	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+	int err = 0;
+	u32 value = 0;
+	u32 val32, ulp_wake_ind, wowl_wake_ind;
+	int reg_addr;
+	unsigned long timeout;
+	struct brcmf_ulp *fmac_ulp = &bus->sdiodev->fmac_ulp;
+	int i = 0;
+
+	/* If any host mailbox data is present, ignore the DS1 exit sequence */
+	if (hmb_data)
+		return false;
+	/* Skip if DS1 Exit is already in progress
+	 * This can happen if firmware download is taking more time
+	 */
+	if (fmac_ulp->ulp_state == FMAC_ULP_TRIGGERED)
+		return false;
+
+	value = brcmf_sdiod_func0_rb(sdiod, SDIO_CCCR_IOEx, &err);
+
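+	/* An IOEx read-back with only function 1 enabled is taken as the
+	 * DS1 wake indication from the ULP ucode.
+	 */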
+	if (value == SDIO_FUNC_ENABLE_1) {
+		brcmf_dbg(ULP, "GOT THE INTERRUPT FROM UCODE\n");
+		sdiod->ulp = true;
+		fmac_ulp->ulp_state = FMAC_ULP_TRIGGERED;
+		ulp_wake_ind = D11SHM_RDW(sdiod,
+					  M_ULP_WAKE_IND(sdiod->fmac_ulp),
+					  &err);
+		wowl_wake_ind = D11SHM_RDW(sdiod,
+					   M_WAKEEVENT_IND(sdiod->fmac_ulp),
+					   &err);
+
+		brcmf_dbg(ULP, "wowl_wake_ind: 0x%08x, ulp_wake_ind: 0x%08x state %s\n",
+			  wowl_wake_ind, ulp_wake_ind, (fmac_ulp->ulp_state) ?
+			  "DS1 Exit Triggered" : "IDLE State");
+
+		if (wowl_wake_ind || ulp_wake_ind) {
+			/* RX wake Don't do anything.
+			 * Just bail out and re-download firmware.
+			 */
+			 /* Print out PHY TX error block when bit 9 set */
+			if ((ulp_wake_ind & C_DS1_PHY_TXERR) &&
+			    M_DS1_PHYTX_ERR_BLK(sdiod->fmac_ulp)) {
+				brcmf_err("Dump PHY TX Error SHM Locations\n");
+				for (i = 0; i < PHYTX_ERR_BLK_SIZE; i++) {
+					pr_err("0x%x",
+					       D11SHM_RDW(sdiod,
+					       (M_DS1_PHYTX_ERR_BLK(sdiod->fmac_ulp) +
+						(i * 2)), &err));
+				}
+				brcmf_err("\n");
+			}
+		} else {
+			/* TX wake negotiate with MAC */
+			brcmf_dbg(ULP, "M_DS1_CTRL_SDIO: 0x%08x\n",
+				  (u32)D11SHM_RDW(sdiod,
+				  M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+				  &err));
+			val32 = D11SHM_RD(sdiod,
+					  M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+					  &err);
+			D11SHM_WR(sdiod, M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+				  val32, (C_DS1_CTRL_SDIO_DS1_EXIT |
+				  C_DS1_CTRL_REQ_VALID), &err);
+			val32 = D11REG_RD(sdiod, D11_MACCONTROL_REG, &err);
+			val32 = val32 | D11_MACCONTROL_REG_WAKE;
+			D11REG_WR(sdiod, D11_MACCONTROL_REG, val32, &err);
+
+			/* Poll for PROC_DONE to be set by ucode */
+			value = D11SHM_RDW(sdiod,
+					   M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+					   &err);
+			/* Wait here (polling) for C_DS1_CTRL_PROC_DONE */
+			timeout = jiffies + ULP_HUDI_PROC_DONE_TIME;
+			while (!(value & C_DS1_CTRL_PROC_DONE)) {
+				value = D11SHM_RDW(sdiod,
+						   M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+						   &err);
+				if (time_after(jiffies, timeout))
+					break;
+				usleep_range(1000, 2000);
+			}
+			brcmf_dbg(ULP, "M_DS1_CTRL_SDIO: 0x%08x\n",
+				  (u32)D11SHM_RDW(sdiod,
+				  M_DS1_CTRL_SDIO(sdiod->fmac_ulp), &err));
+			value = D11SHM_RDW(sdiod,
+					   M_DS1_CTRL_SDIO(sdiod->fmac_ulp),
+					   &err);
+			if (!(value & C_DS1_CTRL_PROC_DONE)) {
+				brcmf_err("Timeout Failed to enter DS1 Exit state!\n");
+				return false;
+			}
+		}
+
+		ulp_wake_ind = D11SHM_RDW(sdiod,
+					  M_ULP_WAKE_IND(sdiod->fmac_ulp),
+					  &err);
+		wowl_wake_ind = D11SHM_RDW(sdiod,
+					   M_WAKEEVENT_IND(sdiod->fmac_ulp),
+					   &err);
+		brcmf_dbg(ULP, "wowl_wake_ind: 0x%08x, ulp_wake_ind: 0x%08x\n",
+			  wowl_wake_ind, ulp_wake_ind);
+		reg_addr = CORE_CC_REG(
+			  brcmf_chip_get_pmu(bus->ci)->base, min_res_mask);
+		brcmf_sdiod_writel(sdiod, reg_addr,
+				   DEFAULT_43012_MIN_RES_MASK, &err);
+		if (err)
+			brcmf_err("min_res_mask failed\n");
+
+		return true;
+	}
+
+	return false;
+}
+
 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
 	struct brcmf_sdio_dev *sdiod = bus->sdiodev;
@@ -2618,8 +2819,11 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 	/* Handle host mailbox indication */
 	if (intstatus & I_HMB_HOST_INT) {
+		u32 hmb_data = 0;
 		intstatus &= ~I_HMB_HOST_INT;
-		intstatus |= brcmf_sdio_hostmail(bus);
+		intstatus |= brcmf_sdio_hostmail(bus, &hmb_data);
+		if (brcmf_sdio_ulp_pre_redownload_check(bus, hmb_data))
+			brcmf_sdio_ulp_reinit_fw(bus);
 	}
 
 	sdio_release_host(bus->sdiodev->func1);
@@ -2664,7 +2868,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 	brcmf_sdio_clrintr(bus);
 
 	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
-	    data_ok(bus)) {
+	    txctl_ok(bus) && brcmf_sdio_f2_ready(bus)) {
 		sdio_claim_host(bus->sdiodev->func1);
 		if (bus->ctrl_frame_stat) {
 			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
@@ -2672,6 +2876,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 			bus->ctrl_frame_err = err;
 			wmb();
 			bus->ctrl_frame_stat = false;
+			if (err)
+				brcmf_err("sdio ctrlframe tx failed err=%d\n",
+					  err);
 		}
 		sdio_release_host(bus->sdiodev->func1);
 		brcmf_sdio_wait_event_wakeup(bus);
@@ -2774,7 +2981,13 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
 	skb_push(pkt, bus->tx_hdrlen);
 	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
 
-	prec = prio2prec((pkt->priority & PRIOMASK));
+	/* In WLAN, priority is always set by the AP using WMM parameters
+	 * and this need not always follow the standard 802.1d priority.
+	 * Based on AP WMM config, map from 802.1d priority to corresponding
+	 * precedence level.
+	 */
+	prec = brcmf_map_prio_to_prec(bus_if->drvr->config,
+				      (pkt->priority & PRIOMASK));
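
Editor's note: to illustrate the remark above — the precedence level is normally derived from the 802.1d priority in skb->priority, and the driver's brcmf_map_prio_to_prec() additionally consults the AP's WMM configuration held in drvr->config. The sketch below shows only the default 802.1d-to-precedence lookup step under one common ordering (BK and the unclassified priority below BE); it is an illustration, not the driver implementation.

/* Default 802.1d priority -> precedence mapping, no WMM downgrade applied.
 * Index is the 3-bit 802.1d priority (0..7).
 */
static const unsigned char prio2prec_default[8] = {
	/* 0 BE */ 2, /* 1 BK */ 1, /* 2 -- */ 0, /* 3 EE */ 3,
	/* 4 CL */ 4, /* 5 VI */ 5, /* 6 VO */ 6, /* 7 NC */ 7,
};

static unsigned char map_prio_to_prec_default(unsigned int priority)
{
	return prio2prec_default[priority & 0x7];	/* mirrors (priority & PRIOMASK) */
}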
 
 	/* Check for existing queue, current flow-control,
 			 pending event, or pending clock */
@@ -3382,7 +3595,12 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
 
 static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
 {
-	if (bus->ci->chip == CY_CC_43012_CHIP_ID)
+	if (bus->ci->chip == CY_CC_43012_CHIP_ID ||
+	    bus->ci->chip == CY_CC_4373_CHIP_ID ||
+	    bus->ci->chip == BRCM_CC_4339_CHIP_ID ||
+	    bus->ci->chip == BRCM_CC_4345_CHIP_ID ||
+	    bus->ci->chip == BRCM_CC_4354_CHIP_ID ||
+	    bus->ci->chip == BRCM_CC_4356_CHIP_ID)
 		return true;
 	else
 		return false;
@@ -3522,6 +3740,10 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	if (err < 0)
 		goto done;
 
+	/* initialize SHM address from firmware for DS1 */
+	if (!bus->sdiodev->ulp)
+		brcmf_sdio_ulp_preinit(dev);
+
 	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
 	if (sdiodev->sg_support) {
 		bus->txglom = false;
@@ -3689,7 +3911,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 			if (bus->idlecount > bus->idletime) {
 				brcmf_dbg(SDIO, "idle\n");
 				sdio_claim_host(bus->sdiodev->func1);
-				brcmf_sdio_wd_timer(bus, false);
+#ifdef DEBUG
+				if (!BRCMF_FWCON_ON() ||
+				    bus->console_interval == 0)
+#endif
+					brcmf_sdio_wd_timer(bus, false);
 				bus->idlecount = 0;
 				brcmf_sdio_bus_sleep(bus, true, false);
 				sdio_release_host(bus->sdiodev->func1);
@@ -4099,6 +4325,36 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
 	return 0;
 }
 
+static int brcmf_sdio_bus_reset(struct device *dev)
+{
+	int ret = 0;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	/* start by unregistering irqs */
+	brcmf_sdiod_intr_unregister(sdiodev);
+
+	brcmf_sdiod_remove(sdiodev);
+
+	/* reset the adapter */
+	sdio_claim_host(sdiodev->func1);
+	mmc_hw_reset(sdiodev->func1->card->host);
+	sdio_release_host(sdiodev->func1);
+
+	brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+	ret = brcmf_sdiod_probe(sdiodev);
+	if (ret) {
+		brcmf_err("Failed to probe after sdio device reset: ret %d\n",
+			  ret);
+		brcmf_sdiod_remove(sdiodev);
+	}
+
+	return ret;
+}
+
 static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.stop = brcmf_sdio_bus_stop,
 	.preinit = brcmf_sdio_bus_preinit,
@@ -4110,7 +4366,8 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.get_ramsize = brcmf_sdio_bus_get_ramsize,
 	.get_memdump = brcmf_sdio_bus_get_memdump,
 	.get_fwname = brcmf_sdio_get_fwname,
-	.debugfs_create = brcmf_sdio_debugfs_create
+	.debugfs_create = brcmf_sdio_debugfs_create,
+	.reset = brcmf_sdio_bus_reset
 };
 
 #define BRCMF_SDIO_FW_CODE	0
@@ -4129,7 +4386,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	u8 saveclk, bpreq;
 	u8 devctl;
 
-	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+	brcmf_dbg(ULP, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
 
 	if (err)
 		goto fail;
@@ -4211,19 +4468,50 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
 			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
 					   &err);
+			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+					   CY_43012_MESBUSYCTRL, &err);
+			break;
+		case SDIO_DEVICE_ID_BROADCOM_4339:
+			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+				  CY_4339_F2_WATERMARK);
+			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+					   CY_4339_F2_WATERMARK, &err);
+			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+						   &err);
+			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+					   &err);
+			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+					   CY_4339_MESBUSYCTRL, &err);
+			break;
+		case SDIO_DEVICE_ID_BROADCOM_43455:
+			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n",
+				  CY_43455_F2_WATERMARK);
+			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+					   CY_43455_F2_WATERMARK, &err);
+			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+						   &err);
+			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+					   &err);
+			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+					   CY_43455_MESBUSYCTRL, &err);
 			break;
 		case SDIO_DEVICE_ID_BROADCOM_4359:
+		case SDIO_DEVICE_ID_CYPRESS_89359:
+		case SDIO_DEVICE_ID_BROADCOM_4354:
+		case SDIO_DEVICE_ID_BROADCOM_4356:
 			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
-				  CY_4359_F2_WATERMARK);
+				  CY_435X_F2_WATERMARK);
 			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
-					   CY_4359_F2_WATERMARK, &err);
+					   CY_435X_F2_WATERMARK, &err);
 			devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
 						   &err);
 			devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
 			brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
 					   &err);
 			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
-					   CY_4359_F1_MESBUSYCTRL, &err);
+					   CY_435X_F1_MESBUSYCTRL, &err);
 			break;
 		default:
 			brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
@@ -4245,12 +4533,6 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	}
 
 	if (err == 0) {
-		/* Assign bus interface call back */
-		sdiod->bus_if->dev = sdiod->dev;
-		sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
-		sdiod->bus_if->chip = bus->ci->chip;
-		sdiod->bus_if->chiprev = bus->ci->chiprev;
-
 		/* Allow full data communication using DPC from now on. */
 		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
 
@@ -4267,6 +4549,12 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 
 	sdio_release_host(sdiod->func1);
 
+	/* Assign bus interface call back */
+	sdiod->bus_if->dev = sdiod->dev;
+	sdiod->bus_if->ops = &brcmf_sdio_bus_ops;
+	sdiod->bus_if->chip = bus->ci->chip;
+	sdiod->bus_if->chiprev = bus->ci->chiprev;
+
 	err = brcmf_alloc(sdiod->dev, sdiod->settings);
 	if (err) {
 		brcmf_err("brcmf_alloc failed\n");
@@ -4274,12 +4562,25 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	}
 
 	/* Attach to the common layer, reserve hdr space */
-	err = brcmf_attach(sdiod->dev);
+	err = brcmf_attach(sdiod->dev, !bus->sdiodev->ulp);
 	if (err != 0) {
 		brcmf_err("brcmf_attach failed\n");
 		goto free;
 	}
 
+	/* Register for ULP events */
+	if (sdiod->func1->device == SDIO_DEVICE_ID_CYPRESS_43012)
+		brcmf_fweh_register(bus_if->drvr, BRCMF_E_ULP,
+				    brcmf_ulp_event_notify);
+
+	if (bus->sdiodev->ulp) {
+		/* For ULP, after firmware redownload complete
+		 * set ULP state to IDLE
+		 */
+		if (bus->sdiodev->fmac_ulp.ulp_state == FMAC_ULP_TRIGGERED)
+			bus->sdiodev->fmac_ulp.ulp_state = FMAC_ULP_IDLE;
+	}
+
 	/* ready */
 	return;
 
@@ -4342,9 +4643,21 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	bus->txminmax = BRCMF_TXMINMAX;
 	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
 
+	/* attempt to attach to the dongle */
+	if (!(brcmf_sdio_probe_attach(bus))) {
+		brcmf_err("brcmf_sdio_probe_attach failed\n");
+		goto fail;
+	}
+
 	/* single-threaded workqueue */
-	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
-				     dev_name(&sdiodev->func1->dev));
+	if (sdiodev->settings->sdio_wq_highpri) {
+		wq = alloc_workqueue("brcmf_wq/%s",
+				     WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND,
+				     1, dev_name(&sdiodev->func1->dev));
+	} else {
+		wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
+					     dev_name(&sdiodev->func1->dev));
+	}
 	if (!wq) {
 		brcmf_err("insufficient memory to create txworkqueue\n");
 		goto fail;
@@ -4353,12 +4666,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
 	bus->brcmf_wq = wq;
 
-	/* attempt to attach to the dongle */
-	if (!(brcmf_sdio_probe_attach(bus))) {
-		brcmf_err("brcmf_sdio_probe_attach failed\n");
-		goto fail;
-	}
-
 	spin_lock_init(&bus->rxctl_lock);
 	spin_lock_init(&bus->txq_lock);
 	init_waitqueue_head(&bus->ctrl_wait);
@@ -4461,7 +4768,17 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
 				 * necessary cores.
 				 */
 				msleep(20);
-				brcmf_chip_set_passive(bus->ci);
+				if (bus->sdiodev->fmac_ulp.ulp_state ==
+					FMAC_ULP_ENTRY_RECV) {
+					brcmf_chip_ulp_reset_lhl_regs(bus->ci);
+					brcmf_chip_reset_pmu_regs(bus->ci);
+				} else {
+					brcmf_chip_set_passive(bus->ci);
+				}
+				/* Reset the PMU, backplane and all the
+				 * cores by using the PMUWatchdogCounter.
+				 */
+				brcmf_chip_reset_watchdog(bus->ci);
 				brcmf_sdio_clkctl(bus, CLK_NONE, false);
 				sdio_release_host(bus->sdiodev->func1);
 			}
@@ -4517,3 +4834,39 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
 	return ret;
 }
 
+/* Check F2 Ready bit before sending data to Firmware */
+static int
+brcmf_sdio_f2_ready(struct brcmf_sdio *bus)
+{
+	int ret = -1;
+	int iordy_status = 0;
+
+	sdio_claim_host(bus->sdiodev->func1);
+	/* Read the status of IOR2 */
+	iordy_status = brcmf_sdiod_func0_rb(bus->sdiodev, SDIO_CCCR_IORx, NULL);
+
+	sdio_release_host(bus->sdiodev->func1);
+	ret = iordy_status & SDIO_FUNC_ENABLE_2;
+	return ret;
+}
+
+static int brcmf_ulp_event_notify(struct brcmf_if *ifp,
+				  const struct brcmf_event_msg *evtmsg,
+				  void *data)
+{
+	int err = 0;
+	struct brcmf_bus *bus_if = ifp->drvr->bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_sdio *bus;
+	struct brcmf_ulp_event *ulp_event = (struct brcmf_ulp_event *)data;
+
+	sdiodev = bus_if->bus_priv.sdio;
+	bus = sdiodev->bus;
+
+	brcmf_dbg(ULP, "Chip went to DS1 state: action %d\n",
+		  ulp_event->ulp_dongle_action);
+	if (ulp_event->ulp_dongle_action == FMAC_ULP_ENTRY)
+		bus->sdiodev->fmac_ulp.ulp_state = FMAC_ULP_ENTRY_RECV;
+
+	return err;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 163fd664780..d008689d0d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -165,6 +165,35 @@ struct brcmf_sdreg {
 struct brcmf_sdio;
 struct brcmf_sdiod_freezer;
 
+/* ULP SHM Offsets info */
+struct ulp_shm_info {
+	u32 m_ulp_ctrl_sdio;
+	u32 m_ulp_wakeevt_ind;
+	u32 m_ulp_wakeind;
+	u32 m_ulp_phytxblk;
+};
+
+/* FMAC ULP state machine */
+#define FMAC_ULP_IDLE		(0)
+#define FMAC_ULP_ENTRY_RECV		(1)
+#define FMAC_ULP_TRIGGERED		(2)
+
+/* BRCMF_E_ULP event data */
+#define FMAC_ULP_EVENT_VERSION		1
+#define FMAC_ULP_DISABLE_CONSOLE		1 /* Disable console */
+#define FMAC_ULP_UCODE_DOWNLOAD		2 /* Download ULP ucode file */
+#define FMAC_ULP_ENTRY		3 /* Inform ulp entry to Host */
+
+struct brcmf_ulp {
+	uint ulp_state;
+	struct ulp_shm_info ulp_shm_offset;
+};
+
+struct brcmf_ulp_event {
+	u16 version;
+	u16 ulp_dongle_action;
+};
+
 struct brcmf_sdio_dev {
 	struct sdio_func *func1;
 	struct sdio_func *func2;
@@ -178,6 +207,7 @@ struct brcmf_sdio_dev {
 	bool sd_irq_requested;
 	bool irq_en;			/* irq enable flags */
 	spinlock_t irq_en_lock;
+	bool irq_wake;			/* irq wake enable flags */
 	bool sg_support;
 	uint max_request_size;
 	ushort max_segment_count;
@@ -189,6 +219,8 @@ struct brcmf_sdio_dev {
 	bool wowl_enabled;
 	enum brcmf_sdiod_state state;
 	struct brcmf_sdiod_freezer *freezer;
+	struct brcmf_ulp fmac_ulp;
+	bool ulp;
 };
 
 /* sdio core registers */
@@ -367,6 +399,9 @@ static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
+
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdio_remove(struct brcmf_sdio *bus);
 void brcmf_sdio_isr(struct brcmf_sdio *bus);
@@ -376,4 +411,83 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
 int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
 
+/* SHM offsets */
+#define M_DS1_CTRL_SDIO(ptr)	((ptr).ulp_shm_offset.m_ulp_ctrl_sdio)
+#define M_WAKEEVENT_IND(ptr)	((ptr).ulp_shm_offset.m_ulp_wakeevt_ind)
+#define M_ULP_WAKE_IND(ptr)		((ptr).ulp_shm_offset.m_ulp_wakeind)
+#define M_DS1_PHYTX_ERR_BLK(ptr)	((ptr).ulp_shm_offset.m_ulp_phytxblk)
+
+#define D11_BASE_ADDR			0x18001000
+#define D11_AXI_BASE_ADDR		0xE8000000
+#define D11_SHM_BASE_ADDR		(D11_AXI_BASE_ADDR + 0x4000)
+
+#define D11REG_ADDR(offset)	(D11_BASE_ADDR + (offset))
+#define D11IHR_ADDR(offset)	(D11_AXI_BASE_ADDR + 0x400 + (2 * (offset)))
+#define D11SHM_ADDR(offset)	(D11_SHM_BASE_ADDR + (offset))
+
+/* MacControl register */
+#define D11_MACCONTROL_REG			D11REG_ADDR(0x120)
+#define D11_MACCONTROL_REG_WAKE		0x4000000
+
+/* HUDI Sequence SHM bits */
+#define	C_DS1_CTRL_SDIO_DS1_SLEEP		0x1
+#define	C_DS1_CTRL_SDIO_MAC_ON			0x2
+#define	C_DS1_CTRL_SDIO_RADIO_PHY_ON	0x4
+#define	C_DS1_CTRL_SDIO_DS1_EXIT		0x8
+#define	C_DS1_CTRL_PROC_DONE			0x100
+#define	C_DS1_CTRL_REQ_VALID			0x200
+
+/* M_ULP_WAKEIND bits */
+#define	C_WATCHDOG_EXPIRY	BIT(0)
+#define	C_FCBS_ERROR		BIT(1)
+#define	C_RETX_FAILURE		BIT(2)
+#define	C_HOST_WAKEUP		BIT(3)
+#define	C_INVALID_FCBS_BLOCK	BIT(4)
+#define	C_HUDI_DS1_EXIT		BIT(5)
+#define	C_LOB_SLEEP		BIT(6)
+#define	C_DS1_PHY_TXERR		BIT(9)
+#define	C_DS1_WAKE_TIMER	BIT(10)
+
+#define PHYTX_ERR_BLK_SIZE		18
+#define D11SHM_FIRST2BYTE_MASK		0xFFFF0000
+#define D11SHM_SECOND2BYTE_MASK		0x0000FFFF
+#define D11SHM_2BYTE_SHIFT		16
+
+#define D11SHM_RD(sdh, offset, ret) \
+	brcmf_sdiod_readl(sdh, D11SHM_ADDR(offset), ret)
+
+/* The SHM read is modified to respect 4-byte alignment because SHM words are
+ * 2 bytes wide and 2-byte reads currently do not work on FMAC.
+ * If the SHM address is not 4-byte aligned, shift the 32-bit read right by 16;
+ * otherwise, mask off the two most significant bytes.
+ * For example, if the data at address 0x7260 is 0x440002 and the address is
+ * 4-byte aligned, the SHM value at this offset is 0x2 and the next SHM value
+ * is 0x44.
+ */
+#define D11SHM_RDW(sdh, offset, ret) \
+	((offset % 4) ? \
+		(brcmf_sdiod_readl(sdh, D11SHM_ADDR(offset), ret) \
+		>> D11SHM_2BYTE_SHIFT) : \
+		(brcmf_sdiod_readl(sdh, D11SHM_ADDR(offset), ret) \
+		& D11SHM_SECOND2BYTE_MASK))
+
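
Editor's note: a small self-contained illustration of what D11SHM_RDW() does with the 32-bit backplane read — the 2-byte SHM word is taken from either the lower or the upper half depending on whether the SHM offset is 4-byte aligned. The values mirror the 0x440002 example in the comment above; shm_rdw() is an illustrative helper, not a driver function.

#include <stdint.h>
#include <stdio.h>

/* Extract the 16-bit SHM word at @offset from the 32-bit value read at the
 * 4-byte-aligned backplane address that covers it.
 */
static uint16_t shm_rdw(uint32_t offset, uint32_t word32)
{
	if (offset % 4)				/* unaligned: upper half */
		return (uint16_t)(word32 >> 16);
	return (uint16_t)(word32 & 0xffff);	/* aligned: lower half */
}

int main(void)
{
	uint32_t word32 = 0x00440002;	/* as in the comment above */

	printf("aligned offset   -> 0x%x\n", shm_rdw(0x7260, word32)); /* 0x2  */
	printf("unaligned offset -> 0x%x\n", shm_rdw(0x7262, word32)); /* 0x44 */
	return 0;
}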
+/* SHM words are 2 bytes wide, so a plain 4-byte write would clobber the
+ * neighbouring SHM word. First read 4 bytes, clear the required two bytes
+ * based on the 4-byte alignment, update them with the new value and then
+ * write the full 4-byte value back.
+ */
+#define D11SHM_WR(sdh, offset, val, mask, ret) \
+	do { \
+		if ((offset) % 4) \
+			val = (val & D11SHM_SECOND2BYTE_MASK) | \
+				((mask) << D11SHM_2BYTE_SHIFT); \
+		else \
+			val = (mask) | (val & D11SHM_FIRST2BYTE_MASK); \
+		brcmf_sdiod_writel(sdh, D11SHM_ADDR(offset), val, ret); \
+	} while (0)
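
Editor's note: the companion write path is a read-modify-write on the same 32-bit word. The sketch below shows just the merge step in isolation, as plain C rather than the macro: the half selected by the offset's alignment is replaced with the new 16-bit value and the other half is preserved. shm_merge_wr() is an illustrative helper, not part of the patch.

#include <stdint.h>

/* Merge a new 16-bit SHM value into the 32-bit word previously read from
 * the covering 4-byte-aligned address.
 */
static uint32_t shm_merge_wr(uint32_t offset, uint32_t old32, uint16_t new16)
{
	if (offset % 4)		/* unaligned: replace the upper half */
		return (old32 & 0x0000ffff) | ((uint32_t)new16 << 16);
	/* aligned: replace the lower half */
	return (old32 & 0xffff0000) | new16;
}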
+#define D11REG_WR(sdh, addr, val, ret) \
+	brcmf_sdiod_writel(sdh, addr, val, ret)
+
+#define D11REG_RD(sdh, addr, ret) \
+	brcmf_sdiod_readl(sdh, addr, ret)
+
 #endif /* BRCMFMAC_SDIO_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 944146f4c6b..8a214062aea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -19,6 +19,7 @@
 #include "core.h"
 #include "common.h"
 #include "bcdc.h"
+#include "cfg80211.h"
 
 
 #define IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)
@@ -459,7 +460,6 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
 			usb_free_urb(req->urb);
 		list_del(q->next);
 	}
-	kfree(reqs);
 	return NULL;
 
 }
@@ -1219,8 +1219,14 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 	if (ret)
 		goto error;
 
+	if (BRCMF_FWCON_ON()) {
+		ret = brcmf_fwlog_attach(devinfo->dev);
+		if (ret)
+			goto error;
+	}
+
 	/* Attach to the common driver interface */
-	ret = brcmf_attach(devinfo->dev);
+	ret = brcmf_attach(devinfo->dev, true);
 	if (ret)
 		goto error;
 
@@ -1295,9 +1301,17 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
 		ret = brcmf_alloc(devinfo->dev, devinfo->settings);
 		if (ret)
 			goto fail;
-		ret = brcmf_attach(devinfo->dev);
+
+		if (BRCMF_FWCON_ON()) {
+			ret = brcmf_fwlog_attach(devinfo->dev);
+			if (ret)
+				goto fail;
+		}
+
+		ret = brcmf_attach(devinfo->dev, true);
 		if (ret)
 			goto fail;
+
 		/* we are done */
 		complete(&devinfo->dev_init_done);
 		return 0;
@@ -1480,8 +1494,22 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
 {
 	struct usb_device *usb = interface_to_usbdev(intf);
 	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+	struct brcmf_bus *bus;
+	struct brcmf_cfg80211_info *config;
+	int retry = BRCMF_PM_WAIT_MAXRETRY;
 
 	brcmf_dbg(USB, "Enter\n");
+
+	bus = devinfo->bus_pub.bus;
+	config = bus->drvr->config;
+	while (retry &&
+	       config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING) {
+		usleep_range(10000, 20000);
+		retry--;
+	}
+	if (!retry && config->pm_state == BRCMF_CFG80211_PM_STATE_SUSPENDING)
+		brcmf_err("timed out waiting for cfg80211 suspend\n");
+
 	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
 	brcmf_cancel_all_urbs(devinfo);
 	device_set_wakeup_enable(devinfo->dev, true);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index d07e7c7355d..0bad78d5f5c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -64,6 +64,15 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 		*(char *)(dcmd_buf + len)  = '\0';
 	}
 
+	if (cmdhdr->cmd == BRCMF_C_SET_AP) {
+		if (*(int *)(dcmd_buf) == 1) {
+			ifp->vif->wdev.iftype = NL80211_IFTYPE_AP;
+			brcmf_net_setcarrier(ifp, true);
+		} else {
+			ifp->vif->wdev.iftype = NL80211_IFTYPE_STATION;
+		}
+	}
+
 	if (cmdhdr->set)
 		ret = brcmf_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf,
 					     ret_len);
@@ -104,6 +113,56 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
 	return ret;
 }
 
+s32
+brcmf_wiphy_phy_temp_evt_handler(struct brcmf_if *ifp,
+				 const struct brcmf_event_msg *e, void *data)
+
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct wiphy *wiphy = cfg_to_wiphy(cfg);
+	struct sk_buff *skb;
+	struct nlattr *phy_temp_data;
+	u32 version, temp, tempdelta;
+	struct brcmf_phy_temp_evt *phy_temp_evt;
+
+	phy_temp_evt = (struct brcmf_phy_temp_evt *)data;
+
+	version = le32_to_cpu(phy_temp_evt->version);
+	temp = le32_to_cpu(phy_temp_evt->temp);
+	tempdelta = le32_to_cpu(phy_temp_evt->tempdelta);
+
+	skb = cfg80211_vendor_event_alloc(wiphy, NULL,
+					  sizeof(*phy_temp_evt),
+					  BRCMF_VNDR_EVTS_PHY_TEMP,
+					  GFP_KERNEL);
+
+	if (!skb) {
+		brcmf_dbg(EVENT, "NO MEM: can't allocate skb for vendor PHY_TEMP_EVENT\n");
+		return -ENOMEM;
+	}
+
+	phy_temp_data = nla_nest_start(skb, NL80211_ATTR_VENDOR_EVENTS);
+	if (!phy_temp_data) {
+		nla_nest_cancel(skb, phy_temp_data);
+		kfree_skb(skb);
+		brcmf_dbg(EVENT, "skb could not nest vendor attributes\n");
+		return -EMSGSIZE;
+	}
+
+	if (nla_put_u32(skb, BRCMF_NLATTR_VERS, version) ||
+	    nla_put_u32(skb, BRCMF_NLATTR_PHY_TEMP, temp) ||
+	    nla_put_u32(skb, BRCMF_NLATTR_PHY_TEMPDELTA, tempdelta)) {
+		kfree_skb(skb);
+		brcmf_dbg(EVENT, "NO ROOM in skb for vendor PHY_TEMP_EVENT\n");
+		return -EMSGSIZE;
+	}
+
+	nla_nest_end(skb, phy_temp_data);
+
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+	return 0;
+}
+
 const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
 	{
 		{
@@ -116,3 +175,10 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
 		.doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
 	},
 };
+
+const struct nl80211_vendor_cmd_info brcmf_vendor_events[] = {
+	{
+		.vendor_id = BROADCOM_OUI,
+		.subcmd = BRCMF_VNDR_EVTS_PHY_TEMP,
+	},
+};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
index 418f33ea6fd..3bdf4736978 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
@@ -14,6 +14,11 @@ enum brcmf_vndr_cmds {
 	BRCMF_VNDR_CMDS_LAST
 };
 
+enum brcmf_vndr_evts {
+	BRCMF_VNDR_EVTS_PHY_TEMP,
+	BRCMF_VNDR_EVTS_LAST
+};
+
 /**
  * enum brcmf_nlattrs - nl80211 message attributes
  *
@@ -25,11 +30,21 @@ enum brcmf_nlattrs {
 
 	BRCMF_NLATTR_LEN,
 	BRCMF_NLATTR_DATA,
+	BRCMF_NLATTR_VERS,
+	BRCMF_NLATTR_PHY_TEMP,
+	BRCMF_NLATTR_PHY_TEMPDELTA,
 
 	__BRCMF_NLATTR_AFTER_LAST,
 	BRCMF_NLATTR_MAX = __BRCMF_NLATTR_AFTER_LAST - 1
 };
 
+/* structure of the PHY temperature event sent up by firmware */
+struct brcmf_phy_temp_evt {
+	__le32 version;
+	__le32 temp;
+	__le32 tempdelta;
+} __packed;
+
 /**
  * struct brcmf_vndr_dcmd_hdr - message header for cfg80211 vendor command dcmd
  *				support
@@ -49,5 +64,9 @@ struct brcmf_vndr_dcmd_hdr {
 };
 
 extern const struct wiphy_vendor_command brcmf_vendor_cmds[];
+extern const struct nl80211_vendor_cmd_info brcmf_vendor_events[];
+s32 brcmf_wiphy_phy_temp_evt_handler(struct brcmf_if *ifp,
+				     const struct brcmf_event_msg *e,
+				     void *data);
 
 #endif /* _vendor_h_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 5a6d9c86552..db783e94f92 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -496,11 +496,13 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
 	 * table and override CDD later
 	 */
 	if (li_mimo == &locale_bn) {
-		maxpwr20 = QDB(16);
-		maxpwr40 = 0;
+		if (li_mimo == &locale_bn) {
+			maxpwr20 = QDB(16);
+			maxpwr40 = 0;
 
-		if (chan >= 3 && chan <= 11)
-			maxpwr40 = QDB(16);
+			if (chan >= 3 && chan <= 11)
+				maxpwr40 = QDB(16);
+		}
 
 		for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
 			txpwr->mcs_20_siso[i] = (u8) maxpwr20;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 0be8968bf9d..5040058ab8d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -849,7 +849,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
 				     "START: tid %d is not agg\'able\n", tid);
 			return -EINVAL;
 		}
-		return IEEE80211_AMPDU_TX_START_IMMEDIATE;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
 
 	case IEEE80211_AMPDU_TX_STOP_CONT:
 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 7f2c15c799d..080e829da9b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -838,8 +838,9 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 	struct dma_pub *dma = NULL;
 	struct d11txh *txh = NULL;
 	struct scb *scb = NULL;
-	int tx_frame_count;
-	uint supr_status;
+	bool free_pdu;
+	int tx_rts, tx_frame_count, tx_rts_count;
+	uint totlen, supr_status;
 	bool lastframe;
 	struct ieee80211_hdr *h;
 	u16 mcl;
@@ -916,8 +917,11 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 			     CHSPEC_CHANNEL(wlc->default_bss->chanspec));
 	}
 
+	tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
 	tx_frame_count =
 	    (txs->status & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT;
+	tx_rts_count =
+	    (txs->status & TX_STATUS_RTS_RTX_MASK) >> TX_STATUS_RTS_RTX_SHIFT;
 
 	lastframe = !ieee80211_has_morefrags(h->frame_control);
 
@@ -985,6 +989,9 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 			tx_info->flags |= IEEE80211_TX_STAT_ACK;
 	}
 
+	totlen = p->len;
+	free_pdu = true;
+
 	if (lastframe) {
 		/* remove PLCP & Broadcom tx descriptor header */
 		skb_pull(p, D11_PHY_HDR_LEN);
@@ -1809,7 +1816,8 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
 	udelay(2);
 	brcms_b_core_phy_clk(wlc_hw, ON);
 
-	wlc_phy_anacore(pih, ON);
+	if (pih)
+		wlc_phy_anacore(pih, ON);
 }
 
 /* switch to and initialize new band */
@@ -5408,7 +5416,7 @@ int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
 {
 	u16 chspec = ch20mhz_chspec(channel);
 
-	if (channel > MAXCHANNEL)
+	if (channel < 0 || channel > MAXCHANNEL)
 		return -EINVAL;
 
 	if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec))
@@ -7376,7 +7384,9 @@ static void brcms_c_update_beacon_hw(struct brcms_c_info *wlc,
 				     false, true);
 		/* mark beacon0 valid */
 		bcma_set32(core, D11REGOFFS(maccommand), MCMD_BCN1VLD);
+		return;
 	}
+	return;
 }
 
 /*
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index c6c4be05159..1a041577dc8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -44,13 +44,14 @@
 #define BRCM_CC_4358_CHIP_ID		0x4358
 #define BRCM_CC_4359_CHIP_ID		0x4359
 #define BRCM_CC_43602_CHIP_ID		43602
-#define BRCM_CC_4364_CHIP_ID		0x4364
 #define BRCM_CC_4365_CHIP_ID		0x4365
 #define BRCM_CC_4366_CHIP_ID		0x4366
 #define BRCM_CC_43664_CHIP_ID		43664
 #define BRCM_CC_4371_CHIP_ID		0x4371
 #define CY_CC_4373_CHIP_ID		0x4373
 #define CY_CC_43012_CHIP_ID		43012
+#define CY_CC_89459_CHIP_ID             0x4355
+#define CY_CC_54591_CHIP_ID             0x54591
 
 /* USB Device IDs */
 #define BRCM_USB_43143_DEVICE_ID	0xbd1e
@@ -69,13 +70,13 @@
 #define BRCM_PCIE_4356_DEVICE_ID	0x43ec
 #define BRCM_PCIE_43567_DEVICE_ID	0x43d3
 #define BRCM_PCIE_43570_DEVICE_ID	0x43d9
+#define BRCM_PCIE_43570_RAW_DEVICE_ID	0xaa31
 #define BRCM_PCIE_4358_DEVICE_ID	0x43e9
 #define BRCM_PCIE_4359_DEVICE_ID	0x43ef
 #define BRCM_PCIE_43602_DEVICE_ID	0x43ba
 #define BRCM_PCIE_43602_2G_DEVICE_ID	0x43bb
 #define BRCM_PCIE_43602_5G_DEVICE_ID	0x43bc
 #define BRCM_PCIE_43602_RAW_DEVICE_ID	43602
-#define BRCM_PCIE_4364_DEVICE_ID	0x4464
 #define BRCM_PCIE_4365_DEVICE_ID	0x43ca
 #define BRCM_PCIE_4365_2G_DEVICE_ID	0x43cb
 #define BRCM_PCIE_4365_5G_DEVICE_ID	0x43cc
@@ -83,7 +84,9 @@
 #define BRCM_PCIE_4366_2G_DEVICE_ID	0x43c4
 #define BRCM_PCIE_4366_5G_DEVICE_ID	0x43c5
 #define BRCM_PCIE_4371_DEVICE_ID	0x440d
-
+#define CY_PCIE_89459_DEVICE_ID         0x4415
+#define CY_PCIE_89459_RAW_DEVICE_ID     0x4355
+#define CY_PCIE_54591_DEVICE_ID         0x4417
 
 /* brcmsmac IDs */
 #define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
index 0340bba9686..39cd34c2262 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
@@ -214,8 +214,197 @@ struct chipcregs {
 	u32 PAD[3];
 	u32 retention_grpidx;       /* 0x680 */
 	u32 retention_grpctl;       /* 0x684 */
-	u32 PAD[94];
-	u16 sromotp[768];
+	u32 mac_res_req_timer;      /* 0x688 */
+	u32 mac_res_req_mask;       /* 0x68c */
+	u32 PAD[18];
+	u32 pmucontrol_ext;         /* 0x6d8 */
+	u32 slowclkperiod;          /* 0x6dc */
+	u32 PAD[8];
+	u32 pmuintmask0;            /* 0x700 */
+	u32 pmuintmask1;            /* 0x704 */
+	u32 PAD[14];
+	u32 pmuintstatus;           /* 0x740 */
+	u32 extwakeupstatus;        /* 0x744 */
+	u32 watchdog_res_mask;      /* 0x748 */
+	u32 swscratch;              /* 0x750 */
+	u32 PAD[3];
+	u32 extwakemask[2];         /* 0x760-0x764 */
+	u32 PAD[2];
+	u32 extwakereqmask[2];      /* 0x770-0x774 */
+	u32 PAD[2];
+	u32 pmuintctrl0;            /* 0x780 */
+	u32 pmuintctrl1;            /* 0x784 */
+	u32 PAD[2];
+	u32 extwakectrl[2];         /* 0x790 */
+};
+
+#define CHIPGCIREGOFFS(field) offsetof(struct chipgciregs, field)
+
+struct chipgciregs {
+	u32 gci_corecaps0;                             /* 0x000 */
+	u32 gci_corecaps1;                             /* 0x004 */
+	u32 gci_corecaps2;                             /* 0x008 */
+	u32 gci_corectrl;                              /* 0x00c */
+	u32 gci_corestat;                              /* 0x010 */
+	u32 gci_intstat;                               /* 0x014 */
+	u32 gci_intmask;                               /* 0x018 */
+	u32 gci_wakemask;                              /* 0x01c */
+	u32 gci_levelintstat;                          /* 0x020 */
+	u32 gci_eventintstat;                          /* 0x024 */
+	u32 gci_wakelevelintstat;                      /* 0x028 */
+	u32 gci_wakeeventintstat;                      /* 0x02c */
+	u32 semaphoreintstatus;                        /* 0x030 */
+	u32 semaphoreintmask;                          /* 0x034 */
+	u32 semaphorerequest;                          /* 0x038 */
+	u32 semaphorereserve;                          /* 0x03c */
+	u32 gci_indirect_addr;                         /* 0x040 */
+	u32 gci_gpioctl;                               /* 0x044 */
+	u32 gci_gpiostatus;                            /* 0x048 */
+	u32 gci_gpiomask;                              /* 0x04c */
+	u32 eventsummary;                              /* 0x050 */
+	u32 gci_miscctl;                               /* 0x054 */
+	u32 gci_gpiointmask;                           /* 0x058 */
+	u32 gci_gpiowakemask;                          /* 0x05c */
+	u32 gci_input[32];                             /* 0x060 */
+	u32 gci_event[32];                             /* 0x0e0 */
+	u32 gci_output[4];                             /* 0x160 */
+	u32 gci_control_0;                             /* 0x170 */
+	u32 gci_control_1;                             /* 0x174 */
+	u32 gci_intpolreg;                             /* 0x178 */
+	u32 gci_levelintmask;                          /* 0x17c */
+	u32 gci_eventintmask;                          /* 0x180 */
+	u32 wakelevelintmask;                          /* 0x184 */
+	u32 wakeeventintmask;                          /* 0x188 */
+	u32 hwmask;                                    /* 0x18c */
+	u32 PAD;
+	u32 gci_inbandeventintmask;                    /* 0x194 */
+	u32 PAD;
+	u32 gci_inbandeventstatus;                     /* 0x19c */
+	u32 gci_seciauxtx;                             /* 0x1a0 */
+	u32 gci_seciauxrx;                             /* 0x1a4 */
+	u32 gci_secitx_datatag;                        /* 0x1a8 */
+	u32 gci_secirx_datatag;                        /* 0x1ac */
+	u32 gci_secitx_datamask;                       /* 0x1b0 */
+	u32 gci_seciusef0tx_reg;                       /* 0x1b4 */
+	u32 gci_secif0tx_offset;                       /* 0x1b8 */
+	u32 gci_secif0rx_offset;                       /* 0x1bc */
+	u32 gci_secif1tx_offset;                       /* 0x1c0 */
+	u32 gci_rxfifo_common_ctrl;                    /* 0x1c4 */
+	u32 gci_rxfifoctrl;                            /* 0x1c8 */
+	u32 gci_hw_sema_status;                        /* 0x1cc */
+	u32 gci_seciuartescval;                        /* 0x1d0 */
+	u32 gic_seciuartautobaudctr;                   /* 0x1d4 */
+	u32 gci_secififolevel;                         /* 0x1d8 */
+	u32 gci_seciuartdata;                          /* 0x1dc */
+	u32 gci_secibauddiv;                           /* 0x1e0 */
+	u32 gci_secifcr;                               /* 0x1e4 */
+	u32 gci_secilcr;                               /* 0x1e8 */
+	u32 gci_secimcr;                               /* 0x1ec */
+	u32 gci_secilsr;                               /* 0x1f0 */
+	u32 gci_secimsr;                               /* 0x1f4 */
+	u32 gci_baudadj;                               /* 0x1f8 */
+	u32 gci_inbandintmask;                         /* 0x1fc */
+	u32 gci_chipctrl;                              /* 0x200 */
+	u32 gci_chipsts;                               /* 0x204 */
+	u32 gci_gpioout;                               /* 0x208 */
+	u32 gci_gpioout_read;                          /* 0x20C */
+	u32 gci_mpwaketx;                              /* 0x210 */
+	u32 gci_mpwakedetect;                          /* 0x214 */
+	u32 gci_seciin_ctrl;                           /* 0x218 */
+	u32 gci_seciout_ctrl;                          /* 0x21C */
+	u32 gci_seciin_auxfifo_en;                     /* 0x220 */
+	u32 gci_seciout_txen_txbr;                     /* 0x224 */
+	u32 gci_seciin_rxbrstatus;                     /* 0x228 */
+	u32 gci_seciin_rxerrstatus;                    /* 0x22C */
+	u32 gci_seciin_fcstatus;                       /* 0x230 */
+	u32 gci_seciout_txstatus;                      /* 0x234 */
+	u32 gci_seciout_txbrstatus;                    /* 0x238 */
+	u32 wlan_mem_info;                             /* 0x23C */
+	u32 wlan_bankxinfo;                            /* 0x240 */
+	u32 bt_smem_select;                            /* 0x244 */
+	u32 bt_smem_stby;                              /* 0x248 */
+	u32 bt_smem_status;                            /* 0x24C */
+	u32 wlan_bankxactivepda;                       /* 0x250 */
+	u32 wlan_bankxsleeppda;                        /* 0x254 */
+	u32 wlan_bankxkill;                            /* 0x258 */
+	u32 PAD[41];
+	u32 gci_chipid;                                /* 0x300 */
+	u32 PAD[3];
+	u32 otpstatus;                                 /* 0x310 */
+	u32 otpcontrol;                                /* 0x314 */
+	u32 otpprog;                                   /* 0x318 */
+	u32 otplayout;                                 /* 0x31c */
+	u32 otplayoutextension;                        /* 0x320 */
+	u32 otpcontrol1;                               /* 0x324 */
+	u32 otpprogdata;                               /* 0x328 */
+	u32 PAD[52];
+	u32 otpECCstatus;                              /* 0x3FC */
+	u32 PAD[512];
+	u32 lhl_core_capab_adr;                        /* 0xC00 */
+	u32 lhl_main_ctl_adr;                          /* 0xC04 */
+	u32 lhl_pmu_ctl_adr;                           /* 0xC08 */
+	u32 lhl_extlpo_ctl_adr;                        /* 0xC0C */
+	u32 lpo_ctl_adr;                               /* 0xC10 */
+	u32 lhl_lpo2_ctl_adr;                          /* 0xC14 */
+	u32 lhl_osc32k_ctl_adr;                        /* 0xC18 */
+	u32 lhl_clk_status_adr;                        /* 0xC1C */
+	u32 lhl_clk_det_ctl_adr;                       /* 0xC20 */
+	u32 lhl_clk_sel_adr;                           /* 0xC24 */
+	u32 hidoff_cnt_adr[2];                         /* 0xC28-0xC2C */
+	u32 lhl_autoclk_ctl_adr;                       /* 0xC30 */
+	u32 PAD;
+	u32 lhl_hibtim_adr;                            /* 0xC38 */
+	u32 lhl_wl_ilp_val_adr;                        /* 0xC3C */
+	u32 lhl_wl_armtim0_intrp_adr;                  /* 0xC40 */
+	u32 lhl_wl_armtim0_st_adr;                     /* 0xC44 */
+	u32 lhl_wl_armtim0_adr;                        /* 0xC48 */
+	u32 PAD[9];
+	u32 lhl_wl_mactim0_intrp_adr;                  /* 0xC70 */
+	u32 lhl_wl_mactim0_st_adr;                     /* 0xC74 */
+	u32 lhl_wl_mactim_int0_adr;                    /* 0xC78 */
+	u32 lhl_wl_mactim_frac0_adr;                   /* 0xC7C */
+	u32 lhl_wl_mactim1_intrp_adr;                  /* 0xC80 */
+	u32 lhl_wl_mactim1_st_adr;                     /* 0xC84 */
+	u32 lhl_wl_mactim_int1_adr;                    /* 0xC88 */
+	u32 lhl_wl_mactim_frac1_adr;                   /* 0xC8C */
+	u32 PAD[8];
+	u32 gpio_int_en_port_adr[4];                   /* 0xCB0-0xCBC */
+	u32 gpio_int_st_port_adr[4];                   /* 0xCC0-0xCCC */
+	u32 gpio_ctrl_iocfg_p_adr[64];                 /* 0xCD0-0xDCC */
+	u32 gpio_gctrl_iocfg_p0_p39_adr;               /* 0xDD0 */
+	u32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr;     /* 0xDD4 */
+	u32 gpio_gdsctrl_iocfg_p26_p29_adr;            /* 0xDD8 */
+	u32 PAD[8];
+	u32 lhl_gpio_din0_adr;                         /* 0xDFC */
+	u32 lhl_gpio_din1_adr;                         /* 0xE00 */
+	u32 lhl_wkup_status_adr;                       /* 0xE04 */
+	u32 lhl_ctl_adr;                               /* 0xE08 */
+	u32 lhl_adc_ctl_adr;                           /* 0xE0C */
+	u32 lhl_qdxyz_in_dly_adr;                      /* 0xE10 */
+	u32 lhl_optctl_adr;                            /* 0xE14 */
+	u32 lhl_optct2_adr;                            /* 0xE18 */
+	u32 lhl_scanp_cntr_init_val_adr;               /* 0xE1C */
+	u32 lhl_opt_togg_val_adr[6];                   /* 0xE20-0xE34 */
+	u32 lhl_optx_smp_val_adr;                      /* 0xE38 */
+	u32 lhl_opty_smp_val_adr;                      /* 0xE3C */
+	u32 lhl_optz_smp_val_adr;                      /* 0xE40 */
+	u32 lhl_hidoff_keepstate_adr[3];               /* 0xE44-0xE4C */
+	u32 lhl_bt_slmboot_ctl0_adr[4];                /* 0xE50-0xE5C */
+	u32 lhl_wl_fw_ctl;                             /* 0xE60 */
+	u32 lhl_wl_hw_ctl_adr[2];                      /* 0xE64-0xE68 */
+	u32 lhl_bt_hw_ctl_adr;                         /* 0xE6C */
+	u32 lhl_top_pwrseq_en_adr;                     /* 0xE70 */
+	u32 lhl_top_pwrdn_ctl_adr;                     /* 0xE74 */
+	u32 lhl_top_pwrup_ctl_adr;                     /* 0xE78 */
+	u32 lhl_top_pwrseq_ctl_adr;                    /* 0xE7C */
+	u32 lhl_top_pwrdn2_ctl_adr;                    /* 0xE80 */
+	u32 lhl_top_pwrup2_ctl_adr;                    /* 0xE84 */
+	u32 wpt_regon_intrp_cfg_adr;                   /* 0xE88 */
+	u32 bt_regon_intrp_cfg_adr;                    /* 0xE8C */
+	u32 wl_regon_intrp_cfg_adr;                    /* 0xE90 */
+	u32 regon_intrp_st_adr;                        /* 0xE94 */
+	u32 regon_intrp_en_adr;                        /* 0xE98 */
+
 };
 
 /* chipid */
@@ -308,4 +497,6 @@ struct chipcregs {
 */
 #define PMU_MAX_TRANSITION_DLY	15000
 
+#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77
+
 #endif				/* _SBCHIPC_H */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 18eba06d094..4e7d55080b8 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -816,7 +816,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
 	 * doesn't seem to have as many firmware restart cycles...
 	 *
 	 * As a test, we're sticking in a 1/100s delay here */
-	schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+	schedule_msec_hrtimeout_uninterruptible((10));
 
 	return 0;
 
@@ -1267,7 +1267,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
 	IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
 	i = 5000;
 	do {
-		schedule_timeout_uninterruptible(msecs_to_jiffies(40));
+		schedule_msec_hrtimeout_uninterruptible((40));
 		/* Todo... wait for sync command ... */
 
 		read_register(priv->net_dev, IPW_REG_INTA, &inta);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 29971c25dba..9ea3e563467 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -577,6 +577,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 	IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
 	IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
 	IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+	IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+	IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
 
 	IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
 
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 90fb7357549..c94048b048a 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -208,7 +208,7 @@ int parport_wait_peripheral(struct parport *port,
 			/* parport_wait_event didn't time out, but the
 			 * peripheral wasn't actually ready either.
 			 * Wait for another 10ms. */
-			schedule_timeout_interruptible(msecs_to_jiffies(10));
+			schedule_msec_hrtimeout_interruptible((10));
 		}
 	}
 
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index 5d41dda6da4..34705f6b423 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
 			/* Yield the port for a while. */
 			if (count && dev->port->irq != PARPORT_IRQ_NONE) {
 				parport_release (dev);
-				schedule_timeout_interruptible(msecs_to_jiffies(40));
+				schedule_msec_hrtimeout_interruptible((40));
 				parport_claim_or_block (dev);
 			}
 			else
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 594622a6cb1..a6b9b479b97 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -148,11 +148,11 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
 	pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
 	u16 status;
 
-	pci_info(pdev, "ACPI event %#x received\n", event);
-
 	if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
 		return;
 
+	pci_info(pdev, "EDR event received\n");
+
 	/* Locate the port which issued EDR event */
 	edev = acpi_dpc_port_get(pdev);
 	if (!edev) {
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index bffe548187e..c2918ee3e10 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -798,7 +798,7 @@ static int ips_adjust(void *data)
 			ips_gpu_lower(ips);
 
 sleep:
-		schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
+		schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
 	} while (!kthread_should_stop());
 
 	dev_dbg(ips->dev, "ips-adjust thread stopped\n");
@@ -974,7 +974,7 @@ static int ips_monitor(void *data)
 	seqno_timestamp = get_jiffies_64();
 
 	old_cpu_power = thm_readl(THM_CEC);
-	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+	schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
 
 	/* Collect an initial average */
 	for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
@@ -1001,7 +1001,7 @@ static int ips_monitor(void *data)
 			mchp_samples[i] = mchp;
 		}
 
-		schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+		schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
 		if (kthread_should_stop())
 			break;
 	}
@@ -1028,7 +1028,7 @@ static int ips_monitor(void *data)
 	 * us to reduce the sample frequency if the CPU and GPU are idle.
 	 */
 	old_cpu_power = thm_readl(THM_CEC);
-	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+	schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
 	last_sample_period = IPS_SAMPLE_PERIOD;
 
 	timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index ff0350ca3b7..78b6fa270cf 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -2,3 +2,4 @@
 source "drivers/power/avs/Kconfig"
 source "drivers/power/reset/Kconfig"
 source "drivers/power/supply/Kconfig"
+source "drivers/power/pwrseq/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b7c2e372186..13046c7fb49 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -2,3 +2,4 @@
 obj-$(CONFIG_POWER_AVS)		+= avs/
 obj-$(CONFIG_POWER_RESET)	+= reset/
 obj-$(CONFIG_POWER_SUPPLY)	+= supply/
+obj-$(CONFIG_POWER_SEQUENCE)	+= pwrseq/
diff --git a/drivers/power/pwrseq/Kconfig b/drivers/power/pwrseq/Kconfig
new file mode 100644
index 00000000000..c6b356926cc
--- /dev/null
+++ b/drivers/power/pwrseq/Kconfig
@@ -0,0 +1,20 @@
+#
+# Power Sequence library
+#
+
+menuconfig POWER_SEQUENCE
+	bool "Power sequence control"
+	help
+	   This library is used by drivers which need to run a power sequence
+	   (e.g. turn on a clock, toggle a reset GPIO) before the related
+	   devices can be discovered by the hardware, e.g. on a USB bus.
+
+if POWER_SEQUENCE
+
+config PWRSEQ_GENERIC
+	bool "Generic power sequence control"
+	depends on OF
+	help
+	   This is the generic power sequence control library; it is
+	   intended to cover the common power sequence usage.
+endif
diff --git a/drivers/power/pwrseq/Makefile b/drivers/power/pwrseq/Makefile
new file mode 100644
index 00000000000..ad82389028c
--- /dev/null
+++ b/drivers/power/pwrseq/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWER_SEQUENCE) += core.o
+obj-$(CONFIG_PWRSEQ_GENERIC) += pwrseq_generic.o
diff --git a/drivers/power/pwrseq/core.c b/drivers/power/pwrseq/core.c
new file mode 100644
index 00000000000..3d19e62a2e7
--- /dev/null
+++ b/drivers/power/pwrseq/core.c
@@ -0,0 +1,335 @@
+/*
+ * core.c	power sequence core file
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Author: Peter Chen <peter.chen@nxp.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/power/pwrseq.h>
+
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
+
+static int pwrseq_get(struct device_node *np, struct pwrseq *p)
+{
+	if (p && p->get)
+		return p->get(np, p);
+
+	return -ENOTSUPP;
+}
+
+static int pwrseq_on(struct pwrseq *p)
+{
+	if (p && p->on)
+		return p->on(p);
+
+	return -ENOTSUPP;
+}
+
+static void pwrseq_off(struct pwrseq *p)
+{
+	if (p && p->off)
+		p->off(p);
+}
+
+static void pwrseq_put(struct pwrseq *p)
+{
+	if (p && p->put)
+		p->put(p);
+}
+
+/**
+ * pwrseq_register - Add pwrseq instance to global pwrseq list
+ *
+ * @pwrseq: the pwrseq instance
+ */
+void pwrseq_register(struct pwrseq *pwrseq)
+{
+	mutex_lock(&pwrseq_list_mutex);
+	list_add(&pwrseq->node, &pwrseq_list);
+	mutex_unlock(&pwrseq_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pwrseq_register);
+
+/**
+ * pwrseq_unregister - Remove pwrseq instance from global pwrseq list
+ *
+ * @pwrseq: the pwrseq instance
+ */
+void pwrseq_unregister(struct pwrseq *pwrseq)
+{
+	mutex_lock(&pwrseq_list_mutex);
+	list_del(&pwrseq->node);
+	mutex_unlock(&pwrseq_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pwrseq_unregister);
+
+static struct pwrseq *pwrseq_find_available_instance(struct device_node *np)
+{
+	struct pwrseq *pwrseq;
+
+	mutex_lock(&pwrseq_list_mutex);
+	list_for_each_entry(pwrseq, &pwrseq_list, node) {
+		if (pwrseq->used)
+			continue;
+
+		/* compare compatible string for pwrseq node */
+		if (of_match_node(pwrseq->pwrseq_of_match_table, np)) {
+			pwrseq->used = true;
+			mutex_unlock(&pwrseq_list_mutex);
+			return pwrseq;
+		}
+
+		/* return generic pwrseq instance */
+		if (!strcmp(pwrseq->pwrseq_of_match_table->compatible,
+				"generic")) {
+			pr_debug("using generic pwrseq instance for %s\n",
+				np->full_name);
+			pwrseq->used = true;
+			mutex_unlock(&pwrseq_list_mutex);
+			return pwrseq;
+		}
+	}
+	mutex_unlock(&pwrseq_list_mutex);
+	pr_debug("Can't find any pwrseq instances for %s\n", np->full_name);
+
+	return NULL;
+}
+
+/**
+ * of_pwrseq_on - Carry out power sequence on for device node
+ *
+ * @np: the device node would like to power on
+ *
+ * Carry out a single device power on.  If multiple devices
+ * need to be handled, use of_pwrseq_on_list() instead.
+ *
+ * Return a pointer to the power sequence instance on success,
+ * or an error code otherwise.
+ */
+struct pwrseq *of_pwrseq_on(struct device_node *np)
+{
+	struct pwrseq *pwrseq;
+	int ret;
+
+	pwrseq = pwrseq_find_available_instance(np);
+	if (!pwrseq)
+		return ERR_PTR(-ENOENT);
+
+	ret = pwrseq_get(np, pwrseq);
+	if (ret) {
+		/* Mark current pwrseq as unused */
+		pwrseq->used = false;
+		return ERR_PTR(ret);
+	}
+
+	ret = pwrseq_on(pwrseq);
+	if (ret)
+		goto pwr_put;
+
+	return pwrseq;
+
+pwr_put:
+	pwrseq_put(pwrseq);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(of_pwrseq_on);
+
+/**
+ * of_pwrseq_off - Carry out power sequence off for this pwrseq instance
+ *
+ * @pwrseq: the pwrseq instance which related device would like to be off
+ *
+ * This API is used to power off a single device; it is the opposite
+ * operation to of_pwrseq_on().
+ */
+void of_pwrseq_off(struct pwrseq *pwrseq)
+{
+	pwrseq_off(pwrseq);
+	pwrseq_put(pwrseq);
+}
+EXPORT_SYMBOL_GPL(of_pwrseq_off);
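
Editor's note: for context, a consumer driver controlling a single child device would typically call these two entry points from its probe and remove paths. The sketch below assumes a hypothetical platform driver ("foo") with a private structure holding the returned handle; it illustrates the API as defined above and is not code from this patch.

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/power/pwrseq.h>

struct foo_priv {
	struct pwrseq *pwrseq;		/* handle returned by of_pwrseq_on() */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* power up the device described by our OF node before touching it */
	priv->pwrseq = of_pwrseq_on(pdev->dev.of_node);
	if (IS_ERR(priv->pwrseq))
		return PTR_ERR(priv->pwrseq);

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	of_pwrseq_off(priv->pwrseq);	/* opposite of of_pwrseq_on() */
	return 0;
}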
+
+/**
+ * of_pwrseq_on_list - Carry out power sequence on for list
+ *
+ * @np: the device node would like to power on
+ * @head: the list head for pwrseq list on this bus
+ *
+ * This API is used to power on multiple devices on a single bus.
+ * If there are several devices on the bus (e.g. a USB bus), use
+ * this API; otherwise, use of_pwrseq_on() instead. After a device
+ * is powered on successfully, it is added to the pwrseq list for
+ * this bus. The caller must hold a mutex to serialize concurrent use.
+ *
+ * Return 0 on success, or an error value otherwise.
+ */
+int of_pwrseq_on_list(struct device_node *np, struct list_head *head)
+{
+	struct pwrseq *pwrseq;
+	struct pwrseq_list_per_dev *pwrseq_list_node;
+
+	pwrseq_list_node = kzalloc(sizeof(*pwrseq_list_node), GFP_KERNEL);
+	if (!pwrseq_list_node)
+		return -ENOMEM;
+
+	pwrseq = of_pwrseq_on(np);
+	if (IS_ERR(pwrseq)) {
+		kfree(pwrseq_list_node);
+		return PTR_ERR(pwrseq);
+	}
+
+	pwrseq_list_node->pwrseq = pwrseq;
+	list_add(&pwrseq_list_node->list, head);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pwrseq_on_list);
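
Editor's note: a bus controller with a variable number of children would instead keep a list head (and a mutex, since the list helpers are not locked internally) and power every child up front. The hypothetical sketch below shows that intended usage; treating -ENOENT as "no power sequence described for this child" is an assumption about caller policy, not something mandated by the patch.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/power/pwrseq.h>

struct bus_priv {
	struct list_head pwrseq_list;	/* one entry per powered child */
	struct mutex pwrseq_lock;	/* serializes pwrseq list manipulation */
};

static int bus_power_on_children(struct bus_priv *bus, struct device_node *np)
{
	struct device_node *child;
	int ret = 0;

	INIT_LIST_HEAD(&bus->pwrseq_list);
	mutex_init(&bus->pwrseq_lock);

	mutex_lock(&bus->pwrseq_lock);
	for_each_available_child_of_node(np, child) {
		ret = of_pwrseq_on_list(child, &bus->pwrseq_list);
		if (ret && ret != -ENOENT) {	/* -ENOENT: no pwrseq for this child */
			of_node_put(child);
			of_pwrseq_off_list(&bus->pwrseq_list);
			break;
		}
		ret = 0;
	}
	mutex_unlock(&bus->pwrseq_lock);

	return ret;
}

static void bus_power_off_children(struct bus_priv *bus)
{
	mutex_lock(&bus->pwrseq_lock);
	of_pwrseq_off_list(&bus->pwrseq_list);
	mutex_unlock(&bus->pwrseq_lock);
}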
+
+/**
+ * of_pwrseq_off_list - Carry out power sequence off for the list
+ *
+ * @head: the list head for pwrseq instance list on this bus
+ *
+ * This API is used to power off all devices on this bus; it is
+ * the opposite operation to of_pwrseq_on_list().
+ * The caller must hold a mutex to serialize concurrent use.
+ */
+void of_pwrseq_off_list(struct list_head *head)
+{
+	struct pwrseq *pwrseq;
+	struct pwrseq_list_per_dev *pwrseq_list_node, *tmp_node;
+
+	list_for_each_entry_safe(pwrseq_list_node, tmp_node, head, list) {
+		pwrseq = pwrseq_list_node->pwrseq;
+		of_pwrseq_off(pwrseq);
+		list_del(&pwrseq_list_node->list);
+		kfree(pwrseq_list_node);
+	}
+}
+EXPORT_SYMBOL_GPL(of_pwrseq_off_list);
+
+/**
+ * pwrseq_suspend - Carry out power sequence suspend for this pwrseq instance
+ *
+ * @pwrseq: the pwrseq instance
+ *
+ * This API is used to carry out the suspend operation on a pwrseq instance.
+ *
+ * Return 0 on success, or an error value otherwise.
+ */
+int pwrseq_suspend(struct pwrseq *p)
+{
+	int ret = 0;
+
+	if (p && p->suspend)
+		ret = p->suspend(p);
+	else
+		return ret;
+
+	if (!ret)
+		p->suspended = true;
+	else
+		pr_err("%s failed\n", __func__);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_suspend);
+
+/**
+ * pwrseq_resume - Carry out power sequence resume for this pwrseq instance
+ *
+ * @pwrseq: the pwrseq instance
+ *
+ * This API is used to carry out the resume operation on a pwrseq instance.
+ *
+ * Return 0 on success, or an error value otherwise.
+ */
+int pwrseq_resume(struct pwrseq *p)
+{
+	int ret = 0;
+
+	if (p && p->resume)
+		ret = p->resume(p);
+	else
+		return ret;
+
+	if (!ret)
+		p->suspended = false;
+	else
+		pr_err("%s failed\n", __func__);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_resume);
+
+/**
+ * pwrseq_suspend_list - Carry out power sequence suspend for list
+ *
+ * @head: the list head for pwrseq instance list on this bus
+ *
+ * This API is used to suspend all power sequence instances on this bus.
+ * The caller must hold a mutex to serialize concurrent use.
+ */
+int pwrseq_suspend_list(struct list_head *head)
+{
+	struct pwrseq *pwrseq;
+	struct pwrseq_list_per_dev *pwrseq_list_node;
+	int ret = 0;
+
+	list_for_each_entry(pwrseq_list_node, head, list) {
+		ret = pwrseq_suspend(pwrseq_list_node->pwrseq);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		list_for_each_entry(pwrseq_list_node, head, list) {
+			pwrseq = pwrseq_list_node->pwrseq;
+			if (pwrseq->suspended)
+				pwrseq_resume(pwrseq);
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_suspend_list);
+
+/**
+ * pwrseq_resume_list - Carry out power sequence resume for the list
+ *
+ * @head: the list head for pwrseq instance list on this bus
+ *
+ * This API is used to resume all power sequence instances on this bus.
+ * The caller must hold a mutex to serialize concurrent use.
+ */
+int pwrseq_resume_list(struct list_head *head)
+{
+	struct pwrseq_list_per_dev *pwrseq_list_node;
+	int ret = 0;
+
+	list_for_each_entry(pwrseq_list_node, head, list) {
+		ret = pwrseq_resume(pwrseq_list_node->pwrseq);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pwrseq_resume_list);
diff --git a/drivers/power/pwrseq/pwrseq_generic.c b/drivers/power/pwrseq/pwrseq_generic.c
new file mode 100644
index 00000000000..4e7c09086cf
--- /dev/null
+++ b/drivers/power/pwrseq/pwrseq_generic.c
@@ -0,0 +1,234 @@
+/*
+ * pwrseq_generic.c	Generic power sequence handling
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Author: Peter Chen <peter.chen@nxp.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+
+#include <linux/power/pwrseq.h>
+
+struct pwrseq_generic {
+	struct pwrseq pwrseq;
+	struct gpio_desc *gpiod_reset;
+	struct clk *clks[PWRSEQ_MAX_CLKS];
+	u32 duration_us;
+	bool suspended;
+};
+
+#define to_generic_pwrseq(p) container_of(p, struct pwrseq_generic, pwrseq)
+
+static int pwrseq_generic_alloc_instance(void);
+static const struct of_device_id generic_id_table[] = {
+	{ .compatible = "generic",},
+	{ /* sentinel */ }
+};
+
+static int pwrseq_generic_suspend(struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	int clk;
+
+	for (clk = PWRSEQ_MAX_CLKS - 1; clk >= 0; clk--)
+		clk_disable_unprepare(pwrseq_gen->clks[clk]);
+
+	pwrseq_gen->suspended = true;
+	return 0;
+}
+
+static int pwrseq_generic_resume(struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	int clk, ret = 0;
+
+	for (clk = 0; clk < PWRSEQ_MAX_CLKS && pwrseq_gen->clks[clk]; clk++) {
+		ret = clk_prepare_enable(pwrseq_gen->clks[clk]);
+		if (ret) {
+			pr_err("Can't enable clock, ret=%d\n", ret);
+			goto err_disable_clks;
+		}
+	}
+
+	pwrseq_gen->suspended = false;
+	return ret;
+
+err_disable_clks:
+	while (--clk >= 0)
+		clk_disable_unprepare(pwrseq_gen->clks[clk]);
+
+	return ret;
+}
+
+static void pwrseq_generic_put(struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	int clk;
+
+	if (pwrseq_gen->gpiod_reset)
+		gpiod_put(pwrseq_gen->gpiod_reset);
+
+	for (clk = 0; clk < PWRSEQ_MAX_CLKS; clk++)
+		clk_put(pwrseq_gen->clks[clk]);
+
+	pwrseq_unregister(&pwrseq_gen->pwrseq);
+	kfree(pwrseq_gen);
+}
+
+static void pwrseq_generic_off(struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	int clk;
+
+	if (pwrseq_gen->suspended)
+		return;
+
+	for (clk = PWRSEQ_MAX_CLKS - 1; clk >= 0; clk--)
+		clk_disable_unprepare(pwrseq_gen->clks[clk]);
+}
+
+static int pwrseq_generic_on(struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	int clk, ret = 0;
+	struct gpio_desc *gpiod_reset = pwrseq_gen->gpiod_reset;
+
+	for (clk = 0; clk < PWRSEQ_MAX_CLKS && pwrseq_gen->clks[clk]; clk++) {
+		ret = clk_prepare_enable(pwrseq_gen->clks[clk]);
+		if (ret) {
+			pr_err("Can't enable clock, ret=%d\n", ret);
+			goto err_disable_clks;
+		}
+	}
+
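+	/*
+	 * The reset GPIO was requested in the asserted state by
+	 * pwrseq_generic_get(); keep it asserted for "reset-duration-us"
+	 * after the clocks are running, then deassert it to release the
+	 * device from reset.
+	 */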
+	if (gpiod_reset) {
+		u32 duration_us = pwrseq_gen->duration_us;
+
+		if (duration_us <= 10)
+			udelay(10);
+		else
+			usleep_range(duration_us, duration_us + 100);
+		gpiod_set_value(gpiod_reset, 0);
+	}
+
+	return ret;
+
+err_disable_clks:
+	while (--clk >= 0)
+		clk_disable_unprepare(pwrseq_gen->clks[clk]);
+
+	return ret;
+}
+
+static int pwrseq_generic_get(struct device_node *np, struct pwrseq *pwrseq)
+{
+	struct pwrseq_generic *pwrseq_gen = to_generic_pwrseq(pwrseq);
+	enum of_gpio_flags flags;
+	int reset_gpio, clk, ret = 0;
+
+	for (clk = 0; clk < PWRSEQ_MAX_CLKS; clk++) {
+		pwrseq_gen->clks[clk] = of_clk_get(np, clk);
+		if (IS_ERR(pwrseq_gen->clks[clk])) {
+			ret = PTR_ERR(pwrseq_gen->clks[clk]);
+			if (ret != -ENOENT)
+				goto err_put_clks;
+			pwrseq_gen->clks[clk] = NULL;
+			break;
+		}
+	}
+
+	reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
+	if (gpio_is_valid(reset_gpio)) {
+		unsigned long gpio_flags;
+
+		if (flags & OF_GPIO_ACTIVE_LOW)
+			gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_LOW;
+		else
+			gpio_flags = GPIOF_OUT_INIT_HIGH;
+
+		ret = gpio_request_one(reset_gpio, gpio_flags,
+				"pwrseq-reset-gpios");
+		if (ret)
+			goto err_put_clks;
+
+		pwrseq_gen->gpiod_reset = gpio_to_desc(reset_gpio);
+		of_property_read_u32(np, "reset-duration-us",
+				&pwrseq_gen->duration_us);
+	} else if (reset_gpio == -ENOENT) {
+		; /* no such gpio */
+	} else {
+		ret = reset_gpio;
+		pr_err("Failed to get reset gpio on %s, err = %d\n",
+				np->full_name, reset_gpio);
+		goto err_put_clks;
+	}
+
+	/* allocate a fresh instance for the next pwrseq request */
+	ret = pwrseq_generic_alloc_instance();
+	if (ret)
+		goto err_put_gpio;
+
+	return 0;
+
+err_put_gpio:
+	if (pwrseq_gen->gpiod_reset)
+		gpiod_put(pwrseq_gen->gpiod_reset);
+err_put_clks:
+	while (--clk >= 0)
+		clk_put(pwrseq_gen->clks[clk]);
+	return ret;
+}
+
+/**
+ * pwrseq_generic_alloc_instance - allocate a generic power sequence instance
+ *
+ * Allocate and register one generic power sequence instance.  This is
+ * called at boot and again each time an existing instance has been claimed
+ * successfully, so that a spare instance is always available for the next
+ * request.
+ *
+ * Return zero on success or an error code otherwise.
+ */
+static int pwrseq_generic_alloc_instance(void)
+{
+	struct pwrseq_generic *pwrseq_gen;
+
+	pwrseq_gen = kzalloc(sizeof(*pwrseq_gen), GFP_KERNEL);
+	if (!pwrseq_gen)
+		return -ENOMEM;
+
+	pwrseq_gen->pwrseq.pwrseq_of_match_table = generic_id_table;
+	pwrseq_gen->pwrseq.get = pwrseq_generic_get;
+	pwrseq_gen->pwrseq.on = pwrseq_generic_on;
+	pwrseq_gen->pwrseq.off = pwrseq_generic_off;
+	pwrseq_gen->pwrseq.put = pwrseq_generic_put;
+	pwrseq_gen->pwrseq.suspend = pwrseq_generic_suspend;
+	pwrseq_gen->pwrseq.resume = pwrseq_generic_resume;
+
+	pwrseq_register(&pwrseq_gen->pwrseq);
+	return 0;
+}
+
+/* Allocate one pwrseq instance at boot time */
+static int __init pwrseq_generic_register(void)
+{
+	return pwrseq_generic_alloc_instance();
+}
+postcore_initcall(pwrseq_generic_register)
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index b8100c3ceda..855cde0c46c 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -299,6 +299,13 @@ static const struct regulator_ops twl6030fixed_ops = {
 	.get_status	= twl6030reg_get_status,
 };
 
+static struct regulator_ops twl6030_fixed_resource = {
+	.enable	= twl6030reg_enable,
+	.disable	= twl6030reg_disable,
+	.is_enabled	= twl6030reg_is_enabled,
+	.get_status	= twl6030reg_get_status,
+};
+
 /*
  * SMPS status and control
  */
@@ -572,6 +579,19 @@ static const struct twlreg_info TWLSMPS_INFO_##label = { \
 		}, \
 	}
 
+#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) \
+static struct twlreg_info TWLRES_INFO_##label = { \
+	.base = offset, \
+	.desc = { \
+	.name = #label, \
+	.id = TWL6030_REG_##label, \
+	.ops = &twl6030_fixed_resource, \
+	.type = REGULATOR_VOLTAGE, \
+	.owner = THIS_MODULE, \
+	.enable_time = turnon_delay, \
+	}, \
+	}
+
 /* VUSBCP is managed *only* by the USB subchip */
 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
 /* Turnon-delay and remap configuration values for 6030 are not
@@ -601,6 +621,7 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
 TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
 TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
+TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0);
 TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34);
 TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10);
 TWL6032_ADJUSTABLE_SMPS(VIO, 0x16);
@@ -632,6 +653,7 @@ static u8 twl_get_smps_mult(void)
 #define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label)
 #define TWL6032_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6032, label)
 #define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
+#define TWLRES_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLRES, label)
 #define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
 
 static const struct of_device_id twl_of_match[] = {
@@ -659,6 +681,7 @@ static const struct of_device_id twl_of_match[] = {
 	TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
 	TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
 	TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
+	TWLRES_OF_MATCH("ti,twl6030-clk32kg", CLK32KG),
 	TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3),
 	TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4),
 	TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO),
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 2018614f258..fc19b312c34 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -114,7 +114,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
 	/* Wait until confirmation of stopping */
 	do {
 		rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout_uninterruptible((1));
 	} while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
 
 	if (!retries) {
@@ -197,7 +197,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
 	/* Wait until confirmation of stopping */
 	do {
 		rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout_uninterruptible((1));
 	} while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
 
 	if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
@@ -220,7 +220,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
 	/* Wait until confirmation */
 	do {
 		rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout_uninterruptible((1));
 	} while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
 
 	if (rtc_ctrl & WM8350_RTC_ALMSTS)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 2017c43dac1..25f56898cb2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1522,4 +1522,6 @@ endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/device_handler/Kconfig"
 
+source "drivers/scsi/vhba/Kconfig"
+
 endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index c00e3dd5799..97546650b6e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -153,6 +153,7 @@ obj-$(CONFIG_CHR_DEV_SCH)	+= ch.o
 obj-$(CONFIG_SCSI_ENCLOSURE)	+= ses.o
 
 obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
+obj-$(CONFIG_VHBA)		+= vhba/
 
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index b6079589399..d2d05691dbd 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -216,7 +216,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
 
 	/* wait for io cmpl */
 	while (atomic_read(&fnic->in_flight))
-		schedule_timeout(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout((1));
 
 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
@@ -2277,7 +2277,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 		}
 	}
 
-	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+	schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
 
 	/* walk again to check, if IOs are still pending in fw */
 	if (fnic_is_abts_pending(fnic, lr_sc))
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ad62fb3f3a5..a84d4c99d7d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5191,7 +5191,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
 					tgt_id, lun_id, context);
 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
 	while (time_after(later, jiffies) && cnt) {
-		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+		schedule_msec_hrtimeout_uninterruptible((20));
 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
 	}
 	if (cnt) {
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index b3650c989ed..7ed1fb28575 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -2353,7 +2353,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
 
 	/* Wait for all the IOs that are entered in Qcmd */
 	while (atomic_read(&snic->ios_inflight))
-		schedule_timeout(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout((1));
 
 	ret = snic_issue_hba_reset(snic, sc);
 	if (ret) {
diff --git a/drivers/scsi/vhba/Kconfig b/drivers/scsi/vhba/Kconfig
new file mode 100644
index 00000000000..7ccb7d8dc25
--- /dev/null
+++ b/drivers/scsi/vhba/Kconfig
@@ -0,0 +1,9 @@
+config VHBA
+	tristate "Virtual (SCSI) Host Bus Adapter"
+	depends on SCSI
+	---help---
+	This is the in-kernel part of CDEmu, a CD/DVD-ROM device
+	emulator.
+
+	This driver can also be built as a module. If so, the module
+	will be called vhba.
diff --git a/drivers/scsi/vhba/Makefile b/drivers/scsi/vhba/Makefile
new file mode 100644
index 00000000000..25219d1193b
--- /dev/null
+++ b/drivers/scsi/vhba/Makefile
@@ -0,0 +1,4 @@
+VHBA_VERSION := 20200106
+
+obj-$(CONFIG_VHBA)		+= vhba.o
+ccflags-y := -DVHBA_VERSION=\"$(VHBA_VERSION)\" -Werror
diff --git a/drivers/scsi/vhba/vhba.c b/drivers/scsi/vhba/vhba.c
new file mode 100644
index 00000000000..7655be66fbe
--- /dev/null
+++ b/drivers/scsi/vhba/vhba.c
@@ -0,0 +1,1086 @@
+/*
+ * vhba.c
+ *
+ * Copyright (C) 2007-2012 Chia-I Wu <b90201047 AT ntu DOT edu DOT tw>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/version.h>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <asm/uaccess.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+
+MODULE_AUTHOR("Chia-I Wu");
+MODULE_VERSION(VHBA_VERSION);
+MODULE_DESCRIPTION("Virtual SCSI HBA");
+MODULE_LICENSE("GPL");
+
+#ifdef DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+/* scmd_dbg was introduced in 3.15 */
+#ifndef scmd_dbg
+#define scmd_dbg(scmd, fmt, a...)       \
+    dev_dbg(&(scmd)->device->sdev_gendev, fmt, ##a)
+#endif
+
+#define VHBA_MAX_SECTORS_PER_IO 256
+#define VHBA_MAX_BUS 16
+#define VHBA_MAX_ID 16 /* Usually 8 or 16 */
+#define VHBA_MAX_DEVICES (VHBA_MAX_BUS * (VHBA_MAX_ID-1))
+#define VHBA_CAN_QUEUE 32
+#define VHBA_INVALID_BUS -1
+#define VHBA_INVALID_ID -1
+#define VHBA_KBUF_SIZE PAGE_SIZE
+
+#define DATA_TO_DEVICE(dir) ((dir) == DMA_TO_DEVICE || (dir) == DMA_BIDIRECTIONAL)
+#define DATA_FROM_DEVICE(dir) ((dir) == DMA_FROM_DEVICE || (dir) == DMA_BIDIRECTIONAL)
+
+
+enum vhba_req_state {
+    VHBA_REQ_FREE,
+    VHBA_REQ_PENDING,
+    VHBA_REQ_READING,
+    VHBA_REQ_SENT,
+    VHBA_REQ_WRITING,
+};
+
+struct vhba_command {
+    struct scsi_cmnd *cmd;
+    unsigned long serial_number;
+    int status;
+    struct list_head entry;
+};
+
+struct vhba_device {
+    int bus; /* aka. channel */
+    int id;
+    int num;
+    spinlock_t cmd_lock;
+    struct list_head cmd_list;
+    wait_queue_head_t cmd_wq;
+    atomic_t refcnt;
+
+    unsigned char *kbuf;
+    size_t kbuf_size;
+
+    unsigned long cmd_count;
+};
+
+struct vhba_host {
+    struct Scsi_Host *shost;
+    spinlock_t cmd_lock;
+    int cmd_next;
+    struct vhba_command commands[VHBA_CAN_QUEUE];
+    spinlock_t dev_lock;
+    struct vhba_device *devices[VHBA_MAX_DEVICES];
+    int num_devices;
+    DECLARE_BITMAP(chgmap, VHBA_MAX_DEVICES);
+    int chgtype[VHBA_MAX_DEVICES];
+    struct work_struct scan_devices;
+};
+
+#define MAX_COMMAND_SIZE 16
+
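+/*
+ * On-the-wire format exchanged with the userspace emulator through
+ * /dev/vhba_ctl: every read() yields one struct vhba_request (followed by
+ * outgoing data for write-like CDBs) and every write() supplies one
+ * struct vhba_response (followed by incoming data, or sense bytes when
+ * status is non-zero).  The tag field ties a response to its request.
+ */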
+struct vhba_request {
+    __u32 tag;
+    __u32 lun;
+    __u8 cdb[MAX_COMMAND_SIZE];
+    __u8 cdb_len;
+    __u32 data_len;
+};
+
+struct vhba_response {
+    __u32 tag;
+    __u32 status;
+    __u32 data_len;
+};
+
+static struct vhba_command *vhba_alloc_command (void);
+static void vhba_free_command (struct vhba_command *vcmd);
+
+static struct platform_device vhba_platform_device;
+
+static struct vhba_device *vhba_device_alloc (void)
+{
+    struct vhba_device *vdev;
+
+    vdev = kzalloc(sizeof(struct vhba_device), GFP_KERNEL);
+    if (!vdev) {
+        return NULL;
+    }
+
+    vdev->bus = VHBA_INVALID_BUS;
+    vdev->id = VHBA_INVALID_ID;
+    spin_lock_init(&vdev->cmd_lock);
+    INIT_LIST_HEAD(&vdev->cmd_list);
+    init_waitqueue_head(&vdev->cmd_wq);
+    atomic_set(&vdev->refcnt, 1);
+
+    vdev->kbuf = NULL;
+    vdev->kbuf_size = 0;
+
+    vdev->cmd_count = 0;
+
+    return vdev;
+}
+
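+/*
+ * Device numbers are packed bus-major.  Target ID 0 is never handed out,
+ * so each virtual bus carries at most VHBA_MAX_ID-1 devices.
+ */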
+static void devnum_to_bus_and_id(int devnum, int *bus, int *id)
+{
+    int a = devnum / (VHBA_MAX_ID-1);
+    int b = devnum % (VHBA_MAX_ID-1);
+
+    *bus = a;
+    *id  = b + 1;
+}
+
+static int bus_and_id_to_devnum(int bus, int id)
+{
+    return (bus * (VHBA_MAX_ID-1)) + id - 1;
+}
+
+static void vhba_device_put (struct vhba_device *vdev)
+{
+    if (atomic_dec_and_test(&vdev->refcnt)) {
+        kfree(vdev);
+    }
+}
+
+static struct vhba_device *vhba_device_get (struct vhba_device *vdev)
+{
+    atomic_inc(&vdev->refcnt);
+
+    return vdev;
+}
+
+static int vhba_device_queue (struct vhba_device *vdev, struct scsi_cmnd *cmd)
+{
+    struct vhba_command *vcmd;
+    unsigned long flags;
+
+    vcmd = vhba_alloc_command();
+    if (!vcmd) {
+        return SCSI_MLQUEUE_HOST_BUSY;
+    }
+
+    vcmd->cmd = cmd;
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    vcmd->serial_number = vdev->cmd_count++;
+    list_add_tail(&vcmd->entry, &vdev->cmd_list);
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    wake_up_interruptible(&vdev->cmd_wq);
+
+    return 0;
+}
+
+static int vhba_device_dequeue (struct vhba_device *vdev, struct scsi_cmnd *cmd)
+{
+    struct vhba_command *vcmd;
+    int retval;
+    unsigned long flags;
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    list_for_each_entry(vcmd, &vdev->cmd_list, entry) {
+        if (vcmd->cmd == cmd) {
+            list_del_init(&vcmd->entry);
+            break;
+        }
+    }
+
+    /* command not found */
+    if (&vcmd->entry == &vdev->cmd_list) {
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+        return SUCCESS;
+    }
+
+    while (vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING) {
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+        scmd_dbg(cmd, "wait for I/O before aborting\n");
+        schedule_timeout(1);
+        spin_lock_irqsave(&vdev->cmd_lock, flags);
+    }
+
+    retval = (vcmd->status == VHBA_REQ_SENT) ? FAILED : SUCCESS;
+
+    vhba_free_command(vcmd);
+
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    return retval;
+}
+
+static inline void vhba_scan_devices_add (struct vhba_host *vhost, int bus, int id)
+{
+    struct scsi_device *sdev;
+
+    sdev = scsi_device_lookup(vhost->shost, bus, id, 0);
+    if (!sdev) {
+        scsi_add_device(vhost->shost, bus, id, 0);
+    } else {
+        dev_warn(&vhost->shost->shost_gendev, "tried to add an already-existing device %d:%d:0!\n", bus, id);
+        scsi_device_put(sdev);
+    }
+}
+
+static inline void vhba_scan_devices_remove (struct vhba_host *vhost, int bus, int id)
+{
+    struct scsi_device *sdev;
+
+    sdev = scsi_device_lookup(vhost->shost, bus, id, 0);
+    if (sdev) {
+        scsi_remove_device(sdev);
+        scsi_device_put(sdev);
+    } else {
+        dev_warn(&vhost->shost->shost_gendev, "tried to remove non-existing device %d:%d:0!\n", bus, id);
+    }
+}
+
+static void vhba_scan_devices (struct work_struct *work)
+{
+    struct vhba_host *vhost = container_of(work, struct vhba_host, scan_devices);
+    unsigned long flags;
+    int devnum, change, exists;
+    int bus, id;
+
+    while (1) {
+        spin_lock_irqsave(&vhost->dev_lock, flags);
+
+        devnum = find_first_bit(vhost->chgmap, VHBA_MAX_DEVICES);
+        if (devnum >= VHBA_MAX_DEVICES) {
+            spin_unlock_irqrestore(&vhost->dev_lock, flags);
+            break;
+        }
+        change = vhost->chgtype[devnum];
+        exists = vhost->devices[devnum] != NULL;
+
+        vhost->chgtype[devnum] = 0;
+        clear_bit(devnum, vhost->chgmap);
+
+        spin_unlock_irqrestore(&vhost->dev_lock, flags);
+
+        devnum_to_bus_and_id(devnum, &bus, &id);
+
+        if (change < 0) {
+            dev_dbg(&vhost->shost->shost_gendev, "trying to remove target %d:%d:0\n", bus, id);
+            vhba_scan_devices_remove(vhost, bus, id);
+        } else if (change > 0) {
+            dev_dbg(&vhost->shost->shost_gendev, "trying to add target %d:%d:0\n", bus, id);
+            vhba_scan_devices_add(vhost, bus, id);
+        } else {
+            /* quick sequence of add/remove or remove/add; determine
+               which one it was by checking whether the device structure exists */
+            if (exists) {
+                /* remove followed by add: remove and (re)add */
+                dev_dbg(&vhost->shost->shost_gendev, "trying to (re)add target %d:%d:0\n", bus, id);
+                vhba_scan_devices_remove(vhost, bus, id);
+                vhba_scan_devices_add(vhost, bus, id);
+            } else {
+                /* add followed by remove: no-op */
+                dev_dbg(&vhost->shost->shost_gendev, "no-op for target %d:%d:0\n", bus, id);
+            }
+        }
+    }
+}
+
+static int vhba_add_device (struct vhba_device *vdev)
+{
+    struct vhba_host *vhost;
+    int i;
+    unsigned long flags;
+    int bus, id;
+
+    vhost = platform_get_drvdata(&vhba_platform_device);
+
+    vhba_device_get(vdev);
+
+    spin_lock_irqsave(&vhost->dev_lock, flags);
+    if (vhost->num_devices >= VHBA_MAX_DEVICES) {
+        spin_unlock_irqrestore(&vhost->dev_lock, flags);
+        vhba_device_put(vdev);
+        return -EBUSY;
+    }
+
+    for (i = 0; i < VHBA_MAX_DEVICES; i++) {
+        devnum_to_bus_and_id(i, &bus, &id);
+
+        if (vhost->devices[i] == NULL) {
+            vdev->bus = bus;
+            vdev->id  = id;
+            vdev->num = i;
+            vhost->devices[i] = vdev;
+            vhost->num_devices++;
+            set_bit(i, vhost->chgmap);
+            vhost->chgtype[i]++;
+            break;
+        }
+    }
+    spin_unlock_irqrestore(&vhost->dev_lock, flags);
+
+    schedule_work(&vhost->scan_devices);
+
+    return 0;
+}
+
+static int vhba_remove_device (struct vhba_device *vdev)
+{
+    struct vhba_host *vhost;
+    unsigned long flags;
+
+    vhost = platform_get_drvdata(&vhba_platform_device);
+
+    spin_lock_irqsave(&vhost->dev_lock, flags);
+    set_bit(vdev->num, vhost->chgmap);
+    vhost->chgtype[vdev->num]--;
+    vhost->devices[vdev->num] = NULL;
+    vhost->num_devices--;
+    vdev->bus = VHBA_INVALID_BUS;
+    vdev->id = VHBA_INVALID_ID;
+    spin_unlock_irqrestore(&vhost->dev_lock, flags);
+
+    vhba_device_put(vdev);
+
+    schedule_work(&vhost->scan_devices);
+
+    return 0;
+}
+
+static struct vhba_device *vhba_lookup_device (int devnum)
+{
+    struct vhba_host *vhost;
+    struct vhba_device *vdev = NULL;
+    unsigned long flags;
+
+    vhost = platform_get_drvdata(&vhba_platform_device);
+
+    if (likely(devnum < VHBA_MAX_DEVICES)) {
+        spin_lock_irqsave(&vhost->dev_lock, flags);
+        vdev = vhost->devices[devnum];
+        if (vdev) {
+            vdev = vhba_device_get(vdev);
+        }
+
+        spin_unlock_irqrestore(&vhost->dev_lock, flags);
+    }
+
+    return vdev;
+}
+
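+/*
+ * Take a free slot from the host-wide command pool: try the slot after the
+ * last one handed out, otherwise scan the whole pool.  Returns NULL when
+ * every slot is busy.
+ */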
+static struct vhba_command *vhba_alloc_command (void)
+{
+    struct vhba_host *vhost;
+    struct vhba_command *vcmd;
+    unsigned long flags;
+    int i;
+
+    vhost = platform_get_drvdata(&vhba_platform_device);
+
+    spin_lock_irqsave(&vhost->cmd_lock, flags);
+
+    vcmd = vhost->commands + vhost->cmd_next++;
+    if (vcmd->status != VHBA_REQ_FREE) {
+        for (i = 0; i < vhost->shost->can_queue; i++) {
+            vcmd = vhost->commands + i;
+
+            if (vcmd->status == VHBA_REQ_FREE) {
+                vhost->cmd_next = i + 1;
+                break;
+            }
+        }
+
+        if (i == vhost->shost->can_queue) {
+            vcmd = NULL;
+        }
+    }
+
+    if (vcmd) {
+        vcmd->status = VHBA_REQ_PENDING;
+    }
+
+    vhost->cmd_next %= vhost->shost->can_queue;
+
+    spin_unlock_irqrestore(&vhost->cmd_lock, flags);
+
+    return vcmd;
+}
+
+static void vhba_free_command (struct vhba_command *vcmd)
+{
+    struct vhba_host *vhost;
+    unsigned long flags;
+
+    vhost = platform_get_drvdata(&vhba_platform_device);
+
+    spin_lock_irqsave(&vhost->cmd_lock, flags);
+    vcmd->status = VHBA_REQ_FREE;
+    spin_unlock_irqrestore(&vhost->cmd_lock, flags);
+}
+
+static int vhba_queuecommand_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+    struct vhba_device *vdev;
+    int retval;
+
+    scmd_dbg(cmd, "queue %p\n", cmd);
+
+    vdev = vhba_lookup_device(bus_and_id_to_devnum(cmd->device->channel, cmd->device->id));
+    if (!vdev) {
+        scmd_dbg(cmd, "no such device\n");
+
+        cmd->result = DID_NO_CONNECT << 16;
+        done(cmd);
+
+        return 0;
+    }
+
+    cmd->scsi_done = done;
+    retval = vhba_device_queue(vdev, cmd);
+
+    vhba_device_put(vdev);
+
+    return retval;
+}
+
+#ifdef DEF_SCSI_QCMD
+DEF_SCSI_QCMD(vhba_queuecommand)
+#else
+#define vhba_queuecommand vhba_queuecommand_lck
+#endif
+
+static int vhba_abort (struct scsi_cmnd *cmd)
+{
+    struct vhba_device *vdev;
+    int retval = SUCCESS;
+
+    scmd_dbg(cmd, "abort %p\n", cmd);
+
+    vdev = vhba_lookup_device(bus_and_id_to_devnum(cmd->device->channel, cmd->device->id));
+    if (vdev) {
+        retval = vhba_device_dequeue(vdev, cmd);
+        vhba_device_put(vdev);
+    } else {
+        cmd->result = DID_NO_CONNECT << 16;
+    }
+
+    return retval;
+}
+
+static struct scsi_host_template vhba_template = {
+    .module = THIS_MODULE,
+    .name = "vhba",
+    .proc_name = "vhba",
+    .queuecommand = vhba_queuecommand,
+    .eh_abort_handler = vhba_abort,
+    .can_queue = VHBA_CAN_QUEUE,
+    .this_id = -1,
+    .cmd_per_lun = 1,
+    .max_sectors = VHBA_MAX_SECTORS_PER_IO,
+    .sg_tablesize = 256,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+    .max_segment_size = VHBA_KBUF_SIZE,
+#endif
+};
+
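+/*
+ * Serialize one queued SCSI command into the user buffer as a struct
+ * vhba_request, bouncing any outgoing scatter-gather data through
+ * vdev->kbuf.  Returns the number of bytes produced or a negative errno.
+ */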
+static ssize_t do_request (struct vhba_device *vdev, unsigned long cmd_serial_number, struct scsi_cmnd *cmd, char __user *buf, size_t buf_len)
+{
+    struct vhba_request vreq;
+    ssize_t ret;
+
+    scmd_dbg(cmd, "request %lu (%p), cdb 0x%x, bufflen %d, sg count %d\n",
+        cmd_serial_number, cmd, cmd->cmnd[0], scsi_bufflen(cmd), scsi_sg_count(cmd));
+
+    ret = sizeof(vreq);
+    if (DATA_TO_DEVICE(cmd->sc_data_direction)) {
+        ret += scsi_bufflen(cmd);
+    }
+
+    if (ret > buf_len) {
+        scmd_dbg(cmd, "buffer too small (%zd < %zd) for a request\n", buf_len, ret);
+        return -EIO;
+    }
+
+    vreq.tag = cmd_serial_number;
+    vreq.lun = cmd->device->lun;
+    memcpy(vreq.cdb, cmd->cmnd, MAX_COMMAND_SIZE);
+    vreq.cdb_len = cmd->cmd_len;
+    vreq.data_len = scsi_bufflen(cmd);
+
+    if (copy_to_user(buf, &vreq, sizeof(vreq))) {
+        return -EFAULT;
+    }
+
+    if (DATA_TO_DEVICE(cmd->sc_data_direction) && vreq.data_len) {
+        buf += sizeof(vreq);
+
+        if (scsi_sg_count(cmd)) {
+            unsigned char *kaddr, *uaddr;
+            struct scatterlist *sglist = scsi_sglist(cmd);
+            struct scatterlist *sg;
+            int i;
+
+            uaddr = (unsigned char *) buf;
+
+            for_each_sg(sglist, sg, scsi_sg_count(cmd), i) {
+                size_t len = sg->length;
+
+                if (len > vdev->kbuf_size) {
+                    scmd_dbg(cmd, "segment size (%zu) exceeds kbuf size (%zu)!", len, vdev->kbuf_size);
+                    len = vdev->kbuf_size;
+                }
+
+                kaddr = kmap_atomic(sg_page(sg));
+                memcpy(vdev->kbuf, kaddr + sg->offset, len);
+                kunmap_atomic(kaddr);
+
+                if (copy_to_user(uaddr, vdev->kbuf, len)) {
+                    return -EFAULT;
+                }
+                uaddr += len;
+            }
+        } else {
+            if (copy_to_user(buf, scsi_sglist(cmd), vreq.data_len)) {
+                return -EFAULT;
+            }
+        }
+    }
+
+    return ret;
+}
+
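+/*
+ * Apply a userspace reply to the matching command: copy in sense data when
+ * a SCSI status is reported, otherwise copy the incoming data (again via
+ * vdev->kbuf for scatter-gather) and update the residual count.  Returns
+ * the number of payload bytes consumed or a negative errno.
+ */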
+static ssize_t do_response (struct vhba_device *vdev, unsigned long cmd_serial_number, struct scsi_cmnd *cmd, const char __user *buf, size_t buf_len, struct vhba_response *res)
+{
+    ssize_t ret = 0;
+
+    scmd_dbg(cmd, "response %lu (%p), status %x, data len %d, sg count %d\n",
+         cmd_serial_number, cmd, res->status, res->data_len, scsi_sg_count(cmd));
+
+    if (res->status) {
+        unsigned char sense_stack[SCSI_SENSE_BUFFERSIZE];
+
+        if (res->data_len > SCSI_SENSE_BUFFERSIZE) {
+            scmd_dbg(cmd, "truncate sense (%d < %d)", SCSI_SENSE_BUFFERSIZE, res->data_len);
+            res->data_len = SCSI_SENSE_BUFFERSIZE;
+        }
+
+        /* Copy via a temporary buffer on the stack to avoid problems
+           with PAX on grsecurity-enabled kernels */
+        if (copy_from_user(sense_stack, buf, res->data_len)) {
+            return -EFAULT;
+        }
+        memcpy(cmd->sense_buffer, sense_stack, res->data_len);
+
+        cmd->result = res->status;
+
+        ret += res->data_len;
+    } else if (DATA_FROM_DEVICE(cmd->sc_data_direction) && scsi_bufflen(cmd)) {
+        size_t to_read;
+
+        if (res->data_len > scsi_bufflen(cmd)) {
+            scmd_dbg(cmd, "truncate data (%d < %d)\n", scsi_bufflen(cmd), res->data_len);
+            res->data_len = scsi_bufflen(cmd);
+        }
+
+        to_read = res->data_len;
+
+        if (scsi_sg_count(cmd)) {
+            unsigned char *kaddr, *uaddr;
+            struct scatterlist *sglist = scsi_sglist(cmd);
+            struct scatterlist *sg;
+            int i;
+
+            uaddr = (unsigned char *)buf;
+
+            for_each_sg(sglist, sg, scsi_sg_count(cmd), i) {
+                size_t len = (sg->length < to_read) ? sg->length : to_read;
+
+                if (len > vdev->kbuf_size) {
+                    scmd_dbg(cmd, "segment size (%zu) exceeds kbuf size (%zu)!", len, vdev->kbuf_size);
+                    len = vdev->kbuf_size;
+                }
+
+                if (copy_from_user(vdev->kbuf, uaddr, len)) {
+                    return -EFAULT;
+                }
+                uaddr += len;
+
+                kaddr = kmap_atomic(sg_page(sg));
+                memcpy(kaddr + sg->offset, vdev->kbuf, len);
+                kunmap_atomic(kaddr);
+
+                to_read -= len;
+                if (to_read == 0) {
+                    break;
+                }
+            }
+        } else {
+            if (copy_from_user(scsi_sglist(cmd), buf, res->data_len)) {
+                return -EFAULT;
+            }
+
+            to_read -= res->data_len;
+        }
+
+        scsi_set_resid(cmd, to_read);
+
+        ret += res->data_len - to_read;
+    }
+
+    return ret;
+}
+
+static inline struct vhba_command *next_command (struct vhba_device *vdev)
+{
+    struct vhba_command *vcmd;
+
+    list_for_each_entry(vcmd, &vdev->cmd_list, entry) {
+        if (vcmd->status == VHBA_REQ_PENDING) {
+            break;
+        }
+    }
+
+    if (&vcmd->entry == &vdev->cmd_list) {
+        vcmd = NULL;
+    }
+
+    return vcmd;
+}
+
+static inline struct vhba_command *match_command (struct vhba_device *vdev, u32 tag)
+{
+    struct vhba_command *vcmd;
+
+    list_for_each_entry(vcmd, &vdev->cmd_list, entry) {
+        if (vcmd->serial_number == tag) {
+            break;
+        }
+    }
+
+    if (&vcmd->entry == &vdev->cmd_list) {
+        vcmd = NULL;
+    }
+
+    return vcmd;
+}
+
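+/*
+ * Sleep until a pending command shows up or a signal arrives.  Called with
+ * vdev->cmd_lock held; the lock is dropped around schedule() and re-taken,
+ * and the function returns with it held again.
+ */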
+static struct vhba_command *wait_command (struct vhba_device *vdev, unsigned long flags)
+{
+    struct vhba_command *vcmd;
+    DEFINE_WAIT(wait);
+
+    while (!(vcmd = next_command(vdev))) {
+        if (signal_pending(current)) {
+            break;
+        }
+
+        prepare_to_wait(&vdev->cmd_wq, &wait, TASK_INTERRUPTIBLE);
+
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+        schedule();
+
+        spin_lock_irqsave(&vdev->cmd_lock, flags);
+    }
+
+    finish_wait(&vdev->cmd_wq, &wait);
+    if (vcmd) {
+        vcmd->status = VHBA_REQ_READING;
+    }
+
+    return vcmd;
+}
+
+static ssize_t vhba_ctl_read (struct file *file, char __user *buf, size_t buf_len, loff_t *offset)
+{
+    struct vhba_device *vdev;
+    struct vhba_command *vcmd;
+    ssize_t ret;
+    unsigned long flags;
+
+    vdev = file->private_data;
+
+    /* Get next command */
+    if (file->f_flags & O_NONBLOCK) {
+        /* Non-blocking variant */
+        spin_lock_irqsave(&vdev->cmd_lock, flags);
+        vcmd = next_command(vdev);
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+        if (!vcmd) {
+            return -EWOULDBLOCK;
+        }
+    } else {
+        /* Blocking variant */
+        spin_lock_irqsave(&vdev->cmd_lock, flags);
+        vcmd = wait_command(vdev, flags);
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+        if (!vcmd) {
+            return -ERESTARTSYS;
+        }
+    }
+
+    ret = do_request(vdev, vcmd->serial_number, vcmd->cmd, buf, buf_len);
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    if (ret >= 0) {
+        vcmd->status = VHBA_REQ_SENT;
+        *offset += ret;
+    } else {
+        vcmd->status = VHBA_REQ_PENDING;
+    }
+
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    return ret;
+}
+
+static ssize_t vhba_ctl_write (struct file *file, const char __user *buf, size_t buf_len, loff_t *offset)
+{
+    struct vhba_device *vdev;
+    struct vhba_command *vcmd;
+    struct vhba_response res;
+    ssize_t ret;
+    unsigned long flags;
+
+    if (buf_len < sizeof(res)) {
+        return -EIO;
+    }
+
+    if (copy_from_user(&res, buf, sizeof(res))) {
+        return -EFAULT;
+    }
+
+    vdev = file->private_data;
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    vcmd = match_command(vdev, res.tag);
+    if (!vcmd || vcmd->status != VHBA_REQ_SENT) {
+        spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+        DPRINTK("not expecting response\n");
+        return -EIO;
+    }
+    vcmd->status = VHBA_REQ_WRITING;
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    ret = do_response(vdev, vcmd->serial_number, vcmd->cmd, buf + sizeof(res), buf_len - sizeof(res), &res);
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    if (ret >= 0) {
+        vcmd->cmd->scsi_done(vcmd->cmd);
+        ret += sizeof(res);
+
+        /* don't compete with vhba_device_dequeue */
+        if (!list_empty(&vcmd->entry)) {
+            list_del_init(&vcmd->entry);
+            vhba_free_command(vcmd);
+        }
+    } else {
+        vcmd->status = VHBA_REQ_SENT;
+    }
+
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    return ret;
+}
+
+static long vhba_ctl_ioctl (struct file *file, unsigned int cmd, unsigned long arg)
+{
+    struct vhba_device *vdev = file->private_data;
+    struct vhba_host *vhost;
+    struct scsi_device *sdev;
+
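+    /*
+     * Ad-hoc ioctl numbers used by the userspace emulator: 0xBEEF001
+     * reports the device's SCSI address as four ints (host, channel, id,
+     * lun); 0xBEEF002 reports the internal device number.
+     */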
+    switch (cmd) {
+        case 0xBEEF001: {
+            vhost = platform_get_drvdata(&vhba_platform_device);
+            sdev = scsi_device_lookup(vhost->shost, vdev->bus, vdev->id, 0);
+
+            if (sdev) {
+                int id[4] = {
+                    sdev->host->host_no,
+                    sdev->channel,
+                    sdev->id,
+                    sdev->lun
+                };
+
+                scsi_device_put(sdev);
+
+                if (copy_to_user((void *)arg, id, sizeof(id))) {
+                    return -EFAULT;
+                }
+
+                return 0;
+            } else {
+                return -ENODEV;
+            }
+        }
+        case 0xBEEF002: {
+            int device_number = vdev->num;
+            if (copy_to_user((void *)arg, &device_number, sizeof(device_number))) {
+                return -EFAULT;
+            }
+            return 0;
+        }
+    }
+
+    return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static long vhba_ctl_compat_ioctl (struct file *file, unsigned int cmd, unsigned long arg)
+{
+    unsigned long compat_arg = (unsigned long)compat_ptr(arg);
+    return vhba_ctl_ioctl(file, cmd, compat_arg);
+}
+#endif
+
+static unsigned int vhba_ctl_poll (struct file *file, poll_table *wait)
+{
+    struct vhba_device *vdev = file->private_data;
+    unsigned int mask = 0;
+    unsigned long flags;
+
+    poll_wait(file, &vdev->cmd_wq, wait);
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    if (next_command(vdev)) {
+        mask |= POLLIN | POLLRDNORM;
+    }
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    return mask;
+}
+
+static int vhba_ctl_open (struct inode *inode, struct file *file)
+{
+    struct vhba_device *vdev;
+    int retval;
+
+    DPRINTK("open\n");
+
+    /* check if vhba is probed */
+    if (!platform_get_drvdata(&vhba_platform_device)) {
+        return -ENODEV;
+    }
+
+    vdev = vhba_device_alloc();
+    if (!vdev) {
+        return -ENOMEM;
+    }
+
+    vdev->kbuf_size = VHBA_KBUF_SIZE;
+    vdev->kbuf = kmalloc(vdev->kbuf_size, GFP_KERNEL);
+    if (!vdev->kbuf) {
+        vhba_device_put(vdev);
+        return -ENOMEM;
+    }
+
+    if (!(retval = vhba_add_device(vdev))) {
+        file->private_data = vdev;
+    }
+
+    vhba_device_put(vdev);
+
+    return retval;
+}
+
+static int vhba_ctl_release (struct inode *inode, struct file *file)
+{
+    struct vhba_device *vdev;
+    struct vhba_command *vcmd;
+    unsigned long flags;
+
+    DPRINTK("release\n");
+
+    vdev = file->private_data;
+
+    vhba_device_get(vdev);
+    vhba_remove_device(vdev);
+
+    spin_lock_irqsave(&vdev->cmd_lock, flags);
+    list_for_each_entry(vcmd, &vdev->cmd_list, entry) {
+        WARN_ON(vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING);
+
+        scmd_dbg(vcmd->cmd, "device released with command %lu (%p)\n", vcmd->serial_number, vcmd->cmd);
+        vcmd->cmd->result = DID_NO_CONNECT << 16;
+        vcmd->cmd->scsi_done(vcmd->cmd);
+
+        vhba_free_command(vcmd);
+    }
+    INIT_LIST_HEAD(&vdev->cmd_list);
+    spin_unlock_irqrestore(&vdev->cmd_lock, flags);
+
+    kfree(vdev->kbuf);
+    vdev->kbuf = NULL;
+
+    vhba_device_put(vdev);
+
+    return 0;
+}
+
+static struct file_operations vhba_ctl_fops = {
+    .owner = THIS_MODULE,
+    .open = vhba_ctl_open,
+    .release = vhba_ctl_release,
+    .read = vhba_ctl_read,
+    .write = vhba_ctl_write,
+    .poll = vhba_ctl_poll,
+    .unlocked_ioctl = vhba_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+    .compat_ioctl = vhba_ctl_compat_ioctl,
+#endif
+};
+
+static struct miscdevice vhba_miscdev = {
+    .minor = MISC_DYNAMIC_MINOR,
+    .name = "vhba_ctl",
+    .fops = &vhba_ctl_fops,
+};
+
+static int vhba_probe (struct platform_device *pdev)
+{
+    struct Scsi_Host *shost;
+    struct vhba_host *vhost;
+    int i;
+
+    shost = scsi_host_alloc(&vhba_template, sizeof(struct vhba_host));
+    if (!shost) {
+        return -ENOMEM;
+    }
+
+    shost->max_channel = VHBA_MAX_BUS-1;
+    shost->max_id = VHBA_MAX_ID;
+    /* we don't support lun > 0 */
+    shost->max_lun = 1;
+    shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+    vhost = (struct vhba_host *)shost->hostdata;
+    memset(vhost, 0, sizeof(*vhost));
+
+    vhost->shost = shost;
+    vhost->num_devices = 0;
+    spin_lock_init(&vhost->dev_lock);
+    spin_lock_init(&vhost->cmd_lock);
+    INIT_WORK(&vhost->scan_devices, vhba_scan_devices);
+    vhost->cmd_next = 0;
+    for (i = 0; i < vhost->shost->can_queue; i++) {
+        vhost->commands[i].status = VHBA_REQ_FREE;
+    }
+
+    platform_set_drvdata(pdev, vhost);
+
+    if (scsi_add_host(shost, &pdev->dev)) {
+        scsi_host_put(shost);
+        return -ENOMEM;
+    }
+
+    return 0;
+}
+
+static int vhba_remove (struct platform_device *pdev)
+{
+    struct vhba_host *vhost;
+    struct Scsi_Host *shost;
+
+    vhost = platform_get_drvdata(pdev);
+    shost = vhost->shost;
+
+    scsi_remove_host(shost);
+    scsi_host_put(shost);
+
+    return 0;
+}
+
+static void vhba_release (struct device *dev)
+{
+}
+
+static struct platform_device vhba_platform_device = {
+    .name = "vhba",
+    .id = -1,
+    .dev = {
+        .release = vhba_release,
+    },
+};
+
+static struct platform_driver vhba_platform_driver = {
+    .driver = {
+        .owner = THIS_MODULE,
+        .name = "vhba",
+    },
+    .probe = vhba_probe,
+    .remove = vhba_remove,
+};
+
+static int __init vhba_init (void)
+{
+    int ret;
+
+    ret = platform_device_register(&vhba_platform_device);
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = platform_driver_register(&vhba_platform_driver);
+    if (ret < 0) {
+        platform_device_unregister(&vhba_platform_device);
+        return ret;
+    }
+
+    ret = misc_register(&vhba_miscdev);
+    if (ret < 0) {
+        platform_driver_unregister(&vhba_platform_driver);
+        platform_device_unregister(&vhba_platform_device);
+        return ret;
+    }
+
+    return 0;
+}
+
+static void __exit vhba_exit(void)
+{
+    misc_deregister(&vhba_miscdev);
+    platform_driver_unregister(&vhba_platform_driver);
+    platform_device_unregister(&vhba_platform_device);
+}
+
+module_init(vhba_init);
+module_exit(vhba_exit);
+
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 012a8912306..133935b5ad0 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -731,9 +731,9 @@ static int spidev_probe(struct spi_device *spi)
 	 * compatible string, it is a Linux implementation thing
 	 * rather than a description of the hardware.
 	 */
-	WARN(spi->dev.of_node &&
-	     of_device_is_compatible(spi->dev.of_node, "spidev"),
-	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
+//	WARN(spi->dev.of_node &&
+//	     of_device_is_compatible(spi->dev.of_node, "spidev"),
+//	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
 
 	spidev_probe_acpi(spi);
 
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index d99f4065b96..15f870d4e95 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -4748,7 +4748,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
 		if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
 			break;
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (schedule_timeout(1))
+		if (schedule_min_hrtimeout())
 			return -EIO;
 	}
 	if (i == timeout) {
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index be0053c795b..cc2e18c733e 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -490,7 +490,7 @@ static int rtsx_polling_thread(void *__dev)
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
+		schedule_msec_hrtimeout((POLLING_INTERVAL));
 
 		/* lock the device pointers */
 		mutex_lock(&dev->dev_mutex);
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index c94328a5bd4..6e7d4671aa6 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
 		full_time_val = full_time->u.n.value;
 		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 		if (synth_full()) {
-			schedule_timeout(msecs_to_jiffies(full_time_val));
+			schedule_msec_hrtimeout((full_time_val));
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
@@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
 			jiffy_delta_val = jiffy_delta->u.n.value;
 			delay_time_val = delay_time->u.n.value;
 			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout(delay_time_val);
 			jiff_max = jiffies + jiffy_delta_val;
 		}
 	}
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 0877b4044c2..627102d048c 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
 		if (!synth->io_ops->synth_out(synth, ch)) {
 			synth->io_ops->tiocmset(0, UART_MCR_RTS);
 			synth->io_ops->tiocmset(UART_MCR_RTS, 0);
-			schedule_timeout(msecs_to_jiffies(full_time_val));
+			schedule_msec_hrtimeout(full_time_val);
 			continue;
 		}
 		if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index ddbb7e97d11..f9502addc76 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -176,7 +176,7 @@ static void do_catch_up(struct spk_synth *synth)
 		if (ch == '\n')
 			ch = 0x0D;
 		if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout(delay_time_val);
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 798c42dfa16..d85b41db67a 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -394,7 +394,7 @@ static void do_catch_up(struct spk_synth *synth)
 		if (ch == '\n')
 			ch = 0x0D;
 		if (dt_sendchar(ch)) {
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout((delay_time_val));
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index dccb4ea29d3..8ecead307d0 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -244,7 +244,7 @@ static void do_catch_up(struct spk_synth *synth)
 		if (ch == '\n')
 			ch = 0x0D;
 		if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout(delay_time_val);
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index dbebed0eeee..6d83c13ca4a 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
 		delay_time_val = delay_time->u.n.value;
 		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 		if (synth_full()) {
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout((delay_time_val));
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
@@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
 			delay_time_val = delay_time->u.n.value;
 			jiffy_delta_val = jiffy_delta->u.n.value;
 			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout((delay_time_val));
 			jiff_max = jiffies + jiffy_delta_val;
 		}
 	}
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 414827e888f..cb31c9176da 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
 		full_time_val = full_time->u.n.value;
 		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 		if (synth_full()) {
-			schedule_timeout(msecs_to_jiffies(full_time_val));
+			schedule_msec_hrtimeout((full_time_val));
 			continue;
 		}
 		set_current_state(TASK_RUNNING);
@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
 			jiffy_delta_val = jiffy_delta->u.n.value;
 			delay_time_val = delay_time->u.n.value;
 			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-			schedule_timeout(msecs_to_jiffies(delay_time_val));
+			schedule_msec_hrtimeout(delay_time_val);
 			jiff_max = jiffies + jiffy_delta_val;
 		}
 	}
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 3568bfb8991..a5540729a9e 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -98,7 +98,7 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
 		else
 			ret = synth->io_ops->synth_out(synth, ch);
 		if (!ret) {
-			schedule_timeout(msecs_to_jiffies(full_time_val));
+			schedule_msec_hrtimeout(full_time_val);
 			continue;
 		}
 		if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
@@ -108,11 +108,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
 			full_time_val = full_time->u.n.value;
 			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 			if (synth->io_ops->synth_out(synth, synth->procspeech))
-				schedule_timeout(
-					msecs_to_jiffies(delay_time_val));
+				schedule_msec_hrtimeout(delay_time_val);
 			else
-				schedule_timeout(
-					msecs_to_jiffies(full_time_val));
+				schedule_msec_hrtimeout(full_time_val);
 			jiff_max = jiffies + jiffy_delta_val;
 		}
 		set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 0433536930a..d8726f28843 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
 		}
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
-		wait += schedule_timeout(msecs_to_jiffies(10));
+		wait += schedule_msec_hrtimeout((10));
 		spin_lock_irqsave(&devdata->priv_lock, flags);
 	}
 
@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
 		while (1) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irqrestore(&devdata->priv_lock, flags);
-			schedule_timeout(msecs_to_jiffies(10));
+			schedule_msec_hrtimeout((10));
 			spin_lock_irqsave(&devdata->priv_lock, flags);
 			if (atomic_read(&devdata->usage))
 				break;
@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
 		}
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
-		wait += schedule_timeout(msecs_to_jiffies(10));
+		wait += schedule_msec_hrtimeout((10));
 		spin_lock_irqsave(&devdata->priv_lock, flags);
 	}
 
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 16cfb887c5a..25e20a837b8 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1605,10 +1605,10 @@ static int __init omap8250_console_fixup(void)
 	}
 
 	add_preferred_console("ttyS", idx, options);
-	pr_err("WARNING: Your 'console=ttyO%d' has been replaced by 'ttyS%d'\n",
+	pr_info("WARNING: Your 'console=ttyO%d' has been replaced by 'ttyS%d'\n",
 	       idx, idx);
-	pr_err("This ensures that you still see kernel messages. Please\n");
-	pr_err("update your kernel commandline.\n");
+	pr_info("This ensures that you still see kernel messages. Please\n");
+	pr_info("update your kernel commandline.\n");
 	return 0;
 }
 console_initcall(omap8250_console_fixup);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 275568abc67..266957dd028 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -46,6 +46,7 @@ config USB
 	depends on USB_ARCH_HAS_HCD
 	select GENERIC_ALLOCATOR
 	select USB_COMMON
+	select POWER_SEQUENCE
 	select NLS  # for UTF-8 strings
 	---help---
 	  Universal Serial Bus (USB) is a specification for a serial bus
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fc748c73183..e0c5c8c6ac2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -30,6 +30,7 @@
 #include <linux/random.h>
 #include <linux/pm_qos.h>
 #include <linux/kobject.h>
+#include <linux/power/pwrseq.h>
 
 #include <linux/uaccess.h>
 #include <asm/byteorder.h>
@@ -1714,6 +1715,7 @@ static void hub_disconnect(struct usb_interface *intf)
 	hub->error = 0;
 	hub_quiesce(hub, HUB_DISCONNECT);
 
+	of_pwrseq_off_list(&hub->pwrseq_on_list);
 	mutex_lock(&usb_port_peer_mutex);
 
 	/* Avoid races with recursively_mark_NOTATTACHED() */
@@ -1764,11 +1766,41 @@ static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
         return true;
 }
 
+#ifdef CONFIG_OF
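+/*
+ * Power on every power sequence described under the hub's (or, for a root
+ * hub, the controller's) OF node and remember each one on
+ * hub->pwrseq_on_list so it can be suspended, resumed and switched off
+ * together with the hub.
+ */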
+static int hub_of_pwrseq_on(struct usb_hub *hub)
+{
+	struct device *parent;
+	struct usb_device *hdev = hub->hdev;
+	struct device_node *np;
+	int ret;
+
+	if (hdev->parent)
+		parent = &hdev->dev;
+	else
+		parent = bus_to_hcd(hdev->bus)->self.sysdev;
+
+	for_each_child_of_node(parent->of_node, np) {
+		ret = of_pwrseq_on_list(np, &hub->pwrseq_on_list);
+		/* ignore -ENOENT: no power sequence is available for this node */
+		if (ret && ret != -ENOENT)
+			return ret;
+	}
+
+	return 0;
+}
+#else
+static int hub_of_pwrseq_on(struct usb_hub *hub)
+{
+	return 0;
+}
+#endif
+
 static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	struct usb_host_interface *desc;
 	struct usb_device *hdev;
 	struct usb_hub *hub;
+	int ret = -ENODEV;
 
 	desc = intf->cur_altsetting;
 	hdev = interface_to_usbdev(intf);
@@ -1859,6 +1891,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	INIT_DELAYED_WORK(&hub->leds, led_work);
 	INIT_DELAYED_WORK(&hub->init_work, NULL);
 	INIT_WORK(&hub->events, hub_event);
+	INIT_LIST_HEAD(&hub->pwrseq_on_list);
 	spin_lock_init(&hub->irq_urb_lock);
 	timer_setup(&hub->irq_urb_retry, hub_retry_irq_urb, 0);
 	usb_get_intf(intf);
@@ -1879,11 +1912,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 		usb_autopm_get_interface_no_resume(intf);
 	}
 
-	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
-		return 0;
+	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
+		ret = hub_of_pwrseq_on(hub);
+		if (!ret)
+			return 0;
+	}
 
 	hub_disconnect(intf);
-	return -ENODEV;
+	return ret;
 }
 
 static int
@@ -3745,7 +3781,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
 
 	/* stop hub_wq and related activity */
 	hub_quiesce(hub, HUB_SUSPEND);
-	return 0;
+	return pwrseq_suspend_list(&hub->pwrseq_on_list);
 }
 
 /* Report wakeup requests from the ports of a resuming root hub */
@@ -3785,8 +3821,13 @@ static void report_wakeup_requests(struct usb_hub *hub)
 static int hub_resume(struct usb_interface *intf)
 {
 	struct usb_hub *hub = usb_get_intfdata(intf);
+	int ret;
 
 	dev_dbg(&intf->dev, "%s\n", __func__);
+	ret = pwrseq_resume_list(&hub->pwrseq_on_list);
+	if (ret)
+		return ret;
+
 	hub_activate(hub, HUB_RESUME);
 
 	/*
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index a97dd1ba964..b34ea395ef8 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -73,6 +73,7 @@ struct usb_hub {
 	spinlock_t		irq_urb_lock;
 	struct timer_list	irq_urb_retry;
 	struct usb_port		**ports;
+	struct list_head	pwrseq_on_list; /* powered pwrseq node list */
 };
 
 /**
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 8167d379e11..12726f0319b 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1379,8 +1379,13 @@ void gserial_disconnect(struct gserial *gser)
 	gser->ioport = NULL;
 	if (port->port.count > 0) {
 		wake_up_interruptible(&port->drain_wait);
+#if 0
 		if (port->port.tty)
 			tty_hangup(port->port.tty);
+#else
+		if (port->port.tty)
+			stop_tty(port->port.tty);
+#endif
 	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
 
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index cfe63932f82..71c00ef772a 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -913,7 +913,7 @@ static void hwa742_resume(void)
 		if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
 			break;
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(5));
+		schedule_msec_hrtimeout((5));
 	}
 	hwa742_set_update_mode(hwa742.update_mode_before_suspend);
 }
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 00b96a78676..37fc1c2d4cb 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
 		mutex_unlock(&fbi->ctrlr_lock);
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(30));
+		schedule_msec_hrtimeout((30));
 	}
 
 	pr_debug("%s(): task ending\n", __func__);
diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
index 3c14e43b82f..36bb8b1cb30 100644
--- a/drivers/video/logo/logo_linux_clut224.ppm
+++ b/drivers/video/logo/logo_linux_clut224.ppm
@@ -1,1604 +1,14404 @@
 P3
-# Standard 224-color Linux logo
-80 80
-255
  [ rows of RGB pixel values for the old 80x80, 224-color standard Linux logo elided here; per the hunk header the old image is 1604 lines and its replacement 14404 lines, and the remaining image data of this hunk continues below ]
-216 158  10 236 178  12 239 182  13 236 178  12
-230 174  11 226 170  11 226 170  11 230 174  11
-236 178  12 242 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 186  14 239 182  13
-206 162  42 106 106 106  66  66  66  34  34  34
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 213 154  11
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 241 196  14
-190 146  13  18  14   6   2   2   6   2   2   6
- 46  46  46 246 246 246 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 221 221 221  86  86  86 156 107  11
-216 158  10 236 178  12 242 186  14 246 186  14
-242 186  14 239 182  13 239 182  13 242 186  14
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 225 175  15 142 122  72  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 210 150  10
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-232 195  16 121  92   8  34  34  34 106 106 106
-221 221 221 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-242 242 242  82  82  82  18  14   6 163 110   8
-216 158  10 236 178  12 242 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 242 186  14 163 133  67
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78 163 133  67 210 150  10
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-241 196  14 215 174  15 190 178 144 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 218 218 218
- 58  58  58   2   2   6  22  18   6 167 114   7
-216 158  10 236 178  12 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 186  14 242 186  14 190 150  46
- 54  54  54  22  22  22   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 38  38  38  86  86  86 180 133  36 213 154  11
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 232 195  16 190 146  13 214 214 214
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 170 170 170  26  26  26
-  2   2   6   2   2   6  37  26   9 163 110   8
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 224 166  10 142 122  72
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 109 106  95 192 133   9 224 166  10
-242 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 226 184  13 210 162  10 142 110  46
-226 226 226 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-198 198 198  66  66  66   2   2   6   2   2   6
-  2   2   6   2   2   6  50  34   6 156 107  11
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 242 186  14
-234 174  13 213 154  11 154 122  46  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 58  58  58 154 121  60 206 145  10 234 174  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 210 162  10 163 110   8
- 61  42   6 138 138 138 218 218 218 250 250 250
-253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 210 210 210 144 144 144  66  66  66
-  6   6   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  61  42   6 163 110   8
-216 158  10 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 239 182  13 230 174  11 216 158  10
-190 142  34 124 112  88  70  70  70  38  38  38
- 18  18  18   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 62  62  62 168 124  44 206 145  10 224 166  10
-236 178  12 239 182  13 242 186  14 242 186  14
-246 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 236 178  12 216 158  10 175 118   6
- 80  54   7   2   2   6   6   6   6  30  30  30
- 54  54  54  62  62  62  50  50  50  38  38  38
- 14  14  14   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 167 114   7
-213 154  11 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 242 186  14 239 182  13 239 182  13
-230 174  11 210 150  10 174 135  50 124 112  88
- 82  82  82  54  54  54  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 158 118  36 192 133   9 200 144  11
-216 158  10 219 162  10 224 166  10 226 170  11
-230 174  11 236 178  12 239 182  13 239 182  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 230 174  11 210 150  10 163 110   8
-104  69   6  10  10  10   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  91  60   6 167 114   7
-206 145  10 230 174  11 242 186  14 246 190  14
-246 190  14 246 190  14 246 186  14 242 186  14
-239 182  13 230 174  11 224 166  10 213 154  11
-180 133  36 124 112  88  86  86  86  58  58  58
- 38  38  38  22  22  22  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 34  34  34  70  70  70 138 110  50 158 118  36
-167 114   7 180 123   7 192 133   9 197 138  11
-200 144  11 206 145  10 213 154  11 219 162  10
-224 166  10 230 174  11 239 182  13 242 186  14
-246 186  14 246 186  14 246 186  14 246 186  14
-239 182  13 216 158  10 185 133  11 152  99   6
-104  69   6  18  14   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 152  99   6
-192 133   9 219 162  10 236 178  12 239 182  13
-246 186  14 242 186  14 239 182  13 236 178  12
-224 166  10 206 145  10 192 133   9 154 121  60
- 94  94  94  62  62  62  42  42  42  22  22  22
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 18  18  18  34  34  34  58  58  58  78  78  78
-101  98  89 124 112  88 142 110  46 156 107  11
-163 110   8 167 114   7 175 118   6 180 123   7
-185 133  11 197 138  11 210 150  10 219 162  10
-226 170  11 236 178  12 236 178  12 234 174  13
-219 162  10 197 138  11 163 110   8 130  83   6
- 91  60   6  10  10  10   2   2   6   2   2   6
- 18  18  18  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  26  26  26   2   2   6
-  2   2   6   6   6   6  70  47   6 137  92   6
-175 118   6 200 144  11 219 162  10 230 174  11
-234 174  13 230 174  11 219 162  10 210 150  10
-192 133   9 163 110   8 124 112  88  82  82  82
- 50  50  50  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  22  22  22  34  34  34
- 42  42  42  58  58  58  74  74  74  86  86  86
-101  98  89 122 102  70 130  98  46 121  87  25
-137  92   6 152  99   6 163 110   8 180 123   7
-185 133  11 197 138  11 206 145  10 200 144  11
-180 123   7 156 107  11 130  83   6 104  69   6
- 50  34   6  54  54  54 110 110 110 101  98  89
- 86  86  86  82  82  82  78  78  78  78  78  78
- 78  78  78  78  78  78  78  78  78  78  78  78
- 78  78  78  82  82  82  86  86  86  94  94  94
-106 106 106 101 101 101  86  66  34 124  80   6
-156 107  11 180 123   7 192 133   9 200 144  11
-206 145  10 200 144  11 192 133   9 175 118   6
-139 102  15 109 106  95  70  70  70  42  42  42
- 22  22  22  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  10  10  10
- 14  14  14  22  22  22  30  30  30  38  38  38
- 50  50  50  62  62  62  74  74  74  90  90  90
-101  98  89 112 100  78 121  87  25 124  80   6
-137  92   6 152  99   6 152  99   6 152  99   6
-138  86   6 124  80   6  98  70   6  86  66  30
-101  98  89  82  82  82  58  58  58  46  46  46
- 38  38  38  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  38  38  38  42  42  42
- 54  54  54  82  82  82  94  86  76  91  60   6
-134  86   6 156 107  11 167 114   7 175 118   6
-175 118   6 167 114   7 152  99   6 121  87  25
-101  98  89  62  62  62  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6   6   6   6  10  10  10
- 18  18  18  22  22  22  30  30  30  42  42  42
- 50  50  50  66  66  66  86  86  86 101  98  89
-106  86  58  98  70   6 104  69   6 104  69   6
-104  69   6  91  60   6  82  62  34  90  90  90
- 62  62  62  38  38  38  22  22  22  14  14  14
- 10  10  10  10  10  10  10  10  10  10  10  10
- 10  10  10  10  10  10   6   6   6  10  10  10
- 10  10  10  10  10  10  10  10  10  14  14  14
- 22  22  22  42  42  42  70  70  70  89  81  66
- 80  54   7 104  69   6 124  80   6 137  92   6
-134  86   6 116  81   8 100  82  52  86  86  86
- 58  58  58  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  10  10  10  14  14  14
- 18  18  18  26  26  26  38  38  38  54  54  54
- 70  70  70  86  86  86  94  86  76  89  81  66
- 89  81  66  86  86  86  74  74  74  50  50  50
- 30  30  30  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  34  34  34  58  58  58
- 82  82  82  89  81  66  89  81  66  89  81  66
- 94  86  66  94  86  76  74  74  74  50  50  50
- 26  26  26  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6   6   6   6  14  14  14  18  18  18
- 30  30  30  38  38  38  46  46  46  54  54  54
- 50  50  50  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  26  26  26
- 38  38  38  50  50  50  58  58  58  58  58  58
- 54  54  54  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
-  6   6   6  10  10  10  14  14  14  18  18  18
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 14  14  14  18  18  18  22  22  22  22  22  22
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
+# CREATOR: GIMP PNM Filter Version 1.1
+60 80
+255
+[... pixel sample values of the new 60 x 80 GIMP-exported PNM image, one ASCII value per line ...]
+182
+202
+214
+182
+202
+214
+169
+189
+201
+76
+83
+91
+21
+25
+26
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+56
+74
+84
+139
+215
+255
+139
+215
+255
+134
+211
+252
+107
+163
+193
+38
+68
+82
+56
+74
+84
+92
+124
+151
+116
+162
+194
+139
+215
+255
+139
+215
+255
+132
+208
+249
+132
+208
+249
+132
+208
+249
+140
+145
+147
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+166
+171
+173
+133
+151
+162
+182
+202
+214
+163
+195
+211
+97
+115
+126
+49
+53
+55
+22
+13
+19
+1
+7
+11
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+41
+58
+67
+134
+211
+252
+134
+211
+252
+132
+208
+249
+119
+188
+224
+53
+83
+97
+132
+203
+238
+139
+215
+255
+132
+203
+238
+134
+211
+252
+139
+215
+255
+132
+203
+238
+139
+215
+255
+116
+162
+194
+186
+191
+194
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+49
+53
+55
+49
+53
+55
+13
+18
+20
+1
+7
+11
+1
+7
+11
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+31
+45
+53
+134
+211
+252
+139
+215
+255
+132
+203
+238
+46
+66
+79
+119
+188
+224
+139
+215
+255
+134
+211
+252
+139
+215
+255
+139
+215
+255
+134
+211
+252
+139
+215
+255
+139
+215
+255
+106
+110
+136
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+94
+99
+103
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+16
+23
+29
+119
+188
+224
+139
+215
+255
+94
+147
+175
+72
+122
+146
+134
+211
+252
+132
+203
+238
+139
+215
+255
+132
+208
+249
+134
+211
+252
+134
+211
+252
+139
+215
+255
+119
+178
+208
+186
+191
+194
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+116
+123
+125
+1
+7
+11
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+90
+138
+161
+134
+211
+252
+38
+68
+82
+125
+202
+243
+139
+215
+255
+139
+215
+255
+134
+211
+252
+139
+215
+255
+132
+203
+238
+134
+211
+252
+134
+211
+252
+133
+151
+162
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+116
+123
+125
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+35
+52
+61
+113
+182
+217
+46
+77
+90
+134
+211
+252
+132
+208
+249
+134
+211
+252
+132
+203
+238
+139
+215
+255
+139
+215
+255
+134
+211
+252
+119
+188
+224
+175
+180
+182
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+140
+145
+147
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+14
+27
+32
+71
+106
+125
+94
+147
+175
+134
+211
+252
+139
+215
+255
+139
+215
+255
+139
+215
+255
+139
+215
+255
+134
+211
+252
+134
+211
+252
+120
+152
+168
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+140
+145
+147
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+1
+7
+11
+10
+7
+13
+119
+188
+224
+134
+211
+252
+132
+208
+249
+134
+211
+252
+132
+208
+249
+132
+203
+238
+139
+215
+255
+134
+211
+252
+145
+153
+161
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+124
+133
+134
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+7
+10
+5
+132
+203
+238
+139
+215
+255
+132
+203
+238
+134
+211
+252
+139
+215
+255
+139
+215
+255
+139
+215
+255
+119
+188
+224
+186
+191
+194
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+94
+99
+103
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+10
+7
+13
+19
+32
+37
+134
+211
+252
+139
+215
+255
+139
+215
+255
+134
+211
+252
+134
+211
+252
+134
+211
+252
+139
+215
+255
+116
+162
+194
+230
+232
+229
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+76
+83
+91
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+19
+32
+37
+132
+203
+238
+134
+211
+252
+134
+211
+252
+134
+211
+252
+134
+211
+252
+139
+215
+255
+139
+215
+255
+117
+117
+150
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+230
+232
+229
+32
+35
+37
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+16
+23
+29
+132
+208
+249
+132
+203
+238
+139
+215
+255
+134
+211
+252
+132
+208
+249
+139
+215
+255
+139
+215
+255
+130
+162
+179
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+140
+145
+147
+7
+10
+5
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+10
+7
+13
+111
+150
+170
+139
+215
+255
+132
+203
+238
+139
+215
+255
+132
+203
+238
+134
+211
+252
+139
+215
+255
+133
+151
+162
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+68
+76
+83
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+46
+66
+79
+134
+211
+252
+132
+203
+238
+139
+215
+255
+139
+215
+255
+134
+211
+252
+139
+215
+255
+111
+150
+170
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+124
+133
+134
+10
+7
+13
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+13
+18
+20
+116
+162
+194
+139
+215
+255
+139
+215
+255
+134
+211
+252
+139
+215
+255
+134
+211
+252
+116
+162
+194
+230
+232
+229
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+186
+191
+194
+49
+53
+55
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+1
+7
+11
+19
+32
+37
+119
+188
+224
+134
+211
+252
+132
+208
+249
+139
+215
+255
+139
+215
+255
+134
+211
+252
+186
+191
+194
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+200
+205
+208
+200
+205
+208
+239
+241
+238
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+186
+191
+194
+35
+20
+27
+51
+49
+21
+57
+60
+26
+78
+78
+32
+106
+108
+49
+115
+116
+48
+115
+116
+48
+95
+92
+39
+57
+60
+26
+29
+31
+13
+7
+10
+5
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+7
+10
+5
+24
+25
+12
+24
+25
+12
+29
+31
+13
+29
+31
+13
+17
+19
+16
+101
+131
+146
+139
+215
+255
+134
+211
+252
+134
+211
+252
+139
+215
+255
+145
+153
+161
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+200
+205
+208
+94
+99
+103
+45
+26
+37
+113
+73
+88
+116
+123
+125
+140
+145
+147
+183
+186
+182
+230
+232
+229
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+239
+241
+238
+131
+137
+133
+106
+108
+49
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+126
+125
+53
+38
+38
+15
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+7
+10
+5
+38
+38
+15
+71
+72
+31
+115
+116
+48
+160
+161
+68
+198
+196
+82
+207
+208
+85
+213
+214
+91
+213
+214
+91
+213
+214
+91
+160
+161
+68
+91
+93
+68
+101
+131
+146
+125
+202
+243
+134
+211
+252
+119
+188
+224
+166
+171
+173
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+249
+251
+248
+200
+205
+208
+106
+110
+136
+98
+76
+74
+28
+14
+21
+8
+0
+0
+0
+1
+0
+8
+0
+0
+8
+0
+0
+28
+14
+21
+28
+14
+21
+46
+45
+20
+98
+76
+74
+113
+114
+94
+140
+145
+147
+140
+145
+147
+155
+160
+162
+140
+145
+147
+125
+128
+113
+134
+137
+80
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+200
+202
+87
+160
+161
+68
+181
+180
+75
+218
+217
+87
+151
+153
+62
+7
+10
+5
+0
+1
+0
+0
+1
+0
+38
+38
+15
+137
+135
+53
+213
+214
+91
+218
+217
+87
+218
+217
+87
+181
+180
+75
+218
+217
+87
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+213
+214
+91
+218
+217
+87
+188
+186
+80
+102
+102
+43
+95
+103
+65
+109
+113
+104
+80
+98
+92
+113
+73
+88
+140
+145
+147
+116
+123
+125
+98
+76
+74
+95
+86
+75
+111
+110
+85
+51
+49
+21
+8
+0
+0
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+126
+125
+53
+218
+217
+87
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+218
+217
+87
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+160
+161
+68
+51
+49
+21
+8
+0
+0
+8
+0
+0
+0
+1
+0
+8
+0
+0
+102
+102
+43
+213
+214
+91
+213
+214
+91
+213
+214
+91
+115
+116
+48
+188
+186
+80
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+213
+214
+91
+218
+217
+87
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+188
+186
+80
+8
+0
+0
+7
+10
+5
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+137
+135
+53
+213
+214
+91
+213
+214
+91
+218
+217
+87
+198
+196
+82
+213
+214
+91
+198
+196
+82
+218
+217
+87
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+194
+191
+78
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+213
+214
+91
+218
+217
+87
+78
+78
+32
+7
+10
+5
+0
+1
+0
+7
+10
+5
+8
+0
+0
+18
+17
+7
+38
+38
+15
+36
+36
+29
+160
+161
+68
+213
+214
+91
+218
+217
+87
+218
+217
+87
+213
+214
+91
+213
+214
+91
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+213
+214
+91
+188
+186
+80
+102
+102
+43
+46
+45
+20
+29
+31
+13
+0
+1
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+28
+14
+21
+46
+45
+20
+51
+49
+21
+95
+92
+39
+95
+92
+39
+95
+92
+39
+126
+125
+53
+143
+144
+61
+194
+191
+78
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+213
+214
+91
+126
+125
+53
+106
+108
+49
+102
+102
+43
+115
+116
+48
+115
+116
+48
+126
+125
+53
+115
+116
+48
+71
+72
+31
+8
+0
+0
+8
+0
+0
+0
+1
+0
+8
+0
+0
+8
+0
+0
+8
+0
+0
+29
+31
+13
+218
+217
+87
+213
+214
+91
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+171
+171
+73
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+213
+214
+91
+218
+217
+87
+213
+214
+91
+126
+125
+53
+51
+49
+21
+31
+30
+24
+28
+14
+21
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+8
+0
+0
+8
+0
+0
+28
+14
+21
+28
+14
+21
+8
+0
+0
+28
+14
+21
+28
+14
+21
+28
+14
+21
+22
+13
+19
+46
+45
+20
+71
+72
+31
+126
+125
+53
+198
+196
+82
+218
+217
+87
+218
+217
+87
+218
+217
+87
+218
+217
+87
+78
+78
+32
+22
+13
+19
+8
+0
+0
+10
+7
+13
+8
+0
+0
+8
+0
+0
+7
+10
+5
+0
+1
+0
+0
+1
+0
+0
+1
+0
+1
+7
+11
+0
+1
+0
+0
+1
+0
+22
+13
+19
+95
+92
+39
+126
+125
+53
+95
+92
+39
+78
+78
+32
+71
+72
+31
+71
+72
+31
+198
+196
+82
+218
+217
+87
+218
+217
+87
+218
+217
+87
+213
+214
+91
+213
+214
+91
+126
+125
+53
+31
+30
+24
+22
+13
+19
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+8
+0
+0
+8
+0
+0
+22
+13
+19
+36
+36
+29
+71
+72
+31
+160
+161
+68
+213
+214
+91
+213
+214
+91
+200
+202
+87
+24
+25
+12
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+10
+7
+13
+8
+0
+0
+22
+13
+19
+8
+0
+0
+22
+13
+19
+10
+7
+13
+143
+144
+61
+218
+217
+87
+213
+214
+91
+218
+217
+87
+160
+161
+68
+36
+36
+29
+8
+0
+0
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+7
+10
+5
+8
+0
+0
+1
+7
+11
+8
+0
+0
+8
+0
+0
+10
+7
+13
+22
+13
+19
+22
+13
+19
+38
+38
+15
+61
+63
+26
+51
+49
+21
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+0
+1
+0
+1
+7
+11
+8
+0
+0
+8
+0
+0
+18
+17
+7
+35
+20
+27
+51
+49
+21
+71
+72
+31
+46
+45
+20
+18
+17
+7
+8
+0
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+8
+0
+0
+1
+7
+11
+8
+0
+0
+10
+7
+13
+10
+7
+13
+8
+0
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
+0
+1
+0
diff --git a/drivers/video/logo/logo_linux_mono.pbm b/drivers/video/logo/logo_linux_mono.pbm
index 2f14d9fedf6..664de625955 100644
--- a/drivers/video/logo/logo_linux_mono.pbm
+++ b/drivers/video/logo/logo_linux_mono.pbm
@@ -1,203 +1,72 @@
 P1
-# Standard black and white Linux logo
-80 80
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1 1 1 1 1 1 1 1 0 0 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1
-1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 0 0 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1
-1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-0 1 1 0 0 1 1 1 0 0 1 1 0 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0 1 1 1 0 1 1 1 1 0 0 1
-1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 1 1 1 0 1 1 1 0 1 1 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1
-1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-0 1 1 0 0 0 0 0 0 0 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 0 0 1 0 0 1 0 0 0 0 0 1 1 1
-1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1
-1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 0 0 0 0 0 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0 0 0 1 1
-1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 1 1 1 1 1 1 1 0 0 0 0 1 1 0 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 1
-1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1
-0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 1 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0
-1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1
-0 0 0 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 0 1 1 1 1 1 1 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 1 1
-1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 1 1 1 1 1 1
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1
-1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 1 0 1 1 1 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 0 1 1 1 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 1 0 0 0 1 1 1 1 1 1 1 1 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 1 1 1 1 1 1 1 0
-0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 1 1 1 0 0 0
-0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1
-1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1
-1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1
-1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0
-0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
-1 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
-1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
-1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+# CREATOR: GIMP PNM Filter Version 1.1
+60 80
+1111111111111111111111111111111111111111111111111111111111111111111111
+1111111111111110001111111111111111111111111111111111111111111111111111
+1111000001111111111111111111111111111111111111111111111111111110011110
+0010011111111111111111111111111111011110011111111110011000110001101111
+1111111111111111111111111101001111111111001100000110100111111111111111
+1111111111100111010100011111110010000000110000111111111111111111111111
+1111101001000111111001000000001100000111111111111111111111111111001001
+0001111110010000000001000000111111111111111111111111000010110001111110
+0100000000010000001111111111111111111111111110101100001111100100000000
+0100000011111111111111111111111000001011000011111001000001100111000011
+1111111111111111111111111010010000111111111000011011110000100111111111
+1111111111111000100100001111110011000110111100011100111111111111111111
+1111110101000011110000001100011001111001101111111111111111111100000100
+0001111001111000111000001100100011111111111111111111111111001001111000
+0100000000000001110001111111111111111111111000001001011111000011111110
+0000010001111111111111111111111101111011000111111100000110010000100111
+1111111111111111111111111111010011111111111000000011111011111111111111
+1111111111111101111110111111111111100000000001111111111111111111111111
+1111111111101111111111101000000000011111111111111111111111111111111111
+1011111111111001000000001010111111111111111111111111111111111011111111
+1110001100001100100110111111111111111111111111111110011111111110000111
+1100000100100111111111111111111111111111100111111111100000000000000100
+0100111111111111111111111111111111111111110000000000000001100100011111
+1111111111111111111111000111111110000000111111001000000011111111111111
+1111111111000000011100000000110000001110000000111111111111111111111110
+0000100010000000010000000000100000110011111111111111111111000000100110
+0000001000000000001000110000011111111111111111110000000111000000000000
+0000000001010000001111111111111111111100000000100000000100000000000001
+1100000001111111111111111111100000001000000001000000000000001000000001
+1001111111111111111000000010000000100000000000000010000110001100111111
+1111111110000110100010001000000000000000101111111101000111111111111010
+0110101000100010000000000000001100000011010001111111111110010100110000
+1000100000000000000001000010011000011111111111100001001000011000100000
+0000000000100000110100000111111111111000010010111110001000000000000000
+1000001000100001111111111111000110111111100010000000000000001000001000
+1000111111111111111100101111111000100000000000000100000010000111111111
+1111111111111001111110001000000000000001000000110001111111111111111111
+1110011111100010000000000000110000000100001111111111111111111110011111
+0000100000000000001000000001000011111111111111111111100111110000100000
+0000000010000111010000011111111111111111111001111100001000000000000011
+1110001100000011111111111111111110011110000010000000000000110000000110
+0111111111111111111111100111100001000000000000010000000000111100001111
+1111111111111000110000010000000000000110000000101100000001111111111111
+1110001100000100000000000001000011111111000000001111111111111110001000
+0001000000000000010111000100110000111111111111111111100010000010000000
+0000000110000001010000110000011111111111111100100000100000000000000000
+0000011001110000001111111111001111000000010000000000000000000000001111
+0000001111111111110001111100000100000000000000000000000000011001111111
+1111111100001000000011000000000000000000000000000011111111111111111100
+0100000000100000000000000000000000000000111111111111111111001100000000
+1000000000000000000000000000001111111111111111111010000000010000000000
+0000000000000000000011111111111111111110100000000100000000000000000000
+0000000000111111111111111111111000000010000000000000000000000000000000
+1111111111111111111100000000100000000000000000000000000000001111111111
+1111111111000000001000000000000000000000000000000011111111111111111111
+0000000100000000000000000000000000000000111111111111111111110000000100
+0000000000000000000000000000001111111111111111111100000001000000000000
+0000000000000000000111111111111111111111100000010000000000000000000000
+0000000001111111111111111111111000000100000000000000000000000000000011
+1111111111111111111111000001000000000000000000000000000001111111111111
+1111111111110000001000000000000001000000000000111111111111111111111111
+1111000010000000000111111100000000011000000000011111111111000001110001
+0000001111111111111111111000000000011011111100010000000011111111111111
+1111110000000000000000000011111110001000000000000000000001111111110000
+0000000000000000001111111110000000000000000111111111111111111110000000
+0111111111111111000000100000001111111111111111111111111110000011111111
+1111111111110000001111111111111111111111111111111100011111111111111111
+1110001111111111111111111111111111111111111111111111111111111111111111
+1111111111111111111111111111111111111111
\ No newline at end of file
diff --git a/drivers/video/logo/logo_linux_vga16.ppm b/drivers/video/logo/logo_linux_vga16.ppm
index 1850c15e6fe..8615deb9444 100644
--- a/drivers/video/logo/logo_linux_vga16.ppm
+++ b/drivers/video/logo/logo_linux_vga16.ppm
@@ -1,1604 +1,14404 @@
 P3
-# Standard 16-color Linux logo
-80 80
-255
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 85  85  85  85  85  85  85  85  85  85  85  85
- 85  85  85  85  85  85  85  85  85   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  85  85  85  85  85  85
- 85  85  85   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
- 85  85  85  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  85  85  85  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 85  85  85   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
- 85  85  85   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 85  85  85  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 85  85  85 170 170 170   0   0   0   0   0   0
-  0   0   0  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  85  85  85   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-170 170 170 170 170 170  85  85  85   0   0   0
-  0   0   0   0   0   0  85  85  85   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  85  85  85   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  85  85  85   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  85  85  85   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  85  85  85
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- [ PPM pixel data elided: ~1,300 removed lines of ASCII RGB triplets (values 0/85/170/255, the 16-colour VGA palette), apparently the boot-logo image dropped by this patch ]
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
+# CREATOR: GIMP PNM Filter Version 1.1
+60 80
+255
+[replacement logo pixel data: ASCII PNM sample values (0, 85, 170, 255), one per line, elided]
+0
+0
+0
+0
+0
+0
+0
+0
+0
+85
+85
+85
+85
+85
+85
+85
+85
+85
+85
+85
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+85
+85
+85
+85
+85
+85
+0
+170
+0
+85
+85
+85
+170
+85
+0
+0
+170
+0
+255
+255
+85
+170
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+170
+170
+170
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+85
+85
+85
+170
+85
+0
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+85
+85
+85
+255
+255
+85
+85
+85
+85
+85
+85
+85
+0
+0
+0
+0
+0
+0
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+0
+170
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+255
+255
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+85
+85
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+85
+85
+85
+85
+85
+85
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 18ebd7a6af9..0b43efddea2 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
  * Changes the event filter mask for the given session.
  *
  * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
- * do session cleanup. Takes the session spinlock.
+ * do session cleanup. Takes the session mutex.
  *
  * Return: 0 or negative errno value.
  * @gdev:			The Guest extension device.
@@ -662,7 +662,156 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 }
 
 /**
- * Sets the guest capabilities for a session. Takes the session spinlock.
+ * Set guest capabilities on the host.
+ * Must be called with gdev->session_mutex held.
+ * Return: 0 or negative errno value.
+ * @gdev:			The Guest extension device.
+ * @session:			The session.
+ * @session_termination:	Set if we're called by the session cleanup code.
+ */
+static int vbg_set_host_capabilities(struct vbg_dev *gdev,
+				     struct vbg_session *session,
+				     bool session_termination)
+{
+	struct vmmdev_mask *req;
+	u32 caps;
+	int rc;
+
+	WARN_ON(!mutex_is_locked(&gdev->session_mutex));
+
+	caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
+
+	if (gdev->guest_caps_host == caps)
+		return 0;
+
+	/* On termination the requestor is the kernel, as we're cleaning up. */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
+	if (!req) {
+		gdev->guest_caps_host = U32_MAX;
+		return -ENOMEM;
+	}
+
+	req->or_mask = caps;
+	req->not_mask = ~caps;
+	rc = vbg_req_perform(gdev, req);
+	vbg_req_free(req, sizeof(*req));
+
+	gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
+
+	return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Acquire (get exclusive access to) guest capabilities for a session.
+ * Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev:			The Guest extension device.
+ * @session:			The session.
+ * @flags:			Flags (VBGL_IOC_AGC_FLAGS_XXX).
+ * @or_mask:			The capabilities to add.
+ * @not_mask:			The capabilities to remove.
+ * @session_termination:	Set if we're called by the session cleanup code.
+ *				This tweaks the error handling so we perform
+ *				proper session cleanup even if the host
+ *				misbehaves.
+ */
+static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
+					    struct vbg_session *session,
+					    u32 or_mask, u32 not_mask,
+					    u32 flags, bool session_termination)
+{
+	unsigned long irqflags;
+	bool wakeup = false;
+	int ret = 0;
+
+	mutex_lock(&gdev->session_mutex);
+
+	if (gdev->set_guest_caps_tracker.mask & or_mask) {
+		vbg_err("%s error: cannot acquire caps which are currently set\n",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Mark any caps in the or_mask as now being in acquire-mode. Note
+	 * that once caps are in acquire-mode they always stay in this mode.
+	 * This impacts event handling, so we take the event-lock.
+	 */
+	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+	gdev->acquire_mode_guest_caps |= or_mask;
+	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+	/* If we only have to switch the caps to acquire mode, we're done. */
+	if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
+		goto out;
+
+	not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
+	not_mask &= session->acquired_guest_caps;
+	or_mask &= ~session->acquired_guest_caps;
+
+	if (or_mask == 0 && not_mask == 0)
+		goto out;
+
+	if (gdev->acquired_guest_caps & or_mask) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	gdev->acquired_guest_caps |= or_mask;
+	gdev->acquired_guest_caps &= ~not_mask;
+	/* session->acquired_guest_caps impacts event handling, take the lock */
+	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+	session->acquired_guest_caps |= or_mask;
+	session->acquired_guest_caps &= ~not_mask;
+	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+	ret = vbg_set_host_capabilities(gdev, session, session_termination);
+	/* Roll back on failure, unless it's session termination time. */
+	if (ret < 0 && !session_termination) {
+		gdev->acquired_guest_caps &= ~or_mask;
+		gdev->acquired_guest_caps |= not_mask;
+		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+		session->acquired_guest_caps &= ~or_mask;
+		session->acquired_guest_caps |= not_mask;
+		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+	}
+
+	/*
+	 * If we added a capability, check if that means some other thread in
+	 * our session should be unblocked because there are events pending
+	 * (the result of vbg_get_allowed_event_mask_for_session() may change).
+	 *
+	 * HACK ALERT! When the seamless support capability is added we generate
+	 *	a seamless change event so that the ring-3 client can sync with
+	 *	the seamless state.
+	 */
+	if (ret == 0 && or_mask != 0) {
+		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+
+		if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
+			gdev->pending_events |=
+				VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+		if (gdev->pending_events)
+			wakeup = true;
+
+		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+		if (wakeup)
+			wake_up(&gdev->event_wq);
+	}
+
+out:
+	mutex_unlock(&gdev->session_mutex);
+
+	return ret;
+}
+
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
  * Return: 0 or negative errno value.
  * @gdev:			The Guest extension device.
  * @session:			The session.
@@ -678,62 +827,40 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 					u32 or_mask, u32 not_mask,
 					bool session_termination)
 {
-	struct vmmdev_mask *req;
 	u32 changed, previous;
-	int rc, ret = 0;
-
-	/*
-	 * Allocate a request buffer before taking the spinlock, when
-	 * the session is being terminated the requestor is the kernel,
-	 * as we're cleaning up.
-	 */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
-			    session_termination ? VBG_KERNEL_REQUEST :
-						  session->requestor);
-	if (!req) {
-		if (!session_termination)
-			return -ENOMEM;
-		/* Ignore allocation failure, we must do session cleanup. */
-	}
+	int ret = 0;
 
 	mutex_lock(&gdev->session_mutex);
 
+	if (gdev->acquire_mode_guest_caps & or_mask) {
+		vbg_err("%s error: cannot set caps which are in acquire_mode\n",
+			__func__);
+		ret = -EBUSY;
+		goto out;
+	}
+
 	/* Apply the changes to the session mask. */
-	previous = session->guest_caps;
-	session->guest_caps |= or_mask;
-	session->guest_caps &= ~not_mask;
+	previous = session->set_guest_caps;
+	session->set_guest_caps |= or_mask;
+	session->set_guest_caps &= ~not_mask;
 
 	/* If anything actually changed, update the global usage counters. */
-	changed = previous ^ session->guest_caps;
+	changed = previous ^ session->set_guest_caps;
 	if (!changed)
 		goto out;
 
-	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
-	or_mask = gdev->guest_caps_tracker.mask;
-
-	if (gdev->guest_caps_host == or_mask || !req)
-		goto out;
+	vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
 
-	gdev->guest_caps_host = or_mask;
-	req->or_mask = or_mask;
-	req->not_mask = ~or_mask;
-	rc = vbg_req_perform(gdev, req);
-	if (rc < 0) {
-		ret = vbg_status_code_to_errno(rc);
-
-		/* Failed, roll back (unless it's session termination time). */
-		gdev->guest_caps_host = U32_MAX;
-		if (session_termination)
-			goto out;
-
-		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
-				    session->guest_caps);
-		session->guest_caps = previous;
+	ret = vbg_set_host_capabilities(gdev, session, session_termination);
+	/* Roll back on failure, unless it's session termination time. */
+	if (ret < 0 && !session_termination) {
+		vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
+				    session->set_guest_caps);
+		session->set_guest_caps = previous;
 	}
 
 out:
 	mutex_unlock(&gdev->session_mutex);
-	vbg_req_free(req, sizeof(*req));
 
 	return ret;
 }
@@ -949,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session)
 	struct vbg_dev *gdev = session->gdev;
 	int i, rc;
 
+	vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
 	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
 	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
 
@@ -1006,6 +1134,25 @@ static int vbg_ioctl_driver_version_info(
 	return 0;
 }
 
+/* Must be called with the event_spinlock held */
+static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
+						  struct vbg_session *session)
+{
+	u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
+	u32 session_acquired_caps = session->acquired_guest_caps;
+	u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
+
+	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
+	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
+		allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
+
+	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
+	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
+		allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+	return allowed_events;
+}
+
 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
 				struct vbg_session *session,
 				u32 event_mask)
@@ -1017,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev,
 	spin_lock_irqsave(&gdev->event_spinlock, flags);
 
 	events = gdev->pending_events & event_mask;
+	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
 	wakeup = events || session->cancel_waiters;
 
 	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
@@ -1031,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
 {
 	u32 events = gdev->pending_events & event_mask;
 
+	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
 	gdev->pending_events &= ~events;
 	return events;
 }
@@ -1150,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
 	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
 	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
 	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
+	case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
 	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
+	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
 	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
 	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
 	case VMMDEVREQ_LOG_STRING:
@@ -1432,6 +1583,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
 					    false);
 }
 
+static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
+	     struct vbg_session *session,
+	     struct vbg_ioctl_acquire_guest_caps *caps)
+{
+	u32 flags, or_mask, not_mask;
+
+	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
+		return -EINVAL;
+
+	flags = caps->u.in.flags;
+	or_mask = caps->u.in.or_mask;
+	not_mask = caps->u.in.not_mask;
+
+	if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
+		return -EINVAL;
+
+	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+		return -EINVAL;
+
+	return vbg_acquire_session_capabilities(gdev, session, or_mask,
+						not_mask, flags, false);
+}
+
 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
 	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
 {
@@ -1452,7 +1626,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
 	if (ret)
 		return ret;
 
-	caps->u.out.session_caps = session->guest_caps;
+	caps->u.out.session_caps = session->set_guest_caps;
 	caps->u.out.global_caps = gdev->guest_caps_host;
 
 	return 0;
@@ -1541,6 +1715,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
 	case VBG_IOCTL_CHANGE_FILTER_MASK:
 		return vbg_ioctl_change_filter_mask(gdev, session, data);
+	case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
+		return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
 	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
 		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
 	case VBG_IOCTL_CHECK_BALLOON:
@@ -1563,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 		return vbg_ioctl_log(data);
 	}
 
-	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+	vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
 	return -ENOTTY;
 }
 
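Editor's note on the acquire-mode change above: the sketch below is a minimal, self-contained userspace model of the bitmask bookkeeping in vbg_acquire_session_capabilities(), not the driver API. All model_* names are hypothetical; it only illustrates that or_mask takes priority over not_mask, that a capability already acquired by another session yields -EBUSY, and that a failed host update rolls the masks back. Compared with vbg_set_session_capabilities(), which only reference-counts caps shared by sessions, the acquire path grants a capability to exactly one session at a time.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the gdev/session fields in the patch above. */
struct model_dev  { uint32_t acquired_caps; };
struct model_sess { uint32_t acquired_caps; };

/* Pretend host update (VMMDEVREQ_SET_GUEST_CAPABILITIES); flip 'ok' to hit the rollback. */
static int model_update_host(bool ok) { return ok ? 0 : -1; }

static int model_acquire(struct model_dev *d, struct model_sess *s,
			 uint32_t or_mask, uint32_t not_mask, bool host_ok)
{
	not_mask &= ~or_mask;           /* or_mask takes priority over not_mask */
	not_mask &= s->acquired_caps;   /* only release caps this session owns */
	or_mask &= ~s->acquired_caps;   /* only acquire caps it does not own yet */

	if (or_mask == 0 && not_mask == 0)
		return 0;
	if (d->acquired_caps & or_mask) /* held exclusively by another session */
		return -16;             /* -EBUSY */

	d->acquired_caps = (d->acquired_caps | or_mask) & ~not_mask;
	s->acquired_caps = (s->acquired_caps | or_mask) & ~not_mask;

	if (model_update_host(host_ok) < 0) {
		/* roll back, mirroring the !session_termination branch */
		d->acquired_caps = (d->acquired_caps & ~or_mask) | not_mask;
		s->acquired_caps = (s->acquired_caps & ~or_mask) | not_mask;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct model_dev d = { 0 };
	struct model_sess s = { 0 };

	printf("acquire 0x1: %d, dev caps 0x%x\n",
	       model_acquire(&d, &s, 0x1, 0, true), (unsigned)d.acquired_caps);
	printf("failed host update: %d, dev caps still 0x%x\n",
	       model_acquire(&d, &s, 0x2, 0, false), (unsigned)d.acquired_caps);
	return 0;
}
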
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 77c3a9c8255..ab4bf64e2ce 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -118,11 +118,21 @@ struct vbg_dev {
 	u32 event_filter_host;
 
 	/**
-	 * Usage counters for guest capabilities. Indexed by capability bit
+	 * Guest capabilities which have been switched to acquire_mode.
+	 */
+	u32 acquire_mode_guest_caps;
+	/**
+	 * Guest capabilities acquired by vbg_acquire_session_capabilities().
+	 * Only one session can acquire a capability at a time.
+	 */
+	u32 acquired_guest_caps;
+	/**
+	 * Usage counters for guest capabilities requested through
+	 * vbg_set_session_capabilities(). Indexed by capability bit
 	 * number, one count per session using a capability.
 	 * Protected by session_mutex.
 	 */
-	struct vbg_bit_usage_tracker guest_caps_tracker;
+	struct vbg_bit_usage_tracker set_guest_caps_tracker;
 	/**
 	 * The guest capabilities last reported to the host (or UINT32_MAX).
 	 * Protected by session_mutex.
@@ -164,11 +174,16 @@ struct vbg_session {
 	 */
 	u32 event_filter;
 	/**
-	 * Guest capabilities for this session.
+	 * Guest capabilities acquired by vbg_acquire_session_capabilities().
+	 * Only one session can acquire a capability at a time.
+	 */
+	u32 acquired_guest_caps;
+	/**
+	 * Guest capabilities set through vbg_set_session_capabilities().
 	 * A capability claimed by any guest session will be reported to the
 	 * host. Protected by vbg_gdev.session_mutex.
 	 */
-	u32 guest_caps;
+	u32 set_guest_caps;
 	/** VMMDEV_REQUESTOR_* flags */
 	u32 requestor;
 	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 7396187ee32..ea05af41ec6 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -59,6 +59,7 @@ EXPORT_SYMBOL(name)
 VBG_LOG(vbg_info, pr_info);
 VBG_LOG(vbg_warn, pr_warn);
 VBG_LOG(vbg_err, pr_err);
+VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
 #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
 VBG_LOG(vbg_debug, pr_debug);
 #endif
diff --git a/fs/Kconfig b/fs/Kconfig
index f08fbbfafd9..1c597cddc35 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -267,6 +267,7 @@ source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
 source "fs/erofs/Kconfig"
 source "fs/vboxsf/Kconfig"
+source "fs/aufs/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/Makefile b/fs/Makefile
index 2ce5112b02c..d9b4a519855 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -136,3 +136,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
 obj-$(CONFIG_EROFS_FS)		+= erofs/
 obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
 obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
+obj-$(CONFIG_AUFS_FS)           += aufs/
diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig
new file mode 100644
index 00000000000..9f436425716
--- /dev/null
+++ b/fs/aufs/Kconfig
@@ -0,0 +1,199 @@
+# SPDX-License-Identifier: GPL-2.0
+config AUFS_FS
+	tristate "Aufs (Advanced multi layered unification filesystem) support"
+	help
+	Aufs is a stackable unification filesystem such as Unionfs,
+	which unifies several directories and provides a merged single
+	directory.
+	In its early days, aufs was an entire re-design and
+	re-implementation of the Unionfs Version 1.x series. By introducing
+	many original ideas, approaches and improvements, it became totally
+	different from Unionfs while keeping the basic features.
+
+if AUFS_FS
+choice
+	prompt "Maximum number of branches"
+	default AUFS_BRANCH_MAX_127
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_127
+	bool "127"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_511
+	bool "511"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_1023
+	bool "1023"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_32767
+	bool "32767"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+endchoice
+
+config AUFS_SBILIST
+	bool
+	depends on AUFS_MAGIC_SYSRQ || PROC_FS
+	default y
+	help
+	Automatic configuration for internal use.
+	When aufs supports Magic SysRq or /proc, this is enabled automatically.
+
+config AUFS_HNOTIFY
+	bool "Detect direct branch access (bypassing aufs)"
+	help
+	If you want to modify files on branches directly, e.g. bypassing aufs,
+	and want aufs to detect those changes fully, then enable this
+	option and use the 'udba=notify' mount option.
+	Currently there is only one available configuration, "fsnotify".
+	It will have a negative impact on performance.
+	See details in aufs.5.
+
+choice
+	prompt "method" if AUFS_HNOTIFY
+	default AUFS_HFSNOTIFY
+config AUFS_HFSNOTIFY
+	bool "fsnotify"
+	select FSNOTIFY
+endchoice
+
+config AUFS_EXPORT
+	bool "NFS-exportable aufs"
+	depends on EXPORTFS
+	help
+	If you want to export your mounted aufs via NFS, then enable this
+	option. There are several requirements for this configuration.
+	See details in aufs.5.
+
+config AUFS_INO_T_64
+	bool
+	depends on AUFS_EXPORT
+	depends on 64BIT && !(ALPHA || S390)
+	default y
+	help
+	Automatic configuration for internal use.
+	/* typedef unsigned long/int __kernel_ino_t */
+	/* alpha and s390x are int */
+
+config AUFS_XATTR
+	bool "support for XATTR/EA (including Security Labels)"
+	help
+	If your branch fs supports XATTR/EA and you want to make them
+	available in aufs too, then enable this option and specify the
+	branch attributes for EA.
+	See details in aufs.5.
+
+config AUFS_FHSM
+	bool "File-based Hierarchical Storage Management"
+	help
+	Hierarchical Storage Management (or HSM) is a well-known feature
+	in the storage world. Aufs provides this feature as file-based,
+	with multiple branches.
+	These multiple branches are prioritized, i.e. the topmost one
+	should be the fastest drive and be used heavily.
+
+config AUFS_RDU
+	bool "Readdir in userspace"
+	help
+	Aufs has two methods to provide a merged view for a directory,
+	by a user-space library and by kernel-space natively. The latter
+	is always enabled but sometimes large and slow.
+	If you enable this option, install the library in the aufs2-util
+	package, and set some environment variables for your readdir(3),
+	then the work will be handled in user-space, which generally
+	shows better performance.
+	See details in aufs.5.
+
+config AUFS_DIRREN
+	bool "Workaround for rename(2)-ing a directory"
+	help
+	By default, aufs returns an EXDEV error when renaming a dir which
+	has children on the lower branch, since it is a bad idea to issue
+	rename(2) internally for every lower branch. But users may not
+	accept this behaviour. So here is a workaround to allow such
+	rename(2) and store some extra information on the writable
+	branch. Obviously this is costly (and I don't like it).
+	To use this feature, you need to enable this configuration AND
+	to specify the mount option `dirren.'
+	See details in aufs.5 and the design documents.
+
+config AUFS_SHWH
+	bool "Show whiteouts"
+	help
+	If you want to make the whiteouts in aufs visible, then enable
+	this option and specify the 'shwh' mount option. Although it may
+	sound like philosophy or something, technically it simply
+	shows the name of the whiteout while keeping its behaviour.
+
+config AUFS_BR_RAMFS
+	bool "Ramfs (initramfs/rootfs) as an aufs branch"
+	help
+	If you want to use ramfs as an aufs branch fs, then enable this
+	option. Generally tmpfs is recommended.
+	Aufs prohibits ramfs from being a branch fs by default, because
+	initramfs generally becomes unusable after switch_root or
+	something similar. If you set initramfs as an aufs branch and
+	boot your system via switch_root, you will easily hit a problem
+	since the files in initramfs may be inaccessible.
+	Unless you are going to use ramfs as an aufs branch fs without
+	switch_root or something, leave it N.
+
+config AUFS_BR_FUSE
+	bool "Fuse fs as an aufs branch"
+	depends on FUSE_FS
+	select AUFS_POLL
+	help
+	If you want to use a fuse-based userspace filesystem as an aufs
+	branch fs, then enable this option.
+	It implements the internal poll(2) operation, which is
+	currently supported by fuse only.
+
+config AUFS_POLL
+	bool
+	help
+	Automatic configuration for internal use.
+
+config AUFS_BR_HFSPLUS
+	bool "Hfsplus as an aufs branch"
+	depends on HFSPLUS_FS
+	default y
+	help
+	If you want to use hfsplus fs as an aufs branch fs, then enable
+	this option. This option introduces a small overhead when
+	copying up a file on hfsplus.
+
+config AUFS_BDEV_LOOP
+	bool
+	depends on BLK_DEV_LOOP
+	default y
+	help
+	Automatic configuration for internal use.
+	Convert =[ym] into =y.
+
+config AUFS_DEBUG
+	bool "Debug aufs"
+	help
+	Enable this to compile aufs internal debug code.
+	It will have a negative impact on performance.
+
+config AUFS_MAGIC_SYSRQ
+	bool
+	depends on AUFS_DEBUG && MAGIC_SYSRQ
+	default y
+	help
+	Automatic configuration for internal use.
+	When aufs supports Magic SysRq, this is enabled automatically.
+endif
diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile
new file mode 100644
index 00000000000..2c819a64935
--- /dev/null
+++ b/fs/aufs/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0
+
+include ${src}/magic.mk
+ifeq (${CONFIG_AUFS_FS},m)
+include ${src}/conf.mk
+endif
+-include ${src}/priv_def.mk
+
+# cf. include/linux/kernel.h
+# enable pr_debug
+ccflags-y += -DDEBUG
+# sparse requires the full pathname
+ifdef M
+ccflags-y += -include ${M}/../../include/uapi/linux/aufs_type.h
+else
+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h
+endif
+
+obj-$(CONFIG_AUFS_FS) += aufs.o
+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
+	wkq.o vfsub.o dcsub.o \
+	cpup.o whout.o wbr_policy.o \
+	dinfo.o dentry.o \
+	dynop.o \
+	finfo.o file.o f_op.o \
+	dir.o vdir.o \
+	iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
+	mvdown.o ioctl.o
+
+# all are boolean
+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
+aufs-$(CONFIG_SYSFS) += sysfs.o
+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
+aufs-$(CONFIG_AUFS_EXPORT) += export.o
+aufs-$(CONFIG_AUFS_XATTR) += xattr.o
+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
+aufs-$(CONFIG_AUFS_DIRREN) += dirren.o
+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o
+aufs-$(CONFIG_AUFS_POLL) += poll.o
+aufs-$(CONFIG_AUFS_RDU) += rdu.o
+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h
new file mode 100644
index 00000000000..9a573445876
--- /dev/null
+++ b/fs/aufs/aufs.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * all header files
+ */
+
+#ifndef __AUFS_H__
+#define __AUFS_H__
+
+#ifdef __KERNEL__
+
+#define AuStub(type, name, body, ...) \
+	static inline type name(__VA_ARGS__) { body; }
+
+#define AuStubVoid(name, ...) \
+	AuStub(void, name, , __VA_ARGS__)
+#define AuStubInt0(name, ...) \
+	AuStub(int, name, return 0, __VA_ARGS__)
+
+#include "debug.h"
+
+#include "branch.h"
+#include "cpup.h"
+#include "dcsub.h"
+#include "dbgaufs.h"
+#include "dentry.h"
+#include "dir.h"
+#include "dirren.h"
+#include "dynop.h"
+#include "file.h"
+#include "fstype.h"
+#include "hbl.h"
+#include "inode.h"
+#include "lcnt.h"
+#include "loop.h"
+#include "module.h"
+#include "opts.h"
+#include "rwsem.h"
+#include "super.h"
+#include "sysaufs.h"
+#include "vfsub.h"
+#include "whout.h"
+#include "wkq.h"
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
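Aside for readers of the stub macros above: AuStub() builds a static inline with an arbitrary body, and the two wrappers just fix that body. For hypothetical symbols (au_example_int and au_example_void are illustrative names, not real aufs functions) the expansions look like this:

/* AuStubInt0(au_example_int, struct inode *inode) expands to: */
static inline int au_example_int(struct inode *inode) { return 0; }

/* AuStubVoid(au_example_void, void) expands to an empty-bodied inline: */
static inline void au_example_void(void) { ; }
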
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
new file mode 100644
index 00000000000..26d41e1bc0c
--- /dev/null
+++ b/fs/aufs/branch.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * branch management
+ */
+
+#include <linux/compat.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/*
+ * free a single branch
+ */
+static void au_br_do_free(struct au_branch *br)
+{
+	int i;
+	struct au_wbr *wbr;
+	struct au_dykey **key;
+
+	au_hnotify_fin_br(br);
+	/* always, regardless of the mount option */
+	au_dr_hino_free(&br->br_dirren);
+	au_xino_put(br);
+
+	AuLCntZero(au_lcnt_read(&br->br_nfiles, /*do_rev*/0));
+	au_lcnt_fin(&br->br_nfiles, /*do_sync*/0);
+	AuLCntZero(au_lcnt_read(&br->br_count, /*do_rev*/0));
+	au_lcnt_fin(&br->br_count, /*do_sync*/0);
+
+	wbr = br->br_wbr;
+	if (wbr) {
+		for (i = 0; i < AuBrWh_Last; i++)
+			dput(wbr->wbr_wh[i]);
+		AuDebugOn(atomic_read(&wbr->wbr_wh_running));
+		AuRwDestroy(&wbr->wbr_wh_rwsem);
+	}
+
+	if (br->br_fhsm) {
+		au_br_fhsm_fin(br->br_fhsm);
+		au_kfree_try_rcu(br->br_fhsm);
+	}
+
+	key = br->br_dykey;
+	for (i = 0; i < AuBrDynOp; i++, key++)
+		if (*key)
+			au_dy_put(*key);
+		else
+			break;
+
+	/* recursive lock, s_umount of branch's */
+	/* synchronize_rcu(); */ /* why? */
+	lockdep_off();
+	path_put(&br->br_path);
+	lockdep_on();
+	au_kfree_rcu(wbr);
+	au_lcnt_wait_for_fin(&br->br_nfiles);
+	au_lcnt_wait_for_fin(&br->br_count);
+	/* I don't know why, but percpu_refcount requires this */
+	/* synchronize_rcu(); */
+	au_kfree_rcu(br);
+}
+
+/*
+ * frees all branches
+ */
+void au_br_free(struct au_sbinfo *sbinfo)
+{
+	aufs_bindex_t bmax;
+	struct au_branch **br;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	bmax = sbinfo->si_bbot + 1;
+	br = sbinfo->si_branch;
+	while (bmax--)
+		au_br_do_free(*br++);
+}
+
+/*
+ * find the index of a branch which is specified by @br_id.
+ */
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
+{
+	aufs_bindex_t bindex, bbot;
+
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++)
+		if (au_sbr_id(sb, bindex) == br_id)
+			return bindex;
+	return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * add a branch
+ */
+
+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
+			struct dentry *h_root)
+{
+	if (unlikely(h_adding == h_root
+		     || au_test_loopback_overlap(sb, h_adding)))
+		return 1;
+	if (h_adding->d_sb != h_root->d_sb)
+		return 0;
+	return au_test_subdir(h_adding, h_root)
+		|| au_test_subdir(h_root, h_adding);
+}
+
+/*
+ * returns a newly allocated branch. @new_nbranch is the number of branches
+ * after adding a branch.
+ */
+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
+				     int perm)
+{
+	struct au_branch *add_branch;
+	struct dentry *root;
+	struct inode *inode;
+	int err;
+
+	err = -ENOMEM;
+	add_branch = kzalloc(sizeof(*add_branch), GFP_NOFS);
+	if (unlikely(!add_branch))
+		goto out;
+	add_branch->br_xino = au_xino_alloc(/*nfile*/1);
+	if (unlikely(!add_branch->br_xino))
+		goto out_br;
+	err = au_hnotify_init_br(add_branch, perm);
+	if (unlikely(err))
+		goto out_xino;
+
+	if (au_br_writable(perm)) {
+		/* may be freed separately at changing the branch permission */
+		add_branch->br_wbr = kzalloc(sizeof(*add_branch->br_wbr),
+					     GFP_NOFS);
+		if (unlikely(!add_branch->br_wbr))
+			goto out_hnotify;
+	}
+
+	if (au_br_fhsm(perm)) {
+		err = au_fhsm_br_alloc(add_branch);
+		if (unlikely(err))
+			goto out_wbr;
+	}
+
+	root = sb->s_root;
+	err = au_sbr_realloc(au_sbi(sb), new_nbranch, /*may_shrink*/0);
+	if (!err)
+		err = au_di_realloc(au_di(root), new_nbranch, /*may_shrink*/0);
+	if (!err) {
+		inode = d_inode(root);
+		err = au_hinode_realloc(au_ii(inode), new_nbranch,
+					/*may_shrink*/0);
+	}
+	if (!err)
+		return add_branch; /* success */
+
+out_wbr:
+	au_kfree_rcu(add_branch->br_wbr);
+out_hnotify:
+	au_hnotify_fin_br(add_branch);
+out_xino:
+	au_xino_put(add_branch);
+out_br:
+	au_kfree_rcu(add_branch);
+out:
+	return ERR_PTR(err);
+}
+
+/*
+ * test if the branch permission is legal or not.
+ */
+static int test_br(struct inode *inode, int brperm, char *path)
+{
+	int err;
+
+	err = (au_br_writable(brperm) && IS_RDONLY(inode));
+	if (!err)
+		goto out;
+
+	err = -EINVAL;
+	pr_err("write permission for readonly mount or inode, %s\n", path);
+
+out:
+	return err;
+}
+
+/*
+ * returns:
+ * 0: success, the caller will add it
+ * plus: success, it is already unified, the caller should ignore it
+ * minus: error
+ */
+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+	int err;
+	aufs_bindex_t bbot, bindex;
+	struct dentry *root, *h_dentry;
+	struct inode *inode, *h_inode;
+
+	root = sb->s_root;
+	bbot = au_sbbot(sb);
+	if (unlikely(bbot >= 0
+		     && au_find_dbindex(root, add->path.dentry) >= 0)) {
+		err = 1;
+		if (!remount) {
+			err = -EINVAL;
+			pr_err("%s duplicated\n", add->pathname);
+		}
+		goto out;
+	}
+
+	err = -ENOSPC; /* -E2BIG; */
+	if (unlikely(AUFS_BRANCH_MAX <= add->bindex
+		     || AUFS_BRANCH_MAX - 1 <= bbot)) {
+		pr_err("number of branches exceeded %s\n", add->pathname);
+		goto out;
+	}
+
+	err = -EDOM;
+	if (unlikely(add->bindex < 0 || bbot + 1 < add->bindex)) {
+		pr_err("bad index %d\n", add->bindex);
+		goto out;
+	}
+
+	inode = d_inode(add->path.dentry);
+	err = -ENOENT;
+	if (unlikely(!inode->i_nlink)) {
+		pr_err("no existence %s\n", add->pathname);
+		goto out;
+	}
+
+	err = -EINVAL;
+	if (unlikely(inode->i_sb == sb)) {
+		pr_err("%s must be outside\n", add->pathname);
+		goto out;
+	}
+
+	if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
+		pr_err("unsupported filesystem, %s (%s)\n",
+		       add->pathname, au_sbtype(inode->i_sb));
+		goto out;
+	}
+
+	if (unlikely(inode->i_sb->s_stack_depth)) {
+		pr_err("already stacked, %s (%s)\n",
+		       add->pathname, au_sbtype(inode->i_sb));
+		goto out;
+	}
+
+	err = test_br(d_inode(add->path.dentry), add->perm, add->pathname);
+	if (unlikely(err))
+		goto out;
+
+	if (bbot < 0)
+		return 0; /* success */
+
+	err = -EINVAL;
+	for (bindex = 0; bindex <= bbot; bindex++)
+		if (unlikely(test_overlap(sb, add->path.dentry,
+					  au_h_dptr(root, bindex)))) {
+			pr_err("%s is overlapped\n", add->pathname);
+			goto out;
+		}
+
+	err = 0;
+	if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
+		h_dentry = au_h_dptr(root, 0);
+		h_inode = d_inode(h_dentry);
+		if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
+		    || !uid_eq(h_inode->i_uid, inode->i_uid)
+		    || !gid_eq(h_inode->i_gid, inode->i_gid))
+			pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
+				add->pathname,
+				i_uid_read(inode), i_gid_read(inode),
+				(inode->i_mode & S_IALLUGO),
+				i_uid_read(h_inode), i_gid_read(h_inode),
+				(h_inode->i_mode & S_IALLUGO));
+	}
+
+out:
+	return err;
+}
+
+/*
+ * initialize or clean the whiteouts for a branch being added
+ */
+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
+			 int new_perm)
+{
+	int err, old_perm;
+	aufs_bindex_t bindex;
+	struct inode *h_inode;
+	struct au_wbr *wbr;
+	struct au_hinode *hdir;
+	struct dentry *h_dentry;
+
+	err = vfsub_mnt_want_write(au_br_mnt(br));
+	if (unlikely(err))
+		goto out;
+
+	wbr = br->br_wbr;
+	old_perm = br->br_perm;
+	br->br_perm = new_perm;
+	hdir = NULL;
+	h_inode = NULL;
+	bindex = au_br_index(sb, br->br_id);
+	if (0 <= bindex) {
+		hdir = au_hi(d_inode(sb->s_root), bindex);
+		au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+	} else {
+		h_dentry = au_br_dentry(br);
+		h_inode = d_inode(h_dentry);
+		inode_lock_nested(h_inode, AuLsc_I_PARENT);
+	}
+	if (!wbr)
+		err = au_wh_init(br, sb);
+	else {
+		wbr_wh_write_lock(wbr);
+		err = au_wh_init(br, sb);
+		wbr_wh_write_unlock(wbr);
+	}
+	if (hdir)
+		au_hn_inode_unlock(hdir);
+	else
+		inode_unlock(h_inode);
+	vfsub_mnt_drop_write(au_br_mnt(br));
+	br->br_perm = old_perm;
+
+	if (!err && wbr && !au_br_writable(new_perm)) {
+		au_kfree_rcu(wbr);
+		br->br_wbr = NULL;
+	}
+
+out:
+	return err;
+}
+
+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
+		       int perm)
+{
+	int err;
+	struct kstatfs kst;
+	struct au_wbr *wbr;
+
+	wbr = br->br_wbr;
+	au_rw_init(&wbr->wbr_wh_rwsem);
+	atomic_set(&wbr->wbr_wh_running, 0);
+
+	/*
+	 * a limit for rmdir/rename a dir
+	 * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h
+	 */
+	err = vfs_statfs(&br->br_path, &kst);
+	if (unlikely(err))
+		goto out;
+	err = -EINVAL;
+	if (kst.f_namelen >= NAME_MAX)
+		err = au_br_init_wh(sb, br, perm);
+	else
+		pr_err("%pd(%s), unsupported namelen %ld\n",
+		       au_br_dentry(br),
+		       au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
+
+out:
+	return err;
+}
+
+/* initialize a new branch */
+static int au_br_init(struct au_branch *br, struct super_block *sb,
+		      struct au_opt_add *add)
+{
+	int err;
+	struct au_branch *brbase;
+	struct file *xf;
+	struct inode *h_inode;
+
+	err = 0;
+	br->br_perm = add->perm;
+	br->br_path = add->path; /* set first, path_get() later */
+	spin_lock_init(&br->br_dykey_lock);
+	au_lcnt_init(&br->br_nfiles, /*release*/NULL);
+	au_lcnt_init(&br->br_count, /*release*/NULL);
+	br->br_id = au_new_br_id(sb);
+	AuDebugOn(br->br_id < 0);
+
+	/* always, regardless of the given option */
+	err = au_dr_br_init(sb, br, &add->path);
+	if (unlikely(err))
+		goto out_err;
+
+	if (au_br_writable(add->perm)) {
+		err = au_wbr_init(br, sb, add->perm);
+		if (unlikely(err))
+			goto out_err;
+	}
+
+	if (au_opt_test(au_mntflags(sb), XINO)) {
+		brbase = au_sbr(sb, 0);
+		xf = au_xino_file(brbase->br_xino, /*idx*/-1);
+		AuDebugOn(!xf);
+		h_inode = d_inode(add->path.dentry);
+		err = au_xino_init_br(sb, br, h_inode->i_ino, &xf->f_path);
+		if (unlikely(err)) {
+			AuDebugOn(au_xino_file(br->br_xino, /*idx*/-1));
+			goto out_err;
+		}
+	}
+
+	sysaufs_br_init(br);
+	path_get(&br->br_path);
+	goto out; /* success */
+
+out_err:
+	memset(&br->br_path, 0, sizeof(br->br_path));
+out:
+	return err;
+}
+
+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
+			     struct au_branch *br, aufs_bindex_t bbot,
+			     aufs_bindex_t amount)
+{
+	struct au_branch **brp;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	brp = sbinfo->si_branch + bindex;
+	memmove(brp + 1, brp, sizeof(*brp) * amount);
+	*brp = br;
+	sbinfo->si_bbot++;
+	if (unlikely(bbot < 0))
+		sbinfo->si_bbot = 0;
+}
+
+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
+			     aufs_bindex_t bbot, aufs_bindex_t amount)
+{
+	struct au_hdentry *hdp;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	hdp = au_hdentry(dinfo, bindex);
+	memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
+	au_h_dentry_init(hdp);
+	dinfo->di_bbot++;
+	if (unlikely(bbot < 0))
+		dinfo->di_btop = 0;
+}
+
+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
+			     aufs_bindex_t bbot, aufs_bindex_t amount)
+{
+	struct au_hinode *hip;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	hip = au_hinode(iinfo, bindex);
+	memmove(hip + 1, hip, sizeof(*hip) * amount);
+	au_hinode_init(hip);
+	iinfo->ii_bbot++;
+	if (unlikely(bbot < 0))
+		iinfo->ii_btop = 0;
+}
+
+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
+			 aufs_bindex_t bindex)
+{
+	struct dentry *root, *h_dentry;
+	struct inode *root_inode, *h_inode;
+	aufs_bindex_t bbot, amount;
+
+	root = sb->s_root;
+	root_inode = d_inode(root);
+	bbot = au_sbbot(sb);
+	amount = bbot + 1 - bindex;
+	h_dentry = au_br_dentry(br);
+	au_sbilist_lock();
+	au_br_do_add_brp(au_sbi(sb), bindex, br, bbot, amount);
+	au_br_do_add_hdp(au_di(root), bindex, bbot, amount);
+	au_br_do_add_hip(au_ii(root_inode), bindex, bbot, amount);
+	au_set_h_dptr(root, bindex, dget(h_dentry));
+	h_inode = d_inode(h_dentry);
+	au_set_h_iptr(root_inode, bindex, au_igrab(h_inode), /*flags*/0);
+	au_sbilist_unlock();
+}
+
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+	int err;
+	aufs_bindex_t bbot, add_bindex;
+	struct dentry *root, *h_dentry;
+	struct inode *root_inode;
+	struct au_branch *add_branch;
+
+	root = sb->s_root;
+	root_inode = d_inode(root);
+	IMustLock(root_inode);
+	IiMustWriteLock(root_inode);
+	err = test_add(sb, add, remount);
+	if (unlikely(err < 0))
+		goto out;
+	if (err) {
+		err = 0;
+		goto out; /* success */
+	}
+
+	bbot = au_sbbot(sb);
+	add_branch = au_br_alloc(sb, bbot + 2, add->perm);
+	err = PTR_ERR(add_branch);
+	if (IS_ERR(add_branch))
+		goto out;
+
+	err = au_br_init(add_branch, sb, add);
+	if (unlikely(err)) {
+		au_br_do_free(add_branch);
+		goto out;
+	}
+
+	add_bindex = add->bindex;
+	sysaufs_brs_del(sb, add_bindex);	/* remove successors */
+	au_br_do_add(sb, add_branch, add_bindex);
+	sysaufs_brs_add(sb, add_bindex);	/* append successors */
+	dbgaufs_brs_add(sb, add_bindex, /*topdown*/0);	/* rename successors */
+
+	h_dentry = add->path.dentry;
+	if (!add_bindex) {
+		au_cpup_attr_all(root_inode, /*force*/1);
+		sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
+	} else
+		au_add_nlink(root_inode, d_inode(h_dentry));
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static unsigned long long au_farray_cb(struct super_block *sb, void *a,
+				       unsigned long long max __maybe_unused,
+				       void *arg)
+{
+	unsigned long long n;
+	struct file **p, *f;
+	struct hlist_bl_head *files;
+	struct hlist_bl_node *pos;
+	struct au_finfo *finfo;
+
+	n = 0;
+	p = a;
+	files = &au_sbi(sb)->si_files;
+	hlist_bl_lock(files);
+	hlist_bl_for_each_entry(finfo, pos, files, fi_hlist) {
+		f = finfo->fi_file;
+		if (file_count(f)
+		    && !special_file(file_inode(f)->i_mode)) {
+			get_file(f);
+			*p++ = f;
+			n++;
+			AuDebugOn(n > max);
+		}
+	}
+	hlist_bl_unlock(files);
+
+	return n;
+}
+
+static struct file **au_farray_alloc(struct super_block *sb,
+				     unsigned long long *max)
+{
+	struct au_sbinfo *sbi;
+
+	sbi = au_sbi(sb);
+	*max = au_lcnt_read(&sbi->si_nfiles, /*do_rev*/1);
+	return au_array_alloc(max, au_farray_cb, sb, /*arg*/NULL);
+}
+
+static void au_farray_free(struct file **a, unsigned long long max)
+{
+	unsigned long long ull;
+
+	for (ull = 0; ull < max; ull++)
+		if (a[ull])
+			fput(a[ull]);
+	kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * delete a branch
+ */
+
+/* to show the line number, do not make it an inline function */
+#define AuVerbose(do_info, fmt, ...) do { \
+	if (do_info) \
+		pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int au_test_ibusy(struct inode *inode, aufs_bindex_t btop,
+			 aufs_bindex_t bbot)
+{
+	return (inode && !S_ISDIR(inode->i_mode)) || btop == bbot;
+}
+
+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t btop,
+			 aufs_bindex_t bbot)
+{
+	return au_test_ibusy(d_inode(dentry), btop, bbot);
+}
+
+/*
+ * test if the branch is deletable or not.
+ */
+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
+			    unsigned int sigen, const unsigned int verbose)
+{
+	int err, i, j, ndentry;
+	aufs_bindex_t btop, bbot;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry *d;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, root, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	for (i = 0; !err && i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		ndentry = dpage->ndentry;
+		for (j = 0; !err && j < ndentry; j++) {
+			d = dpage->dentries[j];
+			AuDebugOn(au_dcount(d) <= 0);
+			if (!au_digen_test(d, sigen)) {
+				di_read_lock_child(d, AuLock_IR);
+				if (unlikely(au_dbrange_test(d))) {
+					di_read_unlock(d, AuLock_IR);
+					continue;
+				}
+			} else {
+				di_write_lock_child(d);
+				if (unlikely(au_dbrange_test(d))) {
+					di_write_unlock(d);
+					continue;
+				}
+				err = au_reval_dpath(d, sigen);
+				if (!err)
+					di_downgrade_lock(d, AuLock_IR);
+				else {
+					di_write_unlock(d);
+					break;
+				}
+			}
+
+			/* AuDbgDentry(d); */
+			btop = au_dbtop(d);
+			bbot = au_dbbot(d);
+			if (btop <= bindex
+			    && bindex <= bbot
+			    && au_h_dptr(d, bindex)
+			    && au_test_dbusy(d, btop, bbot)) {
+				err = -EBUSY;
+				AuVerbose(verbose, "busy %pd\n", d);
+				AuDbgDentry(d);
+			}
+			di_read_unlock(d, AuLock_IR);
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
+			   unsigned int sigen, const unsigned int verbose)
+{
+	int err;
+	unsigned long long max, ull;
+	struct inode *i, **array;
+	aufs_bindex_t btop, bbot;
+
+	array = au_iarray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	err = 0;
+	AuDbg("b%d\n", bindex);
+	for (ull = 0; !err && ull < max; ull++) {
+		i = array[ull];
+		if (unlikely(!i))
+			break;
+		if (i->i_ino == AUFS_ROOT_INO)
+			continue;
+
+		/* AuDbgInode(i); */
+		if (au_iigen(i, NULL) == sigen)
+			ii_read_lock_child(i);
+		else {
+			ii_write_lock_child(i);
+			err = au_refresh_hinode_self(i);
+			au_iigen_dec(i);
+			if (!err)
+				ii_downgrade_lock(i);
+			else {
+				ii_write_unlock(i);
+				break;
+			}
+		}
+
+		btop = au_ibtop(i);
+		bbot = au_ibbot(i);
+		if (btop <= bindex
+		    && bindex <= bbot
+		    && au_h_iptr(i, bindex)
+		    && au_test_ibusy(i, btop, bbot)) {
+			err = -EBUSY;
+			AuVerbose(verbose, "busy i%lu\n", i->i_ino);
+			AuDbgInode(i);
+		}
+		ii_read_unlock(i);
+	}
+	au_iarray_free(array, max);
+
+out:
+	return err;
+}
+
+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
+			      const unsigned int verbose)
+{
+	int err;
+	unsigned int sigen;
+
+	sigen = au_sigen(root->d_sb);
+	DiMustNoWaiters(root);
+	IiMustNoWaiters(d_inode(root));
+	di_write_unlock(root);
+	err = test_dentry_busy(root, bindex, sigen, verbose);
+	if (!err)
+		err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
+	di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
+
+	return err;
+}
+
+static int test_dir_busy(struct file *file, aufs_bindex_t br_id,
+			 struct file **to_free, int *idx)
+{
+	int err;
+	unsigned char matched, root;
+	aufs_bindex_t bindex, bbot;
+	struct au_fidir *fidir;
+	struct au_hfile *hfile;
+
+	err = 0;
+	root = IS_ROOT(file->f_path.dentry);
+	if (root) {
+		get_file(file);
+		to_free[*idx] = file;
+		(*idx)++;
+		goto out;
+	}
+
+	matched = 0;
+	fidir = au_fi(file)->fi_hdir;
+	AuDebugOn(!fidir);
+	bbot = au_fbbot_dir(file);
+	for (bindex = au_fbtop(file); bindex <= bbot; bindex++) {
+		hfile = fidir->fd_hfile + bindex;
+		if (!hfile->hf_file)
+			continue;
+
+		if (hfile->hf_br->br_id == br_id) {
+			matched = 1;
+			break;
+		}
+	}
+	if (matched)
+		err = -EBUSY;
+
+out:
+	return err;
+}
+
+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id,
+			  struct file **to_free, int opened)
+{
+	int err, idx;
+	unsigned long long ull, max;
+	aufs_bindex_t btop;
+	struct file *file, **array;
+	struct dentry *root;
+	struct au_hfile *hfile;
+
+	array = au_farray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	err = 0;
+	idx = 0;
+	root = sb->s_root;
+	di_write_unlock(root);
+	for (ull = 0; ull < max; ull++) {
+		file = array[ull];
+		if (unlikely(!file))
+			break;
+
+		/* AuDbg("%pD\n", file); */
+		fi_read_lock(file);
+		btop = au_fbtop(file);
+		if (!d_is_dir(file->f_path.dentry)) {
+			hfile = &au_fi(file)->fi_htop;
+			if (hfile->hf_br->br_id == br_id)
+				err = -EBUSY;
+		} else
+			err = test_dir_busy(file, br_id, to_free, &idx);
+		fi_read_unlock(file);
+		if (unlikely(err))
+			break;
+	}
+	di_write_lock_child(root);
+	au_farray_free(array, max);
+	AuDebugOn(idx > opened);
+
+out:
+	return err;
+}
+
+static void br_del_file(struct file **to_free, unsigned long long opened,
+			aufs_bindex_t br_id)
+{
+	unsigned long long ull;
+	aufs_bindex_t bindex, btop, bbot, bfound;
+	struct file *file;
+	struct au_fidir *fidir;
+	struct au_hfile *hfile;
+
+	for (ull = 0; ull < opened; ull++) {
+		file = to_free[ull];
+		if (unlikely(!file))
+			break;
+
+		/* AuDbg("%pD\n", file); */
+		AuDebugOn(!d_is_dir(file->f_path.dentry));
+		bfound = -1;
+		fidir = au_fi(file)->fi_hdir;
+		AuDebugOn(!fidir);
+		fi_write_lock(file);
+		btop = au_fbtop(file);
+		bbot = au_fbbot_dir(file);
+		for (bindex = btop; bindex <= bbot; bindex++) {
+			hfile = fidir->fd_hfile + bindex;
+			if (!hfile->hf_file)
+				continue;
+
+			if (hfile->hf_br->br_id == br_id) {
+				bfound = bindex;
+				break;
+			}
+		}
+		AuDebugOn(bfound < 0);
+		au_set_h_fptr(file, bfound, NULL);
+		if (bfound == btop) {
+			for (btop++; btop <= bbot; btop++)
+				if (au_hf_dir(file, btop)) {
+					au_set_fbtop(file, btop);
+					break;
+				}
+		}
+		fi_write_unlock(file);
+	}
+}
+
+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
+			     const aufs_bindex_t bindex,
+			     const aufs_bindex_t bbot)
+{
+	struct au_branch **brp, **p;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	brp = sbinfo->si_branch + bindex;
+	if (bindex < bbot)
+		memmove(brp, brp + 1, sizeof(*brp) * (bbot - bindex));
+	sbinfo->si_branch[0 + bbot] = NULL;
+	sbinfo->si_bbot--;
+
+	p = au_krealloc(sbinfo->si_branch, sizeof(*p) * bbot, AuGFP_SBILIST,
+			/*may_shrink*/1);
+	if (p)
+		sbinfo->si_branch = p;
+	/* harmless error */
+}
+
+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
+			     const aufs_bindex_t bbot)
+{
+	struct au_hdentry *hdp, *p;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	hdp = au_hdentry(dinfo, bindex);
+	if (bindex < bbot)
+		memmove(hdp, hdp + 1, sizeof(*hdp) * (bbot - bindex));
+	/* au_h_dentry_init(au_hdentry(dinfo, bbot); */
+	dinfo->di_bbot--;
+
+	p = au_krealloc(dinfo->di_hdentry, sizeof(*p) * bbot, AuGFP_SBILIST,
+			/*may_shrink*/1);
+	if (p)
+		dinfo->di_hdentry = p;
+	/* harmless error */
+}
+
+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
+			     const aufs_bindex_t bbot)
+{
+	struct au_hinode *hip, *p;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	hip = au_hinode(iinfo, bindex);
+	if (bindex < bbot)
+		memmove(hip, hip + 1, sizeof(*hip) * (bbot - bindex));
+	/* au_hinode_init(au_hinode(iinfo, bbot)); */
+	iinfo->ii_bbot--;
+
+	p = au_krealloc(iinfo->ii_hinode, sizeof(*p) * bbot, AuGFP_SBILIST,
+			/*may_shrink*/1);
+	if (p)
+		iinfo->ii_hinode = p;
+	/* harmless error */
+}
+
+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
+			 struct au_branch *br)
+{
+	aufs_bindex_t bbot;
+	struct au_sbinfo *sbinfo;
+	struct dentry *root, *h_root;
+	struct inode *inode, *h_inode;
+	struct au_hinode *hinode;
+
+	SiMustWriteLock(sb);
+
+	root = sb->s_root;
+	inode = d_inode(root);
+	sbinfo = au_sbi(sb);
+	bbot = sbinfo->si_bbot;
+
+	h_root = au_h_dptr(root, bindex);
+	hinode = au_hi(inode, bindex);
+	h_inode = au_igrab(hinode->hi_inode);
+	au_hiput(hinode);
+
+	au_sbilist_lock();
+	au_br_do_del_brp(sbinfo, bindex, bbot);
+	au_br_do_del_hdp(au_di(root), bindex, bbot);
+	au_br_do_del_hip(au_ii(inode), bindex, bbot);
+	au_sbilist_unlock();
+
+	/* ignore an error */
+	au_dr_br_fin(sb, br); /* always, regardless of the mount option */
+
+	dput(h_root);
+	iput(h_inode);
+	au_br_do_free(br);
+}
+
+static unsigned long long empty_cb(struct super_block *sb, void *array,
+				   unsigned long long max, void *arg)
+{
+	return max;
+}
+
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
+{
+	int err, rerr, i;
+	unsigned long long opened;
+	unsigned int mnt_flags;
+	aufs_bindex_t bindex, bbot, br_id;
+	unsigned char do_wh, verbose;
+	struct au_branch *br;
+	struct au_wbr *wbr;
+	struct dentry *root;
+	struct file **to_free;
+
+	err = 0;
+	opened = 0;
+	to_free = NULL;
+	root = sb->s_root;
+	bindex = au_find_dbindex(root, del->h_path.dentry);
+	if (bindex < 0) {
+		if (remount)
+			goto out; /* success */
+		err = -ENOENT;
+		pr_err("%s no such branch\n", del->pathname);
+		goto out;
+	}
+	AuDbg("bindex b%d\n", bindex);
+
+	err = -EBUSY;
+	mnt_flags = au_mntflags(sb);
+	verbose = !!au_opt_test(mnt_flags, VERBOSE);
+	bbot = au_sbbot(sb);
+	if (unlikely(!bbot)) {
+		AuVerbose(verbose, "no more branches left\n");
+		goto out;
+	}
+
+	br = au_sbr(sb, bindex);
+	AuDebugOn(!path_equal(&br->br_path, &del->h_path));
+	if (unlikely(au_lcnt_read(&br->br_count, /*do_rev*/1))) {
+		AuVerbose(verbose, "br %pd2 is busy now\n", del->h_path.dentry);
+		goto out;
+	}
+
+	br_id = br->br_id;
+	opened = au_lcnt_read(&br->br_nfiles, /*do_rev*/1);
+	if (unlikely(opened)) {
+		to_free = au_array_alloc(&opened, empty_cb, sb, NULL);
+		err = PTR_ERR(to_free);
+		if (IS_ERR(to_free))
+			goto out;
+
+		err = test_file_busy(sb, br_id, to_free, opened);
+		if (unlikely(err)) {
+			AuVerbose(verbose, "%llu file(s) opened\n", opened);
+			goto out;
+		}
+	}
+
+	wbr = br->br_wbr;
+	do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
+	if (do_wh) {
+		/* instead of WbrWhMustWriteLock(wbr) */
+		SiMustWriteLock(sb);
+		for (i = 0; i < AuBrWh_Last; i++) {
+			dput(wbr->wbr_wh[i]);
+			wbr->wbr_wh[i] = NULL;
+		}
+	}
+
+	err = test_children_busy(root, bindex, verbose);
+	if (unlikely(err)) {
+		if (do_wh)
+			goto out_wh;
+		goto out;
+	}
+
+	err = 0;
+	if (to_free) {
+		/*
+		 * now that the branch is confirmed to be deletable,
+		 * free the remaining opened dirs on the branch.
+		 */
+		di_write_unlock(root);
+		br_del_file(to_free, opened, br_id);
+		di_write_lock_child(root);
+	}
+
+	sysaufs_brs_del(sb, bindex);	/* remove successors */
+	dbgaufs_xino_del(br);		/* remove one */
+	au_br_do_del(sb, bindex, br);
+	sysaufs_brs_add(sb, bindex);	/* append successors */
+	dbgaufs_brs_add(sb, bindex, /*topdown*/1);	/* rename successors */
+
+	if (!bindex) {
+		au_cpup_attr_all(d_inode(root), /*force*/1);
+		sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
+	} else
+		au_sub_nlink(d_inode(root), d_inode(del->h_path.dentry));
+	if (au_opt_test(mnt_flags, PLINK))
+		au_plink_half_refresh(sb, br_id);
+
+	goto out; /* success */
+
+out_wh:
+	/* revert */
+	rerr = au_br_init_wh(sb, br, br->br_perm);
+	if (rerr)
+		pr_warn("failed re-creating base whiteout, %s. (%d)\n",
+			del->pathname, rerr);
+out:
+	if (to_free)
+		au_farray_free(to_free, opened);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
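+/*
+ * body of the ibusy ioctl: for the aufs inode @ibusy.ino and the branch
+ * @ibusy.bindex, return the corresponding branch inode number via
+ * @arg->h_ino (0 when the inode is not busy on that branch).
+ */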
+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
+{
+	int err;
+	aufs_bindex_t btop, bbot;
+	struct aufs_ibusy ibusy;
+	struct inode *inode, *h_inode;
+
+	err = -EPERM;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = copy_from_user(&ibusy, arg, sizeof(ibusy));
+	if (!err)
+		/* VERIFY_WRITE */
+		err = !access_ok(&arg->h_ino, sizeof(arg->h_ino));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+
+	err = -EINVAL;
+	si_read_lock(sb, AuLock_FLUSH);
+	if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbbot(sb)))
+		goto out_unlock;
+
+	err = 0;
+	ibusy.h_ino = 0; /* invalid */
+	inode = ilookup(sb, ibusy.ino);
+	if (!inode
+	    || inode->i_ino == AUFS_ROOT_INO
+	    || au_is_bad_inode(inode))
+		goto out_unlock;
+
+	ii_read_lock_child(inode);
+	btop = au_ibtop(inode);
+	bbot = au_ibbot(inode);
+	if (btop <= ibusy.bindex && ibusy.bindex <= bbot) {
+		h_inode = au_h_iptr(inode, ibusy.bindex);
+		if (h_inode && au_test_ibusy(inode, btop, bbot))
+			ibusy.h_ino = h_inode->i_ino;
+	}
+	ii_read_unlock(inode);
+	iput(inode);
+
+out_unlock:
+	si_read_unlock(sb);
+	if (!err) {
+		err = __put_user(ibusy.h_ino, &arg->h_ino);
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+	}
+out:
+	return err;
+}
+
+long au_ibusy_ioctl(struct file *file, unsigned long arg)
+{
+	return au_ibusy(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
+{
+	return au_ibusy(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * change a branch permission
+ */
+
+static void au_warn_ima(void)
+{
+#ifdef CONFIG_IMA
+	/* since it doesn't support mark_files_ro() */
+	AuWarn1("RW -> RO makes IMA to produce wrong message\n");
+#endif
+}
+
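+/*
+ * changing the whiteout-ability of a branch (in either direction) requires
+ * incrementing the superblock generation so that cached dentries/inodes
+ * will be revalidated.
+ */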
+static int do_need_sigen_inc(int a, int b)
+{
+	return au_br_whable(a) && !au_br_whable(b);
+}
+
+static int need_sigen_inc(int old, int new)
+{
+	return do_need_sigen_inc(old, new)
+		|| do_need_sigen_inc(new, old);
+}
+
+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
+{
+	int err, do_warn;
+	unsigned int mnt_flags;
+	unsigned long long ull, max;
+	aufs_bindex_t br_id;
+	unsigned char verbose, writer;
+	struct file *file, *hf, **array;
+	struct au_hfile *hfile;
+	struct inode *h_inode;
+
+	mnt_flags = au_mntflags(sb);
+	verbose = !!au_opt_test(mnt_flags, VERBOSE);
+
+	array = au_farray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	do_warn = 0;
+	br_id = au_sbr_id(sb, bindex);
+	for (ull = 0; ull < max; ull++) {
+		file = array[ull];
+		if (unlikely(!file))
+			break;
+
+		/* AuDbg("%pD\n", file); */
+		fi_read_lock(file);
+		if (unlikely(au_test_mmapped(file))) {
+			err = -EBUSY;
+			AuVerbose(verbose, "mmapped %pD\n", file);
+			AuDbgFile(file);
+			FiMustNoWaiters(file);
+			fi_read_unlock(file);
+			goto out_array;
+		}
+
+		hfile = &au_fi(file)->fi_htop;
+		hf = hfile->hf_file;
+		if (!d_is_reg(file->f_path.dentry)
+		    || !(file->f_mode & FMODE_WRITE)
+		    || hfile->hf_br->br_id != br_id
+		    || !(hf->f_mode & FMODE_WRITE))
+			array[ull] = NULL;
+		else {
+			do_warn = 1;
+			get_file(file);
+		}
+
+		FiMustNoWaiters(file);
+		fi_read_unlock(file);
+		fput(file);
+	}
+
+	err = 0;
+	if (do_warn)
+		au_warn_ima();
+
+	for (ull = 0; ull < max; ull++) {
+		file = array[ull];
+		if (!file)
+			continue;
+
+		/* todo: already flushed? */
+		/*
+		 * fs/super.c:mark_files_ro() is gone, but aufs keeps its
+		 * approach which resets f_mode and calls mnt_drop_write() and
+		 * file_release_write() for each file, because the branch
+		 * attribute in the aufs world is totally different from the
+		 * native fs rw/ro mode.
+		 */
+		/* fi_read_lock(file); */
+		hfile = &au_fi(file)->fi_htop;
+		hf = hfile->hf_file;
+		/* fi_read_unlock(file); */
+		spin_lock(&hf->f_lock);
+		writer = !!(hf->f_mode & FMODE_WRITER);
+		hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER);
+		spin_unlock(&hf->f_lock);
+		if (writer) {
+			h_inode = file_inode(hf);
+			if (hf->f_mode & FMODE_READ)
+				i_readcount_inc(h_inode);
+			put_write_access(h_inode);
+			__mnt_drop_write(hf->f_path.mnt);
+		}
+	}
+
+out_array:
+	au_farray_free(array, max);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+	      int *do_refresh)
+{
+	int err, rerr;
+	aufs_bindex_t bindex;
+	struct dentry *root;
+	struct au_branch *br;
+	struct au_br_fhsm *bf;
+
+	root = sb->s_root;
+	bindex = au_find_dbindex(root, mod->h_root);
+	if (bindex < 0) {
+		if (remount)
+			return 0; /* success */
+		err = -ENOENT;
+		pr_err("%s no such branch\n", mod->path);
+		goto out;
+	}
+	AuDbg("bindex b%d\n", bindex);
+
+	err = test_br(d_inode(mod->h_root), mod->perm, mod->path);
+	if (unlikely(err))
+		goto out;
+
+	br = au_sbr(sb, bindex);
+	AuDebugOn(mod->h_root != au_br_dentry(br));
+	if (br->br_perm == mod->perm)
+		return 0; /* success */
+
+	/* pre-allocate for non-fhsm --> fhsm */
+	bf = NULL;
+	if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) {
+		err = au_fhsm_br_alloc(br);
+		if (unlikely(err))
+			goto out;
+		bf = br->br_fhsm;
+		br->br_fhsm = NULL;
+	}
+
+	if (au_br_writable(br->br_perm)) {
+		/* remove whiteout base */
+		err = au_br_init_wh(sb, br, mod->perm);
+		if (unlikely(err))
+			goto out_bf;
+
+		if (!au_br_writable(mod->perm)) {
+			/* rw --> ro, file might be mmapped */
+			DiMustNoWaiters(root);
+			IiMustNoWaiters(d_inode(root));
+			di_write_unlock(root);
+			err = au_br_mod_files_ro(sb, bindex);
+			/* aufs_write_lock() calls ..._child() */
+			di_write_lock_child(root);
+
+			if (unlikely(err)) {
+				rerr = -ENOMEM;
+				br->br_wbr = kzalloc(sizeof(*br->br_wbr),
+						     GFP_NOFS);
+				if (br->br_wbr)
+					rerr = au_wbr_init(br, sb, br->br_perm);
+				if (unlikely(rerr)) {
+					AuIOErr("nested error %d (%d)\n",
+						rerr, err);
+					br->br_perm = mod->perm;
+				}
+			}
+		}
+	} else if (au_br_writable(mod->perm)) {
+		/* ro --> rw */
+		err = -ENOMEM;
+		br->br_wbr = kzalloc(sizeof(*br->br_wbr), GFP_NOFS);
+		if (br->br_wbr) {
+			err = au_wbr_init(br, sb, mod->perm);
+			if (unlikely(err)) {
+				au_kfree_rcu(br->br_wbr);
+				br->br_wbr = NULL;
+			}
+		}
+	}
+	if (unlikely(err))
+		goto out_bf;
+
+	if (au_br_fhsm(br->br_perm)) {
+		if (!au_br_fhsm(mod->perm)) {
+			/* fhsm --> non-fhsm */
+			au_br_fhsm_fin(br->br_fhsm);
+			au_kfree_rcu(br->br_fhsm);
+			br->br_fhsm = NULL;
+		}
+	} else if (au_br_fhsm(mod->perm))
+		/* non-fhsm --> fhsm */
+		br->br_fhsm = bf;
+
+	*do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
+	br->br_perm = mod->perm;
+	goto out; /* success */
+
+out_bf:
+	au_kfree_try_rcu(bf);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs)
+{
+	int err;
+	struct kstatfs kstfs;
+
+	err = vfs_statfs(&br->br_path, &kstfs);
+	if (!err) {
+		stfs->f_blocks = kstfs.f_blocks;
+		stfs->f_bavail = kstfs.f_bavail;
+		stfs->f_files = kstfs.f_files;
+		stfs->f_ffree = kstfs.f_ffree;
+	}
+
+	return err;
+}
diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h
new file mode 100644
index 00000000000..594c8bd674b
--- /dev/null
+++ b/fs/aufs/branch.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * branch filesystems and xino for them
+ */
+
+#ifndef __AUFS_BRANCH_H__
+#define __AUFS_BRANCH_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+#include "dirren.h"
+#include "dynop.h"
+#include "lcnt.h"
+#include "rwsem.h"
+#include "super.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* a xino file */
+struct au_xino {
+	struct file		**xi_file;
+	unsigned int		xi_nfile;
+
+	struct {
+		spinlock_t		spin;
+		ino_t			*array;
+		int			total;
+		/* reserved for future use */
+		/* unsigned long	*bitmap; */
+		wait_queue_head_t	wqh;
+	} xi_nondir;
+
+	struct mutex		xi_mtx;	/* protects xi_file array */
+	struct hlist_bl_head	xi_writing;
+
+	atomic_t		xi_truncating;
+
+	struct kref		xi_kref;
+};
+
+/* File-based Hierarchical Storage Management */
+struct au_br_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+	struct mutex		bf_lock;
+	unsigned long		bf_jiffy;
+	struct aufs_stfs	bf_stfs;
+	int			bf_readable;
+#endif
+};
+
+/* members for writable branch only */
+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
+struct au_wbr {
+	struct au_rwsem		wbr_wh_rwsem;
+	struct dentry		*wbr_wh[AuBrWh_Last];
+	atomic_t		wbr_wh_running;
+#define wbr_whbase		wbr_wh[AuBrWh_BASE]	/* whiteout base */
+#define wbr_plink		wbr_wh[AuBrWh_PLINK]	/* pseudo-link dir */
+#define wbr_orph		wbr_wh[AuBrWh_ORPH]	/* dir for orphans */
+
+	/* mfs mode */
+	unsigned long long	wbr_bytes;
+};
+
+/* ext2 has at least 3 types of operations, ext3 has 4 */
+#define AuBrDynOp (AuDyLast * 4)
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+/* support for asynchronous destruction */
+struct au_br_hfsnotify {
+	struct fsnotify_group	*hfsn_group;
+};
+#endif
+
+/* sysfs entries */
+struct au_brsysfs {
+	char			name[16];
+	struct attribute	attr;
+};
+
+enum {
+	AuBrSysfs_BR,
+	AuBrSysfs_BRID,
+	AuBrSysfs_Last
+};
+
+/* protected by superblock rwsem */
+struct au_branch {
+	struct au_xino		*br_xino;
+
+	aufs_bindex_t		br_id;
+
+	int			br_perm;
+	struct path		br_path;
+	spinlock_t		br_dykey_lock;
+	struct au_dykey		*br_dykey[AuBrDynOp];
+	au_lcnt_t		br_nfiles;	/* opened files */
+	au_lcnt_t		br_count;	/* in-use for other */
+
+	struct au_wbr		*br_wbr;
+	struct au_br_fhsm	*br_fhsm;
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	struct au_br_hfsnotify	*br_hfsn;
+#endif
+
+#ifdef CONFIG_SYSFS
+	/* entries under sysfs per mount-point */
+	struct au_brsysfs	br_sysfs[AuBrSysfs_Last];
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		 *br_dbgaufs; /* xino */
+#endif
+
+	struct au_dr_br		br_dirren;
+};
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
+{
+	return br->br_path.mnt;
+}
+
+static inline struct dentry *au_br_dentry(struct au_branch *br)
+{
+	return br->br_path.dentry;
+}
+
+static inline struct super_block *au_br_sb(struct au_branch *br)
+{
+	return au_br_mnt(br)->mnt_sb;
+}
+
+static inline int au_br_rdonly(struct au_branch *br)
+{
+	return (sb_rdonly(au_br_sb(br))
+		|| !au_br_writable(br->br_perm))
+		? -EROFS : 0;
+}
+
+static inline int au_br_hnotifyable(int brperm __maybe_unused)
+{
+#ifdef CONFIG_AUFS_HNOTIFY
+	return !(brperm & AuBrPerm_RR);
+#else
+	return 0;
+#endif
+}
+
+static inline int au_br_test_oflag(int oflag, struct au_branch *br)
+{
+	int err, exec_flag;
+
+	err = 0;
+	exec_flag = oflag & __FMODE_EXEC;
+	if (unlikely(exec_flag && path_noexec(&br->br_path)))
+		err = -EACCES;
+
+	return err;
+}
+
+static inline void au_xino_get(struct au_branch *br)
+{
+	struct au_xino *xi;
+
+	xi = br->br_xino;
+	if (xi)
+		kref_get(&xi->xi_kref);
+}
+
+static inline int au_xino_count(struct au_branch *br)
+{
+	int v;
+	struct au_xino *xi;
+
+	v = 0;
+	xi = br->br_xino;
+	if (xi)
+		v = kref_read(&xi->xi_kref);
+
+	return v;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* branch.c */
+struct au_sbinfo;
+void au_br_free(struct au_sbinfo *sinfo);
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
+struct au_opt_add;
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
+struct au_opt_del;
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
+long au_ibusy_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+struct au_opt_mod;
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+	      int *do_refresh);
+struct aufs_stfs;
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs);
+
+/* xino.c */
+static const loff_t au_loff_max = LLONG_MAX;
+
+aufs_bindex_t au_xi_root(struct super_block *sb, struct dentry *dentry);
+struct file *au_xino_create(struct super_block *sb, char *fpath, int silent,
+			    int wbrtop);
+struct file *au_xino_create2(struct super_block *sb, struct path *base,
+			     struct file *copy_src);
+struct au_xi_new {
+	struct au_xino *xi;	/* switch between xino and xigen */
+	int idx;
+	struct path *base;
+	struct file *copy_src;
+};
+struct file *au_xi_new(struct super_block *sb, struct au_xi_new *xinew);
+
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		 ino_t *ino);
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		  ino_t ino);
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *buf, size_t size,
+		   loff_t *pos);
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+		    size_t size, loff_t *pos);
+
+int au_xib_trunc(struct super_block *sb);
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex, int idx_begin);
+
+struct au_xino *au_xino_alloc(unsigned int nfile);
+int au_xino_put(struct au_branch *br);
+struct file *au_xino_file1(struct au_xino *xi);
+
+struct au_opt_xino;
+void au_xino_clr(struct super_block *sb);
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xiopt, int remount);
+struct file *au_xino_def(struct super_block *sb);
+int au_xino_init_br(struct super_block *sb, struct au_branch *br, ino_t hino,
+		    struct path *base);
+
+ino_t au_xino_new_ino(struct super_block *sb);
+void au_xino_delete_inode(struct inode *inode, const int unlinked);
+
+void au_xinondir_leave(struct super_block *sb, aufs_bindex_t bindex,
+		       ino_t h_ino, int idx);
+int au_xinondir_enter(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		      int *idx);
+
+int au_xino_path(struct seq_file *seq, struct file *file);
+
+/* ---------------------------------------------------------------------- */
+
+/* @idx is signed to accept -1 meaning the first file */
+static inline struct file *au_xino_file(struct au_xino *xi, int idx)
+{
+	struct file *file;
+
+	file = NULL;
+	if (!xi)
+		goto out;
+
+	if (idx >= 0) {
+		if (idx < xi->xi_nfile)
+			file = xi->xi_file[idx];
+	} else
+		file = au_xino_file1(xi);
+
+out:
+	return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Superblock to branch */
+static inline
+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_sbr(sb, bindex)->br_id;
+}
+
+static inline
+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_mnt(au_sbr(sb, bindex));
+}
+
+static inline
+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_sb(au_sbr(sb, bindex));
+}
+
+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_sbr(sb, bindex)->br_perm;
+}
+
+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_whable(au_sbr_perm(sb, bindex));
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define wbr_wh_read_lock(wbr)	au_rw_read_lock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_lock(wbr)	au_rw_write_lock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_read_trylock(wbr)	au_rw_read_trylock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_trylock(wbr) au_rw_write_trylock(&(wbr)->wbr_wh_rwsem)
+/*
+#define wbr_wh_read_trylock_nested(wbr) \
+	au_rw_read_trylock_nested(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_trylock_nested(wbr) \
+	au_rw_write_trylock_nested(&(wbr)->wbr_wh_rwsem)
+*/
+
+#define wbr_wh_read_unlock(wbr)	au_rw_read_unlock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_write_unlock(wbr)	au_rw_write_unlock(&(wbr)->wbr_wh_rwsem)
+#define wbr_wh_downgrade_lock(wbr)	au_rw_dgrade_lock(&(wbr)->wbr_wh_rwsem)
+
+#define WbrWhMustNoWaiters(wbr)	AuRwMustNoWaiters(&(wbr)->wbr_wh_rwsem)
+#define WbrWhMustAnyLock(wbr)	AuRwMustAnyLock(&(wbr)->wbr_wh_rwsem)
+#define WbrWhMustWriteLock(wbr)	AuRwMustWriteLock(&(wbr)->wbr_wh_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_FHSM
+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm)
+{
+	mutex_init(&brfhsm->bf_lock);
+	brfhsm->bf_jiffy = 0;
+	brfhsm->bf_readable = 0;
+}
+
+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm)
+{
+	mutex_destroy(&brfhsm->bf_lock);
+}
+#else
+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm)
+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_BRANCH_H__ */
diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk
new file mode 100644
index 00000000000..12782f8e0f3
--- /dev/null
+++ b/fs/aufs/conf.mk
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+
+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS}
+
+define AuConf
+ifdef ${1}
+AuConfStr += ${1}=${${1}}
+endif
+endef
+
+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \
+	SBILIST \
+	HNOTIFY HFSNOTIFY \
+	EXPORT INO_T_64 \
+	XATTR \
+	FHSM \
+	RDU \
+	DIRREN \
+	SHWH \
+	BR_RAMFS \
+	BR_FUSE POLL \
+	BR_HFSPLUS \
+	BDEV_LOOP \
+	DEBUG MAGIC_SYSRQ
+$(foreach i, ${AuConfAll}, \
+	$(eval $(call AuConf,CONFIG_AUFS_${i})))
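+# e.g. with CONFIG_AUFS_XATTR=y in the kernel config, AuConfStr gains
+# "CONFIG_AUFS_XATTR=y" (illustrative; the actual list depends on .config)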
+
+AuConfName = ${obj}/conf.str
+${AuConfName}.tmp: FORCE
+	@echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@
+${AuConfName}: ${AuConfName}.tmp
+	@diff -q $< $@ > /dev/null 2>&1 || { \
+	echo '  GEN    ' $@; \
+	cp -p $< $@; \
+	}
+FORCE:
+clean-files += ${AuConfName} ${AuConfName}.tmp
+${obj}/sysfs.o: ${AuConfName}
+
+-include ${srctree}/${src}/conf_priv.mk
diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c
new file mode 100644
index 00000000000..492442339b6
--- /dev/null
+++ b/fs/aufs/cpup.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * copy-up functions, see wbr_policy.c for copy-down
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include <linux/task_work.h>
+#include "aufs.h"
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
+{
+	const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
+		| S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
+
+	BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
+
+	dst->i_flags |= iflags & ~mask;
+	if (au_test_fs_notime(dst->i_sb))
+		dst->i_flags |= S_NOATIME | S_NOCMTIME;
+}
+
+void au_cpup_attr_timesizes(struct inode *inode)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibtop(inode));
+	fsstack_copy_attr_times(inode, h_inode);
+	fsstack_copy_inode_size(inode, h_inode);
+}
+
+void au_cpup_attr_nlink(struct inode *inode, int force)
+{
+	struct inode *h_inode;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bbot;
+
+	sb = inode->i_sb;
+	bindex = au_ibtop(inode);
+	h_inode = au_h_iptr(inode, bindex);
+	if (!force
+	    && !S_ISDIR(h_inode->i_mode)
+	    && au_opt_test(au_mntflags(sb), PLINK)
+	    && au_plink_test(inode))
+		return;
+
+	/*
+	 * 0 can happen during revalidation.
+	 * h_inode->i_mutex may not be held here, but it is harmless since once
+	 * i_nlink reaches 0, it will never become positive except in the
+	 * O_TMPFILE case.
+	 * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause
+	 *	 an incorrect link count.
+	 */
+	set_nlink(inode, h_inode->i_nlink);
+
+	/*
+	 * a smaller nlink makes find(1) noisy, but a larger one doesn't.
+	 * it may include the whplink directory.
+	 */
+	if (S_ISDIR(h_inode->i_mode)) {
+		bbot = au_ibbot(inode);
+		for (bindex++; bindex <= bbot; bindex++) {
+			h_inode = au_h_iptr(inode, bindex);
+			if (h_inode)
+				au_add_nlink(inode, h_inode);
+		}
+	}
+}
+
+void au_cpup_attr_changeable(struct inode *inode)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibtop(inode));
+	inode->i_mode = h_inode->i_mode;
+	inode->i_uid = h_inode->i_uid;
+	inode->i_gid = h_inode->i_gid;
+	au_cpup_attr_timesizes(inode);
+	au_cpup_attr_flags(inode, h_inode->i_flags);
+}
+
+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
+{
+	struct au_iinfo *iinfo = au_ii(inode);
+
+	IiMustWriteLock(inode);
+
+	iinfo->ii_higen = h_inode->i_generation;
+	iinfo->ii_hsb1 = h_inode->i_sb;
+}
+
+void au_cpup_attr_all(struct inode *inode, int force)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibtop(inode));
+	au_cpup_attr_changeable(inode);
+	if (inode->i_nlink > 0)
+		au_cpup_attr_nlink(inode, force);
+	inode->i_rdev = h_inode->i_rdev;
+	inode->i_blkbits = h_inode->i_blkbits;
+	au_cpup_igen(inode, h_inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Note: dt_dentry and dt_h_path.dentry are not dget/dput-ed */
+
+/* keep the timestamps of the parent dir during copy-up */
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+		    struct path *h_path)
+{
+	struct inode *h_inode;
+
+	dt->dt_dentry = dentry;
+	dt->dt_h_path = *h_path;
+	h_inode = d_inode(h_path->dentry);
+	dt->dt_atime = h_inode->i_atime;
+	dt->dt_mtime = h_inode->i_mtime;
+	/* smp_mb(); */
+}
+
+void au_dtime_revert(struct au_dtime *dt)
+{
+	struct iattr attr;
+	int err;
+
+	attr.ia_atime = dt->dt_atime;
+	attr.ia_mtime = dt->dt_mtime;
+	attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
+		| ATTR_ATIME | ATTR_ATIME_SET;
+
+	/* no delegation since this is a directory */
+	err = vfsub_notify_change(&dt->dt_h_path, &attr, /*delegated*/NULL);
+	if (unlikely(err))
+		pr_warn("restoring timestamps failed(%d). ignored\n", err);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* internal use only */
+struct au_cpup_reg_attr {
+	int		valid;
+	struct kstat	st;
+	unsigned int	iflags; /* inode->i_flags */
+};
+
+static noinline_for_stack
+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
+	       struct au_cpup_reg_attr *h_src_attr)
+{
+	int err, sbits, icex;
+	unsigned int mnt_flags;
+	unsigned char verbose;
+	struct iattr ia;
+	struct path h_path;
+	struct inode *h_isrc, *h_idst;
+	struct kstat *h_st;
+	struct au_branch *br;
+
+	h_path.dentry = au_h_dptr(dst, bindex);
+	h_idst = d_inode(h_path.dentry);
+	br = au_sbr(dst->d_sb, bindex);
+	h_path.mnt = au_br_mnt(br);
+	h_isrc = d_inode(h_src);
+	ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
+		| ATTR_ATIME | ATTR_MTIME
+		| ATTR_ATIME_SET | ATTR_MTIME_SET;
+	if (h_src_attr && h_src_attr->valid) {
+		h_st = &h_src_attr->st;
+		ia.ia_uid = h_st->uid;
+		ia.ia_gid = h_st->gid;
+		ia.ia_atime = h_st->atime;
+		ia.ia_mtime = h_st->mtime;
+		if (h_idst->i_mode != h_st->mode
+		    && !S_ISLNK(h_idst->i_mode)) {
+			ia.ia_valid |= ATTR_MODE;
+			ia.ia_mode = h_st->mode;
+		}
+		sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
+		au_cpup_attr_flags(h_idst, h_src_attr->iflags);
+	} else {
+		ia.ia_uid = h_isrc->i_uid;
+		ia.ia_gid = h_isrc->i_gid;
+		ia.ia_atime = h_isrc->i_atime;
+		ia.ia_mtime = h_isrc->i_mtime;
+		if (h_idst->i_mode != h_isrc->i_mode
+		    && !S_ISLNK(h_idst->i_mode)) {
+			ia.ia_valid |= ATTR_MODE;
+			ia.ia_mode = h_isrc->i_mode;
+		}
+		sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
+		au_cpup_attr_flags(h_idst, h_isrc->i_flags);
+	}
+	/* no delegation since it is just created */
+	err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+
+	/* is this nfs only? */
+	if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
+		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+		ia.ia_mode = h_isrc->i_mode;
+		err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+	}
+
+	icex = br->br_perm & AuBrAttr_ICEX;
+	if (!err) {
+		mnt_flags = au_mntflags(dst->d_sb);
+		verbose = !!au_opt_test(mnt_flags, VERBOSE);
+		err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
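+/*
+ * block-by-block copy. a block consisting entirely of zero bytes is not
+ * written; the destination position is simply advanced by llseek(SEEK_CUR)
+ * so that holes in the source are preserved. a trailing hole is completed
+ * by setting the size via notify_change (with an extra one-byte write on
+ * NFS).
+ */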
+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
+			   char *buf, unsigned long blksize)
+{
+	int err;
+	size_t sz, rbytes, wbytes;
+	unsigned char all_zero;
+	char *p, *zp;
+	struct inode *h_inode;
+	/* reduce stack usage */
+	struct iattr *ia;
+
+	zp = page_address(ZERO_PAGE(0));
+	if (unlikely(!zp))
+		return -ENOMEM; /* possible? */
+
+	err = 0;
+	all_zero = 0;
+	while (len) {
+		AuDbg("len %lld\n", len);
+		sz = blksize;
+		if (len < blksize)
+			sz = len;
+
+		rbytes = 0;
+		/* todo: signal_pending? */
+		while (!rbytes || err == -EAGAIN || err == -EINTR) {
+			rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
+			err = rbytes;
+		}
+		if (unlikely(err < 0))
+			break;
+
+		all_zero = 0;
+		if (len >= rbytes && rbytes == blksize)
+			all_zero = !memcmp(buf, zp, rbytes);
+		if (!all_zero) {
+			wbytes = rbytes;
+			p = buf;
+			while (wbytes) {
+				size_t b;
+
+				b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
+				err = b;
+				/* todo: signal_pending? */
+				if (unlikely(err == -EAGAIN || err == -EINTR))
+					continue;
+				if (unlikely(err < 0))
+					break;
+				wbytes -= b;
+				p += b;
+			}
+			if (unlikely(err < 0))
+				break;
+		} else {
+			loff_t res;
+
+			AuLabel(hole);
+			res = vfsub_llseek(dst, rbytes, SEEK_CUR);
+			err = res;
+			if (unlikely(res < 0))
+				break;
+		}
+		len -= rbytes;
+		err = 0;
+	}
+
+	/* the last block may be a hole */
+	if (!err && all_zero) {
+		AuLabel(last hole);
+
+		err = 1;
+		if (au_test_nfs(dst->f_path.dentry->d_sb)) {
+			/* nfs requires this step to make the last hole */
+			/* is this only nfs? */
+			do {
+				/* todo: signal_pending? */
+				err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
+			} while (err == -EAGAIN || err == -EINTR);
+			if (err == 1)
+				dst->f_pos--;
+		}
+
+		if (err == 1) {
+			ia = (void *)buf;
+			ia->ia_size = dst->f_pos;
+			ia->ia_valid = ATTR_SIZE | ATTR_FILE;
+			ia->ia_file = dst;
+			h_inode = file_inode(dst);
+			inode_lock_nested(h_inode, AuLsc_I_CHILD2);
+			/* no delegation since it is just created */
+			err = vfsub_notify_change(&dst->f_path, ia,
+						  /*delegated*/NULL);
+			inode_unlock(h_inode);
+		}
+	}
+
+	return err;
+}
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len)
+{
+	int err;
+	unsigned long blksize;
+	unsigned char do_kfree;
+	char *buf;
+	struct super_block *h_sb;
+
+	err = -ENOMEM;
+	h_sb = file_inode(dst)->i_sb;
+	blksize = h_sb->s_blocksize;
+	if (!blksize || PAGE_SIZE < blksize)
+		blksize = PAGE_SIZE;
+	AuDbg("blksize %lu\n", blksize);
+	do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
+	if (do_kfree)
+		buf = kmalloc(blksize, GFP_NOFS);
+	else
+		buf = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!buf))
+		goto out;
+
+	if (len > (1 << 22))
+		AuDbg("copying a large file %lld\n", (long long)len);
+
+	src->f_pos = 0;
+	dst->f_pos = 0;
+	err = au_do_copy_file(dst, src, len, buf, blksize);
+	if (do_kfree) {
+		AuDebugOn(!au_kfree_do_sz_test(blksize));
+		au_kfree_do_rcu(buf);
+	} else
+		free_page((unsigned long)buf);
+
+out:
+	return err;
+}
+
+static int au_do_copy(struct file *dst, struct file *src, loff_t len)
+{
+	int err;
+	struct super_block *h_src_sb;
+	struct inode *h_src_inode;
+
+	h_src_inode = file_inode(src);
+	h_src_sb = h_src_inode->i_sb;
+
+	/* XFS acquires inode_lock */
+	if (!au_test_xfs(h_src_sb))
+		err = au_copy_file(dst, src, len);
+	else {
+		inode_unlock_shared(h_src_inode);
+		err = au_copy_file(dst, src, len);
+		inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+	}
+
+	return err;
+}
+
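+/*
+ * prefer cloning via ->remap_file_range() when both files live on the same
+ * sb; otherwise, or when cloning fails or is unsupported, fall back to a
+ * plain copy.
+ */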
+static int au_clone_or_copy(struct file *dst, struct file *src, loff_t len)
+{
+	int err;
+	loff_t lo;
+	struct super_block *h_src_sb;
+	struct inode *h_src_inode;
+
+	h_src_inode = file_inode(src);
+	h_src_sb = h_src_inode->i_sb;
+	if (h_src_sb != file_inode(dst)->i_sb
+	    || !dst->f_op->remap_file_range) {
+		err = au_do_copy(dst, src, len);
+		goto out;
+	}
+
+	if (!au_test_nfs(h_src_sb)) {
+		inode_unlock_shared(h_src_inode);
+		lo = vfsub_clone_file_range(src, dst, len);
+		inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+	} else
+		lo = vfsub_clone_file_range(src, dst, len);
+	if (lo == len) {
+		err = 0;
+		goto out; /* success */
+	} else if (lo >= 0)
+		/* todo: possible? */
+		/* partially succeeded */
+		AuDbg("lo %lld, len %lld. Retrying.\n", lo, len);
+	else if (lo != -EOPNOTSUPP) {
+		/* older XFS has a condition in cloning */
+		err = lo;
+		goto out;
+	}
+
+	/* the backend fs on NFS may not support cloning */
+	err = au_do_copy(dst, src, len);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+static int au_cp_regular(struct au_cp_generic *cpg)
+{
+	int err, i;
+	enum { SRC, DST };
+	struct {
+		aufs_bindex_t bindex;
+		unsigned int flags;
+		struct dentry *dentry;
+		int force_wr;
+		struct file *file;
+	} *f, file[] = {
+		{
+			.bindex = cpg->bsrc,
+			.flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
+		},
+		{
+			.bindex = cpg->bdst,
+			.flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
+			.force_wr = !!au_ftest_cpup(cpg->flags, RWDST),
+		}
+	};
+	struct au_branch *br;
+	struct super_block *sb, *h_src_sb;
+	struct inode *h_src_inode;
+	struct task_struct *tsk = current;
+
+	/* bsrc branch can be ro/rw. */
+	sb = cpg->dentry->d_sb;
+	f = file;
+	for (i = 0; i < 2; i++, f++) {
+		f->dentry = au_h_dptr(cpg->dentry, f->bindex);
+		f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
+				    /*file*/NULL, f->force_wr);
+		if (IS_ERR(f->file)) {
+			err = PTR_ERR(f->file);
+			if (i == SRC)
+				goto out;
+			else
+				goto out_src;
+		}
+	}
+
+	/* try to stop updates while we copy up */
+	h_src_inode = d_inode(file[SRC].dentry);
+	h_src_sb = h_src_inode->i_sb;
+	if (!au_test_nfs(h_src_sb))
+		IMustLock(h_src_inode);
+	err = au_clone_or_copy(file[DST].file, file[SRC].file, cpg->len);
+
+	/* i wonder if we had an O_NO_DELAY_FPUT flag */
+	if (tsk->flags & PF_KTHREAD)
+		__fput_sync(file[DST].file);
+	else {
+		/* it happened actually */
+		fput(file[DST].file);
+		/*
+		 * too bad.
+		 * we have to call both since we don't know where the file was
+		 * added.
+		 */
+		task_work_run();
+		flush_delayed_fput();
+	}
+	br = au_sbr(sb, file[DST].bindex);
+	au_lcnt_dec(&br->br_nfiles);
+
+out_src:
+	fput(file[SRC].file);
+	br = au_sbr(sb, file[SRC].bindex);
+	au_lcnt_dec(&br->br_nfiles);
+out:
+	return err;
+}
+
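+/*
+ * copy the file body; if the source inode is I_LINKABLE (O_TMPFILE),
+ * mark the new copy as I_LINKABLE too.
+ */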
+static int au_do_cpup_regular(struct au_cp_generic *cpg,
+			      struct au_cpup_reg_attr *h_src_attr)
+{
+	int err, rerr;
+	loff_t l;
+	struct path h_path;
+	struct inode *h_src_inode, *h_dst_inode;
+
+	err = 0;
+	h_src_inode = au_h_iptr(d_inode(cpg->dentry), cpg->bsrc);
+	l = i_size_read(h_src_inode);
+	if (cpg->len == -1 || l < cpg->len)
+		cpg->len = l;
+	if (cpg->len) {
+		/* try to stop updates while we are referencing */
+		inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+		au_pin_hdir_unlock(cpg->pin);
+
+		h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+		h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
+		h_src_attr->iflags = h_src_inode->i_flags;
+		if (!au_test_nfs(h_src_inode->i_sb))
+			err = vfsub_getattr(&h_path, &h_src_attr->st);
+		else {
+			inode_unlock_shared(h_src_inode);
+			err = vfsub_getattr(&h_path, &h_src_attr->st);
+			inode_lock_shared_nested(h_src_inode, AuLsc_I_CHILD);
+		}
+		if (unlikely(err)) {
+			inode_unlock_shared(h_src_inode);
+			goto out;
+		}
+		h_src_attr->valid = 1;
+		if (!au_test_nfs(h_src_inode->i_sb)) {
+			err = au_cp_regular(cpg);
+			inode_unlock_shared(h_src_inode);
+		} else {
+			inode_unlock_shared(h_src_inode);
+			err = au_cp_regular(cpg);
+		}
+		rerr = au_pin_hdir_relock(cpg->pin);
+		if (!err && rerr)
+			err = rerr;
+	}
+	if (!err && (h_src_inode->i_state & I_LINKABLE)) {
+		h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst);
+		h_dst_inode = d_inode(h_path.dentry);
+		spin_lock(&h_dst_inode->i_lock);
+		h_dst_inode->i_state |= I_LINKABLE;
+		spin_unlock(&h_dst_inode->i_lock);
+	}
+
+out:
+	return err;
+}
+
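+/* read the symlink target from @h_src and re-create it at @h_path */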
+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
+			      struct inode *h_dir)
+{
+	int err, symlen;
+	mm_segment_t old_fs;
+	union {
+		char *k;
+		char __user *u;
+	} sym;
+
+	err = -ENOMEM;
+	sym.k = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!sym.k))
+		goto out;
+
+	/* no need to care about mmap_sem since a symlink is not mmap-able */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	symlen = vfs_readlink(h_src, sym.u, PATH_MAX);
+	err = symlen;
+	set_fs(old_fs);
+
+	if (symlen > 0) {
+		sym.k[symlen] = 0;
+		err = vfsub_symlink(h_dir, h_path, sym.k);
+	}
+	free_page((unsigned long)sym.k);
+
+out:
+	return err;
+}
+
+/*
+ * regardless of the 'acl' option, reset all ACLs.
+ * All ACLs will be copied up later from the original entry on the lower branch.
+ */
+static int au_reset_acl(struct inode *h_dir, struct path *h_path, umode_t mode)
+{
+	int err;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	h_dentry = h_path->dentry;
+	h_inode = d_inode(h_dentry);
+	/* forget_all_cached_acls(h_inode)); */
+	err = vfsub_removexattr(h_dentry, XATTR_NAME_POSIX_ACL_ACCESS);
+	AuTraceErr(err);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	if (!err)
+		err = vfsub_acl_chmod(h_inode, mode);
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_do_cpup_dir(struct au_cp_generic *cpg, struct dentry *dst_parent,
+			  struct inode *h_dir, struct path *h_path)
+{
+	int err;
+	struct inode *dir, *inode;
+
+	err = vfsub_removexattr(h_path->dentry, XATTR_NAME_POSIX_ACL_DEFAULT);
+	AuTraceErr(err);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	if (unlikely(err))
+		goto out;
+
+	/*
+	 * strange behaviour from the user's point of view,
+	 * particularly in the setattr case
+	 */
+	dir = d_inode(dst_parent);
+	if (au_ibtop(dir) == cpg->bdst)
+		au_cpup_attr_nlink(dir, /*force*/1);
+	inode = d_inode(cpg->dentry);
+	au_cpup_attr_nlink(inode, /*force*/1);
+
+out:
+	return err;
+}
+
+static noinline_for_stack
+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
+	       struct au_cpup_reg_attr *h_src_attr)
+{
+	int err;
+	umode_t mode;
+	unsigned int mnt_flags;
+	unsigned char isdir, isreg, force;
+	const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+	struct au_dtime dt;
+	struct path h_path;
+	struct dentry *h_src, *h_dst, *h_parent;
+	struct inode *h_inode, *h_dir;
+	struct super_block *sb;
+
+	/* bsrc branch can be ro/rw. */
+	h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+	h_inode = d_inode(h_src);
+	AuDebugOn(h_inode != au_h_iptr(d_inode(cpg->dentry), cpg->bsrc));
+
+	/* try to avoid being referenced while we are creating */
+	h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+	if (au_ftest_cpup(cpg->flags, RENAME))
+		AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
+				  AUFS_WH_PFX_LEN));
+	h_parent = h_dst->d_parent; /* dir inode is locked */
+	h_dir = d_inode(h_parent);
+	IMustLock(h_dir);
+	AuDebugOn(h_parent != h_dst->d_parent);
+
+	sb = cpg->dentry->d_sb;
+	h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
+	if (do_dt) {
+		h_path.dentry = h_parent;
+		au_dtime_store(&dt, dst_parent, &h_path);
+	}
+	h_path.dentry = h_dst;
+
+	isreg = 0;
+	isdir = 0;
+	mode = h_inode->i_mode;
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		isreg = 1;
+		err = vfsub_create(h_dir, &h_path, 0600, /*want_excl*/true);
+		if (!err)
+			err = au_do_cpup_regular(cpg, h_src_attr);
+		break;
+	case S_IFDIR:
+		isdir = 1;
+		err = vfsub_mkdir(h_dir, &h_path, mode);
+		if (!err)
+			err = au_do_cpup_dir(cpg, dst_parent, h_dir, &h_path);
+		break;
+	case S_IFLNK:
+		err = au_do_cpup_symlink(&h_path, h_src, h_dir);
+		break;
+	case S_IFCHR:
+	case S_IFBLK:
+		AuDebugOn(!capable(CAP_MKNOD));
+		/*FALLTHROUGH*/
+	case S_IFIFO:
+	case S_IFSOCK:
+		err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
+		break;
+	default:
+		AuIOErr("Unknown inode type 0%o\n", mode);
+		err = -EIO;
+	}
+	if (!err)
+		err = au_reset_acl(h_dir, &h_path, mode);
+
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, UDBA_NONE)
+	    && !isdir
+	    && au_opt_test(mnt_flags, XINO)
+	    && (h_inode->i_nlink == 1
+		|| (h_inode->i_state & I_LINKABLE))
+	    /* todo: unnecessary? */
+	    /* && d_inode(cpg->dentry)->i_nlink == 1 */
+	    && cpg->bdst < cpg->bsrc
+	    && !au_ftest_cpup(cpg->flags, KEEPLINO))
+		au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
+		/* ignore this error */
+
+	if (!err) {
+		force = 0;
+		if (isreg) {
+			force = !!cpg->len;
+			if (cpg->len == -1)
+				force = !!i_size_read(h_inode);
+		}
+		au_fhsm_wrote(sb, cpg->bdst, force);
+	}
+
+	if (do_dt)
+		au_dtime_revert(&dt);
+	return err;
+}
+
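+/*
+ * after copy-up under a temporary (whiteout-prefixed) name, rename the new
+ * entry to its real name on the destination branch.
+ */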
+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path)
+{
+	int err;
+	struct dentry *dentry, *h_dentry, *h_parent, *parent;
+	struct inode *h_dir;
+	aufs_bindex_t bdst;
+
+	dentry = cpg->dentry;
+	bdst = cpg->bdst;
+	h_dentry = au_h_dptr(dentry, bdst);
+	if (!au_ftest_cpup(cpg->flags, OVERWRITE)) {
+		dget(h_dentry);
+		au_set_h_dptr(dentry, bdst, NULL);
+		err = au_lkup_neg(dentry, bdst, /*wh*/0);
+		if (!err)
+			h_path->dentry = dget(au_h_dptr(dentry, bdst));
+		au_set_h_dptr(dentry, bdst, h_dentry);
+	} else {
+		err = 0;
+		parent = dget_parent(dentry);
+		h_parent = au_h_dptr(parent, bdst);
+		dput(parent);
+		h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
+		if (IS_ERR(h_path->dentry))
+			err = PTR_ERR(h_path->dentry);
+	}
+	if (unlikely(err))
+		goto out;
+
+	h_parent = h_dentry->d_parent; /* dir inode is locked */
+	h_dir = d_inode(h_parent);
+	IMustLock(h_dir);
+	AuDbg("%pd %pd\n", h_dentry, h_path->dentry);
+	/* no delegation since it is just created */
+	err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL,
+			   /*flags*/0);
+	dput(h_path->dentry);
+
+out:
+	return err;
+}
+
+/*
+ * copyup the @dentry from @bsrc to @bdst.
+ * the caller must set both of the lower dentries.
+ * @len is for truncating; when it is -1, copy up the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ * cpg->bsrc can be larger than cpg->bdst.
+ * aufs doesn't touch the credential so
+ * security_inode_copy_up{,_xattr}() are unnecessary.
+ */
+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+	int err, rerr;
+	aufs_bindex_t old_ibtop;
+	unsigned char isdir, plink;
+	struct dentry *h_src, *h_dst, *h_parent;
+	struct inode *dst_inode, *h_dir, *inode, *delegated, *src_inode;
+	struct super_block *sb;
+	struct au_branch *br;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct path h_path;
+		struct au_cpup_reg_attr h_src_attr;
+	} *a;
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+	a->h_src_attr.valid = 0;
+
+	sb = cpg->dentry->d_sb;
+	br = au_sbr(sb, cpg->bdst);
+	a->h_path.mnt = au_br_mnt(br);
+	h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+	h_parent = h_dst->d_parent; /* dir inode is locked */
+	h_dir = d_inode(h_parent);
+	IMustLock(h_dir);
+
+	h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+	inode = d_inode(cpg->dentry);
+
+	if (!dst_parent)
+		dst_parent = dget_parent(cpg->dentry);
+	else
+		dget(dst_parent);
+
+	plink = !!au_opt_test(au_mntflags(sb), PLINK);
+	dst_inode = au_h_iptr(inode, cpg->bdst);
+	if (dst_inode) {
+		if (unlikely(!plink)) {
+			err = -EIO;
+			AuIOErr("hi%lu(i%lu) exists on b%d "
+				"but plink is disabled\n",
+				dst_inode->i_ino, inode->i_ino, cpg->bdst);
+			goto out_parent;
+		}
+
+		if (dst_inode->i_nlink) {
+			const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
+
+			h_src = au_plink_lkup(inode, cpg->bdst);
+			err = PTR_ERR(h_src);
+			if (IS_ERR(h_src))
+				goto out_parent;
+			if (unlikely(d_is_negative(h_src))) {
+				err = -EIO;
+				AuIOErr("i%lu exists on b%d "
+					"but not pseudo-linked\n",
+					inode->i_ino, cpg->bdst);
+				dput(h_src);
+				goto out_parent;
+			}
+
+			if (do_dt) {
+				a->h_path.dentry = h_parent;
+				au_dtime_store(&a->dt, dst_parent, &a->h_path);
+			}
+
+			a->h_path.dentry = h_dst;
+			delegated = NULL;
+			err = vfsub_link(h_src, h_dir, &a->h_path, &delegated);
+			if (!err && au_ftest_cpup(cpg->flags, RENAME))
+				err = au_do_ren_after_cpup(cpg, &a->h_path);
+			if (do_dt)
+				au_dtime_revert(&a->dt);
+			if (unlikely(err == -EWOULDBLOCK)) {
+				pr_warn("cannot retry for NFSv4 delegation"
+					" for an internal link\n");
+				iput(delegated);
+			}
+			dput(h_src);
+			goto out_parent;
+		} else
+			/* todo: cpup_wh_file? */
+			/* udba work */
+			au_update_ibrange(inode, /*do_put_zero*/1);
+	}
+
+	isdir = S_ISDIR(inode->i_mode);
+	old_ibtop = au_ibtop(inode);
+	err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
+	if (unlikely(err))
+		goto out_rev;
+	dst_inode = d_inode(h_dst);
+	inode_lock_nested(dst_inode, AuLsc_I_CHILD2);
+	/* todo: necessary? */
+	/* au_pin_hdir_unlock(cpg->pin); */
+
+	err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
+	if (unlikely(err)) {
+		/* todo: necessary? */
+		/* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
+		inode_unlock(dst_inode);
+		goto out_rev;
+	}
+
+	if (cpg->bdst < old_ibtop) {
+		if (S_ISREG(inode->i_mode)) {
+			err = au_dy_iaop(inode, cpg->bdst, dst_inode);
+			if (unlikely(err)) {
+				/* ignore an error */
+				/* au_pin_hdir_relock(cpg->pin); */
+				inode_unlock(dst_inode);
+				goto out_rev;
+			}
+		}
+		au_set_ibtop(inode, cpg->bdst);
+	} else
+		au_set_ibbot(inode, cpg->bdst);
+	au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
+		      au_hi_flags(inode, isdir));
+
+	/* todo: necessary? */
+	/* err = au_pin_hdir_relock(cpg->pin); */
+	inode_unlock(dst_inode);
+	if (unlikely(err))
+		goto out_rev;
+
+	src_inode = d_inode(h_src);
+	if (!isdir
+	    && (src_inode->i_nlink > 1
+		|| src_inode->i_state & I_LINKABLE)
+	    && plink)
+		au_plink_append(inode, cpg->bdst, h_dst);
+
+	if (au_ftest_cpup(cpg->flags, RENAME)) {
+		a->h_path.dentry = h_dst;
+		err = au_do_ren_after_cpup(cpg, &a->h_path);
+	}
+	if (!err)
+		goto out_parent; /* success */
+
+	/* revert */
+out_rev:
+	a->h_path.dentry = h_parent;
+	au_dtime_store(&a->dt, dst_parent, &a->h_path);
+	a->h_path.dentry = h_dst;
+	rerr = 0;
+	if (d_is_positive(h_dst)) {
+		if (!isdir) {
+			/* no delegation since it is just created */
+			rerr = vfsub_unlink(h_dir, &a->h_path,
+					    /*delegated*/NULL, /*force*/0);
+		} else
+			rerr = vfsub_rmdir(h_dir, &a->h_path);
+	}
+	au_dtime_revert(&a->dt);
+	if (rerr) {
+		AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
+		err = -EIO;
+	}
+out_parent:
+	dput(dst_parent);
+	au_kfree_rcu(a);
+out:
+	return err;
+}
+
+#if 0 /* reserved */
+struct au_cpup_single_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+	struct dentry *dst_parent;
+};
+
+static void au_call_cpup_single(void *args)
+{
+	struct au_cpup_single_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_single(a->cpg, a->dst_parent);
+	au_pin_hdir_release(a->cpg->pin);
+}
+#endif
+
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic fs,
+ * but CAP_FSETID is for xfs only, currently.
+ */
+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
+{
+	int do_sio;
+	struct super_block *sb;
+	struct inode *h_dir;
+
+	do_sio = 0;
+	sb = au_pinned_parent(pin)->d_sb;
+	if (!au_wkq_test()
+	    && (!au_sbi(sb)->si_plink_maint_pid
+		|| au_plink_maint(sb, AuLock_NOPLM))) {
+		switch (mode & S_IFMT) {
+		case S_IFREG:
+			/* no condition about RLIMIT_FSIZE and the file size */
+			do_sio = 1;
+			break;
+		case S_IFCHR:
+		case S_IFBLK:
+			do_sio = !capable(CAP_MKNOD);
+			break;
+		}
+		if (!do_sio)
+			do_sio = ((mode & (S_ISUID | S_ISGID))
+				  && !capable(CAP_FSETID));
+		/* this workaround may be removed in the future */
+		if (!do_sio) {
+			h_dir = au_pinned_h_dir(pin);
+			do_sio = h_dir->i_mode & S_ISVTX;
+		}
+	}
+
+	return do_sio;
+}
+
+#if 0 /* reserved */
+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+	int err, wkq_err;
+	struct dentry *h_dentry;
+
+	h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+	if (!au_cpup_sio_test(pin, d_inode(h_dentry)->i_mode))
+		err = au_cpup_single(cpg, dst_parent);
+	else {
+		struct au_cpup_single_args args = {
+			.errp		= &err,
+			.cpg		= cpg,
+			.dst_parent	= dst_parent
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_single, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+#endif
+
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+static int au_cpup_simple(struct au_cp_generic *cpg)
+{
+	int err;
+	unsigned int flags_orig;
+	struct dentry *dentry;
+
+	AuDebugOn(cpg->bsrc < 0);
+
+	dentry = cpg->dentry;
+	DiMustWriteLock(dentry);
+
+	err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
+	if (!err) {
+		flags_orig = cpg->flags;
+		au_fset_cpup(cpg->flags, RENAME);
+		err = au_cpup_single(cpg, NULL);
+		cpg->flags = flags_orig;
+		if (!err)
+			return 0; /* success */
+
+		/* revert */
+		au_set_h_dptr(dentry, cpg->bdst, NULL);
+		au_set_dbtop(dentry, cpg->bsrc);
+	}
+
+	return err;
+}
+
+struct au_cpup_simple_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+};
+
+static void au_call_cpup_simple(void *args)
+{
+	struct au_cpup_simple_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_simple(a->cpg);
+	au_pin_hdir_release(a->cpg->pin);
+}
+
+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+	int err, wkq_err;
+	struct dentry *dentry, *parent;
+	struct file *h_file;
+	struct inode *h_dir;
+
+	dentry = cpg->dentry;
+	h_file = NULL;
+	if (au_ftest_cpup(cpg->flags, HOPEN)) {
+		AuDebugOn(cpg->bsrc < 0);
+		h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0);
+		err = PTR_ERR(h_file);
+		if (IS_ERR(h_file))
+			goto out;
+	}
+
+	parent = dget_parent(dentry);
+	h_dir = au_h_iptr(d_inode(parent), cpg->bdst);
+	if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
+	    && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+		err = au_cpup_simple(cpg);
+	else {
+		struct au_cpup_simple_args args = {
+			.errp		= &err,
+			.cpg		= cpg
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	dput(parent);
+	if (h_file)
+		au_h_open_post(dentry, cpg->bsrc, h_file);
+
+out:
+	return err;
+}
+
+int au_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+	aufs_bindex_t bsrc, bbot;
+	struct dentry *dentry, *h_dentry;
+
+	if (cpg->bsrc < 0) {
+		dentry = cpg->dentry;
+		bbot = au_dbbot(dentry);
+		for (bsrc = cpg->bdst + 1; bsrc <= bbot; bsrc++) {
+			h_dentry = au_h_dptr(dentry, bsrc);
+			if (h_dentry) {
+				AuDebugOn(d_is_negative(h_dentry));
+				break;
+			}
+		}
+		AuDebugOn(bsrc > bbot);
+		cpg->bsrc = bsrc;
+	}
+	AuDebugOn(cpg->bsrc <= cpg->bdst);
+	return au_do_sio_cpup_simple(cpg);
+}
+
+int au_sio_cpdown_simple(struct au_cp_generic *cpg)
+{
+	AuDebugOn(cpg->bdst <= cpg->bsrc);
+	return au_do_sio_cpup_simple(cpg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copyup the deleted file for writing.
+ */
+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
+			 struct file *file)
+{
+	int err;
+	unsigned int flags_orig;
+	aufs_bindex_t bsrc_orig;
+	struct au_dinfo *dinfo;
+	struct {
+		struct au_hdentry *hd;
+		struct dentry *h_dentry;
+	} hdst, hsrc;
+
+	dinfo = au_di(cpg->dentry);
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	bsrc_orig = cpg->bsrc;
+	cpg->bsrc = dinfo->di_btop;
+	hdst.hd = au_hdentry(dinfo, cpg->bdst);
+	hdst.h_dentry = hdst.hd->hd_dentry;
+	hdst.hd->hd_dentry = wh_dentry;
+	dinfo->di_btop = cpg->bdst;
+
+	hsrc.h_dentry = NULL;
+	if (file) {
+		hsrc.hd = au_hdentry(dinfo, cpg->bsrc);
+		hsrc.h_dentry = hsrc.hd->hd_dentry;
+		hsrc.hd->hd_dentry = au_hf_top(file)->f_path.dentry;
+	}
+	flags_orig = cpg->flags;
+	cpg->flags = !AuCpup_DTIME;
+	err = au_cpup_single(cpg, /*h_parent*/NULL);
+	cpg->flags = flags_orig;
+	if (file) {
+		if (!err)
+			err = au_reopen_nondir(file);
+		hsrc.hd->hd_dentry = hsrc.h_dentry;
+	}
+	hdst.hd->hd_dentry = hdst.h_dentry;
+	dinfo->di_btop = cpg->bsrc;
+	cpg->bsrc = bsrc_orig;
+
+	return err;
+}
+
+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+	int err;
+	aufs_bindex_t bdst;
+	struct au_dtime dt;
+	struct dentry *dentry, *parent, *h_parent, *wh_dentry;
+	struct au_branch *br;
+	struct path h_path;
+
+	dentry = cpg->dentry;
+	bdst = cpg->bdst;
+	br = au_sbr(dentry->d_sb, bdst);
+	parent = dget_parent(dentry);
+	h_parent = au_h_dptr(parent, bdst);
+	wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out;
+
+	h_path.dentry = h_parent;
+	h_path.mnt = au_br_mnt(br);
+	au_dtime_store(&dt, parent, &h_path);
+	err = au_do_cpup_wh(cpg, wh_dentry, file);
+	if (unlikely(err))
+		goto out_wh;
+
+	dget(wh_dentry);
+	h_path.dentry = wh_dentry;
+	if (!d_is_dir(wh_dentry)) {
+		/* no delegation since it is just created */
+		err = vfsub_unlink(d_inode(h_parent), &h_path,
+				   /*delegated*/NULL, /*force*/0);
+	} else
+		err = vfsub_rmdir(d_inode(h_parent), &h_path);
+	if (unlikely(err)) {
+		AuIOErr("failed remove copied-up tmp file %pd(%d)\n",
+			wh_dentry, err);
+		err = -EIO;
+	}
+	au_dtime_revert(&dt);
+	au_set_hi_wh(d_inode(dentry), bdst, wh_dentry);
+
+out_wh:
+	dput(wh_dentry);
+out:
+	dput(parent);
+	return err;
+}
+
+struct au_cpup_wh_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+	struct file *file;
+};
+
+static void au_call_cpup_wh(void *args)
+{
+	struct au_cpup_wh_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_wh(a->cpg, a->file);
+	au_pin_hdir_release(a->cpg->pin);
+}
+
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+	int err, wkq_err;
+	aufs_bindex_t bdst;
+	struct dentry *dentry, *parent, *h_orph, *h_parent;
+	struct inode *dir, *h_dir, *h_tmpdir;
+	struct au_wbr *wbr;
+	struct au_pin wh_pin, *pin_orig;
+
+	dentry = cpg->dentry;
+	bdst = cpg->bdst;
+	parent = dget_parent(dentry);
+	dir = d_inode(parent);
+	h_orph = NULL;
+	h_parent = NULL;
+	h_dir = au_igrab(au_h_iptr(dir, bdst));
+	h_tmpdir = h_dir;
+	pin_orig = NULL;
+	if (!h_dir->i_nlink) {
+		wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
+		h_orph = wbr->wbr_orph;
+
+		h_parent = dget(au_h_dptr(parent, bdst));
+		au_set_h_dptr(parent, bdst, dget(h_orph));
+		h_tmpdir = d_inode(h_orph);
+		au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
+
+		inode_lock_nested(h_tmpdir, AuLsc_I_PARENT3);
+		/* todo: au_h_open_pre()? */
+
+		pin_orig = cpg->pin;
+		au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
+			    AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
+		cpg->pin = &wh_pin;
+	}
+
+	if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
+	    && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+		err = au_cpup_wh(cpg, file);
+	else {
+		struct au_cpup_wh_args args = {
+			.errp	= &err,
+			.cpg	= cpg,
+			.file	= file
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	if (h_orph) {
+		inode_unlock(h_tmpdir);
+		/* todo: au_h_open_post()? */
+		au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
+		au_set_h_dptr(parent, bdst, h_parent);
+		AuDebugOn(!pin_orig);
+		cpg->pin = pin_orig;
+	}
+	iput(h_dir);
+	dput(parent);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generic routine for both copy-up and copy-down.
+ */
+/* cf. revalidate function in file.c */
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg),
+	       void *arg)
+{
+	int err;
+	struct au_pin pin;
+	struct dentry *d, *parent, *h_parent, *real_parent, *h_dentry;
+
+	err = 0;
+	parent = dget_parent(dentry);
+	if (IS_ROOT(parent))
+		goto out;
+
+	au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
+		    au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
+
+	/* do not use au_dpage */
+	real_parent = parent;
+	while (1) {
+		dput(parent);
+		parent = dget_parent(dentry);
+		h_parent = au_h_dptr(parent, bdst);
+		if (h_parent)
+			goto out; /* success */
+
+		/* find the topmost dir which needs to be copied up */
+		do {
+			d = parent;
+			dput(parent);
+			parent = dget_parent(d);
+			di_read_lock_parent3(parent, !AuLock_IR);
+			h_parent = au_h_dptr(parent, bdst);
+			di_read_unlock(parent, !AuLock_IR);
+		} while (!h_parent);
+
+		if (d != real_parent)
+			di_write_lock_child3(d);
+
+		/* somebody else might have created it while we were sleeping */
+		h_dentry = au_h_dptr(d, bdst);
+		if (!h_dentry || d_is_negative(h_dentry)) {
+			if (h_dentry)
+				au_update_dbtop(d);
+
+			au_pin_set_dentry(&pin, d);
+			err = au_do_pin(&pin);
+			if (!err) {
+				err = cp(d, bdst, &pin, h_parent, arg);
+				au_unpin(&pin);
+			}
+		}
+
+		if (d != real_parent)
+			di_write_unlock(d);
+		if (unlikely(err))
+			break;
+	}
+
+out:
+	dput(parent);
+	return err;
+}
+
+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
+		       struct au_pin *pin,
+		       struct dentry *h_parent __maybe_unused,
+		       void *arg __maybe_unused)
+{
+	struct au_cp_generic cpg = {
+		.dentry	= dentry,
+		.bdst	= bdst,
+		.bsrc	= -1,
+		.len	= 0,
+		.pin	= pin,
+		.flags	= AuCpup_DTIME
+	};
+	return au_sio_cpup_simple(&cpg);
+}
+
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
+}
+
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	int err;
+	struct dentry *parent;
+	struct inode *dir;
+
+	parent = dget_parent(dentry);
+	dir = d_inode(parent);
+	err = 0;
+	if (au_h_iptr(dir, bdst))
+		goto out;
+
+	di_read_unlock(parent, AuLock_IR);
+	di_write_lock_parent(parent);
+	/* someone else might have changed our inode while we were sleeping */
+	if (!au_h_iptr(dir, bdst))
+		err = au_cpup_dirs(dentry, bdst);
+	di_downgrade_lock(parent, AuLock_IR);
+
+out:
+	dput(parent);
+	return err;
+}
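
For reference, au_cp_dirs() above walks up the parent chain until it finds an ancestor that already exists on the destination branch, and then has the cp() callback create the missing directories back down, one level at a time. Outside the kernel the same effect is what "mkdir -p" provides; the sketch below is a plain userspace illustration of the create-the-missing-ancestors idea (hypothetical helper name, not aufs code).

    /*
     * Minimal userspace sketch of "make sure every missing ancestor
     * exists" -- the effect au_cpup_dirs() achieves on the destination
     * branch.  Hypothetical helper, not part of aufs.
     */
    #include <errno.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static int ensure_parents(const char *path)
    {
        char buf[4096];
        size_t i, len = strlen(path);

        if (len >= sizeof(buf))
            return -ENAMETOOLONG;
        memcpy(buf, path, len + 1);

        for (i = 1; i < len; i++) {
            if (buf[i] != '/')
                continue;
            buf[i] = '\0';    /* temporarily terminate at this ancestor */
            if (mkdir(buf, 0755) && errno != EEXIST)
                return -errno;    /* a real failure, not "already there" */
            buf[i] = '/';
        }
        return 0;
    }
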
diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h
new file mode 100644
index 00000000000..d02f8150fa0
--- /dev/null
+++ b/fs/aufs/cpup.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * copy-up/down functions
+ */
+
+#ifndef __AUFS_CPUP_H__
+#define __AUFS_CPUP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct inode;
+struct file;
+struct au_pin;
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
+void au_cpup_attr_timesizes(struct inode *inode);
+void au_cpup_attr_nlink(struct inode *inode, int force);
+void au_cpup_attr_changeable(struct inode *inode);
+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
+void au_cpup_attr_all(struct inode *inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
+struct au_cp_generic {
+	struct dentry	*dentry;
+	aufs_bindex_t	bdst, bsrc;
+	loff_t		len;
+	struct au_pin	*pin;
+	unsigned int	flags;
+};
+
+/* cpup flags */
+#define AuCpup_DTIME		1		/* do dtime_store/revert */
+#define AuCpup_KEEPLINO		(1 << 1)	/* do not clear the lower xino,
+						   for link(2) */
+#define AuCpup_RENAME		(1 << 2)	/* rename after cpup */
+#define AuCpup_HOPEN		(1 << 3)	/* call h_open_pre/post() in
+						   cpup */
+#define AuCpup_OVERWRITE	(1 << 4)	/* allow overwriting the
+						   existing entry */
+#define AuCpup_RWDST		(1 << 5)	/* force write target even if
+						   the branch is marked as RO */
+
+#ifndef CONFIG_AUFS_BR_HFSPLUS
+#undef AuCpup_HOPEN
+#define AuCpup_HOPEN		0
+#endif
+
+#define au_ftest_cpup(flags, name)	((flags) & AuCpup_##name)
+#define au_fset_cpup(flags, name) \
+	do { (flags) |= AuCpup_##name; } while (0)
+#define au_fclr_cpup(flags, name) \
+	do { (flags) &= ~AuCpup_##name; } while (0)
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len);
+int au_sio_cpup_simple(struct au_cp_generic *cpg);
+int au_sio_cpdown_simple(struct au_cp_generic *cpg);
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
+
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg),
+	       void *arg);
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+
+/* ---------------------------------------------------------------------- */
+
+/* keep the timestamps across a copy-up */
+struct au_dtime {
+	struct dentry *dt_dentry;
+	struct path dt_h_path;
+	struct timespec64 dt_atime, dt_mtime;
+};
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+		    struct path *h_path);
+void au_dtime_revert(struct au_dtime *dt);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_CPUP_H__ */
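
The AuCpup_* constants and the au_ftest_cpup()/au_fset_cpup()/au_fclr_cpup() macros above follow a simple token-pasting bit-flag convention. A standalone sketch of the same convention, with hypothetical flag names, is:

    #include <stdio.h>

    /* same test/set/clear convention as au_*_cpup(), hypothetical names */
    #define MyCp_DTIME      1           /* bit 0 */
    #define MyCp_RENAME     (1 << 1)    /* bit 1 */

    #define my_ftest(flags, name)   ((flags) & MyCp_##name)
    #define my_fset(flags, name)    do { (flags) |= MyCp_##name; } while (0)
    #define my_fclr(flags, name)    do { (flags) &= ~MyCp_##name; } while (0)

    int main(void)
    {
        unsigned int flags = MyCp_DTIME;

        my_fset(flags, RENAME);
        if (my_ftest(flags, DTIME))
            printf("DTIME is set\n");
        my_fclr(flags, DTIME);
        printf("flags = 0x%x\n", flags);    /* prints 0x2 */
        return 0;
    }

The do { } while (0) wrapper keeps the set/clear forms safe to use inside unbraced if/else bodies.
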
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
new file mode 100644
index 00000000000..80266f4fcf7
--- /dev/null
+++ b/fs/aufs/dbgaufs.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debugfs interface
+ */
+
+#include <linux/debugfs.h>
+#include "aufs.h"
+
+#ifndef CONFIG_SYSFS
+#error DEBUG_FS depends upon SYSFS
+#endif
+
+static struct dentry *dbgaufs;
+static const mode_t dbgaufs_mode = 0444;
+
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned value */
+struct dbgaufs_arg {
+	int n;
+	char a[20 * 4];
+};
+
+/*
+ * common function for all XINO files
+ */
+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
+			      struct file *file)
+{
+	void *p;
+
+	p = file->private_data;
+	if (p) {
+		/* this is struct dbgaufs_arg */
+		AuDebugOn(!au_kfree_sz_test(p));
+		au_kfree_do_rcu(p);
+	}
+	return 0;
+}
+
+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt,
+			   int cnt)
+{
+	int err;
+	struct kstat st;
+	struct dbgaufs_arg *p;
+
+	err = -ENOMEM;
+	p = kmalloc(sizeof(*p), GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = 0;
+	p->n = 0;
+	file->private_data = p;
+	if (!xf)
+		goto out;
+
+	err = vfsub_getattr(&xf->f_path, &st);
+	if (!err) {
+		if (do_fcnt)
+			p->n = snprintf
+				(p->a, sizeof(p->a), "%d, %llux%u %lld\n",
+				 cnt, st.blocks, st.blksize,
+				 (long long)st.size);
+		else
+			p->n = snprintf(p->a, sizeof(p->a), "%llux%u %lld\n",
+					st.blocks, st.blksize,
+					(long long)st.size);
+		AuDebugOn(p->n >= sizeof(p->a));
+	} else {
+		p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
+		err = 0;
+	}
+
+out:
+	return err;
+}
+
+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct dbgaufs_arg *p;
+
+	p = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dbgaufs_plink_arg {
+	int n;
+	char a[];
+};
+
+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
+				 struct file *file)
+{
+	free_page((unsigned long)file->private_data);
+	return 0;
+}
+
+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
+{
+	int err, i, limit;
+	unsigned long n, sum;
+	struct dbgaufs_plink_arg *p;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct hlist_bl_head *hbl;
+
+	err = -ENOMEM;
+	p = (void *)get_zeroed_page(GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = -EFBIG;
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		limit = PAGE_SIZE - sizeof(p->n);
+
+		/* the number of buckets */
+		n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
+		p->n += n;
+		limit -= n;
+
+		sum = 0;
+		for (i = 0, hbl = sbinfo->si_plink; i < AuPlink_NHASH;
+		     i++, hbl++) {
+			n = au_hbl_count(hbl);
+			sum += n;
+
+			n = snprintf(p->a + p->n, limit, "%lu ", n);
+			p->n += n;
+			limit -= n;
+			if (unlikely(limit <= 0))
+				goto out_free;
+		}
+		p->a[p->n - 1] = '\n';
+
+		/* the sum of plinks */
+		n = snprintf(p->a + p->n, limit, "%lu\n", sum);
+		p->n += n;
+		limit -= n;
+		if (unlikely(limit <= 0))
+			goto out_free;
+	} else {
+#define str "1\n0\n0\n"
+		p->n = sizeof(str) - 1;
+		strcpy(p->a, str);
+#undef str
+	}
+	si_read_unlock(sb);
+
+	err = 0;
+	file->private_data = p;
+	goto out; /* success */
+
+out_free:
+	free_page((unsigned long)p);
+out:
+	return err;
+}
+
+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	struct dbgaufs_plink_arg *p;
+
+	p = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+static const struct file_operations dbgaufs_plink_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_plink_open,
+	.release	= dbgaufs_plink_release,
+	.read		= dbgaufs_plink_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0, /*cnt*/0);
+	si_read_unlock(sb);
+	return err;
+}
+
+static const struct file_operations dbgaufs_xib_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xib_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+#define DbgaufsXi_PREFIX "xi"
+
+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
+{
+	int err, idx;
+	long l;
+	aufs_bindex_t bindex;
+	char *p, a[sizeof(DbgaufsXi_PREFIX) + 8];
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct au_xino *xi;
+	struct file *xf;
+	struct qstr *name;
+	struct au_branch *br;
+
+	err = -ENOENT;
+	name = &file->f_path.dentry->d_name;
+	if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
+		     || memcmp(name->name, DbgaufsXi_PREFIX,
+			       sizeof(DbgaufsXi_PREFIX) - 1)))
+		goto out;
+
+	AuDebugOn(name->len >= sizeof(a));
+	memcpy(a, name->name, name->len);
+	a[name->len] = '\0';
+	p = strchr(a, '-');
+	if (p)
+		*p = '\0';
+	err = kstrtol(a + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
+	if (unlikely(err))
+		goto out;
+	bindex = l;
+	idx = 0;
+	if (p) {
+		err = kstrtol(p + 1, 10, &l);
+		if (unlikely(err))
+			goto out;
+		idx = l;
+	}
+
+	err = -ENOENT;
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	if (unlikely(bindex < 0 || bindex > au_sbbot(sb)))
+		goto out_si;
+	br = au_sbr(sb, bindex);
+	xi = br->br_xino;
+	if (unlikely(idx >= xi->xi_nfile))
+		goto out_si;
+	xf = au_xino_file(xi, idx);
+	if (xf)
+		err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1,
+				      au_xino_count(br));
+
+out_si:
+	si_read_unlock(sb);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static const struct file_operations dbgaufs_xino_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xino_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+void dbgaufs_xino_del(struct au_branch *br)
+{
+	struct dentry *dbgaufs;
+
+	dbgaufs = br->br_dbgaufs;
+	if (!dbgaufs)
+		return;
+
+	br->br_dbgaufs = NULL;
+	/* debugfs acquires the parent i_mutex */
+	lockdep_off();
+	debugfs_remove(dbgaufs);
+	lockdep_on();
+}
+
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+	aufs_bindex_t bbot;
+	struct au_branch *br;
+
+	if (!au_sbi(sb)->si_dbgaufs)
+		return;
+
+	bbot = au_sbbot(sb);
+	for (; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		dbgaufs_xino_del(br);
+	}
+}
+
+static void dbgaufs_br_do_add(struct super_block *sb, aufs_bindex_t bindex,
+			      unsigned int idx, struct dentry *parent,
+			      struct au_sbinfo *sbinfo)
+{
+	struct au_branch *br;
+	struct dentry *d;
+	/* "xi" bindex(5) "-" idx(2) NULL */
+	char name[sizeof(DbgaufsXi_PREFIX) + 8];
+
+	if (!idx)
+		snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
+	else
+		snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d-%u",
+			 bindex, idx);
+	br = au_sbr(sb, bindex);
+	if (br->br_dbgaufs) {
+		struct qstr qstr = QSTR_INIT(name, strlen(name));
+
+		if (!au_qstreq(&br->br_dbgaufs->d_name, &qstr)) {
+			/* debugfs acquires the parent i_mutex */
+			lockdep_off();
+			d = debugfs_rename(parent, br->br_dbgaufs, parent,
+					   name);
+			lockdep_on();
+			if (unlikely(!d))
+				pr_warn("failed renaming %pd/%s, ignored.\n",
+					parent, name);
+		}
+	} else {
+		lockdep_off();
+		br->br_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
+						     sbinfo, &dbgaufs_xino_fop);
+		lockdep_on();
+		if (unlikely(!br->br_dbgaufs))
+			pr_warn("failed creating %pd/%s, ignored.\n",
+				parent, name);
+	}
+}
+
+static void dbgaufs_br_add(struct super_block *sb, aufs_bindex_t bindex,
+			   struct dentry *parent, struct au_sbinfo *sbinfo)
+{
+	struct au_branch *br;
+	struct au_xino *xi;
+	unsigned int u;
+
+	br = au_sbr(sb, bindex);
+	xi = br->br_xino;
+	for (u = 0; u < xi->xi_nfile; u++)
+		dbgaufs_br_do_add(sb, bindex, u, parent, sbinfo);
+}
+
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex, int topdown)
+{
+	struct au_sbinfo *sbinfo;
+	struct dentry *parent;
+	aufs_bindex_t bbot;
+
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		return;
+
+	sbinfo = au_sbi(sb);
+	parent = sbinfo->si_dbgaufs;
+	if (!parent)
+		return;
+
+	bbot = au_sbbot(sb);
+	if (topdown)
+		for (; bindex <= bbot; bindex++)
+			dbgaufs_br_add(sb, bindex, parent, sbinfo);
+	else
+		for (; bbot >= bindex; bbot--)
+			dbgaufs_br_add(sb, bbot, parent, sbinfo);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0, /*cnt*/0);
+	si_read_unlock(sb);
+	return err;
+}
+
+static const struct file_operations dbgaufs_xigen_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xigen_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+
+	/*
+	 * This function is effectively a dynamic '__init' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	err = -EIO;
+	sbinfo->si_dbgaufs_xigen = debugfs_create_file
+		("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_xigen_fop);
+	if (sbinfo->si_dbgaufs_xigen)
+		err = 0;
+
+	return err;
+}
+#else
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+	return 0;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
+{
+	/*
+	 * This function is effectively a dynamic '__fin' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	debugfs_remove_recursive(sbinfo->si_dbgaufs);
+	sbinfo->si_dbgaufs = NULL;
+}
+
+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+	char name[SysaufsSiNameLen];
+
+	/*
+	 * This function is effectively a dynamic '__init' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	err = -ENOENT;
+	if (!dbgaufs) {
+		AuErr1("/debug/aufs is uninitialized\n");
+		goto out;
+	}
+
+	err = -EIO;
+	sysaufs_name(sbinfo, name);
+	sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
+	if (unlikely(!sbinfo->si_dbgaufs))
+		goto out;
+
+	/* created regardless of the plink/noplink option */
+	sbinfo->si_dbgaufs_plink = debugfs_create_file
+		("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_plink_fop);
+	if (unlikely(!sbinfo->si_dbgaufs_plink))
+		goto out_dir;
+
+	/* created regardless of the xino/noxino option */
+	sbinfo->si_dbgaufs_xib = debugfs_create_file
+		("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_xib_fop);
+	if (unlikely(!sbinfo->si_dbgaufs_xib))
+		goto out_dir;
+
+	err = dbgaufs_xigen_init(sbinfo);
+	if (!err)
+		goto out; /* success */
+
+out_dir:
+	dbgaufs_si_fin(sbinfo);
+out:
+	if (unlikely(err))
+		pr_err("debugfs/aufs failed\n");
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_fin(void)
+{
+	debugfs_remove(dbgaufs);
+}
+
+int __init dbgaufs_init(void)
+{
+	int err;
+
+	err = -EIO;
+	dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
+	if (dbgaufs)
+		err = 0;
+	return err;
+}
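
dbgaufs.c above renders each file's content into a per-open buffer in ->open() and serves it with simple_read_from_buffer() in ->read(). Below is a minimal, self-contained sketch of that same debugfs pattern as a hypothetical demo module (not aufs code); it only assumes a kernel built with CONFIG_DEBUG_FS, where debugfs_create_dir()/debugfs_create_file() are available.

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static struct dentry *demo_dir;

    static int demo_open(struct inode *inode, struct file *file)
    {
        char *buf;

        /* render the whole content once, at open time */
        buf = kasprintf(GFP_KERNEL, "hello from %s\n", KBUILD_MODNAME);
        if (!buf)
            return -ENOMEM;
        file->private_data = buf;
        return 0;
    }

    static ssize_t demo_read(struct file *file, char __user *ubuf,
                             size_t count, loff_t *ppos)
    {
        const char *buf = file->private_data;

        return simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf));
    }

    static int demo_release(struct inode *inode, struct file *file)
    {
        kfree(file->private_data);
        return 0;
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = demo_read,
        .release = demo_release,
    };

    static int __init demo_init(void)
    {
        demo_dir = debugfs_create_dir("dbgaufs_demo", NULL);
        debugfs_create_file("hello", 0444, demo_dir, NULL, &demo_fops);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
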
diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h
new file mode 100644
index 00000000000..7b4ccdebb67
--- /dev/null
+++ b/fs/aufs/dbgaufs.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debugfs interface
+ */
+
+#ifndef __DBGAUFS_H__
+#define __DBGAUFS_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+struct au_sbinfo;
+struct au_branch;
+
+#ifdef CONFIG_DEBUG_FS
+/* dbgaufs.c */
+void dbgaufs_xino_del(struct au_branch *br);
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex, int topdown);
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
+void dbgaufs_fin(void);
+int __init dbgaufs_init(void);
+#else
+AuStubVoid(dbgaufs_xino_del, struct au_branch *br)
+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex,
+	   int topdown)
+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
+AuStubVoid(dbgaufs_fin, void)
+AuStubInt0(__init dbgaufs_init, void)
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __KERNEL__ */
+#endif /* __DBGAUFS_H__ */
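
When CONFIG_DEBUG_FS is off, the header above replaces every entry point with an AuStubVoid/AuStubInt0 stub so that callers need no #ifdef of their own. The general shape of that pattern, written out without the aufs helper macros and with hypothetical names, is:

    #ifdef CONFIG_DEMO_FEATURE
    void demo_feature_add(int id);      /* real implementation elsewhere */
    int demo_feature_count(void);
    #else
    /* empty inline stubs: callers still compile and the calls vanish */
    static inline void demo_feature_add(int id) { }
    static inline int demo_feature_count(void) { return 0; }
    #endif
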
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
new file mode 100644
index 00000000000..0b9b1862b56
--- /dev/null
+++ b/fs/aufs/dcsub.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#include "aufs.h"
+
+static void au_dpage_free(struct au_dpage *dpage)
+{
+	int i;
+	struct dentry **p;
+
+	p = dpage->dentries;
+	for (i = 0; i < dpage->ndentry; i++)
+		dput(*p++);
+	free_page((unsigned long)dpage->dentries);
+}
+
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
+{
+	int err;
+	void *p;
+
+	err = -ENOMEM;
+	dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
+	if (unlikely(!dpages->dpages))
+		goto out;
+
+	p = (void *)__get_free_page(gfp);
+	if (unlikely(!p))
+		goto out_dpages;
+
+	dpages->dpages[0].ndentry = 0;
+	dpages->dpages[0].dentries = p;
+	dpages->ndpage = 1;
+	return 0; /* success */
+
+out_dpages:
+	au_kfree_try_rcu(dpages->dpages);
+out:
+	return err;
+}
+
+void au_dpages_free(struct au_dcsub_pages *dpages)
+{
+	int i;
+	struct au_dpage *p;
+
+	p = dpages->dpages;
+	for (i = 0; i < dpages->ndpage; i++)
+		au_dpage_free(p++);
+	au_kfree_try_rcu(dpages->dpages);
+}
+
+static int au_dpages_append(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, gfp_t gfp)
+{
+	int err, sz;
+	struct au_dpage *dpage;
+	void *p;
+
+	dpage = dpages->dpages + dpages->ndpage - 1;
+	sz = PAGE_SIZE / sizeof(dentry);
+	if (unlikely(dpage->ndentry >= sz)) {
+		AuLabel(new dpage);
+		err = -ENOMEM;
+		sz = dpages->ndpage * sizeof(*dpages->dpages);
+		p = au_kzrealloc(dpages->dpages, sz,
+				 sz + sizeof(*dpages->dpages), gfp,
+				 /*may_shrink*/0);
+		if (unlikely(!p))
+			goto out;
+
+		dpages->dpages = p;
+		dpage = dpages->dpages + dpages->ndpage;
+		p = (void *)__get_free_page(gfp);
+		if (unlikely(!p))
+			goto out;
+
+		dpage->ndentry = 0;
+		dpage->dentries = p;
+		dpages->ndpage++;
+	}
+
+	AuDebugOn(au_dcount(dentry) <= 0);
+	dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
+	return 0; /* success */
+
+out:
+	return err;
+}
+
+/* todo: BAD approach */
+/* copied from linux/fs/dcache.c */
+enum d_walk_ret {
+	D_WALK_CONTINUE,
+	D_WALK_QUIT,
+	D_WALK_NORETRY,
+	D_WALK_SKIP,
+};
+
+extern void d_walk(struct dentry *parent, void *data,
+		   enum d_walk_ret (*enter)(void *, struct dentry *));
+
+struct ac_dpages_arg {
+	int err;
+	struct au_dcsub_pages *dpages;
+	struct super_block *sb;
+	au_dpages_test test;
+	void *arg;
+};
+
+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry)
+{
+	enum d_walk_ret ret;
+	struct ac_dpages_arg *arg = _arg;
+
+	ret = D_WALK_CONTINUE;
+	if (dentry->d_sb == arg->sb
+	    && !IS_ROOT(dentry)
+	    && au_dcount(dentry) > 0
+	    && au_di(dentry)
+	    && (!arg->test || arg->test(dentry, arg->arg))) {
+		arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC);
+		if (unlikely(arg->err))
+			ret = D_WALK_QUIT;
+	}
+
+	return ret;
+}
+
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+		   au_dpages_test test, void *arg)
+{
+	struct ac_dpages_arg args = {
+		.err	= 0,
+		.dpages	= dpages,
+		.sb	= root->d_sb,
+		.test	= test,
+		.arg	= arg
+	};
+
+	d_walk(root, &args, au_call_dpages_append);
+
+	return args.err;
+}
+
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+		       int do_include, au_dpages_test test, void *arg)
+{
+	int err;
+
+	err = 0;
+	write_seqlock(&rename_lock);
+	spin_lock(&dentry->d_lock);
+	if (do_include
+	    && au_dcount(dentry) > 0
+	    && (!test || test(dentry, arg)))
+		err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+	spin_unlock(&dentry->d_lock);
+	if (unlikely(err))
+		goto out;
+
+	/*
+	 * RCU protection for the vfsmount is unnecessary since this is a
+	 * traversal within a single mount.
+	 */
+	while (!IS_ROOT(dentry)) {
+		dentry = dentry->d_parent; /* rename_lock is locked */
+		spin_lock(&dentry->d_lock);
+		if (au_dcount(dentry) > 0
+		    && (!test || test(dentry, arg)))
+			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+		spin_unlock(&dentry->d_lock);
+		if (unlikely(err))
+			break;
+	}
+
+out:
+	write_sequnlock(&rename_lock);
+	return err;
+}
+
+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
+{
+	return au_di(dentry) && dentry->d_sb == arg;
+}
+
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, int do_include)
+{
+	return au_dcsub_pages_rev(dpages, dentry, do_include,
+				  au_dcsub_dpages_aufs, dentry->d_sb);
+}
+
+int au_test_subdir(struct dentry *d1, struct dentry *d2)
+{
+	struct path path[2] = {
+		{
+			.dentry = d1
+		},
+		{
+			.dentry = d2
+		}
+	};
+
+	return path_is_under(path + 0, path + 1);
+}
diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h
new file mode 100644
index 00000000000..36f7fcdd7f2
--- /dev/null
+++ b/fs/aufs/dcsub.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#ifndef __AUFS_DCSUB_H__
+#define __AUFS_DCSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+struct au_dpage {
+	int ndentry;
+	struct dentry **dentries;
+};
+
+struct au_dcsub_pages {
+	int ndpage;
+	struct au_dpage *dpages;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dcsub.c */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
+void au_dpages_free(struct au_dcsub_pages *dpages);
+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+		   au_dpages_test test, void *arg);
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+		       int do_include, au_dpages_test test, void *arg);
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, int do_include);
+int au_test_subdir(struct dentry *d1, struct dentry *d2);
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * todo: in linux-3.13, several similar (but faster) helpers were added to
+ * include/linux/dcache.h. Try them in the future.
+ */
+
+static inline int au_d_hashed_positive(struct dentry *d)
+{
+	int err;
+	struct inode *inode = d_inode(d);
+
+	err = 0;
+	if (unlikely(d_unhashed(d)
+		     || d_is_negative(d)
+		     || !inode->i_nlink))
+		err = -ENOENT;
+	return err;
+}
+
+static inline int au_d_linkable(struct dentry *d)
+{
+	int err;
+	struct inode *inode = d_inode(d);
+
+	err = au_d_hashed_positive(d);
+	if (err
+	    && d_is_positive(d)
+	    && (inode->i_state & I_LINKABLE))
+		err = 0;
+	return err;
+}
+
+static inline int au_d_alive(struct dentry *d)
+{
+	int err;
+	struct inode *inode;
+
+	err = 0;
+	if (!IS_ROOT(d))
+		err = au_d_hashed_positive(d);
+	else {
+		inode = d_inode(d);
+		if (unlikely(d_unlinked(d)
+			     || d_is_negative(d)
+			     || !inode->i_nlink))
+			err = -ENOENT;
+	}
+	return err;
+}
+
+static inline int au_alive_dir(struct dentry *d)
+{
+	int err;
+
+	err = au_d_alive(d);
+	if (unlikely(err || IS_DEADDIR(d_inode(d))))
+		err = -ENOENT;
+	return err;
+}
+
+static inline int au_qstreq(struct qstr *a, struct qstr *b)
+{
+	return a->len == b->len
+		&& !memcmp(a->name, b->name, a->len);
+}
+
+/*
+ * By the commit
+ * 360f547 2015-01-25 dcache: let the dentry count go down to zero without
+ *			taking d_lock
+ * the type of d_lockref.count became int, but the inlined function d_count()
+ * still returns unsigned int.
+ * The reason is unclear; perhaps it is to keep the existing d_count() users
+ * unchanged. Either way, au_dcount() lives on.
+ */
+static inline int au_dcount(struct dentry *d)
+{
+	return (int)d_count(d);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DCSUB_H__ */
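
au_qstreq() above compares the cheap length field before falling back to memcmp(). The same check-length-first comparison, written without the kernel's struct qstr (hypothetical struct name):

    #include <string.h>

    struct name { unsigned int len; const char *s; };

    /* the lengths must match first; memcmp() runs only in that case */
    static inline int name_eq(const struct name *a, const struct name *b)
    {
        return a->len == b->len && !memcmp(a->s, b->s, a->len);
    }
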
diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c
new file mode 100644
index 00000000000..f0c076c6125
--- /dev/null
+++ b/fs/aufs/debug.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debug print functions
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+/* Returns 0, or -errno.  arg is in kp->arg. */
+static int param_atomic_t_set(const char *val, const struct kernel_param *kp)
+{
+	int err, n;
+
+	err = kstrtoint(val, 0, &n);
+	if (!err) {
+		if (n > 0)
+			au_debug_on();
+		else
+			au_debug_off();
+	}
+	return err;
+}
+
+/* Returns length written or -errno.  Buffer is 4k (ie. be short!) */
+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp)
+{
+	atomic_t *a;
+
+	a = kp->arg;
+	return sprintf(buffer, "%d", atomic_read(a));
+}
+
+static struct kernel_param_ops param_ops_atomic_t = {
+	.set = param_atomic_t_set,
+	.get = param_atomic_t_get
+	/* void (*free)(void *arg) */
+};
+
+atomic_t aufs_debug = ATOMIC_INIT(0);
+MODULE_PARM_DESC(debug, "debug print");
+module_param_named(debug, aufs_debug, atomic_t, 0664);
+
+DEFINE_MUTEX(au_dbg_mtx);	/* just to serialize the dbg msgs */
+char *au_plevel = KERN_DEBUG;
+#define dpri(fmt, ...) do {					\
+	if ((au_plevel						\
+	     && strcmp(au_plevel, KERN_DEBUG))			\
+	    || au_debug_test())					\
+		printk("%s" fmt, au_plevel, ##__VA_ARGS__);	\
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+void au_dpri_whlist(struct au_nhash *whlist)
+{
+	unsigned long ul, n;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (ul = 0; ul < n; ul++) {
+		hlist_for_each_entry(pos, head, wh_hash)
+			dpri("b%d, %.*s, %d\n",
+			     pos->wh_bindex,
+			     pos->wh_str.len, pos->wh_str.name,
+			     pos->wh_str.len);
+		head++;
+	}
+}
+
+void au_dpri_vdir(struct au_vdir *vdir)
+{
+	unsigned long ul;
+	union au_vdir_deblk_p p;
+	unsigned char *o;
+
+	if (!vdir || IS_ERR(vdir)) {
+		dpri("err %ld\n", PTR_ERR(vdir));
+		return;
+	}
+
+	dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %llu\n",
+	     vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
+	     vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
+	for (ul = 0; ul < vdir->vd_nblk; ul++) {
+		p.deblk = vdir->vd_deblk[ul];
+		o = p.deblk;
+		dpri("[%lu]: %p\n", ul, o);
+	}
+}
+
+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
+			struct dentry *wh)
+{
+	char *n = NULL;
+	int l = 0;
+
+	if (!inode || IS_ERR(inode)) {
+		dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
+		return -1;
+	}
+
+	/* the type of i_blocks depends upon CONFIG_LBDAF */
+	BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
+		     && sizeof(inode->i_blocks) != sizeof(u64));
+	if (wh) {
+		n = (void *)wh->d_name.name;
+		l = wh->d_name.len;
+	}
+
+	dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
+	     " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
+	     bindex, inode,
+	     inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
+	     atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
+	     i_size_read(inode), (unsigned long long)inode->i_blocks,
+	     hn, (long long)timespec64_to_ns(&inode->i_ctime) & 0x0ffff,
+	     inode->i_mapping ? inode->i_mapping->nrpages : 0,
+	     inode->i_state, inode->i_flags, inode_peek_iversion(inode),
+	     inode->i_generation,
+	     l ? ", wh " : "", l, n);
+	return 0;
+}
+
+void au_dpri_inode(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct au_hinode *hi;
+	aufs_bindex_t bindex;
+	int err, hn;
+
+	err = do_pri_inode(-1, inode, -1, NULL);
+	if (err || !au_test_aufs(inode->i_sb) || au_is_bad_inode(inode))
+		return;
+
+	iinfo = au_ii(inode);
+	dpri("i-1: btop %d, bbot %d, gen %d\n",
+	     iinfo->ii_btop, iinfo->ii_bbot, au_iigen(inode, NULL));
+	if (iinfo->ii_btop < 0)
+		return;
+	hn = 0;
+	for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot; bindex++) {
+		hi = au_hinode(iinfo, bindex);
+		hn = !!au_hn(hi);
+		do_pri_inode(bindex, hi->hi_inode, hn, hi->hi_whdentry);
+	}
+}
+
+void au_dpri_dalias(struct inode *inode)
+{
+	struct dentry *d;
+
+	spin_lock(&inode->i_lock);
+	hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias)
+		au_dpri_dentry(d);
+	spin_unlock(&inode->i_lock);
+}
+
+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
+{
+	struct dentry *wh = NULL;
+	int hn;
+	struct inode *inode;
+	struct au_iinfo *iinfo;
+	struct au_hinode *hi;
+
+	if (!dentry || IS_ERR(dentry)) {
+		dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
+		return -1;
+	}
+	/* do not call dget_parent() here */
+	/* note: access d_xxx without d_lock */
+	dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n",
+	     bindex, dentry, dentry,
+	     dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
+	     au_dcount(dentry), dentry->d_flags,
+	     d_unhashed(dentry) ? "un" : "");
+	hn = -1;
+	inode = NULL;
+	if (d_is_positive(dentry))
+		inode = d_inode(dentry);
+	if (inode
+	    && au_test_aufs(dentry->d_sb)
+	    && bindex >= 0
+	    && !au_is_bad_inode(inode)) {
+		iinfo = au_ii(inode);
+		hi = au_hinode(iinfo, bindex);
+		hn = !!au_hn(hi);
+		wh = hi->hi_whdentry;
+	}
+	do_pri_inode(bindex, inode, hn, wh);
+	return 0;
+}
+
+void au_dpri_dentry(struct dentry *dentry)
+{
+	struct au_dinfo *dinfo;
+	aufs_bindex_t bindex;
+	int err;
+
+	err = do_pri_dentry(-1, dentry);
+	if (err || !au_test_aufs(dentry->d_sb))
+		return;
+
+	dinfo = au_di(dentry);
+	if (!dinfo)
+		return;
+	dpri("d-1: btop %d, bbot %d, bwh %d, bdiropq %d, gen %d, tmp %d\n",
+	     dinfo->di_btop, dinfo->di_bbot,
+	     dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry),
+	     dinfo->di_tmpfile);
+	if (dinfo->di_btop < 0)
+		return;
+	for (bindex = dinfo->di_btop; bindex <= dinfo->di_bbot; bindex++)
+		do_pri_dentry(bindex, au_hdentry(dinfo, bindex)->hd_dentry);
+}
+
+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
+{
+	char a[32];
+
+	if (!file || IS_ERR(file)) {
+		dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
+		return -1;
+	}
+	a[0] = 0;
+	if (bindex < 0
+	    && !IS_ERR_OR_NULL(file->f_path.dentry)
+	    && au_test_aufs(file->f_path.dentry->d_sb)
+	    && au_fi(file))
+		snprintf(a, sizeof(a), ", gen %d, mmapped %d",
+			 au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
+	dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
+	     bindex, file->f_mode, file->f_flags, (long)file_count(file),
+	     file->f_version, file->f_pos, a);
+	if (!IS_ERR_OR_NULL(file->f_path.dentry))
+		do_pri_dentry(bindex, file->f_path.dentry);
+	return 0;
+}
+
+void au_dpri_file(struct file *file)
+{
+	struct au_finfo *finfo;
+	struct au_fidir *fidir;
+	struct au_hfile *hfile;
+	aufs_bindex_t bindex;
+	int err;
+
+	err = do_pri_file(-1, file);
+	if (err
+	    || IS_ERR_OR_NULL(file->f_path.dentry)
+	    || !au_test_aufs(file->f_path.dentry->d_sb))
+		return;
+
+	finfo = au_fi(file);
+	if (!finfo)
+		return;
+	if (finfo->fi_btop < 0)
+		return;
+	fidir = finfo->fi_hdir;
+	if (!fidir)
+		do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
+	else
+		for (bindex = finfo->fi_btop;
+		     bindex >= 0 && bindex <= fidir->fd_bbot;
+		     bindex++) {
+			hfile = fidir->fd_hfile + bindex;
+			do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
+		}
+}
+
+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
+{
+	struct vfsmount *mnt;
+	struct super_block *sb;
+
+	if (!br || IS_ERR(br))
+		goto out;
+	mnt = au_br_mnt(br);
+	if (!mnt || IS_ERR(mnt))
+		goto out;
+	sb = mnt->mnt_sb;
+	if (!sb || IS_ERR(sb))
+		goto out;
+
+	dpri("s%d: {perm 0x%x, id %d, wbr %p}, "
+	     "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
+	     "xino %d\n",
+	     bindex, br->br_perm, br->br_id, br->br_wbr,
+	     au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
+	     sb->s_flags, sb->s_count,
+	     atomic_read(&sb->s_active),
+	     !!au_xino_file(br->br_xino, /*idx*/-1));
+	return 0;
+
+out:
+	dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
+	return -1;
+}
+
+void au_dpri_sb(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+	aufs_bindex_t bindex;
+	int err;
+	/* to reduce stack size */
+	struct {
+		struct vfsmount mnt;
+		struct au_branch fake;
+	} *a;
+
+	/* this function can be called from magic sysrq */
+	a = kzalloc(sizeof(*a), GFP_ATOMIC);
+	if (unlikely(!a)) {
+		dpri("no memory\n");
+		return;
+	}
+
+	a->mnt.mnt_sb = sb;
+	a->fake.br_path.mnt = &a->mnt;
+	err = do_pri_br(-1, &a->fake);
+	au_kfree_rcu(a);
+	dpri("dev 0x%x\n", sb->s_dev);
+	if (err || !au_test_aufs(sb))
+		return;
+
+	sbinfo = au_sbi(sb);
+	if (!sbinfo)
+		return;
+	dpri("nw %d, gen %u, kobj %d\n",
+	     atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
+	     kref_read(&sbinfo->si_kobj.kref));
+	for (bindex = 0; bindex <= sbinfo->si_bbot; bindex++)
+		do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
+{
+	struct inode *h_inode, *inode = d_inode(dentry);
+	struct dentry *h_dentry;
+	aufs_bindex_t bindex, bbot, bi;
+
+	if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
+		return;
+
+	bbot = au_dbbot(dentry);
+	bi = au_ibbot(inode);
+	if (bi < bbot)
+		bbot = bi;
+	bindex = au_dbtop(dentry);
+	bi = au_ibtop(inode);
+	if (bi > bindex)
+		bindex = bi;
+
+	for (; bindex <= bbot; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		h_inode = au_h_iptr(inode, bindex);
+		if (unlikely(h_inode != d_inode(h_dentry))) {
+			au_debug_on();
+			AuDbg("b%d, %s:%d\n", bindex, func, line);
+			AuDbgDentry(dentry);
+			AuDbgInode(inode);
+			au_debug_off();
+			BUG();
+		}
+	}
+}
+
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
+{
+	int err, i, j;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	AuDebugOn(err);
+	err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
+	AuDebugOn(err);
+	for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		for (j = dpage->ndentry - 1; !err && j >= 0; j--)
+			AuDebugOn(au_digen_test(dentries[j], sigen));
+	}
+	au_dpages_free(&dpages);
+}
+
+void au_dbg_verify_kthread(void)
+{
+	if (au_wkq_test()) {
+		au_dbg_blocked();
+		/*
+		 * It may be recursive, but udba=notify between two aufs mounts,
+		 * where a single ro branch is shared, is not a problem.
+		 */
+		/* WARN_ON(1); */
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_debug_init(void)
+{
+	aufs_bindex_t bindex;
+	struct au_vdir_destr destr;
+
+	bindex = -1;
+	AuDebugOn(bindex >= 0);
+
+	destr.len = -1;
+	AuDebugOn(destr.len < NAME_MAX);
+
+#ifdef CONFIG_4KSTACKS
+	pr_warn("CONFIG_4KSTACKS is defined.\n");
+#endif
+
+	return 0;
+}
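
The "debug" module parameter above works because module_param_named(..., atomic_t, ...) expands to references to param_ops_atomic_t and param_check_atomic_t, which debug.c and debug.h provide. An equivalent, perhaps more familiar way to hook custom get/set handlers is module_param_cb(); a sketch with hypothetical names (not part of aufs):

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/atomic.h>

    static atomic_t demo_verbose = ATOMIC_INIT(0);

    static int demo_verbose_set(const char *val, const struct kernel_param *kp)
    {
        int err, n;

        err = kstrtoint(val, 0, &n);
        if (!err)
            atomic_set((atomic_t *)kp->arg, n > 0);
        return err;
    }

    static int demo_verbose_get(char *buffer, const struct kernel_param *kp)
    {
        return sprintf(buffer, "%d", atomic_read((atomic_t *)kp->arg));
    }

    static const struct kernel_param_ops demo_verbose_ops = {
        .set = demo_verbose_set,
        .get = demo_verbose_get,
    };

    /* readable/writable via /sys/module/<module>/parameters/verbose */
    module_param_cb(verbose, &demo_verbose_ops, &demo_verbose, 0644);
    MODULE_PARM_DESC(verbose, "print extra diagnostics when > 0");
    MODULE_LICENSE("GPL");
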
diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h
new file mode 100644
index 00000000000..7e46953513f
--- /dev/null
+++ b/fs/aufs/debug.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * debug print functions
+ */
+
+#ifndef __AUFS_DEBUG_H__
+#define __AUFS_DEBUG_H__
+
+#ifdef __KERNEL__
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDebugOn(a)		BUG_ON(a)
+
+/* module parameter */
+extern atomic_t aufs_debug;
+static inline void au_debug_on(void)
+{
+	atomic_inc(&aufs_debug);
+}
+static inline void au_debug_off(void)
+{
+	atomic_dec_if_positive(&aufs_debug);
+}
+
+static inline int au_debug_test(void)
+{
+	return atomic_read(&aufs_debug) > 0;
+}
+#else
+#define AuDebugOn(a)		do {} while (0)
+AuStubVoid(au_debug_on, void)
+AuStubVoid(au_debug_off, void)
+AuStubInt0(au_debug_test, void)
+#endif /* CONFIG_AUFS_DEBUG */
+
+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t)
+
+/* ---------------------------------------------------------------------- */
+
+/* debug print */
+
+#define AuDbg(fmt, ...) do { \
+	if (au_debug_test()) \
+		pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
+} while (0)
+#define AuLabel(l)		AuDbg(#l "\n")
+#define AuIOErr(fmt, ...)	pr_err("I/O Error, " fmt, ##__VA_ARGS__)
+#define AuWarn1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuErr1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		pr_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuIOErr1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		AuIOErr(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuUnsupportMsg	"This operation is not supported." \
+			" Please report this application to aufs-users ML."
+#define AuUnsupport(fmt, ...) do { \
+	pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
+	dump_stack(); \
+} while (0)
+
+#define AuTraceErr(e) do { \
+	if (unlikely((e) < 0)) \
+		AuDbg("err %d\n", (int)(e)); \
+} while (0)
+
+#define AuTraceErrPtr(p) do { \
+	if (IS_ERR(p)) \
+		AuDbg("err %ld\n", PTR_ERR(p)); \
+} while (0)
+
+/* dirty macros for debug print, use with "%.*s" and caution */
+#define AuLNPair(qstr)		(qstr)->len, (qstr)->name
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry;
+#ifdef CONFIG_AUFS_DEBUG
+extern struct mutex au_dbg_mtx;
+extern char *au_plevel;
+struct au_nhash;
+void au_dpri_whlist(struct au_nhash *whlist);
+struct au_vdir;
+void au_dpri_vdir(struct au_vdir *vdir);
+struct inode;
+void au_dpri_inode(struct inode *inode);
+void au_dpri_dalias(struct inode *inode);
+void au_dpri_dentry(struct dentry *dentry);
+struct file;
+void au_dpri_file(struct file *filp);
+struct super_block;
+void au_dpri_sb(struct super_block *sb);
+
+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
+void au_dbg_verify_kthread(void);
+
+int __init au_debug_init(void);
+
+#define AuDbgWhlist(w) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#w "\n"); \
+	au_dpri_whlist(w); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgVdir(v) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#v "\n"); \
+	au_dpri_vdir(v); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgInode(i) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#i "\n"); \
+	au_dpri_inode(i); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDAlias(i) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#i "\n"); \
+	au_dpri_dalias(i); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDentry(d) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#d "\n"); \
+	au_dpri_dentry(d); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgFile(f) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#f "\n"); \
+	au_dpri_file(f); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSb(sb) do { \
+	mutex_lock(&au_dbg_mtx); \
+	AuDbg(#sb "\n"); \
+	au_dpri_sb(sb); \
+	mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSym(addr) do {				\
+	char sym[KSYM_SYMBOL_LEN];			\
+	sprint_symbol(sym, (unsigned long)addr);	\
+	AuDbg("%s\n", sym);				\
+} while (0)
+#else
+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
+AuStubVoid(au_dbg_verify_kthread, void)
+AuStubInt0(__init au_debug_init, void)
+
+#define AuDbgWhlist(w)		do {} while (0)
+#define AuDbgVdir(v)		do {} while (0)
+#define AuDbgInode(i)		do {} while (0)
+#define AuDbgDAlias(i)		do {} while (0)
+#define AuDbgDentry(d)		do {} while (0)
+#define AuDbgFile(f)		do {} while (0)
+#define AuDbgSb(sb)		do {} while (0)
+#define AuDbgSym(addr)		do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+int __init au_sysrq_init(void);
+void au_sysrq_fin(void);
+
+#ifdef CONFIG_HW_CONSOLE
+#define au_dbg_blocked() do { \
+	WARN_ON(1); \
+	handle_sysrq('w'); \
+} while (0)
+#else
+AuStubVoid(au_dbg_blocked, void)
+#endif
+
+#else
+AuStubInt0(__init au_sysrq_init, void)
+AuStubVoid(au_sysrq_fin, void)
+AuStubVoid(au_dbg_blocked, void)
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DEBUG_H__ */
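
AuWarn1(), AuErr1() and AuIOErr1() above limit themselves to a single message by bumping a function-local static counter. A standalone sketch of that print-once pattern (hypothetical macro name):

    #include <stdio.h>

    /* print the message only the first time this macro instance is hit */
    #define warn_once(fmt, ...) do { \
        static unsigned char _c; \
        if (!_c++) \
            fprintf(stderr, "warning: " fmt, ##__VA_ARGS__); \
    } while (0)

    int main(void)
    {
        int i;

        for (i = 0; i < 3; i++)
            warn_once("seen on the first pass only (i=%d)\n", i);
        return 0;
    }

Because the counter lives at the macro expansion site, each call site gets its own one-shot behaviour, which is also how the kernel's printk_once()/WARN_ONCE() helpers work.
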
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
new file mode 100644
index 00000000000..50aaaa21d08
--- /dev/null
+++ b/fs/aufs/dentry.c
@@ -0,0 +1,1154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#include <linux/iversion.h>
+#include <linux/namei.h>
+#include "aufs.h"
+
+/*
+ * returns a positive/negative dentry, NULL, or an error.
+ * NULL means whiteout-ed or not found.
+ */
+static struct dentry*
+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
+	     aufs_bindex_t bindex, struct au_do_lookup_args *args)
+{
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct au_branch *br;
+	int wh_found, opq;
+	unsigned char wh_able;
+	const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
+	const unsigned char ignore_perm = !!au_ftest_lkup(args->flags,
+							  IGNORE_PERM);
+
+	wh_found = 0;
+	br = au_sbr(dentry->d_sb, bindex);
+	wh_able = !!au_br_whable(br->br_perm);
+	if (wh_able)
+		wh_found = au_wh_test(h_parent, &args->whname, ignore_perm);
+	h_dentry = ERR_PTR(wh_found);
+	if (!wh_found)
+		goto real_lookup;
+	if (unlikely(wh_found < 0))
+		goto out;
+
+	/* We found a whiteout */
+	/* au_set_dbbot(dentry, bindex); */
+	au_set_dbwh(dentry, bindex);
+	if (!allow_neg)
+		return NULL; /* success */
+
+real_lookup:
+	if (!ignore_perm)
+		h_dentry = vfsub_lkup_one(args->name, h_parent);
+	else
+		h_dentry = au_sio_lkup_one(args->name, h_parent);
+	if (IS_ERR(h_dentry)) {
+		if (PTR_ERR(h_dentry) == -ENAMETOOLONG
+		    && !allow_neg)
+			h_dentry = NULL;
+		goto out;
+	}
+
+	h_inode = d_inode(h_dentry);
+	if (d_is_negative(h_dentry)) {
+		if (!allow_neg)
+			goto out_neg;
+	} else if (wh_found
+		   || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
+		goto out_neg;
+	else if (au_ftest_lkup(args->flags, DIRREN)
+		 /* && h_inode */
+		 && !au_dr_lkup_h_ino(args, bindex, h_inode->i_ino)) {
+		AuDbg("b%d %pd ignored hi%llu\n", bindex, h_dentry,
+		      (unsigned long long)h_inode->i_ino);
+		goto out_neg;
+	}
+
+	if (au_dbbot(dentry) <= bindex)
+		au_set_dbbot(dentry, bindex);
+	if (au_dbtop(dentry) < 0 || bindex < au_dbtop(dentry))
+		au_set_dbtop(dentry, bindex);
+	au_set_h_dptr(dentry, bindex, h_dentry);
+
+	if (!d_is_dir(h_dentry)
+	    || !wh_able
+	    || (d_really_is_positive(dentry) && !d_is_dir(dentry)))
+		goto out; /* success */
+
+	inode_lock_shared_nested(h_inode, AuLsc_I_CHILD);
+	opq = au_diropq_test(h_dentry);
+	inode_unlock_shared(h_inode);
+	if (opq > 0)
+		au_set_dbdiropq(dentry, bindex);
+	else if (unlikely(opq < 0)) {
+		au_set_h_dptr(dentry, bindex, NULL);
+		h_dentry = ERR_PTR(opq);
+	}
+	goto out;
+
+out_neg:
+	dput(h_dentry);
+	h_dentry = NULL;
+out:
+	return h_dentry;
+}
+
+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
+{
+	if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
+		     && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
+		return -EPERM;
+	return 0;
+}
+
+/*
+ * returns the number of lower positive dentries,
+ * otherwise an error.
+ * can be called at unlink time with @type set to zero.
+ */
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t btop,
+		   unsigned int flags)
+{
+	int npositive, err;
+	aufs_bindex_t bindex, btail, bdiropq;
+	unsigned char isdir, dirperm1, dirren;
+	struct au_do_lookup_args args = {
+		.flags		= flags,
+		.name		= &dentry->d_name
+	};
+	struct dentry *parent;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	err = au_test_shwh(sb, args.name);
+	if (unlikely(err))
+		goto out;
+
+	err = au_wh_name_alloc(&args.whname, args.name);
+	if (unlikely(err))
+		goto out;
+
+	isdir = !!d_is_dir(dentry);
+	dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1);
+	dirren = !!au_opt_test(au_mntflags(sb), DIRREN);
+	if (dirren)
+		au_fset_lkup(args.flags, DIRREN);
+
+	npositive = 0;
+	parent = dget_parent(dentry);
+	btail = au_dbtaildir(parent);
+	for (bindex = btop; bindex <= btail; bindex++) {
+		struct dentry *h_parent, *h_dentry;
+		struct inode *h_inode, *h_dir;
+		struct au_branch *br;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry) {
+			if (d_is_positive(h_dentry))
+				npositive++;
+			break;
+		}
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || !d_is_dir(h_parent))
+			continue;
+
+		if (dirren) {
+			/* if the inum matches, then use the prepared name */
+			err = au_dr_lkup_name(&args, bindex);
+			if (unlikely(err))
+				goto out_parent;
+		}
+
+		h_dir = d_inode(h_parent);
+		inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+		h_dentry = au_do_lookup(h_parent, dentry, bindex, &args);
+		inode_unlock_shared(h_dir);
+		err = PTR_ERR(h_dentry);
+		if (IS_ERR(h_dentry))
+			goto out_parent;
+		if (h_dentry)
+			au_fclr_lkup(args.flags, ALLOW_NEG);
+		if (dirperm1)
+			au_fset_lkup(args.flags, IGNORE_PERM);
+
+		if (au_dbwh(dentry) == bindex)
+			break;
+		if (!h_dentry)
+			continue;
+		if (d_is_negative(h_dentry))
+			continue;
+		h_inode = d_inode(h_dentry);
+		npositive++;
+		if (!args.type)
+			args.type = h_inode->i_mode & S_IFMT;
+		if (args.type != S_IFDIR)
+			break;
+		else if (isdir) {
+			/* the type of the lower entry may be different */
+			bdiropq = au_dbdiropq(dentry);
+			if (bdiropq >= 0 && bdiropq <= bindex)
+				break;
+		}
+		br = au_sbr(sb, bindex);
+		if (dirren
+		    && au_dr_hino_test_add(&br->br_dirren, h_inode->i_ino,
+					   /*add_ent*/NULL)) {
+			/* prepare next name to lookup */
+			err = au_dr_lkup(&args, dentry, bindex);
+			if (unlikely(err))
+				goto out_parent;
+		}
+	}
+
+	if (npositive) {
+		AuLabel(positive);
+		au_update_dbtop(dentry);
+	}
+	err = npositive;
+	if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE)
+		     && au_dbtop(dentry) < 0)) {
+		err = -EIO;
+		AuIOErr("both of real entry and whiteout found, %pd, err %d\n",
+			dentry, err);
+	}
+
+out_parent:
+	dput(parent);
+	au_kfree_try_rcu(args.whname.name);
+	if (dirren)
+		au_dr_lkup_fin(&args);
+out:
+	return err;
+}
+
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent)
+{
+	struct dentry *dentry;
+	int wkq_err;
+
+	if (!au_test_h_perm_sio(d_inode(parent), MAY_EXEC))
+		dentry = vfsub_lkup_one(name, parent);
+	else {
+		struct vfsub_lkup_one_args args = {
+			.errp	= &dentry,
+			.name	= name,
+			.parent	= parent
+		};
+
+		wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args);
+		if (unlikely(wkq_err))
+			dentry = ERR_PTR(wkq_err);
+	}
+
+	return dentry;
+}
+
+/*
+ * lookup @dentry on @bindex which should be negative.
+ */
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
+{
+	int err;
+	struct dentry *parent, *h_parent, *h_dentry;
+	struct au_branch *br;
+
+	parent = dget_parent(dentry);
+	h_parent = au_h_dptr(parent, bindex);
+	br = au_sbr(dentry->d_sb, bindex);
+	if (wh)
+		h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+	else
+		h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
+	err = PTR_ERR(h_dentry);
+	if (IS_ERR(h_dentry))
+		goto out;
+	if (unlikely(d_is_positive(h_dentry))) {
+		err = -EIO;
+		AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex);
+		dput(h_dentry);
+		goto out;
+	}
+
+	err = 0;
+	if (bindex < au_dbtop(dentry))
+		au_set_dbtop(dentry, bindex);
+	if (au_dbbot(dentry) < bindex)
+		au_set_dbbot(dentry, bindex);
+	au_set_h_dptr(dentry, bindex, h_dentry);
+
+out:
+	dput(parent);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* subset of struct inode */
+struct au_iattr {
+	unsigned long		i_ino;
+	/* unsigned int		i_nlink; */
+	kuid_t			i_uid;
+	kgid_t			i_gid;
+	u64			i_version;
+/*
+	loff_t			i_size;
+	blkcnt_t		i_blocks;
+*/
+	umode_t			i_mode;
+};
+
+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
+{
+	ia->i_ino = h_inode->i_ino;
+	/* ia->i_nlink = h_inode->i_nlink; */
+	ia->i_uid = h_inode->i_uid;
+	ia->i_gid = h_inode->i_gid;
+	ia->i_version = inode_query_iversion(h_inode);
+/*
+	ia->i_size = h_inode->i_size;
+	ia->i_blocks = h_inode->i_blocks;
+*/
+	ia->i_mode = (h_inode->i_mode & S_IFMT);
+}
+
+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
+{
+	return ia->i_ino != h_inode->i_ino
+		/* || ia->i_nlink != h_inode->i_nlink */
+		|| !uid_eq(ia->i_uid, h_inode->i_uid)
+		|| !gid_eq(ia->i_gid, h_inode->i_gid)
+		|| !inode_eq_iversion(h_inode, ia->i_version)
+/*
+		|| ia->i_size != h_inode->i_size
+		|| ia->i_blocks != h_inode->i_blocks
+*/
+		|| ia->i_mode != (h_inode->i_mode & S_IFMT);
+}
+
+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
+			      struct au_branch *br)
+{
+	int err;
+	struct au_iattr ia;
+	struct inode *h_inode;
+	struct dentry *h_d;
+	struct super_block *h_sb;
+
+	err = 0;
+	memset(&ia, -1, sizeof(ia));
+	h_sb = h_dentry->d_sb;
+	h_inode = NULL;
+	if (d_is_positive(h_dentry)) {
+		h_inode = d_inode(h_dentry);
+		au_iattr_save(&ia, h_inode);
+	} else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
+		/* nfs d_revalidate may return 0 for a negative dentry */
+		/* fuse d_revalidate always returns 0 for a negative dentry */
+		goto out;
+
+	/* main purpose is namei.c:cached_lookup() and d_revalidate */
+	h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent);
+	err = PTR_ERR(h_d);
+	if (IS_ERR(h_d))
+		goto out;
+
+	err = 0;
+	if (unlikely(h_d != h_dentry
+		     || d_inode(h_d) != h_inode
+		     || (h_inode && au_iattr_test(&ia, h_inode))))
+		err = au_busy_or_stale();
+	dput(h_d);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+		struct dentry *h_parent, struct au_branch *br)
+{
+	int err;
+
+	err = 0;
+	if (udba == AuOpt_UDBA_REVAL
+	    && !au_test_fs_remote(h_dentry->d_sb)) {
+		IMustLock(h_dir);
+		err = (d_inode(h_dentry->d_parent) != h_dir);
+	} else if (udba != AuOpt_UDBA_NONE)
+		err = au_h_verify_dentry(h_dentry, h_parent, br);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
+{
+	int err;
+	aufs_bindex_t new_bindex, bindex, bbot, bwh, bdiropq;
+	struct au_hdentry tmp, *p, *q;
+	struct au_dinfo *dinfo;
+	struct super_block *sb;
+
+	DiMustWriteLock(dentry);
+
+	sb = dentry->d_sb;
+	dinfo = au_di(dentry);
+	bbot = dinfo->di_bbot;
+	bwh = dinfo->di_bwh;
+	bdiropq = dinfo->di_bdiropq;
+	bindex = dinfo->di_btop;
+	p = au_hdentry(dinfo, bindex);
+	for (; bindex <= bbot; bindex++, p++) {
+		if (!p->hd_dentry)
+			continue;
+
+		new_bindex = au_br_index(sb, p->hd_id);
+		if (new_bindex == bindex)
+			continue;
+
+		if (dinfo->di_bwh == bindex)
+			bwh = new_bindex;
+		if (dinfo->di_bdiropq == bindex)
+			bdiropq = new_bindex;
+		if (new_bindex < 0) {
+			au_hdput(p);
+			p->hd_dentry = NULL;
+			continue;
+		}
+
+		/* swap two lower dentries, and loop again */
+		q = au_hdentry(dinfo, new_bindex);
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hd_dentry) {
+			bindex--;
+			p--;
+		}
+	}
+
+	dinfo->di_bwh = -1;
+	if (bwh >= 0 && bwh <= au_sbbot(sb) && au_sbr_whable(sb, bwh))
+		dinfo->di_bwh = bwh;
+
+	dinfo->di_bdiropq = -1;
+	if (bdiropq >= 0
+	    && bdiropq <= au_sbbot(sb)
+	    && au_sbr_whable(sb, bdiropq))
+		dinfo->di_bdiropq = bdiropq;
+
+	err = -EIO;
+	dinfo->di_btop = -1;
+	dinfo->di_bbot = -1;
+	bbot = au_dbbot(parent);
+	bindex = 0;
+	p = au_hdentry(dinfo, bindex);
+	for (; bindex <= bbot; bindex++, p++)
+		if (p->hd_dentry) {
+			dinfo->di_btop = bindex;
+			break;
+		}
+
+	if (dinfo->di_btop >= 0) {
+		bindex = bbot;
+		p = au_hdentry(dinfo, bindex);
+		for (; bindex >= 0; bindex--, p--)
+			if (p->hd_dentry) {
+				dinfo->di_bbot = bindex;
+				err = 0;
+				break;
+			}
+	}
+
+	return err;
+}
+
+static void au_do_hide(struct dentry *dentry)
+{
+	struct inode *inode;
+
+	if (d_really_is_positive(dentry)) {
+		inode = d_inode(dentry);
+		if (!d_is_dir(dentry)) {
+			if (inode->i_nlink && !d_unhashed(dentry))
+				drop_nlink(inode);
+		} else {
+			clear_nlink(inode);
+			/* stop next lookup */
+			inode->i_flags |= S_DEAD;
+		}
+		smp_mb(); /* necessary? */
+	}
+	d_drop(dentry);
+}
+
+static int au_hide_children(struct dentry *parent)
+{
+	int err, i, j, ndentry;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry *dentry;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, parent, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	/* in reverse order */
+	for (i = dpages.ndpage - 1; i >= 0; i--) {
+		dpage = dpages.dpages + i;
+		ndentry = dpage->ndentry;
+		for (j = ndentry - 1; j >= 0; j--) {
+			dentry = dpage->dentries[j];
+			if (dentry != parent)
+				au_do_hide(dentry);
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static void au_hide(struct dentry *dentry)
+{
+	int err;
+
+	AuDbgDentry(dentry);
+	if (d_is_dir(dentry)) {
+		/* shrink_dcache_parent(dentry); */
+		err = au_hide_children(dentry);
+		if (unlikely(err))
+			AuIOErr("%pd, failed hiding children, ignored %d\n",
+				dentry, err);
+	}
+	au_do_hide(dentry);
+}
+
+/*
+ * By adding a dirty branch, a cached dentry may be affected in various ways.
+ *
+ * a dirty branch is added
+ * - on the top of layers
+ * - in the middle of layers
+ * - to the bottom of layers
+ *
+ * on the added branch there exists
+ * - a whiteout
+ * - a diropq
+ * - a same named entry
+ *   + exist
+ *     * negative --> positive
+ *     * positive --> positive
+ *	 - type is unchanged
+ *	 - type is changed
+ *   + doesn't exist
+ *     * negative --> negative
+ *     * positive --> negative (rejected by au_br_del() for non-dir case)
+ * - none
+ */
+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
+			       struct au_dinfo *tmp)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct {
+		struct dentry *dentry;
+		struct inode *inode;
+		mode_t mode;
+	} orig_h, tmp_h = {
+		.dentry = NULL
+	};
+	struct au_hdentry *hd;
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry;
+
+	err = 0;
+	AuDebugOn(dinfo->di_btop < 0);
+	orig_h.mode = 0;
+	orig_h.dentry = au_hdentry(dinfo, dinfo->di_btop)->hd_dentry;
+	orig_h.inode = NULL;
+	if (d_is_positive(orig_h.dentry)) {
+		orig_h.inode = d_inode(orig_h.dentry);
+		orig_h.mode = orig_h.inode->i_mode & S_IFMT;
+	}
+	if (tmp->di_btop >= 0) {
+		tmp_h.dentry = au_hdentry(tmp, tmp->di_btop)->hd_dentry;
+		if (d_is_positive(tmp_h.dentry)) {
+			tmp_h.inode = d_inode(tmp_h.dentry);
+			tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
+		}
+	}
+
+	inode = NULL;
+	if (d_really_is_positive(dentry))
+		inode = d_inode(dentry);
+	if (!orig_h.inode) {
+		AuDbg("negative originally\n");
+		if (inode) {
+			au_hide(dentry);
+			goto out;
+		}
+		AuDebugOn(inode);
+		AuDebugOn(dinfo->di_btop != dinfo->di_bbot);
+		AuDebugOn(dinfo->di_bdiropq != -1);
+
+		if (!tmp_h.inode) {
+			AuDbg("negative --> negative\n");
+			/* should have only one negative lower */
+			if (tmp->di_btop >= 0
+			    && tmp->di_btop < dinfo->di_btop) {
+				AuDebugOn(tmp->di_btop != tmp->di_bbot);
+				AuDebugOn(dinfo->di_btop != dinfo->di_bbot);
+				au_set_h_dptr(dentry, dinfo->di_btop, NULL);
+				au_di_cp(dinfo, tmp);
+				hd = au_hdentry(tmp, tmp->di_btop);
+				au_set_h_dptr(dentry, tmp->di_btop,
+					      dget(hd->hd_dentry));
+			}
+			au_dbg_verify_dinode(dentry);
+		} else {
+			AuDbg("negative --> positive\n");
+			/*
+			 * similar to the behaviour of creating a file while
+			 * bypassing aufs.
+			 * unhash it in order to force an error in the
+			 * succeeding create operation.
+			 * we should not set S_DEAD here.
+			 */
+			d_drop(dentry);
+			/* au_di_swap(tmp, dinfo); */
+			au_dbg_verify_dinode(dentry);
+		}
+	} else {
+		AuDbg("positive originally\n");
+		/* inode may be NULL */
+		AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
+		if (!tmp_h.inode) {
+			AuDbg("positive --> negative\n");
+			/* or bypassing aufs */
+			au_hide(dentry);
+			if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_btop)
+				dinfo->di_bwh = tmp->di_bwh;
+			if (inode)
+				err = au_refresh_hinode_self(inode);
+			au_dbg_verify_dinode(dentry);
+		} else if (orig_h.mode == tmp_h.mode) {
+			AuDbg("positive --> positive, same type\n");
+			if (!S_ISDIR(orig_h.mode)
+			    && dinfo->di_btop > tmp->di_btop) {
+				/*
+				 * similar to the behaviour of removing and
+				 * creating.
+				 */
+				au_hide(dentry);
+				if (inode)
+					err = au_refresh_hinode_self(inode);
+				au_dbg_verify_dinode(dentry);
+			} else {
+				/* fill empty slots */
+				if (dinfo->di_btop > tmp->di_btop)
+					dinfo->di_btop = tmp->di_btop;
+				if (dinfo->di_bbot < tmp->di_bbot)
+					dinfo->di_bbot = tmp->di_bbot;
+				dinfo->di_bwh = tmp->di_bwh;
+				dinfo->di_bdiropq = tmp->di_bdiropq;
+				bbot = dinfo->di_bbot;
+				bindex = tmp->di_btop;
+				hd = au_hdentry(tmp, bindex);
+				for (; bindex <= bbot; bindex++, hd++) {
+					if (au_h_dptr(dentry, bindex))
+						continue;
+					h_dentry = hd->hd_dentry;
+					if (!h_dentry)
+						continue;
+					AuDebugOn(d_is_negative(h_dentry));
+					h_inode = d_inode(h_dentry);
+					AuDebugOn(orig_h.mode
+						  != (h_inode->i_mode
+						      & S_IFMT));
+					au_set_h_dptr(dentry, bindex,
+						      dget(h_dentry));
+				}
+				if (inode)
+					err = au_refresh_hinode(inode, dentry);
+				au_dbg_verify_dinode(dentry);
+			}
+		} else {
+			AuDbg("positive --> positive, different type\n");
+			/* similar to the behaviour of removing and creating */
+			au_hide(dentry);
+			if (inode)
+				err = au_refresh_hinode_self(inode);
+			au_dbg_verify_dinode(dentry);
+		}
+	}
+
+out:
+	return err;
+}
+
+void au_refresh_dop(struct dentry *dentry, int force_reval)
+{
+	const struct dentry_operations *dop
+		= force_reval ? &aufs_dop : dentry->d_sb->s_d_op;
+	static const unsigned int mask
+		= DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE;
+
+	BUILD_BUG_ON(sizeof(mask) != sizeof(dentry->d_flags));
+
+	if (dentry->d_op == dop)
+		return;
+
+	AuDbg("%pd\n", dentry);
+	spin_lock(&dentry->d_lock);
+	if (dop == &aufs_dop)
+		dentry->d_flags |= mask;
+	else
+		dentry->d_flags &= ~mask;
+	dentry->d_op = dop;
+	spin_unlock(&dentry->d_lock);
+}
+
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
+{
+	int err, ebrange, nbr;
+	unsigned int sigen;
+	struct au_dinfo *dinfo, *tmp;
+	struct super_block *sb;
+	struct inode *inode;
+
+	DiMustWriteLock(dentry);
+	AuDebugOn(IS_ROOT(dentry));
+	AuDebugOn(d_really_is_negative(parent));
+
+	sb = dentry->d_sb;
+	sigen = au_sigen(sb);
+	err = au_digen_test(parent, sigen);
+	if (unlikely(err))
+		goto out;
+
+	nbr = au_sbbot(sb) + 1;
+	dinfo = au_di(dentry);
+	err = au_di_realloc(dinfo, nbr, /*may_shrink*/0);
+	if (unlikely(err))
+		goto out;
+	ebrange = au_dbrange_test(dentry);
+	if (!ebrange)
+		ebrange = au_do_refresh_hdentry(dentry, parent);
+
+	if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) {
+		AuDebugOn(au_dbtop(dentry) < 0 && au_dbbot(dentry) >= 0);
+		if (d_really_is_positive(dentry)) {
+			inode = d_inode(dentry);
+			err = au_refresh_hinode_self(inode);
+		}
+		au_dbg_verify_dinode(dentry);
+		if (!err)
+			goto out_dgen; /* success */
+		goto out;
+	}
+
+	/* temporary dinfo */
+	AuDbgDentry(dentry);
+	err = -ENOMEM;
+	tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+	if (unlikely(!tmp))
+		goto out;
+	au_di_swap(tmp, dinfo);
+	/* returns the number of positive dentries */
+	/*
+	 * if the current working dir has been removed, it returns an error,
+	 * but the dentry itself is still valid.
+	 */
+	err = au_lkup_dentry(dentry, /*btop*/0, AuLkup_ALLOW_NEG);
+	AuDbgDentry(dentry);
+	au_di_swap(tmp, dinfo);
+	if (err == -ENOENT)
+		err = 0;
+	if (err >= 0) {
+		/* compare/refresh by dinfo */
+		AuDbgDentry(dentry);
+		err = au_refresh_by_dinfo(dentry, dinfo, tmp);
+		au_dbg_verify_dinode(dentry);
+		AuTraceErr(err);
+	}
+	au_di_realloc(dinfo, nbr, /*may_shrink*/1); /* harmless if err */
+	au_rw_write_unlock(&tmp->di_rwsem);
+	au_di_free(tmp);
+	if (unlikely(err))
+		goto out;
+
+out_dgen:
+	au_update_digen(dentry);
+out:
+	if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
+		AuIOErr("failed refreshing %pd, %d\n", dentry, err);
+		AuDbgDentry(dentry);
+	}
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags,
+			   struct dentry *dentry, aufs_bindex_t bindex)
+{
+	int err, valid;
+
+	err = 0;
+	if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
+		goto out;
+
+	AuDbg("b%d\n", bindex);
+	/*
+	 * we gave up supporting LOOKUP_CREATE/OPEN for the lower fs,
+	 * due to whiteouts and branch permissions.
+	 */
+	flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
+		   | LOOKUP_FOLLOW | LOOKUP_EXCL);
+	/* it may return tri-state */
+	valid = h_dentry->d_op->d_revalidate(h_dentry, flags);
+
+	if (unlikely(valid < 0))
+		err = valid;
+	else if (!valid)
+		err = -EINVAL;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* todo: remove this */
+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
+			  unsigned int flags, int do_udba, int dirren)
+{
+	int err;
+	umode_t mode, h_mode;
+	aufs_bindex_t bindex, btail, btop, ibs, ibe;
+	unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile;
+	struct inode *h_inode, *h_cached_inode;
+	struct dentry *h_dentry;
+	struct qstr *name, *h_name;
+
+	err = 0;
+	plus = 0;
+	mode = 0;
+	ibs = -1;
+	ibe = -1;
+	unhashed = !!d_unhashed(dentry);
+	is_root = !!IS_ROOT(dentry);
+	name = &dentry->d_name;
+	tmpfile = au_di(dentry)->di_tmpfile;
+
+	/*
+	 * Theoretically, the REVAL test should be unnecessary when
+	 * {FS,I}NOTIFY is in use.
+	 * But {fs,i}notify does not fire some necessary events, e.g.
+	 *	IN_ATTRIB for atime/nlink/pageio,
+	 * so do the REVAL test too.
+	 */
+	if (do_udba && inode) {
+		mode = (inode->i_mode & S_IFMT);
+		plus = (inode->i_nlink > 0);
+		ibs = au_ibtop(inode);
+		ibe = au_ibbot(inode);
+	}
+
+	btop = au_dbtop(dentry);
+	btail = btop;
+	if (inode && S_ISDIR(inode->i_mode))
+		btail = au_dbtaildir(dentry);
+	for (bindex = btop; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+
+		AuDbg("b%d, %pd\n", bindex, h_dentry);
+		h_nfs = !!au_test_nfs(h_dentry->d_sb);
+		spin_lock(&h_dentry->d_lock);
+		h_name = &h_dentry->d_name;
+		if (unlikely(do_udba
+			     && !is_root
+			     && ((!h_nfs
+				  && (unhashed != !!d_unhashed(h_dentry)
+				      || (!tmpfile && !dirren
+					  && !au_qstreq(name, h_name))
+					  ))
+				 || (h_nfs
+				     && !(flags & LOOKUP_OPEN)
+				     && (h_dentry->d_flags
+					 & DCACHE_NFSFS_RENAMED)))
+			    )) {
+			int h_unhashed;
+
+			h_unhashed = d_unhashed(h_dentry);
+			spin_unlock(&h_dentry->d_lock);
+			AuDbg("unhash 0x%x 0x%x, %pd %pd\n",
+			      unhashed, h_unhashed, dentry, h_dentry);
+			goto err;
+		}
+		spin_unlock(&h_dentry->d_lock);
+
+		err = au_do_h_d_reval(h_dentry, flags, dentry, bindex);
+		if (unlikely(err))
+			/* do not goto err, to keep the errno */
+			break;
+
+		/* todo: plink too? */
+		if (!do_udba)
+			continue;
+
+		/* UDBA tests */
+		if (unlikely(!!inode != d_is_positive(h_dentry)))
+			goto err;
+
+		h_inode = NULL;
+		if (d_is_positive(h_dentry))
+			h_inode = d_inode(h_dentry);
+		h_plus = plus;
+		h_mode = mode;
+		h_cached_inode = h_inode;
+		if (h_inode) {
+			h_mode = (h_inode->i_mode & S_IFMT);
+			h_plus = (h_inode->i_nlink > 0);
+		}
+		if (inode && ibs <= bindex && bindex <= ibe)
+			h_cached_inode = au_h_iptr(inode, bindex);
+
+		if (!h_nfs) {
+			if (unlikely(plus != h_plus && !tmpfile))
+				goto err;
+		} else {
+			if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED)
+				     && !is_root
+				     && !IS_ROOT(h_dentry)
+				     && unhashed != d_unhashed(h_dentry)))
+				goto err;
+		}
+		if (unlikely(mode != h_mode
+			     || h_cached_inode != h_inode))
+			goto err;
+		continue;
+
+err:
+		err = -EINVAL;
+		break;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct dentry *parent;
+
+	if (!au_digen_test(dentry, sigen))
+		return 0;
+
+	parent = dget_parent(dentry);
+	di_read_lock_parent(parent, AuLock_IR);
+	AuDebugOn(au_digen_test(parent, sigen));
+	au_dbg_verify_gen(parent, sigen);
+	err = au_refresh_dentry(dentry, parent);
+	di_read_unlock(parent, AuLock_IR);
+	dput(parent);
+	AuTraceErr(err);
+	return err;
+}
+
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct dentry *d, *parent;
+
+	if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
+		return simple_reval_dpath(dentry, sigen);
+
+	/* slow loop, keep it simple and stupid */
+	/* cf: au_cpup_dirs() */
+	err = 0;
+	parent = NULL;
+	while (au_digen_test(dentry, sigen)) {
+		d = dentry;
+		while (1) {
+			dput(parent);
+			parent = dget_parent(d);
+			if (!au_digen_test(parent, sigen))
+				break;
+			d = parent;
+		}
+
+		if (d != dentry)
+			di_write_lock_child2(d);
+
+		/* someone might update our dentry while we were sleeping */
+		if (au_digen_test(d, sigen)) {
+			/*
+			 * todo: consolidate with simple_reval_dpath(),
+			 * do_refresh() and au_reval_for_attr().
+			 */
+			di_read_lock_parent(parent, AuLock_IR);
+			err = au_refresh_dentry(d, parent);
+			di_read_unlock(parent, AuLock_IR);
+		}
+
+		if (d != dentry)
+			di_write_unlock(d);
+		dput(parent);
+		if (unlikely(err))
+			break;
+	}
+
+	return err;
+}
+
+/*
+ * returns 1 if valid, 0 if invalid, or a negative errno (e.g. -ECHILD).
+ */
+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	int valid, err;
+	unsigned int sigen;
+	unsigned char do_udba, dirren;
+	struct super_block *sb;
+	struct inode *inode;
+
+	/* todo: support rcu-walk? */
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	valid = 0;
+	if (unlikely(!au_di(dentry)))
+		goto out;
+
+	valid = 1;
+	sb = dentry->d_sb;
+	/*
+	 * todo: very ugly
+	 * i_mutex of parent dir may be held,
+	 * but we should not return 'invalid' just because it is busy.
+	 */
+	err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
+	if (unlikely(err)) {
+		valid = err;
+		AuTraceErr(err);
+		goto out;
+	}
+	inode = NULL;
+	if (d_really_is_positive(dentry))
+		inode = d_inode(dentry);
+	if (unlikely(inode && au_is_bad_inode(inode))) {
+		err = -EINVAL;
+		AuTraceErr(err);
+		goto out_dgrade;
+	}
+	if (unlikely(au_dbrange_test(dentry))) {
+		err = -EINVAL;
+		AuTraceErr(err);
+		goto out_dgrade;
+	}
+
+	sigen = au_sigen(sb);
+	if (au_digen_test(dentry, sigen)) {
+		AuDebugOn(IS_ROOT(dentry));
+		err = au_reval_dpath(dentry, sigen);
+		if (unlikely(err)) {
+			AuTraceErr(err);
+			goto out_dgrade;
+		}
+	}
+	di_downgrade_lock(dentry, AuLock_IR);
+
+	err = -EINVAL;
+	if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY))
+	    && inode
+	    && !(inode->i_state & I_LINKABLE)
+	    && (IS_DEADDIR(inode) || !inode->i_nlink)) {
+		AuTraceErr(err);
+		goto out_inval;
+	}
+
+	do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
+	if (do_udba && inode) {
+		aufs_bindex_t btop = au_ibtop(inode);
+		struct inode *h_inode;
+
+		if (btop >= 0) {
+			h_inode = au_h_iptr(inode, btop);
+			if (h_inode && au_test_higen(inode, h_inode)) {
+				AuTraceErr(err);
+				goto out_inval;
+			}
+		}
+	}
+
+	dirren = !!au_opt_test(au_mntflags(sb), DIRREN);
+	err = h_d_revalidate(dentry, inode, flags, do_udba, dirren);
+	if (unlikely(!err && do_udba && au_dbtop(dentry) < 0)) {
+		err = -EIO;
+		AuDbg("both of real entry and whiteout found, %p, err %d\n",
+		      dentry, err);
+	}
+	goto out_inval;
+
+out_dgrade:
+	di_downgrade_lock(dentry, AuLock_IR);
+out_inval:
+	aufs_read_unlock(dentry, AuLock_IR);
+	AuTraceErr(err);
+	valid = !err;
+out:
+	if (!valid) {
+		AuDbg("%pd invalid, %d\n", dentry, valid);
+		d_drop(dentry);
+	}
+	return valid;
+}
+
+static void aufs_d_release(struct dentry *dentry)
+{
+	if (au_di(dentry)) {
+		au_di_fin(dentry);
+		au_hn_di_reinit(dentry);
+	}
+}
+
+const struct dentry_operations aufs_dop = {
+	.d_revalidate		= aufs_d_revalidate,
+	.d_weak_revalidate	= aufs_d_revalidate,
+	.d_release		= aufs_d_release
+};
+
+/* aufs_dop without d_revalidate */
+const struct dentry_operations aufs_dop_noreval = {
+	.d_release		= aufs_d_release
+};
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
new file mode 100644
index 00000000000..c9f9c704da0
--- /dev/null
+++ b/fs/aufs/dentry.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#ifndef __AUFS_DENTRY_H__
+#define __AUFS_DENTRY_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include "dirren.h"
+#include "rwsem.h"
+
+struct au_hdentry {
+	struct dentry		*hd_dentry;
+	aufs_bindex_t		hd_id;
+};
+
+struct au_dinfo {
+	atomic_t		di_generation;
+
+	struct au_rwsem		di_rwsem;
+	aufs_bindex_t		di_btop, di_bbot, di_bwh, di_bdiropq;
+	unsigned char		di_tmpfile; /* to allow a different name */
+	struct au_hdentry	*di_hdentry;
+	struct rcu_head		rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* flags for au_lkup_dentry() */
+#define AuLkup_ALLOW_NEG	1
+#define AuLkup_IGNORE_PERM	(1 << 1)
+#define AuLkup_DIRREN		(1 << 2)
+#define au_ftest_lkup(flags, name)	((flags) & AuLkup_##name)
+#define au_fset_lkup(flags, name) \
+	do { (flags) |= AuLkup_##name; } while (0)
+#define au_fclr_lkup(flags, name) \
+	do { (flags) &= ~AuLkup_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuLkup_DIRREN
+#define AuLkup_DIRREN 0
+#endif
+
+struct au_do_lookup_args {
+	unsigned int		flags;
+	mode_t			type;
+	struct qstr		whname, *name;
+	struct au_dr_lookup	dirren;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dentry.c */
+extern const struct dentry_operations aufs_dop, aufs_dop_noreval;
+struct au_branch;
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent);
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+		struct dentry *h_parent, struct au_branch *br);
+
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t btop,
+		   unsigned int flags);
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
+void au_refresh_dop(struct dentry *dentry, int force_reval);
+
+/* dinfo.c */
+void au_di_init_once(void *_di);
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
+void au_di_free(struct au_dinfo *dinfo);
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
+int au_di_init(struct dentry *dentry);
+void au_di_fin(struct dentry *dentry);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink);
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
+void di_read_unlock(struct dentry *d, int flags);
+void di_downgrade_lock(struct dentry *d, int flags);
+void di_write_lock(struct dentry *d, unsigned int lsc);
+void di_write_unlock(struct dentry *d);
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
+aufs_bindex_t au_dbtail(struct dentry *dentry);
+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+		   struct dentry *h_dentry);
+int au_digen_test(struct dentry *dentry, unsigned int sigen);
+int au_dbrange_test(struct dentry *dentry);
+void au_update_digen(struct dentry *dentry);
+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
+void au_update_dbtop(struct dentry *dentry);
+void au_update_dbbot(struct dentry *dentry);
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_dinfo *au_di(struct dentry *dentry)
+{
+	return dentry->d_fsdata;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for dinfo */
+enum {
+	AuLsc_DI_CHILD,		/* child first */
+	AuLsc_DI_CHILD2,	/* rename(2), link(2), and cpup at hnotify */
+	AuLsc_DI_CHILD3,	/* copyup dirs */
+	AuLsc_DI_PARENT,
+	AuLsc_DI_PARENT2,
+	AuLsc_DI_PARENT3,
+	AuLsc_DI_TMP		/* temp for replacing dinfo */
+};
+
+/*
+ * di_read_lock_child, di_write_lock_child,
+ * di_read_lock_child2, di_write_lock_child2,
+ * di_read_lock_child3, di_write_lock_child3,
+ * di_read_lock_parent, di_write_lock_parent,
+ * di_read_lock_parent2, di_write_lock_parent2,
+ * di_read_lock_parent3, di_write_lock_parent3,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void di_read_lock_##name(struct dentry *d, int flags) \
+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void di_write_lock_##name(struct dentry *d) \
+{ di_write_lock(d, AuLsc_DI_##lsc); }
+
+#define AuRWLockFuncs(name, lsc) \
+	AuReadLockFunc(name, lsc) \
+	AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define DiMustNoWaiters(d)	AuRwMustNoWaiters(&au_di(d)->di_rwsem)
+#define DiMustAnyLock(d)	AuRwMustAnyLock(&au_di(d)->di_rwsem)
+#define DiMustWriteLock(d)	AuRwMustWriteLock(&au_di(d)->di_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: memory barrier? */
+static inline unsigned int au_digen(struct dentry *d)
+{
+	return atomic_read(&au_di(d)->di_generation);
+}
+
+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
+{
+	hdentry->hd_dentry = NULL;
+}
+
+static inline struct au_hdentry *au_hdentry(struct au_dinfo *di,
+					    aufs_bindex_t bindex)
+{
+	return di->di_hdentry + bindex;
+}
+
+static inline void au_hdput(struct au_hdentry *hd)
+{
+	if (hd)
+		dput(hd->hd_dentry);
+}
+
+static inline aufs_bindex_t au_dbtop(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_btop;
+}
+
+static inline aufs_bindex_t au_dbbot(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bbot;
+}
+
+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bwh;
+}
+
+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bdiropq;
+}
+
+/* todo: hard/soft set? */
+static inline void au_set_dbtop(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_btop = bindex;
+}
+
+static inline void au_set_dbbot(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_bbot = bindex;
+}
+
+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	/* dbwh can be outside of btop - bbot range */
+	au_di(dentry)->di_bwh = bindex;
+}
+
+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_bdiropq = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_HNOTIFY
+static inline void au_digen_dec(struct dentry *d)
+{
+	atomic_dec(&au_di(d)->di_generation);
+}
+
+static inline void au_hn_di_reinit(struct dentry *dentry)
+{
+	dentry->d_fsdata = NULL;
+}
+#else
+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DENTRY_H__ */
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
new file mode 100644
index 00000000000..be959106d98
--- /dev/null
+++ b/fs/aufs/dinfo.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dentry private data
+ */
+
+#include "aufs.h"
+
+void au_di_init_once(void *_dinfo)
+{
+	struct au_dinfo *dinfo = _dinfo;
+
+	au_rw_init(&dinfo->di_rwsem);
+}
+
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
+{
+	struct au_dinfo *dinfo;
+	int nbr, i;
+
+	dinfo = au_cache_alloc_dinfo();
+	if (unlikely(!dinfo))
+		goto out;
+
+	nbr = au_sbbot(sb) + 1;
+	if (nbr <= 0)
+		nbr = 1;
+	dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
+	if (dinfo->di_hdentry) {
+		au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
+		dinfo->di_btop = -1;
+		dinfo->di_bbot = -1;
+		dinfo->di_bwh = -1;
+		dinfo->di_bdiropq = -1;
+		dinfo->di_tmpfile = 0;
+		for (i = 0; i < nbr; i++)
+			dinfo->di_hdentry[i].hd_id = -1;
+		goto out;
+	}
+
+	au_cache_free_dinfo(dinfo);
+	dinfo = NULL;
+
+out:
+	return dinfo;
+}
+
+void au_di_free(struct au_dinfo *dinfo)
+{
+	struct au_hdentry *p;
+	aufs_bindex_t bbot, bindex;
+
+	/* dentry may not be revalidated */
+	bindex = dinfo->di_btop;
+	if (bindex >= 0) {
+		bbot = dinfo->di_bbot;
+		p = au_hdentry(dinfo, bindex);
+		while (bindex++ <= bbot)
+			au_hdput(p++);
+	}
+	au_kfree_try_rcu(dinfo->di_hdentry);
+	au_cache_free_dinfo(dinfo);
+}
+
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
+{
+	struct au_hdentry *p;
+	aufs_bindex_t bi;
+
+	AuRwMustWriteLock(&a->di_rwsem);
+	AuRwMustWriteLock(&b->di_rwsem);
+
+#define DiSwap(v, name)				\
+	do {					\
+		v = a->di_##name;		\
+		a->di_##name = b->di_##name;	\
+		b->di_##name = v;		\
+	} while (0)
+
+	DiSwap(p, hdentry);
+	DiSwap(bi, btop);
+	DiSwap(bi, bbot);
+	DiSwap(bi, bwh);
+	DiSwap(bi, bdiropq);
+	/* smp_mb(); */
+
+#undef DiSwap
+}
+
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
+{
+	AuRwMustWriteLock(&dst->di_rwsem);
+	AuRwMustWriteLock(&src->di_rwsem);
+
+	dst->di_btop = src->di_btop;
+	dst->di_bbot = src->di_bbot;
+	dst->di_bwh = src->di_bwh;
+	dst->di_bdiropq = src->di_bdiropq;
+	/* smp_mb(); */
+}
+
+int au_di_init(struct dentry *dentry)
+{
+	int err;
+	struct super_block *sb;
+	struct au_dinfo *dinfo;
+
+	err = 0;
+	sb = dentry->d_sb;
+	dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
+	if (dinfo) {
+		atomic_set(&dinfo->di_generation, au_sigen(sb));
+		/* smp_mb(); */ /* atomic_set */
+		dentry->d_fsdata = dinfo;
+	} else
+		err = -ENOMEM;
+
+	return err;
+}
+
+void au_di_fin(struct dentry *dentry)
+{
+	struct au_dinfo *dinfo;
+
+	dinfo = au_di(dentry);
+	AuRwDestroy(&dinfo->di_rwsem);
+	au_di_free(dinfo);
+}
+
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink)
+{
+	int err, sz;
+	struct au_hdentry *hdp;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	err = -ENOMEM;
+	sz = sizeof(*hdp) * (dinfo->di_bbot + 1);
+	if (!sz)
+		sz = sizeof(*hdp);
+	hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS,
+			   may_shrink);
+	if (hdp) {
+		dinfo->di_hdentry = hdp;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
+{
+	switch (lsc) {
+	case AuLsc_DI_CHILD:
+		ii_write_lock_child(inode);
+		break;
+	case AuLsc_DI_CHILD2:
+		ii_write_lock_child2(inode);
+		break;
+	case AuLsc_DI_CHILD3:
+		ii_write_lock_child3(inode);
+		break;
+	case AuLsc_DI_PARENT:
+		ii_write_lock_parent(inode);
+		break;
+	case AuLsc_DI_PARENT2:
+		ii_write_lock_parent2(inode);
+		break;
+	case AuLsc_DI_PARENT3:
+		ii_write_lock_parent3(inode);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
+{
+	switch (lsc) {
+	case AuLsc_DI_CHILD:
+		ii_read_lock_child(inode);
+		break;
+	case AuLsc_DI_CHILD2:
+		ii_read_lock_child2(inode);
+		break;
+	case AuLsc_DI_CHILD3:
+		ii_read_lock_child3(inode);
+		break;
+	case AuLsc_DI_PARENT:
+		ii_read_lock_parent(inode);
+		break;
+	case AuLsc_DI_PARENT2:
+		ii_read_lock_parent2(inode);
+		break;
+	case AuLsc_DI_PARENT3:
+		ii_read_lock_parent3(inode);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
+{
+	struct inode *inode;
+
+	au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
+	if (d_really_is_positive(d)) {
+		inode = d_inode(d);
+		if (au_ftest_lock(flags, IW))
+			do_ii_write_lock(inode, lsc);
+		else if (au_ftest_lock(flags, IR))
+			do_ii_read_lock(inode, lsc);
+	}
+}
+
+void di_read_unlock(struct dentry *d, int flags)
+{
+	struct inode *inode;
+
+	if (d_really_is_positive(d)) {
+		inode = d_inode(d);
+		if (au_ftest_lock(flags, IW)) {
+			au_dbg_verify_dinode(d);
+			ii_write_unlock(inode);
+		} else if (au_ftest_lock(flags, IR)) {
+			au_dbg_verify_dinode(d);
+			ii_read_unlock(inode);
+		}
+	}
+	au_rw_read_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_downgrade_lock(struct dentry *d, int flags)
+{
+	if (d_really_is_positive(d) && au_ftest_lock(flags, IR))
+		ii_downgrade_lock(d_inode(d));
+	au_rw_dgrade_lock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock(struct dentry *d, unsigned int lsc)
+{
+	au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
+	if (d_really_is_positive(d))
+		do_ii_write_lock(d_inode(d), lsc);
+}
+
+void di_write_unlock(struct dentry *d)
+{
+	au_dbg_verify_dinode(d);
+	if (d_really_is_positive(d))
+		ii_write_unlock(d_inode(d));
+	au_rw_write_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
+{
+	AuDebugOn(d1 == d2
+		  || d_inode(d1) == d_inode(d2)
+		  || d1->d_sb != d2->d_sb);
+
+	if ((isdir && au_test_subdir(d1, d2))
+	    || d1 < d2) {
+		di_write_lock_child(d1);
+		di_write_lock_child2(d2);
+	} else {
+		di_write_lock_child(d2);
+		di_write_lock_child2(d1);
+	}
+}
+
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
+{
+	AuDebugOn(d1 == d2
+		  || d_inode(d1) == d_inode(d2)
+		  || d1->d_sb != d2->d_sb);
+
+	if ((isdir && au_test_subdir(d1, d2))
+	    || d1 < d2) {
+		di_write_lock_parent(d1);
+		di_write_lock_parent2(d2);
+	} else {
+		di_write_lock_parent(d2);
+		di_write_lock_parent2(d1);
+	}
+}
+
+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+	di_write_unlock(d1);
+	if (d_inode(d1) == d_inode(d2))
+		au_rw_write_unlock(&au_di(d2)->di_rwsem);
+	else
+		di_write_unlock(d2);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	struct dentry *d;
+
+	DiMustAnyLock(dentry);
+
+	if (au_dbtop(dentry) < 0 || bindex < au_dbtop(dentry))
+		return NULL;
+	AuDebugOn(bindex < 0);
+	d = au_hdentry(au_di(dentry), bindex)->hd_dentry;
+	AuDebugOn(d && au_dcount(d) <= 0);
+	return d;
+}
+
+/*
+ * extended version of au_h_dptr().
+ * returns a hashed and positive (or linkable) h_dentry in bindex, NULL, or
+ * error.
+ */
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	struct dentry *h_dentry;
+	struct inode *inode, *h_inode;
+
+	AuDebugOn(d_really_is_negative(dentry));
+
+	h_dentry = NULL;
+	if (au_dbtop(dentry) <= bindex
+	    && bindex <= au_dbbot(dentry))
+		h_dentry = au_h_dptr(dentry, bindex);
+	if (h_dentry && !au_d_linkable(h_dentry)) {
+		dget(h_dentry);
+		goto out; /* success */
+	}
+
+	inode = d_inode(dentry);
+	AuDebugOn(bindex < au_ibtop(inode));
+	AuDebugOn(au_ibbot(inode) < bindex);
+	h_inode = au_h_iptr(inode, bindex);
+	h_dentry = d_find_alias(h_inode);
+	if (h_dentry) {
+		if (!IS_ERR(h_dentry)) {
+			if (!au_d_linkable(h_dentry))
+				goto out; /* success */
+			dput(h_dentry);
+		} else
+			goto out;
+	}
+
+	if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
+		h_dentry = au_plink_lkup(inode, bindex);
+		AuDebugOn(!h_dentry);
+		if (!IS_ERR(h_dentry)) {
+			if (!au_d_hashed_positive(h_dentry))
+				goto out; /* success */
+			dput(h_dentry);
+			h_dentry = NULL;
+		}
+	}
+
+out:
+	AuDbgDentry(h_dentry);
+	return h_dentry;
+}
+
+aufs_bindex_t au_dbtail(struct dentry *dentry)
+{
+	aufs_bindex_t bbot, bwh;
+
+	bbot = au_dbbot(dentry);
+	if (0 <= bbot) {
+		bwh = au_dbwh(dentry);
+		if (!bwh)
+			return bwh;
+		if (0 < bwh && bwh < bbot)
+			return bwh - 1;
+	}
+	return bbot;
+}
+
+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
+{
+	aufs_bindex_t bbot, bopq;
+
+	bbot = au_dbtail(dentry);
+	if (0 <= bbot) {
+		bopq = au_dbdiropq(dentry);
+		if (0 <= bopq && bopq < bbot)
+			bbot = bopq;
+	}
+	return bbot;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+		   struct dentry *h_dentry)
+{
+	struct au_dinfo *dinfo;
+	struct au_hdentry *hd;
+	struct au_branch *br;
+
+	DiMustWriteLock(dentry);
+
+	dinfo = au_di(dentry);
+	hd = au_hdentry(dinfo, bindex);
+	au_hdput(hd);
+	hd->hd_dentry = h_dentry;
+	if (h_dentry) {
+		br = au_sbr(dentry->d_sb, bindex);
+		hd->hd_id = br->br_id;
+	}
+}
+
+int au_dbrange_test(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t btop, bbot;
+
+	err = 0;
+	btop = au_dbtop(dentry);
+	bbot = au_dbbot(dentry);
+	if (btop >= 0)
+		AuDebugOn(bbot < 0 && btop > bbot);
+	else {
+		err = -EIO;
+		AuDebugOn(bbot >= 0);
+	}
+
+	return err;
+}
+
+int au_digen_test(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(au_digen(dentry) != sigen
+		     || au_iigen_test(d_inode(dentry), sigen)))
+		err = -EIO;
+
+	return err;
+}
+
+void au_update_digen(struct dentry *dentry)
+{
+	atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
+	/* smp_mb(); */ /* atomic_set */
+}
+
+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
+{
+	struct au_dinfo *dinfo;
+	struct dentry *h_d;
+	struct au_hdentry *hdp;
+	aufs_bindex_t bindex, bbot;
+
+	DiMustWriteLock(dentry);
+
+	dinfo = au_di(dentry);
+	if (!dinfo || dinfo->di_btop < 0)
+		return;
+
+	if (do_put_zero) {
+		bbot = dinfo->di_bbot;
+		bindex = dinfo->di_btop;
+		hdp = au_hdentry(dinfo, bindex);
+		for (; bindex <= bbot; bindex++, hdp++) {
+			h_d = hdp->hd_dentry;
+			if (h_d && d_is_negative(h_d))
+				au_set_h_dptr(dentry, bindex, NULL);
+		}
+	}
+
+	dinfo->di_btop = 0;
+	hdp = au_hdentry(dinfo, dinfo->di_btop);
+	for (; dinfo->di_btop <= dinfo->di_bbot; dinfo->di_btop++, hdp++)
+		if (hdp->hd_dentry)
+			break;
+	if (dinfo->di_btop > dinfo->di_bbot) {
+		dinfo->di_btop = -1;
+		dinfo->di_bbot = -1;
+		return;
+	}
+
+	hdp = au_hdentry(dinfo, dinfo->di_bbot);
+	for (; dinfo->di_bbot >= 0; dinfo->di_bbot--, hdp--)
+		if (hdp->hd_dentry)
+			break;
+	AuDebugOn(dinfo->di_btop > dinfo->di_bbot || dinfo->di_bbot < 0);
+}
+
+void au_update_dbtop(struct dentry *dentry)
+{
+	aufs_bindex_t bindex, bbot;
+	struct dentry *h_dentry;
+
+	bbot = au_dbbot(dentry);
+	for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		if (d_is_positive(h_dentry)) {
+			au_set_dbtop(dentry, bindex);
+			return;
+		}
+		au_set_h_dptr(dentry, bindex, NULL);
+	}
+}
+
+void au_update_dbbot(struct dentry *dentry)
+{
+	aufs_bindex_t bindex, btop;
+	struct dentry *h_dentry;
+
+	btop = au_dbtop(dentry);
+	for (bindex = au_dbbot(dentry); bindex >= btop; bindex--) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		if (d_is_positive(h_dentry)) {
+			au_set_dbbot(dentry, bindex);
+			return;
+		}
+		au_set_h_dptr(dentry, bindex, NULL);
+	}
+}
+
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
+{
+	aufs_bindex_t bindex, bbot;
+
+	bbot = au_dbbot(dentry);
+	for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++)
+		if (au_h_dptr(dentry, bindex) == h_dentry)
+			return bindex;
+	return -1;
+}
diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c
new file mode 100644
index 00000000000..0bcb39ee725
--- /dev/null
+++ b/fs/aufs/dir.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * directory operations
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/iversion.h>
+#include "aufs.h"
+
+void au_add_nlink(struct inode *dir, struct inode *h_dir)
+{
+	unsigned int nlink;
+
+	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+	nlink = dir->i_nlink;
+	nlink += h_dir->i_nlink - 2;
+	if (h_dir->i_nlink < 2)
+		nlink += 2;
+	smp_mb(); /* for i_nlink */
+	/* 0 can happen while revalidating */
+	set_nlink(dir, nlink);
+}
+
+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
+{
+	unsigned int nlink;
+
+	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+	nlink = dir->i_nlink;
+	nlink -= h_dir->i_nlink - 2;
+	if (h_dir->i_nlink < 2)
+		nlink -= 2;
+	smp_mb(); /* for i_nlink */
+	/* nlink == 0 means the branch-fs is broken */
+	set_nlink(dir, nlink);
+}
+
+loff_t au_dir_size(struct file *file, struct dentry *dentry)
+{
+	loff_t sz;
+	aufs_bindex_t bindex, bbot;
+	struct file *h_file;
+	struct dentry *h_dentry;
+
+	sz = 0;
+	if (file) {
+		AuDebugOn(!d_is_dir(file->f_path.dentry));
+
+		bbot = au_fbbot_dir(file);
+		for (bindex = au_fbtop(file);
+		     bindex <= bbot && sz < KMALLOC_MAX_SIZE;
+		     bindex++) {
+			h_file = au_hf_dir(file, bindex);
+			if (h_file && file_inode(h_file))
+				sz += vfsub_f_size_read(h_file);
+		}
+	} else {
+		AuDebugOn(!dentry);
+		AuDebugOn(!d_is_dir(dentry));
+
+		bbot = au_dbtaildir(dentry);
+		for (bindex = au_dbtop(dentry);
+		     bindex <= bbot && sz < KMALLOC_MAX_SIZE;
+		     bindex++) {
+			h_dentry = au_h_dptr(dentry, bindex);
+			if (h_dentry && d_is_positive(h_dentry))
+				sz += i_size_read(d_inode(h_dentry));
+		}
+	}
+	if (sz < KMALLOC_MAX_SIZE)
+		sz = roundup_pow_of_two(sz);
+	if (sz > KMALLOC_MAX_SIZE)
+		sz = KMALLOC_MAX_SIZE;
+	else if (sz < NAME_MAX) {
+		BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
+		sz = AUFS_RDBLK_DEF;
+	}
+	return sz;
+}
+
+struct au_dir_ts_arg {
+	struct dentry *dentry;
+	aufs_bindex_t brid;
+};
+
+static void au_do_dir_ts(void *arg)
+{
+	struct au_dir_ts_arg *a = arg;
+	struct au_dtime dt;
+	struct path h_path;
+	struct inode *dir, *h_dir;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_hinode *hdir;
+	int err;
+	aufs_bindex_t btop, bindex;
+
+	sb = a->dentry->d_sb;
+	if (d_really_is_negative(a->dentry))
+		goto out;
+	/* no dir->i_mutex lock */
+	aufs_read_lock(a->dentry, AuLock_DW); /* noflush */
+
+	dir = d_inode(a->dentry);
+	btop = au_ibtop(dir);
+	bindex = au_br_index(sb, a->brid);
+	if (bindex < btop)
+		goto out_unlock;
+
+	br = au_sbr(sb, bindex);
+	h_path.dentry = au_h_dptr(a->dentry, bindex);
+	if (!h_path.dentry)
+		goto out_unlock;
+	h_path.mnt = au_br_mnt(br);
+	au_dtime_store(&dt, a->dentry, &h_path);
+
+	br = au_sbr(sb, btop);
+	if (!au_br_writable(br->br_perm))
+		goto out_unlock;
+	h_path.dentry = au_h_dptr(a->dentry, btop);
+	h_path.mnt = au_br_mnt(br);
+	err = vfsub_mnt_want_write(h_path.mnt);
+	if (err)
+		goto out_unlock;
+	hdir = au_hi(dir, btop);
+	au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+	h_dir = au_h_iptr(dir, btop);
+	if (h_dir->i_nlink
+	    && timespec64_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) {
+		dt.dt_h_path = h_path;
+		au_dtime_revert(&dt);
+	}
+	au_hn_inode_unlock(hdir);
+	vfsub_mnt_drop_write(h_path.mnt);
+	au_cpup_attr_timesizes(dir);
+
+out_unlock:
+	aufs_read_unlock(a->dentry, AuLock_DW);
+out:
+	dput(a->dentry);
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	au_kfree_try_rcu(arg);
+}
+
+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex)
+{
+	int perm, wkq_err;
+	aufs_bindex_t btop;
+	struct au_dir_ts_arg *arg;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	IMustLock(dir);
+
+	dentry = d_find_any_alias(dir);
+	AuDebugOn(!dentry);
+	sb = dentry->d_sb;
+	btop = au_ibtop(dir);
+	if (btop == bindex) {
+		au_cpup_attr_timesizes(dir);
+		goto out;
+	}
+
+	perm = au_sbr_perm(sb, btop);
+	if (!au_br_writable(perm))
+		goto out;
+
+	arg = kmalloc(sizeof(*arg), GFP_NOFS);
+	if (!arg)
+		goto out;
+
+	arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */
+	arg->brid = au_sbr_id(sb, bindex);
+	wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0);
+	if (unlikely(wkq_err)) {
+		pr_err("wkq %d\n", wkq_err);
+		dput(dentry);
+		au_kfree_try_rcu(arg);
+	}
+
+out:
+	dput(dentry);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int reopen_dir(struct file *file)
+{
+	int err;
+	unsigned int flags;
+	aufs_bindex_t bindex, btail, btop;
+	struct dentry *dentry, *h_dentry;
+	struct file *h_file;
+
+	/* open all lower dirs */
+	dentry = file->f_path.dentry;
+	btop = au_dbtop(dentry);
+	for (bindex = au_fbtop(file); bindex < btop; bindex++)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbtop(file, btop);
+
+	btail = au_dbtaildir(dentry);
+	for (bindex = au_fbbot_dir(file); btail < bindex; bindex--)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbbot_dir(file, btail);
+
+	flags = vfsub_file_flags(file);
+	for (bindex = btop; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		h_file = au_hf_dir(file, bindex);
+		if (h_file)
+			continue;
+
+		h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+		err = PTR_ERR(h_file);
+		if (IS_ERR(h_file))
+			goto out; /* close all? */
+		au_set_h_fptr(file, bindex, h_file);
+	}
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	err = 0;
+
+out:
+	return err;
+}
+
+static int do_open_dir(struct file *file, int flags, struct file *h_file)
+{
+	int err;
+	aufs_bindex_t bindex, btail;
+	struct dentry *dentry, *h_dentry;
+	struct vfsmount *mnt;
+
+	FiMustWriteLock(file);
+	AuDebugOn(h_file);
+
+	err = 0;
+	mnt = file->f_path.mnt;
+	dentry = file->f_path.dentry;
+	file->f_version = inode_query_iversion(d_inode(dentry));
+	bindex = au_dbtop(dentry);
+	au_set_fbtop(file, bindex);
+	btail = au_dbtaildir(dentry);
+	au_set_fbbot_dir(file, btail);
+	for (; !err && bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+
+		err = vfsub_test_mntns(mnt, h_dentry->d_sb);
+		if (unlikely(err))
+			break;
+		h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+		if (IS_ERR(h_file)) {
+			err = PTR_ERR(h_file);
+			break;
+		}
+		au_set_h_fptr(file, bindex, h_file);
+	}
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	if (!err)
+		return 0; /* success */
+
+	/* close all */
+	for (bindex = au_fbtop(file); bindex <= btail; bindex++)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbtop(file, -1);
+	au_set_fbbot_dir(file, -1);
+
+	return err;
+}
+
+static int aufs_open_dir(struct inode *inode __maybe_unused,
+			 struct file *file)
+{
+	int err;
+	struct super_block *sb;
+	struct au_fidir *fidir;
+
+	err = -ENOMEM;
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	fidir = au_fidir_alloc(sb);
+	if (fidir) {
+		struct au_do_open_args args = {
+			.open	= do_open_dir,
+			.fidir	= fidir
+		};
+		err = au_do_open(file, &args);
+		if (unlikely(err))
+			au_kfree_rcu(fidir);
+	}
+	si_read_unlock(sb);
+	return err;
+}
+
+static int aufs_release_dir(struct inode *inode __maybe_unused,
+			    struct file *file)
+{
+	struct au_vdir *vdir_cache;
+	struct au_finfo *finfo;
+	struct au_fidir *fidir;
+	struct au_hfile *hf;
+	aufs_bindex_t bindex, bbot;
+
+	finfo = au_fi(file);
+	fidir = finfo->fi_hdir;
+	if (fidir) {
+		au_hbl_del(&finfo->fi_hlist,
+			   &au_sbi(file->f_path.dentry->d_sb)->si_files);
+		vdir_cache = fidir->fd_vdir_cache; /* lock-free */
+		if (vdir_cache)
+			au_vdir_free(vdir_cache);
+
+		bindex = finfo->fi_btop;
+		if (bindex >= 0) {
+			hf = fidir->fd_hfile + bindex;
+			/*
+			 * calls fput() instead of filp_close(),
+			 * since there is no dnotify or lock on the lower file.
+			 */
+			bbot = fidir->fd_bbot;
+			for (; bindex <= bbot; bindex++, hf++)
+				if (hf->hf_file)
+					au_hfput(hf, /*execed*/0);
+		}
+		au_kfree_rcu(fidir);
+		finfo->fi_hdir = NULL;
+	}
+	au_finfo_fin(file);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_dir(struct file *file, fl_owner_t id)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct file *h_file;
+
+	err = 0;
+	bbot = au_fbbot_dir(file);
+	for (bindex = au_fbtop(file); !err && bindex <= bbot; bindex++) {
+		h_file = au_hf_dir(file, bindex);
+		if (h_file)
+			err = vfsub_flush(h_file, id);
+	}
+	return err;
+}
+
+static int aufs_flush_dir(struct file *file, fl_owner_t id)
+{
+	return au_do_flush(file, id, au_do_flush_dir);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
+{
+	int err;
+	aufs_bindex_t bbot, bindex;
+	struct inode *inode;
+	struct super_block *sb;
+
+	err = 0;
+	sb = dentry->d_sb;
+	inode = d_inode(dentry);
+	IMustLock(inode);
+	bbot = au_dbbot(dentry);
+	for (bindex = au_dbtop(dentry); !err && bindex <= bbot; bindex++) {
+		struct path h_path;
+
+		if (au_test_ro(sb, bindex, inode))
+			continue;
+		h_path.dentry = au_h_dptr(dentry, bindex);
+		if (!h_path.dentry)
+			continue;
+
+		h_path.mnt = au_sbr_mnt(sb, bindex);
+		err = vfsub_fsync(NULL, &h_path, datasync);
+	}
+
+	return err;
+}
+
+static int au_do_fsync_dir(struct file *file, int datasync)
+{
+	int err;
+	aufs_bindex_t bbot, bindex;
+	struct file *h_file;
+	struct super_block *sb;
+	struct inode *inode;
+
+	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1, /*fi_lsc*/0);
+	if (unlikely(err))
+		goto out;
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	bbot = au_fbbot_dir(file);
+	for (bindex = au_fbtop(file); !err && bindex <= bbot; bindex++) {
+		h_file = au_hf_dir(file, bindex);
+		if (!h_file || au_test_ro(sb, bindex, inode))
+			continue;
+
+		err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+	}
+
+out:
+	return err;
+}
+
+/*
+ * @file may be NULL
+ */
+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
+			  int datasync)
+{
+	int err;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct super_block *sb;
+
+	err = 0;
+	dentry = file->f_path.dentry;
+	inode = d_inode(dentry);
+	inode_lock(inode);
+	sb = dentry->d_sb;
+	si_noflush_read_lock(sb);
+	if (file)
+		err = au_do_fsync_dir(file, datasync);
+	else {
+		di_write_lock_child(dentry);
+		err = au_do_fsync_dir_no_file(dentry, datasync);
+	}
+	au_cpup_attr_timesizes(inode);
+	di_write_unlock(dentry);
+	if (file)
+		fi_write_unlock(file);
+
+	si_read_unlock(sb);
+	inode_unlock(inode);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_iterate_shared(struct file *file, struct dir_context *ctx)
+{
+	int err;
+	struct dentry *dentry;
+	struct inode *inode, *h_inode;
+	struct super_block *sb;
+
+	AuDbg("%pD, ctx{%ps, %llu}\n", file, ctx->actor, ctx->pos);
+
+	dentry = file->f_path.dentry;
+	inode = d_inode(dentry);
+	IMustLock(inode);
+
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1, /*fi_lsc*/0);
+	if (unlikely(err))
+		goto out;
+	err = au_alive_dir(dentry);
+	if (!err)
+		err = au_vdir_init(file);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err))
+		goto out_unlock;
+
+	h_inode = au_h_iptr(inode, au_ibtop(inode));
+	if (!au_test_nfsd()) {
+		err = au_vdir_fill_de(file, ctx);
+		fsstack_copy_attr_atime(inode, h_inode);
+	} else {
+		/*
+		 * nfsd filldir may call lookup_one_len(), vfs_getattr(),
+		 * encode_fh() and others.
+		 */
+		atomic_inc(&h_inode->i_count);
+		di_read_unlock(dentry, AuLock_IR);
+		si_read_unlock(sb);
+		err = au_vdir_fill_de(file, ctx);
+		fsstack_copy_attr_atime(inode, h_inode);
+		fi_write_unlock(file);
+		iput(h_inode);
+
+		AuTraceErr(err);
+		return err;
+	}
+
+out_unlock:
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuTestEmpty_WHONLY	1
+#define AuTestEmpty_CALLED	(1 << 1)
+#define AuTestEmpty_SHWH	(1 << 2)
+#define au_ftest_testempty(flags, name)	((flags) & AuTestEmpty_##name)
+#define au_fset_testempty(flags, name) \
+	do { (flags) |= AuTestEmpty_##name; } while (0)
+#define au_fclr_testempty(flags, name) \
+	do { (flags) &= ~AuTestEmpty_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuTestEmpty_SHWH
+#define AuTestEmpty_SHWH	0
+#endif
+
+struct test_empty_arg {
+	struct dir_context ctx;
+	struct au_nhash *whlist;
+	unsigned int flags;
+	int err;
+	aufs_bindex_t bindex;
+};
+
+static int test_empty_cb(struct dir_context *ctx, const char *__name,
+			 int namelen, loff_t offset __maybe_unused, u64 ino,
+			 unsigned int d_type)
+{
+	struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg,
+						  ctx);
+	char *name = (void *)__name;
+
+	arg->err = 0;
+	au_fset_testempty(arg->flags, CALLED);
+	/* smp_mb(); */
+	if (name[0] == '.'
+	    && (namelen == 1 || (name[1] == '.' && namelen == 2)))
+		goto out; /* success */
+
+	if (namelen <= AUFS_WH_PFX_LEN
+	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+		if (au_ftest_testempty(arg->flags, WHONLY)
+		    && !au_nhash_test_known_wh(arg->whlist, name, namelen))
+			arg->err = -ENOTEMPTY;
+		goto out;
+	}
+
+	name += AUFS_WH_PFX_LEN;
+	namelen -= AUFS_WH_PFX_LEN;
+	if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
+		arg->err = au_nhash_append_wh
+			(arg->whlist, name, namelen, ino, d_type, arg->bindex,
+			 au_ftest_testempty(arg->flags, SHWH));
+
+out:
+	/* smp_mb(); */
+	AuTraceErr(arg->err);
+	return arg->err;
+}
+
+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+	int err;
+	struct file *h_file;
+	struct au_branch *br;
+
+	h_file = au_h_open(dentry, arg->bindex,
+			   O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
+			   /*file*/NULL, /*force_wr*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = 0;
+	if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+	    && !file_inode(h_file)->i_nlink)
+		goto out_put;
+
+	do {
+		arg->err = 0;
+		au_fclr_testempty(arg->flags, CALLED);
+		/* smp_mb(); */
+		err = vfsub_iterate_dir(h_file, &arg->ctx);
+		if (err >= 0)
+			err = arg->err;
+	} while (!err && au_ftest_testempty(arg->flags, CALLED));
+
+out_put:
+	fput(h_file);
+	br = au_sbr(dentry->d_sb, arg->bindex);
+	au_lcnt_dec(&br->br_nfiles);
+out:
+	return err;
+}
+
+struct do_test_empty_args {
+	int *errp;
+	struct dentry *dentry;
+	struct test_empty_arg *arg;
+};
+
+static void call_do_test_empty(void *args)
+{
+	struct do_test_empty_args *a = args;
+	*a->errp = do_test_empty(a->dentry, a->arg);
+}
+
+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+	int err, wkq_err;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	h_dentry = au_h_dptr(dentry, arg->bindex);
+	h_inode = d_inode(h_dentry);
+	/* todo: i_mode changes anytime? */
+	inode_lock_shared_nested(h_inode, AuLsc_I_CHILD);
+	err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
+	inode_unlock_shared(h_inode);
+	if (!err)
+		err = do_test_empty(dentry, arg);
+	else {
+		struct do_test_empty_args args = {
+			.errp	= &err,
+			.dentry	= dentry,
+			.arg	= arg
+		};
+		unsigned int flags = arg->flags;
+
+		wkq_err = au_wkq_wait(call_do_test_empty, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+		arg->flags = flags;
+	}
+
+	return err;
+}
+
+int au_test_empty_lower(struct dentry *dentry)
+{
+	int err;
+	unsigned int rdhash;
+	aufs_bindex_t bindex, btop, btail;
+	struct au_nhash whlist;
+	struct test_empty_arg arg = {
+		.ctx = {
+			.actor = test_empty_cb
+		}
+	};
+	int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg);
+
+	SiMustAnyLock(dentry->d_sb);
+
+	rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
+	err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+
+	arg.flags = 0;
+	arg.whlist = &whlist;
+	btop = au_dbtop(dentry);
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+		au_fset_testempty(arg.flags, SHWH);
+	test_empty = do_test_empty;
+	if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1))
+		test_empty = sio_test_empty;
+	arg.bindex = btop;
+	err = test_empty(dentry, &arg);
+	if (unlikely(err))
+		goto out_whlist;
+
+	au_fset_testempty(arg.flags, WHONLY);
+	btail = au_dbtaildir(dentry);
+	for (bindex = btop + 1; !err && bindex <= btail; bindex++) {
+		struct dentry *h_dentry;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry && d_is_positive(h_dentry)) {
+			arg.bindex = bindex;
+			err = test_empty(dentry, &arg);
+		}
+	}
+
+out_whlist:
+	au_nhash_wh_free(&whlist);
+out:
+	return err;
+}
+
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
+{
+	int err;
+	struct test_empty_arg arg = {
+		.ctx = {
+			.actor = test_empty_cb
+		}
+	};
+	aufs_bindex_t bindex, btail;
+
+	err = 0;
+	arg.whlist = whlist;
+	arg.flags = AuTestEmpty_WHONLY;
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+		au_fset_testempty(arg.flags, SHWH);
+	btail = au_dbtaildir(dentry);
+	for (bindex = au_dbtop(dentry); !err && bindex <= btail; bindex++) {
+		struct dentry *h_dentry;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry && d_is_positive(h_dentry)) {
+			arg.bindex = bindex;
+			err = sio_test_empty(dentry, &arg);
+		}
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_dir_fop = {
+	.owner		= THIS_MODULE,
+	.llseek		= default_llseek,
+	.read		= generic_read_dir,
+	.iterate_shared	= aufs_iterate_shared,
+	.unlocked_ioctl	= aufs_ioctl_dir,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= aufs_compat_ioctl_dir,
+#endif
+	.open		= aufs_open_dir,
+	.release	= aufs_release_dir,
+	.flush		= aufs_flush_dir,
+	.fsync		= aufs_fsync_dir
+};
diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h
new file mode 100644
index 00000000000..e44c2a1a849
--- /dev/null
+++ b/fs/aufs/dir.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * directory operations
+ */
+
+#ifndef __AUFS_DIR_H__
+#define __AUFS_DIR_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+
+/* ---------------------------------------------------------------------- */
+
+/* need to be faster and smaller */
+
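+/*
+ * a small fixed-size chained hash (nh_num buckets of hlist heads), whose
+ * helpers live in vdir.c.  dir.c uses it as the whiteout name list ("whlist")
+ * when testing whether a directory is logically empty.
+ */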
+struct au_nhash {
+	unsigned int		nh_num;
+	struct hlist_head	*nh_head;
+};
+
+struct au_vdir_destr {
+	unsigned char	len;
+	unsigned char	name[];
+} __packed;
+
+struct au_vdir_dehstr {
+	struct hlist_node	hash;
+	struct au_vdir_destr	*str;
+	struct rcu_head		rcu;
+} ____cacheline_aligned_in_smp;
+
+struct au_vdir_de {
+	ino_t			de_ino;
+	unsigned char		de_type;
+	/* caution: packed */
+	struct au_vdir_destr	de_str;
+} __packed;
+
+struct au_vdir_wh {
+	struct hlist_node	wh_hash;
+#ifdef CONFIG_AUFS_SHWH
+	ino_t			wh_ino;
+	aufs_bindex_t		wh_bindex;
+	unsigned char		wh_type;
+#else
+	aufs_bindex_t		wh_bindex;
+#endif
+	/* caution: packed */
+	struct au_vdir_destr	wh_str;
+} __packed;
+
+union au_vdir_deblk_p {
+	unsigned char		*deblk;
+	struct au_vdir_de	*de;
+};
+
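+/*
+ * cached contents of a virtual directory: vd_nblk blocks of vd_deblk_sz bytes
+ * each, pointed to by the vd_deblk array.  vd_last remembers where the
+ * previous iteration stopped; vd_version and vd_jiffy presumably tell when
+ * the cache has to be refreshed.
+ */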
+struct au_vdir {
+	unsigned char	**vd_deblk;
+	unsigned long	vd_nblk;
+	struct {
+		unsigned long		ul;
+		union au_vdir_deblk_p	p;
+	} vd_last;
+
+	u64		vd_version;
+	unsigned int	vd_deblk_sz;
+	unsigned long	vd_jiffy;
+	struct rcu_head	rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dir.c */
+extern const struct file_operations aufs_dir_fop;
+void au_add_nlink(struct inode *dir, struct inode *h_dir);
+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
+loff_t au_dir_size(struct file *file, struct dentry *dentry);
+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc);
+int au_test_empty_lower(struct dentry *dentry);
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
+
+/* vdir.c */
+unsigned int au_rdhash_est(loff_t sz);
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
+void au_nhash_wh_free(struct au_nhash *whlist);
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+			    int limit);
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+		       unsigned int d_type, aufs_bindex_t bindex,
+		       unsigned char shwh);
+void au_vdir_free(struct au_vdir *vdir);
+int au_vdir_init(struct file *file);
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx);
+
+/* ioctl.c */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_AUFS_RDU
+/* rdu.c */
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg);
+#endif
+#else
+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file,
+       unsigned int cmd, unsigned long arg)
+#ifdef CONFIG_COMPAT
+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file,
+       unsigned int cmd, unsigned long arg)
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIR_H__ */
diff --git a/fs/aufs/dirren.c b/fs/aufs/dirren.c
new file mode 100644
index 00000000000..ba4bf56cfb8
--- /dev/null
+++ b/fs/aufs/dirren.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * special handling when renaming a directory,
+ * in order to support looking up the pre-rename name on the lower read-only
+ * branches
+ */
+
+#include <linux/byteorder/generic.h>
+#include "aufs.h"
+
+static void au_dr_hino_del(struct au_dr_br *dr, struct au_dr_hino *ent)
+{
+	int idx;
+
+	idx = au_dr_ihash(ent->dr_h_ino);
+	au_hbl_del(&ent->dr_hnode, dr->dr_h_ino + idx);
+}
+
+static int au_dr_hino_test_empty(struct au_dr_br *dr)
+{
+	int ret, i;
+	struct hlist_bl_head *hbl;
+
+	ret = 1;
+	for (i = 0; ret && i < AuDirren_NHASH; i++) {
+		hbl = dr->dr_h_ino + i;
+		hlist_bl_lock(hbl);
+		ret &= hlist_bl_empty(hbl);
+		hlist_bl_unlock(hbl);
+	}
+
+	return ret;
+}
+
+static struct au_dr_hino *au_dr_hino_find(struct au_dr_br *dr, ino_t ino)
+{
+	struct au_dr_hino *found, *ent;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	int idx;
+
+	found = NULL;
+	idx = au_dr_ihash(ino);
+	hbl = dr->dr_h_ino + idx;
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(ent, pos, hbl, dr_hnode)
+		if (ent->dr_h_ino == ino) {
+			found = ent;
+			break;
+		}
+	hlist_bl_unlock(hbl);
+
+	return found;
+}
+
+int au_dr_hino_test_add(struct au_dr_br *dr, ino_t ino,
+			struct au_dr_hino *add_ent)
+{
+	int found, idx;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_dr_hino *ent;
+
+	found = 0;
+	idx = au_dr_ihash(ino);
+	hbl = dr->dr_h_ino + idx;
+#if 0 /* debug print */
+	{
+		struct hlist_bl_node *tmp;
+
+		hlist_bl_for_each_entry_safe(ent, pos, tmp, hbl, dr_hnode)
+			AuDbg("hi%llu\n", (unsigned long long)ent->dr_h_ino);
+	}
+#endif
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(ent, pos, hbl, dr_hnode)
+		if (ent->dr_h_ino == ino) {
+			found = 1;
+			break;
+		}
+	if (!found && add_ent)
+		hlist_bl_add_head(&add_ent->dr_hnode, hbl);
+	hlist_bl_unlock(hbl);
+
+	if (!found && add_ent)
+		AuDbg("i%llu added\n", (unsigned long long)add_ent->dr_h_ino);
+
+	return found;
+}
+
+void au_dr_hino_free(struct au_dr_br *dr)
+{
+	int i;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos, *tmp;
+	struct au_dr_hino *ent;
+
+	/* SiMustWriteLock(sb); */
+
+	for (i = 0; i < AuDirren_NHASH; i++) {
+		hbl = dr->dr_h_ino + i;
+		/* no spinlock since sbinfo must be write-locked */
+		hlist_bl_for_each_entry_safe(ent, pos, tmp, hbl, dr_hnode)
+			au_kfree_rcu(ent);
+		INIT_HLIST_BL_HEAD(hbl);
+	}
+}
+
+/* store all the inode numbers; returns 0 on success or a negative error */
+static int au_dr_hino_store(struct super_block *sb, struct au_branch *br,
+			    struct file *hinofile)
+{
+	int err, i;
+	ssize_t ssz;
+	loff_t pos, oldsize;
+	__be64 u64;
+	struct inode *hinoinode;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *n1, *n2;
+	struct au_dr_hino *ent;
+
+	SiMustWriteLock(sb);
+	AuDebugOn(!au_br_writable(br->br_perm));
+
+	hinoinode = file_inode(hinofile);
+	oldsize = i_size_read(hinoinode);
+
+	err = 0;
+	pos = 0;
+	hbl = br->br_dirren.dr_h_ino;
+	for (i = 0; !err && i < AuDirren_NHASH; i++, hbl++) {
+		/* no bit-lock since sbinfo must be write-locked */
+		hlist_bl_for_each_entry_safe(ent, n1, n2, hbl, dr_hnode) {
+			AuDbg("hi%llu, %pD2\n",
+			      (unsigned long long)ent->dr_h_ino, hinofile);
+			u64 = cpu_to_be64(ent->dr_h_ino);
+			ssz = vfsub_write_k(hinofile, &u64, sizeof(u64), &pos);
+			if (ssz == sizeof(u64))
+				continue;
+
+			/* write error */
+			pr_err("ssz %zd, %pD2\n", ssz, hinofile);
+			err = -ENOSPC;
+			if (ssz < 0)
+				err = ssz;
+			break;
+		}
+	}
+	/* regardless of the error */
+	if (pos < oldsize) {
+		err = vfsub_trunc(&hinofile->f_path, pos, /*attr*/0, hinofile);
+		AuTraceErr(err);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_dr_hino_load(struct au_dr_br *dr, struct file *hinofile)
+{
+	int err, hidx;
+	ssize_t ssz;
+	size_t sz, n;
+	loff_t pos;
+	uint64_t u64;
+	struct au_dr_hino *ent;
+	struct inode *hinoinode;
+	struct hlist_bl_head *hbl;
+
+	err = 0;
+	pos = 0;
+	hbl = dr->dr_h_ino;
+	hinoinode = file_inode(hinofile);
+	sz = i_size_read(hinoinode);
+	AuDebugOn(sz % sizeof(u64));
+	n = sz / sizeof(u64);
+	while (n--) {
+		ssz = vfsub_read_k(hinofile, &u64, sizeof(u64), &pos);
+		if (unlikely(ssz != sizeof(u64))) {
+			pr_err("ssz %zd, %pD2\n", ssz, hinofile);
+			err = -EINVAL;
+			if (ssz < 0)
+				err = ssz;
+			goto out_free;
+		}
+
+		ent = kmalloc(sizeof(*ent), GFP_NOFS);
+		if (!ent) {
+			err = -ENOMEM;
+			AuTraceErr(err);
+			goto out_free;
+		}
+		ent->dr_h_ino = be64_to_cpu((__force __be64)u64);
+		AuDbg("hi%llu, %pD2\n",
+		      (unsigned long long)ent->dr_h_ino, hinofile);
+		hidx = au_dr_ihash(ent->dr_h_ino);
+		au_hbl_add(&ent->dr_hnode, hbl + hidx);
+	}
+	goto out; /* success */
+
+out_free:
+	au_dr_hino_free(dr);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * @bindex/@br selects whether hnotify is suspended or not.
+ * @path selects between load and store.
+ */
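+/*
+ * Store (@path is NULL): write the in-memory dr_h_ino hash of a writable
+ * branch into the AUFS_WH_DR_BRHINO file under the branch root, or unlink
+ * that file when the hash is empty.
+ * Load (@path given): read the file back into the hash, if it exists.
+ */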
+static int au_dr_hino(struct super_block *sb, aufs_bindex_t bindex,
+		      struct au_branch *br, const struct path *path)
+{
+	int err, flags;
+	unsigned char load, suspend;
+	struct file *hinofile;
+	struct au_hinode *hdir;
+	struct inode *dir, *delegated;
+	struct path hinopath;
+	struct qstr hinoname = QSTR_INIT(AUFS_WH_DR_BRHINO,
+					 sizeof(AUFS_WH_DR_BRHINO) - 1);
+
+	AuDebugOn(bindex < 0 && !br);
+	AuDebugOn(bindex >= 0 && br);
+
+	err = -EINVAL;
+	suspend = !br;
+	if (suspend)
+		br = au_sbr(sb, bindex);
+	load = !!path;
+	if (!load) {
+		path = &br->br_path;
+		AuDebugOn(!au_br_writable(br->br_perm));
+		if (unlikely(!au_br_writable(br->br_perm)))
+			goto out;
+	}
+
+	hdir = NULL;
+	if (suspend) {
+		dir = d_inode(sb->s_root);
+		hdir = au_hinode(au_ii(dir), bindex);
+		dir = hdir->hi_inode;
+		au_hn_inode_lock_nested(hdir, AuLsc_I_CHILD);
+	} else {
+		dir = d_inode(path->dentry);
+		inode_lock_nested(dir, AuLsc_I_CHILD);
+	}
+	hinopath.dentry = vfsub_lkup_one(&hinoname, path->dentry);
+	err = PTR_ERR(hinopath.dentry);
+	if (IS_ERR(hinopath.dentry))
+		goto out_unlock;
+
+	err = 0;
+	flags = O_RDONLY;
+	if (load) {
+		if (d_is_negative(hinopath.dentry))
+			goto out_dput; /* success */
+	} else {
+		if (au_dr_hino_test_empty(&br->br_dirren)) {
+			if (d_is_positive(hinopath.dentry)) {
+				delegated = NULL;
+				err = vfsub_unlink(dir, &hinopath, &delegated,
+						   /*force*/0);
+				AuTraceErr(err);
+				if (unlikely(err))
+					pr_err("ignored err %d, %pd2\n",
+					       err, hinopath.dentry);
+				if (unlikely(err == -EWOULDBLOCK))
+					iput(delegated);
+				err = 0;
+			}
+			goto out_dput;
+		} else if (!d_is_positive(hinopath.dentry)) {
+			err = vfsub_create(dir, &hinopath, 0600,
+					   /*want_excl*/false);
+			AuTraceErr(err);
+			if (unlikely(err))
+				goto out_dput;
+		}
+		flags = O_WRONLY;
+	}
+	hinopath.mnt = path->mnt;
+	hinofile = vfsub_dentry_open(&hinopath, flags);
+	if (suspend)
+		au_hn_inode_unlock(hdir);
+	else
+		inode_unlock(dir);
+	dput(hinopath.dentry);
+	AuTraceErrPtr(hinofile);
+	if (IS_ERR(hinofile)) {
+		err = PTR_ERR(hinofile);
+		goto out;
+	}
+
+	if (load)
+		err = au_dr_hino_load(&br->br_dirren, hinofile);
+	else
+		err = au_dr_hino_store(sb, br, hinofile);
+	fput(hinofile);
+	goto out;
+
+out_dput:
+	dput(hinopath.dentry);
+out_unlock:
+	if (suspend)
+		au_hn_inode_unlock(hdir);
+	else
+		inode_unlock(dir);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_dr_brid_init(struct au_dr_brid *brid, const struct path *path)
+{
+	int err;
+	struct kstatfs kstfs;
+	dev_t dev;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	err = vfs_statfs((void *)path, &kstfs);
+	AuTraceErr(err);
+	if (unlikely(err))
+		goto out;
+
+	/* todo: support for UUID */
+
+	if (kstfs.f_fsid.val[0] || kstfs.f_fsid.val[1]) {
+		brid->type = AuBrid_FSID;
+		brid->fsid = kstfs.f_fsid;
+	} else {
+		dentry = path->dentry;
+		sb = dentry->d_sb;
+		dev = sb->s_dev;
+		if (dev) {
+			brid->type = AuBrid_DEV;
+			brid->dev = dev;
+		}
+	}
+
+out:
+	return err;
+}
+
+int au_dr_br_init(struct super_block *sb, struct au_branch *br,
+		  const struct path *path)
+{
+	int err, i;
+	struct au_dr_br *dr;
+	struct hlist_bl_head *hbl;
+
+	dr = &br->br_dirren;
+	hbl = dr->dr_h_ino;
+	for (i = 0; i < AuDirren_NHASH; i++, hbl++)
+		INIT_HLIST_BL_HEAD(hbl);
+
+	err = au_dr_brid_init(&dr->dr_brid, path);
+	if (unlikely(err))
+		goto out;
+
+	if (au_opt_test(au_mntflags(sb), DIRREN))
+		err = au_dr_hino(sb, /*bindex*/-1, br, path);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_dr_br_fin(struct super_block *sb, struct au_branch *br)
+{
+	int err;
+
+	err = 0;
+	if (au_br_writable(br->br_perm))
+		err = au_dr_hino(sb, /*bindex*/-1, br, /*path*/NULL);
+	if (!err)
+		au_dr_hino_free(&br->br_dirren);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_brid_str(struct au_dr_brid *brid, struct inode *h_inode,
+		       char *buf, size_t sz)
+{
+	int err;
+	unsigned int major, minor;
+	char *p;
+
+	p = buf;
+	err = snprintf(p, sz, "%d_", brid->type);
+	AuDebugOn(err > sz);
+	p += err;
+	sz -= err;
+	switch (brid->type) {
+	case AuBrid_Unset:
+		return -EINVAL;
+	case AuBrid_UUID:
+		err = snprintf(p, sz, "%pU", brid->uuid.b);
+		break;
+	case AuBrid_FSID:
+		err = snprintf(p, sz, "%08x-%08x",
+			       brid->fsid.val[0], brid->fsid.val[1]);
+		break;
+	case AuBrid_DEV:
+		major = MAJOR(brid->dev);
+		minor = MINOR(brid->dev);
+		if (major <= 0xff && minor <= 0xff)
+			err = snprintf(p, sz, "%02x%02x", major, minor);
+		else
+			err = snprintf(p, sz, "%03x:%05x", major, minor);
+		break;
+	}
+	AuDebugOn(err > sz);
+	p += err;
+	sz -= err;
+	err = snprintf(p, sz, "_%llu", (unsigned long long)h_inode->i_ino);
+	AuDebugOn(err > sz);
+	p += err;
+	sz -= err;
+
+	return p - buf;
+}
+
+static int au_drinfo_name(struct au_branch *br, char *name, int len)
+{
+	int rlen;
+	struct dentry *br_dentry;
+	struct inode *br_inode;
+
+	br_dentry = au_br_dentry(br);
+	br_inode = d_inode(br_dentry);
+	rlen = au_brid_str(&br->br_dirren.dr_brid, br_inode, name, len);
+	AuDebugOn(rlen >= AUFS_DIRREN_ENV_VAL_SZ);
+	AuDebugOn(rlen > len);
+
+	return rlen;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * From the given @h_dentry, construct drinfo in @*fdata.
+ * When @*fdata is not large enough, reallocate it and return the new @fdata
+ * and @allocated.
+ */
+static int au_drinfo_construct(struct au_drinfo_fdata **fdata,
+			       struct dentry *h_dentry,
+			       unsigned char *allocated)
+{
+	int err, v;
+	struct au_drinfo_fdata *f, *p;
+	struct au_drinfo *drinfo;
+	struct inode *h_inode;
+	struct qstr *qname;
+
+	err = 0;
+	f = *fdata;
+	h_inode = d_inode(h_dentry);
+	qname = &h_dentry->d_name;
+	drinfo = &f->drinfo;
+	drinfo->ino = (__force uint64_t)cpu_to_be64(h_inode->i_ino);
+	drinfo->oldnamelen = qname->len;
+	if (*allocated < sizeof(*f) + qname->len) {
+		v = roundup_pow_of_two(*allocated + qname->len);
+		p = au_krealloc(f, v, GFP_NOFS, /*may_shrink*/0);
+		if (unlikely(!p)) {
+			err = -ENOMEM;
+			AuTraceErr(err);
+			goto out;
+		}
+		f = p;
+		*fdata = f;
+		*allocated = v;
+		drinfo = &f->drinfo;
+	}
+	memcpy(drinfo->oldname, qname->name, qname->len);
+	AuDbg("i%llu, %.*s\n",
+	      be64_to_cpu((__force __be64)drinfo->ino), drinfo->oldnamelen,
+	      drinfo->oldname);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* callers have to free the return value */
+static struct au_drinfo *au_drinfo_read_k(struct file *file, ino_t h_ino)
+{
+	struct au_drinfo *ret, *drinfo;
+	struct au_drinfo_fdata fdata;
+	int len;
+	loff_t pos;
+	ssize_t ssz;
+
+	ret = ERR_PTR(-EIO);
+	pos = 0;
+	ssz = vfsub_read_k(file, &fdata, sizeof(fdata), &pos);
+	if (unlikely(ssz != sizeof(fdata))) {
+		AuIOErr("ssz %zd, %u, %pD2\n",
+			ssz, (unsigned int)sizeof(fdata), file);
+		goto out;
+	}
+
+	fdata.magic = ntohl((__force __be32)fdata.magic);
+	switch (fdata.magic) {
+	case AUFS_DRINFO_MAGIC_V1:
+		break;
+	default:
+		AuIOErr("magic-num 0x%x, 0x%x, %pD2\n",
+			fdata.magic, AUFS_DRINFO_MAGIC_V1, file);
+		goto out;
+	}
+
+	drinfo = &fdata.drinfo;
+	len = drinfo->oldnamelen;
+	if (!len) {
+		AuIOErr("broken drinfo %pD2\n", file);
+		goto out;
+	}
+
+	ret = NULL;
+	drinfo->ino = be64_to_cpu((__force __be64)drinfo->ino);
+	if (unlikely(h_ino && drinfo->ino != h_ino)) {
+		AuDbg("ignored i%llu, i%llu, %pD2\n",
+		      (unsigned long long)drinfo->ino,
+		      (unsigned long long)h_ino, file);
+		goto out; /* success */
+	}
+
+	ret = kmalloc(sizeof(*ret) + len, GFP_NOFS);
+	if (unlikely(!ret)) {
+		ret = ERR_PTR(-ENOMEM);
+		AuTraceErrPtr(ret);
+		goto out;
+	}
+
+	*ret = *drinfo;
+	ssz = vfsub_read_k(file, (void *)ret->oldname, len, &pos);
+	if (unlikely(ssz != len)) {
+		au_kfree_rcu(ret);
+		ret = ERR_PTR(-EIO);
+		AuIOErr("ssz %zd, %u, %pD2\n", ssz, len, file);
+		goto out;
+	}
+
+	AuDbg("oldname %.*s\n", ret->oldnamelen, ret->oldname);
+
+out:
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* in order to be revertible */
+struct au_drinfo_rev_elm {
+	int			created;
+	struct dentry		*info_dentry;
+	struct au_drinfo	*info_last;
+};
+
+struct au_drinfo_rev {
+	unsigned char			already;
+	aufs_bindex_t			nelm;
+	struct au_drinfo_rev_elm	elm[];
+};
+
+/* todo: isn't it too large? */
+struct au_drinfo_store {
+	struct path h_ppath;
+	struct dentry *h_dentry;
+	struct au_drinfo_fdata *fdata;
+	char *infoname;			/* inside of whname, just after PFX */
+	char whname[sizeof(AUFS_WH_DR_INFO_PFX) + AUFS_DIRREN_ENV_VAL_SZ];
+	aufs_bindex_t btgt, btail;
+	unsigned char no_sio,
+		allocated,		/* current size of *fdata */
+		infonamelen,		/* room size for infoname */
+		whnamelen,		/* length of the generated name */
+		renameback;		/* renamed back */
+};
+
+/* on rename(2) error, the caller should revert it using @elm */
+static int au_drinfo_do_store(struct au_drinfo_store *w,
+			      struct au_drinfo_rev_elm *elm)
+{
+	int err, len;
+	ssize_t ssz;
+	loff_t pos;
+	struct path infopath = {
+		.mnt = w->h_ppath.mnt
+	};
+	struct inode *h_dir, *h_inode, *delegated;
+	struct file *infofile;
+	struct qstr *qname;
+
+	AuDebugOn(elm
+		  && memcmp(elm, page_address(ZERO_PAGE(0)), sizeof(*elm)));
+
+	infopath.dentry = vfsub_lookup_one_len(w->whname, w->h_ppath.dentry,
+					       w->whnamelen);
+	AuTraceErrPtr(infopath.dentry);
+	if (IS_ERR(infopath.dentry)) {
+		err = PTR_ERR(infopath.dentry);
+		goto out;
+	}
+
+	err = 0;
+	h_dir = d_inode(w->h_ppath.dentry);
+	if (elm && d_is_negative(infopath.dentry)) {
+		err = vfsub_create(h_dir, &infopath, 0600, /*want_excl*/true);
+		AuTraceErr(err);
+		if (unlikely(err))
+			goto out_dput;
+		elm->created = 1;
+		elm->info_dentry = dget(infopath.dentry);
+	}
+
+	infofile = vfsub_dentry_open(&infopath, O_RDWR);
+	AuTraceErrPtr(infofile);
+	if (IS_ERR(infofile)) {
+		err = PTR_ERR(infofile);
+		goto out_dput;
+	}
+
+	h_inode = d_inode(infopath.dentry);
+	if (elm && i_size_read(h_inode)) {
+		h_inode = d_inode(w->h_dentry);
+		elm->info_last = au_drinfo_read_k(infofile, h_inode->i_ino);
+		AuTraceErrPtr(elm->info_last);
+		if (IS_ERR(elm->info_last)) {
+			err = PTR_ERR(elm->info_last);
+			elm->info_last = NULL;
+			AuDebugOn(elm->info_dentry);
+			goto out_fput;
+		}
+	}
+
+	if (elm && w->renameback) {
+		delegated = NULL;
+		err = vfsub_unlink(h_dir, &infopath, &delegated, /*force*/0);
+		AuTraceErr(err);
+		if (unlikely(err == -EWOULDBLOCK))
+			iput(delegated);
+		goto out_fput;
+	}
+
+	pos = 0;
+	qname = &w->h_dentry->d_name;
+	len = sizeof(*w->fdata) + qname->len;
+	if (!elm)
+		len = sizeof(*w->fdata) + w->fdata->drinfo.oldnamelen;
+	ssz = vfsub_write_k(infofile, w->fdata, len, &pos);
+	if (ssz == len) {
+		AuDbg("hi%llu, %.*s\n", w->fdata->drinfo.ino,
+		      w->fdata->drinfo.oldnamelen, w->fdata->drinfo.oldname);
+		goto out_fput; /* success */
+	} else {
+		err = -EIO;
+		if (ssz < 0)
+			err = ssz;
+		/* the caller should revert it using @elm */
+	}
+
+out_fput:
+	fput(infofile);
+out_dput:
+	dput(infopath.dentry);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+struct au_call_drinfo_do_store_args {
+	int *errp;
+	struct au_drinfo_store *w;
+	struct au_drinfo_rev_elm *elm;
+};
+
+static void au_call_drinfo_do_store(void *args)
+{
+	struct au_call_drinfo_do_store_args *a = args;
+
+	*a->errp = au_drinfo_do_store(a->w, a->elm);
+}
+
+static int au_drinfo_store_sio(struct au_drinfo_store *w,
+			       struct au_drinfo_rev_elm *elm)
+{
+	int err, wkq_err;
+
+	if (w->no_sio)
+		err = au_drinfo_do_store(w, elm);
+	else {
+		struct au_call_drinfo_do_store_args a = {
+			.errp	= &err,
+			.w	= w,
+			.elm	= elm
+		};
+		wkq_err = au_wkq_wait(au_call_drinfo_do_store, &a);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+	AuTraceErr(err);
+
+	return err;
+}
+
+static int au_drinfo_store_work_init(struct au_drinfo_store *w,
+				     aufs_bindex_t btgt)
+{
+	int err;
+
+	memset(w, 0, sizeof(*w));
+	w->allocated = roundup_pow_of_two(sizeof(*w->fdata) + 40);
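+	/* 40: initial room for the old name; au_drinfo_construct() grows it */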
+	strcpy(w->whname, AUFS_WH_DR_INFO_PFX);
+	w->infoname = w->whname + sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+	w->infonamelen = sizeof(w->whname) - sizeof(AUFS_WH_DR_INFO_PFX);
+	w->btgt = btgt;
+	w->no_sio = !!uid_eq(current_fsuid(), GLOBAL_ROOT_UID);
+
+	err = -ENOMEM;
+	w->fdata = kcalloc(1, w->allocated, GFP_NOFS);
+	if (unlikely(!w->fdata)) {
+		AuTraceErr(err);
+		goto out;
+	}
+	w->fdata->magic = (__force uint32_t)htonl(AUFS_DRINFO_MAGIC_V1);
+	err = 0;
+
+out:
+	return err;
+}
+
+static void au_drinfo_store_work_fin(struct au_drinfo_store *w)
+{
+	au_kfree_rcu(w->fdata);
+}
+
+static void au_drinfo_store_rev(struct au_drinfo_rev *rev,
+				struct au_drinfo_store *w)
+{
+	struct au_drinfo_rev_elm *elm;
+	struct inode *h_dir, *delegated;
+	int err, nelm;
+	struct path infopath = {
+		.mnt = w->h_ppath.mnt
+	};
+
+	h_dir = d_inode(w->h_ppath.dentry);
+	IMustLock(h_dir);
+
+	err = 0;
+	elm = rev->elm;
+	for (nelm = rev->nelm; nelm > 0; nelm--, elm++) {
+		AuDebugOn(elm->created && elm->info_last);
+		if (elm->created) {
+			AuDbg("here\n");
+			delegated = NULL;
+			infopath.dentry = elm->info_dentry;
+			err = vfsub_unlink(h_dir, &infopath, &delegated,
+					   !w->no_sio);
+			AuTraceErr(err);
+			if (unlikely(err == -EWOULDBLOCK))
+				iput(delegated);
+			dput(elm->info_dentry);
+		} else if (elm->info_last) {
+			AuDbg("here\n");
+			w->fdata->drinfo = *elm->info_last;
+			memcpy(w->fdata->drinfo.oldname,
+			       elm->info_last->oldname,
+			       elm->info_last->oldnamelen);
+			err = au_drinfo_store_sio(w, /*elm*/NULL);
+			au_kfree_rcu(elm->info_last);
+		}
+		if (unlikely(err))
+			AuIOErr("%d, %s\n", err, w->whname);
+		/* go on even if err */
+	}
+}
+
+/* caller has to call au_dr_rename_fin() later */
+static int au_drinfo_store(struct dentry *dentry, aufs_bindex_t btgt,
+			   struct qstr *dst_name, void *_rev)
+{
+	int err, sz, nelm;
+	aufs_bindex_t bindex, btail;
+	struct au_drinfo_store work;
+	struct au_drinfo_rev *rev, **p;
+	struct au_drinfo_rev_elm *elm;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_hinode *hdir;
+
+	err = au_drinfo_store_work_init(&work, btgt);
+	AuTraceErr(err);
+	if (unlikely(err))
+		goto out;
+
+	err = -ENOMEM;
+	btail = au_dbtaildir(dentry);
+	nelm = btail - btgt;
+	sz = sizeof(*rev) + sizeof(*elm) * nelm;
+	rev = kcalloc(1, sz, GFP_NOFS);
+	if (unlikely(!rev)) {
+		AuTraceErr(err);
+		goto out_args;
+	}
+	rev->nelm = nelm;
+	elm = rev->elm;
+	p = _rev;
+	*p = rev;
+
+	err = 0;
+	sb = dentry->d_sb;
+	work.h_ppath.dentry = au_h_dptr(dentry, btgt);
+	work.h_ppath.mnt = au_sbr_mnt(sb, btgt);
+	hdir = au_hi(d_inode(dentry), btgt);
+	au_hn_inode_lock_nested(hdir, AuLsc_I_CHILD);
+	for (bindex = btgt + 1; bindex <= btail; bindex++, elm++) {
+		work.h_dentry = au_h_dptr(dentry, bindex);
+		if (!work.h_dentry)
+			continue;
+
+		err = au_drinfo_construct(&work.fdata, work.h_dentry,
+					  &work.allocated);
+		AuTraceErr(err);
+		if (unlikely(err))
+			break;
+
+		work.renameback = au_qstreq(&work.h_dentry->d_name, dst_name);
+		br = au_sbr(sb, bindex);
+		work.whnamelen = sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+		work.whnamelen += au_drinfo_name(br, work.infoname,
+						 work.infonamelen);
+		AuDbg("whname %.*s, i%llu, %.*s\n",
+		      work.whnamelen, work.whname,
+		      be64_to_cpu((__force __be64)work.fdata->drinfo.ino),
+		      work.fdata->drinfo.oldnamelen,
+		      work.fdata->drinfo.oldname);
+
+		err = au_drinfo_store_sio(&work, elm);
+		AuTraceErr(err);
+		if (unlikely(err))
+			break;
+	}
+	if (unlikely(err)) {
+		/* revert all drinfo */
+		au_drinfo_store_rev(rev, &work);
+		au_kfree_try_rcu(rev);
+		*p = NULL;
+	}
+	au_hn_inode_unlock(hdir);
+
+out_args:
+	au_drinfo_store_work_fin(&work);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dr_rename(struct dentry *src, aufs_bindex_t bindex,
+		 struct qstr *dst_name, void *_rev)
+{
+	int err, already;
+	ino_t ino;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_dr_br *dr;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct au_dr_hino *ent;
+	struct au_drinfo_rev *rev, **p;
+
+	AuDbg("bindex %d\n", bindex);
+
+	err = -ENOMEM;
+	ent = kmalloc(sizeof(*ent), GFP_NOFS);
+	if (unlikely(!ent))
+		goto out;
+
+	sb = src->d_sb;
+	br = au_sbr(sb, bindex);
+	dr = &br->br_dirren;
+	h_dentry = au_h_dptr(src, bindex);
+	h_inode = d_inode(h_dentry);
+	ino = h_inode->i_ino;
+	ent->dr_h_ino = ino;
+	already = au_dr_hino_test_add(dr, ino, ent);
+	AuDbg("b%d, hi%llu, already %d\n",
+	      bindex, (unsigned long long)ino, already);
+
+	err = au_drinfo_store(src, bindex, dst_name, _rev);
+	AuTraceErr(err);
+	if (!err) {
+		p = _rev;
+		rev = *p;
+		rev->already = already;
+		goto out; /* success */
+	}
+
+	/* revert */
+	if (!already)
+		au_dr_hino_del(dr, ent);
+	au_kfree_rcu(ent);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+void au_dr_rename_fin(struct dentry *src, aufs_bindex_t btgt, void *_rev)
+{
+	struct au_drinfo_rev *rev;
+	struct au_drinfo_rev_elm *elm;
+	int nelm;
+
+	rev = _rev;
+	elm = rev->elm;
+	for (nelm = rev->nelm; nelm > 0; nelm--, elm++) {
+		dput(elm->info_dentry);
+		au_kfree_rcu(elm->info_last);
+	}
+	au_kfree_try_rcu(rev);
+}
+
+void au_dr_rename_rev(struct dentry *src, aufs_bindex_t btgt, void *_rev)
+{
+	int err;
+	struct au_drinfo_store work;
+	struct au_drinfo_rev *rev = _rev;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct inode *h_inode;
+	struct au_dr_br *dr;
+	struct au_dr_hino *ent;
+
+	err = au_drinfo_store_work_init(&work, btgt);
+	if (unlikely(err))
+		goto out;
+
+	sb = src->d_sb;
+	br = au_sbr(sb, btgt);
+	work.h_ppath.dentry = au_h_dptr(src, btgt);
+	work.h_ppath.mnt = au_br_mnt(br);
+	au_drinfo_store_rev(rev, &work);
+	au_drinfo_store_work_fin(&work);
+	if (rev->already)
+		goto out;
+
+	dr = &br->br_dirren;
+	h_inode = d_inode(work.h_ppath.dentry);
+	ent = au_dr_hino_find(dr, h_inode->i_ino);
+	BUG_ON(!ent);
+	au_dr_hino_del(dr, ent);
+	au_kfree_rcu(ent);
+
+out:
+	au_kfree_try_rcu(rev);
+	if (unlikely(err))
+		pr_err("failed to remove dirren info\n");
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct au_drinfo *au_drinfo_do_load(struct path *h_ppath,
+					   char *whname, int whnamelen,
+					   struct dentry **info_dentry)
+{
+	struct au_drinfo *drinfo;
+	struct file *f;
+	struct inode *h_dir;
+	struct path infopath;
+	int unlocked;
+
+	AuDbg("%pd/%.*s\n", h_ppath->dentry, whnamelen, whname);
+
+	*info_dentry = NULL;
+	drinfo = NULL;
+	unlocked = 0;
+	h_dir = d_inode(h_ppath->dentry);
+	inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+	infopath.dentry = vfsub_lookup_one_len(whname, h_ppath->dentry,
+					       whnamelen);
+	if (IS_ERR(infopath.dentry)) {
+		drinfo = (void *)infopath.dentry;
+		goto out;
+	}
+
+	if (d_is_negative(infopath.dentry))
+		goto out_dput; /* success */
+
+	infopath.mnt = h_ppath->mnt;
+	f = vfsub_dentry_open(&infopath, O_RDONLY);
+	inode_unlock_shared(h_dir);
+	unlocked = 1;
+	if (IS_ERR(f)) {
+		drinfo = (void *)f;
+		goto out_dput;
+	}
+
+	drinfo = au_drinfo_read_k(f, /*h_ino*/0);
+	if (IS_ERR_OR_NULL(drinfo))
+		goto out_fput;
+
+	AuDbg("oldname %.*s\n", drinfo->oldnamelen, drinfo->oldname);
+	*info_dentry = dget(infopath.dentry); /* keep it alive */
+
+out_fput:
+	fput(f);
+out_dput:
+	dput(infopath.dentry);
+out:
+	if (!unlocked)
+		inode_unlock_shared(h_dir);
+	AuTraceErrPtr(drinfo);
+	return drinfo;
+}
+
+struct au_drinfo_do_load_args {
+	struct au_drinfo **drinfop;
+	struct path *h_ppath;
+	char *whname;
+	int whnamelen;
+	struct dentry **info_dentry;
+};
+
+static void au_call_drinfo_do_load(void *args)
+{
+	struct au_drinfo_do_load_args *a = args;
+
+	*a->drinfop = au_drinfo_do_load(a->h_ppath, a->whname, a->whnamelen,
+					a->info_dentry);
+}
+
+struct au_drinfo_load {
+	struct path h_ppath;
+	struct qstr *qname;
+	unsigned char no_sio;
+
+	aufs_bindex_t ninfo;
+	struct au_drinfo **drinfo;
+};
+
+static int au_drinfo_load(struct au_drinfo_load *w, aufs_bindex_t bindex,
+			  struct au_branch *br)
+{
+	int err, wkq_err, whnamelen, e;
+	char whname[sizeof(AUFS_WH_DR_INFO_PFX) + AUFS_DIRREN_ENV_VAL_SZ]
+		= AUFS_WH_DR_INFO_PFX;
+	struct au_drinfo *drinfo;
+	struct qstr oldname;
+	struct inode *h_dir, *delegated;
+	struct dentry *info_dentry;
+	struct path infopath;
+
+	whnamelen = sizeof(AUFS_WH_DR_INFO_PFX) - 1;
+	whnamelen += au_drinfo_name(br, whname + whnamelen,
+				    sizeof(whname) - whnamelen);
+	if (w->no_sio)
+		drinfo = au_drinfo_do_load(&w->h_ppath, whname, whnamelen,
+					   &info_dentry);
+	else {
+		struct au_drinfo_do_load_args args = {
+			.drinfop	= &drinfo,
+			.h_ppath	= &w->h_ppath,
+			.whname		= whname,
+			.whnamelen	= whnamelen,
+			.info_dentry	= &info_dentry
+		};
+		wkq_err = au_wkq_wait(au_call_drinfo_do_load, &args);
+		if (unlikely(wkq_err))
+			drinfo = ERR_PTR(wkq_err);
+	}
+	err = PTR_ERR(drinfo);
+	if (IS_ERR_OR_NULL(drinfo))
+		goto out;
+
+	err = 0;
+	oldname.len = drinfo->oldnamelen;
+	oldname.name = drinfo->oldname;
+	if (au_qstreq(w->qname, &oldname)) {
+		/* the name is renamed back */
+		au_kfree_rcu(drinfo);
+		drinfo = NULL;
+
+		infopath.dentry = info_dentry;
+		infopath.mnt = w->h_ppath.mnt;
+		h_dir = d_inode(w->h_ppath.dentry);
+		delegated = NULL;
+		inode_lock_nested(h_dir, AuLsc_I_PARENT);
+		e = vfsub_unlink(h_dir, &infopath, &delegated, !w->no_sio);
+		inode_unlock(h_dir);
+		if (unlikely(e))
+			AuIOErr("ignored %d, %pd2\n", e, infopath.dentry);
+		if (unlikely(e == -EWOULDBLOCK))
+			iput(delegated);
+	}
+	au_kfree_rcu(w->drinfo[bindex]);
+	w->drinfo[bindex] = drinfo;
+	dput(info_dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_dr_lkup_free(struct au_drinfo **drinfo, int n)
+{
+	struct au_drinfo **p = drinfo;
+
+	while (n-- > 0)
+		au_kfree_rcu(*drinfo++);
+	au_kfree_try_rcu(p);
+}
+
+int au_dr_lkup(struct au_do_lookup_args *lkup, struct dentry *dentry,
+	       aufs_bindex_t btgt)
+{
+	int err, ninfo;
+	struct au_drinfo_load w;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+	struct inode *h_dir;
+	struct au_dr_hino *ent;
+	struct super_block *sb;
+
+	AuDbg("%.*s, name %.*s, whname %.*s, b%d\n",
+	      AuLNPair(&dentry->d_name), AuLNPair(&lkup->dirren.dr_name),
+	      AuLNPair(&lkup->whname), btgt);
+
+	sb = dentry->d_sb;
+	bbot = au_sbbot(sb);
+	w.ninfo = bbot + 1;
+	if (!lkup->dirren.drinfo) {
+		lkup->dirren.drinfo = kcalloc(w.ninfo,
+					      sizeof(*lkup->dirren.drinfo),
+					      GFP_NOFS);
+		if (unlikely(!lkup->dirren.drinfo)) {
+			err = -ENOMEM;
+			goto out;
+		}
+		lkup->dirren.ninfo = w.ninfo;
+	}
+	w.drinfo = lkup->dirren.drinfo;
+	w.no_sio = !!uid_eq(current_fsuid(), GLOBAL_ROOT_UID);
+	w.h_ppath.dentry = au_h_dptr(dentry, btgt);
+	AuDebugOn(!w.h_ppath.dentry);
+	w.h_ppath.mnt = au_sbr_mnt(sb, btgt);
+	w.qname = &dentry->d_name;
+
+	ninfo = 0;
+	for (bindex = btgt + 1; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_drinfo_load(&w, bindex, br);
+		if (unlikely(err))
+			goto out_free;
+		if (w.drinfo[bindex])
+			ninfo++;
+	}
+	if (!ninfo) {
+		br = au_sbr(sb, btgt);
+		h_dir = d_inode(w.h_ppath.dentry);
+		ent = au_dr_hino_find(&br->br_dirren, h_dir->i_ino);
+		AuDebugOn(!ent);
+		au_dr_hino_del(&br->br_dirren, ent);
+		au_kfree_rcu(ent);
+	}
+	goto out; /* success */
+
+out_free:
+	au_dr_lkup_free(lkup->dirren.drinfo, lkup->dirren.ninfo);
+	lkup->dirren.ninfo = 0;
+	lkup->dirren.drinfo = NULL;
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+void au_dr_lkup_fin(struct au_do_lookup_args *lkup)
+{
+	au_dr_lkup_free(lkup->dirren.drinfo, lkup->dirren.ninfo);
+}
+
+int au_dr_lkup_name(struct au_do_lookup_args *lkup, aufs_bindex_t btgt)
+{
+	int err;
+	struct au_drinfo *drinfo;
+
+	err = 0;
+	if (!lkup->dirren.drinfo)
+		goto out;
+	AuDebugOn(lkup->dirren.ninfo <= btgt);
+	drinfo = lkup->dirren.drinfo[btgt];
+	if (!drinfo)
+		goto out;
+
+	au_kfree_try_rcu(lkup->whname.name);
+	lkup->whname.name = NULL;
+	lkup->dirren.dr_name.len = drinfo->oldnamelen;
+	lkup->dirren.dr_name.name = drinfo->oldname;
+	lkup->name = &lkup->dirren.dr_name;
+	err = au_wh_name_alloc(&lkup->whname, lkup->name);
+	if (!err)
+		AuDbg("name %.*s, whname %.*s, b%d\n",
+		      AuLNPair(lkup->name), AuLNPair(&lkup->whname),
+		      btgt);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_dr_lkup_h_ino(struct au_do_lookup_args *lkup, aufs_bindex_t bindex,
+		     ino_t h_ino)
+{
+	int match;
+	struct au_drinfo *drinfo;
+
+	match = 1;
+	if (!lkup->dirren.drinfo)
+		goto out;
+	AuDebugOn(lkup->dirren.ninfo <= bindex);
+	drinfo = lkup->dirren.drinfo[bindex];
+	if (!drinfo)
+		goto out;
+
+	match = (drinfo->ino == h_ino);
+	AuDbg("match %d\n", match);
+
+out:
+	return match;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dr_opt_set(struct super_block *sb)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	err = 0;
+	bbot = au_sbbot(sb);
+	for (bindex = 0; !err && bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_dr_hino(sb, bindex, /*br*/NULL, &br->br_path);
+	}
+
+	return err;
+}
+
+int au_dr_opt_flush(struct super_block *sb)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	err = 0;
+	bbot = au_sbbot(sb);
+	for (bindex = 0; !err && bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_writable(br->br_perm))
+			err = au_dr_hino(sb, bindex, /*br*/NULL, /*path*/NULL);
+	}
+
+	return err;
+}
+
+int au_dr_opt_clr(struct super_block *sb, int no_flush)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	err = 0;
+	if (!no_flush) {
+		err = au_dr_opt_flush(sb);
+		if (unlikely(err))
+			goto out;
+	}
+
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		au_dr_hino_free(&br->br_dirren);
+	}
+
+out:
+	return err;
+}
diff --git a/fs/aufs/dirren.h b/fs/aufs/dirren.h
new file mode 100644
index 00000000000..1fbc8fb20de
--- /dev/null
+++ b/fs/aufs/dirren.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * renamed dir info
+ */
+
+#ifndef __AUFS_DIRREN_H__
+#define __AUFS_DIRREN_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/statfs.h>
+#include <linux/uuid.h>
+#include "hbl.h"
+
+#define AuDirren_NHASH 100
+
+#ifdef CONFIG_AUFS_DIRREN
+enum au_brid_type {
+	AuBrid_Unset,
+	AuBrid_UUID,
+	AuBrid_FSID,
+	AuBrid_DEV
+};
+
+struct au_dr_brid {
+	enum au_brid_type	type;
+	union {
+		uuid_t	uuid;	/* unimplemented yet */
+		fsid_t	fsid;
+		dev_t	dev;
+	};
+};
+
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned integer */
+/* brid-type "_" uuid "_" inum */
+#define AUFS_DIRREN_FNAME_SZ	(1 + 1 + UUID_STRING_LEN + 20)
+#define AUFS_DIRREN_ENV_VAL_SZ	(AUFS_DIRREN_FNAME_SZ + 1 + 20)
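+/*
+ * e.g. au_brid_str() turns an AuBrid_DEV branch on device 8:1 whose root
+ * inode number is 42 into "3_0801_42" (illustrative values).
+ */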
+
+struct au_dr_hino {
+	struct hlist_bl_node	dr_hnode;
+	ino_t			dr_h_ino;
+};
+
+struct au_dr_br {
+	struct hlist_bl_head	dr_h_ino[AuDirren_NHASH];
+	struct au_dr_brid	dr_brid;
+};
+
+struct au_dr_lookup {
+	/* dr_name is pointed by struct au_do_lookup_args.name */
+	struct qstr		dr_name; /* subset of dr_info */
+	aufs_bindex_t		ninfo;
+	struct au_drinfo	**drinfo;
+};
+#else
+struct au_dr_hino;
+/* empty */
+struct au_dr_br { };
+struct au_dr_lookup { };
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+struct au_do_lookup_args;
+struct au_hinode;
+#ifdef CONFIG_AUFS_DIRREN
+int au_dr_hino_test_add(struct au_dr_br *dr, ino_t h_ino,
+			struct au_dr_hino *add_ent);
+void au_dr_hino_free(struct au_dr_br *dr);
+int au_dr_br_init(struct super_block *sb, struct au_branch *br,
+		  const struct path *path);
+int au_dr_br_fin(struct super_block *sb, struct au_branch *br);
+int au_dr_rename(struct dentry *src, aufs_bindex_t bindex,
+		 struct qstr *dst_name, void *_rev);
+void au_dr_rename_fin(struct dentry *src, aufs_bindex_t btgt, void *rev);
+void au_dr_rename_rev(struct dentry *src, aufs_bindex_t bindex, void *rev);
+int au_dr_lkup(struct au_do_lookup_args *lkup, struct dentry *dentry,
+	       aufs_bindex_t bindex);
+int au_dr_lkup_name(struct au_do_lookup_args *lkup, aufs_bindex_t btgt);
+int au_dr_lkup_h_ino(struct au_do_lookup_args *lkup, aufs_bindex_t bindex,
+		     ino_t h_ino);
+void au_dr_lkup_fin(struct au_do_lookup_args *lkup);
+int au_dr_opt_set(struct super_block *sb);
+int au_dr_opt_flush(struct super_block *sb);
+int au_dr_opt_clr(struct super_block *sb, int no_flush);
+#else
+AuStubInt0(au_dr_hino_test_add, struct au_dr_br *dr, ino_t h_ino,
+	   struct au_dr_hino *add_ent);
+AuStubVoid(au_dr_hino_free, struct au_dr_br *dr);
+AuStubInt0(au_dr_br_init, struct super_block *sb, struct au_branch *br,
+	   const struct path *path);
+AuStubInt0(au_dr_br_fin, struct super_block *sb, struct au_branch *br);
+AuStubInt0(au_dr_rename, struct dentry *src, aufs_bindex_t bindex,
+	   struct qstr *dst_name, void *_rev);
+AuStubVoid(au_dr_rename_fin, struct dentry *src, aufs_bindex_t btgt, void *rev);
+AuStubVoid(au_dr_rename_rev, struct dentry *src, aufs_bindex_t bindex,
+	   void *rev);
+AuStubInt0(au_dr_lkup, struct au_do_lookup_args *lkup, struct dentry *dentry,
+	   aufs_bindex_t bindex);
+AuStubInt0(au_dr_lkup_name, struct au_do_lookup_args *lkup, aufs_bindex_t btgt);
+AuStubInt0(au_dr_lkup_h_ino, struct au_do_lookup_args *lkup,
+	   aufs_bindex_t bindex, ino_t h_ino);
+AuStubVoid(au_dr_lkup_fin, struct au_do_lookup_args *lkup);
+AuStubInt0(au_dr_opt_set, struct super_block *sb);
+AuStubInt0(au_dr_opt_flush, struct super_block *sb);
+AuStubInt0(au_dr_opt_clr, struct super_block *sb, int no_flush);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DIRREN
+static inline int au_dr_ihash(ino_t h_ino)
+{
+	return h_ino % AuDirren_NHASH;
+}
+#else
+AuStubInt0(au_dr_ihash, ino_t h_ino);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIRREN_H__ */
diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c
new file mode 100644
index 00000000000..837f94d49f7
--- /dev/null
+++ b/fs/aufs/dynop.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dynamically customizable operations for regular files
+ */
+
+#include "aufs.h"
+
+#define DyPrSym(key)	AuDbgSym(key->dk_op.dy_hop)
+
+/*
+ * How large will these lists be?
+ * Usually just a few elements, 20-30 at most for each, I guess.
+ */
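+/*
+ * One au_dykey exists per distinct lower operation table (currently only
+ * address_space_operations); it is refcounted via dk_kref, looked up in the
+ * 'dynop' hash below, and also cached per-branch in br_dykey[].
+ */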
+static struct hlist_bl_head dynop[AuDyLast];
+
+static struct au_dykey *dy_gfind_get(struct hlist_bl_head *hbl,
+				     const void *h_op)
+{
+	struct au_dykey *key, *tmp;
+	struct hlist_bl_node *pos;
+
+	key = NULL;
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(tmp, pos, hbl, dk_hnode)
+		if (tmp->dk_op.dy_hop == h_op) {
+			if (kref_get_unless_zero(&tmp->dk_kref))
+				key = tmp;
+			break;
+		}
+	hlist_bl_unlock(hbl);
+
+	return key;
+}
+
+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
+{
+	struct au_dykey **k, *found;
+	const void *h_op = key->dk_op.dy_hop;
+	int i;
+
+	found = NULL;
+	k = br->br_dykey;
+	for (i = 0; i < AuBrDynOp; i++)
+		if (k[i]) {
+			if (k[i]->dk_op.dy_hop == h_op) {
+				found = k[i];
+				break;
+			}
+		} else
+			break;
+	if (!found) {
+		spin_lock(&br->br_dykey_lock);
+		for (; i < AuBrDynOp; i++)
+			if (k[i]) {
+				if (k[i]->dk_op.dy_hop == h_op) {
+					found = k[i];
+					break;
+				}
+			} else {
+				k[i] = key;
+				break;
+			}
+		spin_unlock(&br->br_dykey_lock);
+		BUG_ON(i == AuBrDynOp); /* expand the array */
+	}
+
+	return found;
+}
+
+/* kref_get() if @key is already added */
+static struct au_dykey *dy_gadd(struct hlist_bl_head *hbl, struct au_dykey *key)
+{
+	struct au_dykey *tmp, *found;
+	struct hlist_bl_node *pos;
+	const void *h_op = key->dk_op.dy_hop;
+
+	found = NULL;
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(tmp, pos, hbl, dk_hnode)
+		if (tmp->dk_op.dy_hop == h_op) {
+			if (kref_get_unless_zero(&tmp->dk_kref))
+				found = tmp;
+			break;
+		}
+	if (!found)
+		hlist_bl_add_head(&key->dk_hnode, hbl);
+	hlist_bl_unlock(hbl);
+
+	if (!found)
+		DyPrSym(key);
+	return found;
+}
+
+static void dy_free_rcu(struct rcu_head *rcu)
+{
+	struct au_dykey *key;
+
+	key = container_of(rcu, struct au_dykey, dk_rcu);
+	DyPrSym(key);
+	kfree(key);
+}
+
+static void dy_free(struct kref *kref)
+{
+	struct au_dykey *key;
+	struct hlist_bl_head *hbl;
+
+	key = container_of(kref, struct au_dykey, dk_kref);
+	hbl = dynop + key->dk_op.dy_type;
+	au_hbl_del(&key->dk_hnode, hbl);
+	call_rcu(&key->dk_rcu, dy_free_rcu);
+}
+
+void au_dy_put(struct au_dykey *key)
+{
+	kref_put(&key->dk_kref, dy_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define DyDbgSize(cnt, op)	AuDebugOn(cnt != sizeof(op)/sizeof(void *))
+
+#ifdef CONFIG_AUFS_DEBUG
+#define DyDbgDeclare(cnt)	unsigned int cnt = 0
+#define DyDbgInc(cnt)		do { cnt++; } while (0)
+#else
+#define DyDbgDeclare(cnt)	do {} while (0)
+#define DyDbgInc(cnt)		do {} while (0)
+#endif
+
+#define DySet(func, dst, src, h_op, h_sb) do {				\
+	DyDbgInc(cnt);							\
+	if (h_op->func) {						\
+		if (src.func)						\
+			dst.func = src.func;				\
+		else							\
+			AuDbg("%s %s\n", au_sbtype(h_sb), #func);	\
+	}								\
+} while (0)
+
+#define DySetForce(func, dst, src) do {		\
+	AuDebugOn(!src.func);			\
+	DyDbgInc(cnt);				\
+	dst.func = src.func;			\
+} while (0)
+
+#define DySetAop(func) \
+	DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
+#define DySetAopForce(func) \
+	DySetForce(func, dyaop->da_op, aufs_aop)
+
+static void dy_aop(struct au_dykey *key, const void *h_op,
+		   struct super_block *h_sb __maybe_unused)
+{
+	struct au_dyaop *dyaop = (void *)key;
+	const struct address_space_operations *h_aop = h_op;
+	DyDbgDeclare(cnt);
+
+	AuDbg("%s\n", au_sbtype(h_sb));
+
+	DySetAop(writepage);
+	DySetAopForce(readpage);	/* force */
+	DySetAop(writepages);
+	DySetAop(set_page_dirty);
+	DySetAop(readpages);
+	DySetAop(write_begin);
+	DySetAop(write_end);
+	DySetAop(bmap);
+	DySetAop(invalidatepage);
+	DySetAop(releasepage);
+	DySetAop(freepage);
+	/* this one will be changed according to an aufs mount option */
+	DySetAop(direct_IO);
+	DySetAop(migratepage);
+	DySetAop(isolate_page);
+	DySetAop(putback_page);
+	DySetAop(launder_page);
+	DySetAop(is_partially_uptodate);
+	DySetAop(is_dirty_writeback);
+	DySetAop(error_remove_page);
+	DySetAop(swap_activate);
+	DySetAop(swap_deactivate);
+
+	DyDbgSize(cnt, *h_aop);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void dy_bug(struct kref *kref)
+{
+	BUG();
+}
+
+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
+{
+	struct au_dykey *key, *old;
+	struct hlist_bl_head *hbl;
+	struct op {
+		unsigned int sz;
+		void (*set)(struct au_dykey *key, const void *h_op,
+			    struct super_block *h_sb __maybe_unused);
+	};
+	static const struct op a[] = {
+		[AuDy_AOP] = {
+			.sz	= sizeof(struct au_dyaop),
+			.set	= dy_aop
+		}
+	};
+	const struct op *p;
+
+	hbl = dynop + op->dy_type;
+	key = dy_gfind_get(hbl, op->dy_hop);
+	if (key)
+		goto out_add; /* success */
+
+	p = a + op->dy_type;
+	key = kzalloc(p->sz, GFP_NOFS);
+	if (unlikely(!key)) {
+		key = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	key->dk_op.dy_hop = op->dy_hop;
+	kref_init(&key->dk_kref);
+	p->set(key, op->dy_hop, au_br_sb(br));
+	old = dy_gadd(hbl, key);
+	if (old) {
+		au_kfree_rcu(key);
+		key = old;
+	}
+
+out_add:
+	old = dy_bradd(br, key);
+	if (old)
+		/* its ref-count should never be zero here */
+		kref_put(&key->dk_kref, dy_bug);
+out:
+	return key;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
+ * This behaviour makes open(O_DIRECT) itself return the error instead of the
+ * subsequent I/O. The dio mount option enables O_DIRECT and makes
+ * open(O_DIRECT) always succeed, but the subsequent I/O may then return an
+ * error.
+ * See the aufs manual for details.
+ */
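+/*
+ * dy_adx() below switches ->direct_IO for one key; au_dy_arefresh() applies
+ * the same switch to every existing AOP key, presumably when the dio option
+ * is toggled by a remount.
+ */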
+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
+{
+	if (!do_dx)
+		dyaop->da_op.direct_IO = NULL;
+	else
+		dyaop->da_op.direct_IO = aufs_aop.direct_IO;
+}
+
+static struct au_dyaop *dy_aget(struct au_branch *br,
+				const struct address_space_operations *h_aop,
+				int do_dx)
+{
+	struct au_dyaop *dyaop;
+	struct au_dynop op;
+
+	op.dy_type = AuDy_AOP;
+	op.dy_haop = h_aop;
+	dyaop = (void *)dy_get(&op, br);
+	if (IS_ERR(dyaop))
+		goto out;
+	dy_adx(dyaop, do_dx);
+
+out:
+	return dyaop;
+}
+
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+		struct inode *h_inode)
+{
+	int err, do_dx;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_dyaop *dyaop;
+
+	AuDebugOn(!S_ISREG(h_inode->i_mode));
+	IiMustWriteLock(inode);
+
+	sb = inode->i_sb;
+	br = au_sbr(sb, bindex);
+	do_dx = !!au_opt_test(au_mntflags(sb), DIO);
+	dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
+	err = PTR_ERR(dyaop);
+	if (IS_ERR(dyaop))
+		/* unnecessary to call dy_fput() */
+		goto out;
+
+	err = 0;
+	inode->i_mapping->a_ops = &dyaop->da_op;
+
+out:
+	return err;
+}
+
+/*
+ * Is it safe to replace a_ops while the inode/file is in operation?
+ * Yes, I hope so.
+ */
+int au_dy_irefresh(struct inode *inode)
+{
+	int err;
+	aufs_bindex_t btop;
+	struct inode *h_inode;
+
+	err = 0;
+	if (S_ISREG(inode->i_mode)) {
+		btop = au_ibtop(inode);
+		h_inode = au_h_iptr(inode, btop);
+		err = au_dy_iaop(inode, btop, h_inode);
+	}
+	return err;
+}
+
+void au_dy_arefresh(int do_dx)
+{
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_dykey *key;
+
+	hbl = dynop + AuDy_AOP;
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(key, pos, hbl, dk_hnode)
+		dy_adx((void *)key, do_dx);
+	hlist_bl_unlock(hbl);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __init au_dy_init(void)
+{
+	int i;
+
+	for (i = 0; i < AuDyLast; i++)
+		INIT_HLIST_BL_HEAD(dynop + i);
+}
+
+void au_dy_fin(void)
+{
+	int i;
+
+	for (i = 0; i < AuDyLast; i++)
+		WARN_ON(!hlist_bl_empty(dynop + i));
+}
diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h
new file mode 100644
index 00000000000..c0c7a5485ad
--- /dev/null
+++ b/fs/aufs/dynop.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * dynamically customizable operations (for regular files only)
+ */
+
+#ifndef __AUFS_DYNOP_H__
+#define __AUFS_DYNOP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kref.h>
+
+enum {AuDy_AOP, AuDyLast};
+
+struct au_dynop {
+	int						dy_type;
+	union {
+		const void				*dy_hop;
+		const struct address_space_operations	*dy_haop;
+	};
+};
+
+struct au_dykey {
+	union {
+		struct hlist_bl_node	dk_hnode;
+		struct rcu_head		dk_rcu;
+	};
+	struct au_dynop		dk_op;
+
+	/*
+	 * while this key is in the branch-local array, a kref is held; when
+	 * the branch is removed, the kref is put.
+	 */
+	struct kref		dk_kref;
+};
+
+/* no union here, since the sizes of the op structs differ too much */
+struct au_dyaop {
+	struct au_dykey			da_key;
+	struct address_space_operations	da_op; /* not const */
+};
+/* make sure that a 'struct au_dykey *' can stand for any of the key types */
+static_assert(!offsetof(struct au_dyaop, da_key));
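+/*
+ * dy_get() returns a 'struct au_dykey *' which callers such as dy_aget() cast
+ * directly to the containing type, so da_key has to stay at offset zero.
+ */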
+
+/* ---------------------------------------------------------------------- */
+
+/* dynop.c */
+struct au_branch;
+void au_dy_put(struct au_dykey *key);
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+		struct inode *h_inode);
+int au_dy_irefresh(struct inode *inode);
+void au_dy_arefresh(int do_dio);
+
+void __init au_dy_init(void);
+void au_dy_fin(void);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DYNOP_H__ */
diff --git a/fs/aufs/export.c b/fs/aufs/export.c
new file mode 100644
index 00000000000..842df6f0551
--- /dev/null
+++ b/fs/aufs/export.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * export via nfs
+ */
+
+#include <linux/exportfs.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/random.h>
+#include <linux/writeback.h>
+#include "aufs.h"
+
+union conv {
+#ifdef CONFIG_AUFS_INO_T_64
+	__u32 a[2];
+#else
+	__u32 a[1];
+#endif
+	ino_t ino;
+};
+
+static ino_t decode_ino(__u32 *a)
+{
+	union conv u;
+
+	BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
+	u.a[0] = a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+	u.a[1] = a[1];
+#endif
+	return u.ino;
+}
+
+static void encode_ino(__u32 *a, ino_t ino)
+{
+	union conv u;
+
+	u.ino = ino;
+	a[0] = u.a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+	a[1] = u.a[1];
+#endif
+}
+
+/* NFS file handle */
+enum {
+	Fh_br_id,
+	Fh_sigen,
+#ifdef CONFIG_AUFS_INO_T_64
+	/* support 64bit inode number */
+	Fh_ino1,
+	Fh_ino2,
+	Fh_dir_ino1,
+	Fh_dir_ino2,
+#else
+	Fh_ino1,
+	Fh_dir_ino1,
+#endif
+	Fh_igen,
+	Fh_h_type,
+	Fh_tail,
+
+	Fh_ino = Fh_ino1,
+	Fh_dir_ino = Fh_dir_ino1
+};
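+/*
+ * i.e. the fixed part of an aufs file handle records the branch id, the
+ * superblock generation, the aufs inode numbers of the target and its parent
+ * dir, the inode generation and the lower fs handle type.  Fh_tail is the
+ * length of this fixed part in __u32 units; the lower branch's own handle
+ * presumably follows it.
+ */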
+
+static int au_test_anon(struct dentry *dentry)
+{
+	/* note: read d_flags without d_lock */
+	return !!(dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+int au_test_nfsd(void)
+{
+	int ret;
+	struct task_struct *tsk = current;
+	char comm[sizeof(tsk->comm)];
+
+	ret = 0;
+	if (tsk->flags & PF_KTHREAD) {
+		get_task_comm(comm, tsk);
+		ret = !strcmp(comm, "nfsd");
+	}
+
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+/* inode generation external table */
+
+void au_xigen_inc(struct inode *inode)
+{
+	loff_t pos;
+	ssize_t sz;
+	__u32 igen;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	sb = inode->i_sb;
+	AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+
+	sbinfo = au_sbi(sb);
+	pos = inode->i_ino;
+	pos *= sizeof(igen);
+	igen = inode->i_generation + 1;
+	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
+			 sizeof(igen), &pos);
+	if (sz == sizeof(igen))
+		return; /* success */
+
+	if (unlikely(sz >= 0))
+		AuIOErr("xigen error (%zd)\n", sz);
+}
+
+int au_xigen_new(struct inode *inode)
+{
+	int err;
+	loff_t pos;
+	ssize_t sz;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	err = 0;
+	/* todo: dirty, at mount time */
+	if (inode->i_ino == AUFS_ROOT_INO)
+		goto out;
+	sb = inode->i_sb;
+	SiMustAnyLock(sb);
+	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+		goto out;
+
+	err = -EFBIG;
+	pos = inode->i_ino;
+	if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
+		AuIOErr1("too large i%lld\n", pos);
+		goto out;
+	}
+	pos *= sizeof(inode->i_generation);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	file = sbinfo->si_xigen;
+	BUG_ON(!file);
+
+	if (vfsub_f_size_read(file)
+	    < pos + sizeof(inode->i_generation)) {
+		inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
+		sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
+				 sizeof(inode->i_generation), &pos);
+	} else
+		sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
+				sizeof(inode->i_generation), &pos);
+	if (sz == sizeof(inode->i_generation))
+		goto out; /* success */
+
+	err = sz;
+	if (unlikely(sz >= 0)) {
+		err = -EIO;
+		AuIOErr("xigen error (%zd)\n", sz);
+	}
+
+out:
+	return err;
+}
+
+int au_xigen_set(struct super_block *sb, struct path *path)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	file = au_xino_create2(sb, path, sbinfo->si_xigen);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	err = 0;
+	if (sbinfo->si_xigen)
+		fput(sbinfo->si_xigen);
+	sbinfo->si_xigen = file;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+void au_xigen_clr(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	if (sbinfo->si_xigen) {
+		fput(sbinfo->si_xigen);
+		sbinfo->si_xigen = NULL;
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
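+/*
+ * fast path of decoding: if the aufs inode is still cached and its inode and
+ * dentry generations still match the superblock's, return a dentry alias
+ * whose parent has @dir_ino (any alias will do for a directory).
+ */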
+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
+				    ino_t dir_ino)
+{
+	struct dentry *dentry, *d;
+	struct inode *inode;
+	unsigned int sigen;
+
+	dentry = NULL;
+	inode = ilookup(sb, ino);
+	if (!inode)
+		goto out;
+
+	dentry = ERR_PTR(-ESTALE);
+	sigen = au_sigen(sb);
+	if (unlikely(au_is_bad_inode(inode)
+		     || IS_DEADDIR(inode)
+		     || sigen != au_iigen(inode, NULL)))
+		goto out_iput;
+
+	dentry = NULL;
+	if (!dir_ino || S_ISDIR(inode->i_mode))
+		dentry = d_find_alias(inode);
+	else {
+		spin_lock(&inode->i_lock);
+		hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+			spin_lock(&d->d_lock);
+			if (!au_test_anon(d)
+			    && d_inode(d->d_parent)->i_ino == dir_ino) {
+				dentry = dget_dlock(d);
+				spin_unlock(&d->d_lock);
+				break;
+			}
+			spin_unlock(&d->d_lock);
+		}
+		spin_unlock(&inode->i_lock);
+	}
+	if (unlikely(dentry && au_digen_test(dentry, sigen))) {
+		/* need to refresh */
+		dput(dentry);
+		dentry = NULL;
+	}
+
+out_iput:
+	iput(inode);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: dirty? */
+/* if exportfs_decode_fh() passed a vfsmount*, this vfsmount search would be unnecessary */
+
+struct au_compare_mnt_args {
+	/* input */
+	struct super_block *sb;
+
+	/* output */
+	struct vfsmount *mnt;
+};
+
+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
+{
+	struct au_compare_mnt_args *a = arg;
+
+	if (mnt->mnt_sb != a->sb)
+		return 0;
+	a->mnt = mntget(mnt);
+	return 1;
+}
+
+static struct vfsmount *au_mnt_get(struct super_block *sb)
+{
+	int err;
+	struct path root;
+	struct au_compare_mnt_args args = {
+		.sb = sb
+	};
+
+	get_fs_root(current->fs, &root);
+	rcu_read_lock();
+	err = iterate_mounts(au_compare_mnt, &args, root.mnt);
+	rcu_read_unlock();
+	path_put(&root);
+	AuDebugOn(!err);
+	AuDebugOn(!args.mnt);
+	return args.mnt;
+}
+
+struct au_nfsd_si_lock {
+	unsigned int sigen;
+	aufs_bindex_t bindex, br_id;
+	unsigned char force_lock;
+};
+
+static int si_nfsd_read_lock(struct super_block *sb,
+			     struct au_nfsd_si_lock *nsi_lock)
+{
+	int err;
+	aufs_bindex_t bindex;
+
+	si_read_lock(sb, AuLock_FLUSH);
+
+	/* branch id may be wrapped around */
+	err = 0;
+	bindex = au_br_index(sb, nsi_lock->br_id);
+	if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
+		goto out; /* success */
+
+	err = -ESTALE;
+	bindex = -1;
+	if (!nsi_lock->force_lock)
+		si_read_unlock(sb);
+
+out:
+	nsi_lock->bindex = bindex;
+	return err;
+}
+
+struct find_name_by_ino {
+	struct dir_context ctx;
+	int called, found;
+	ino_t ino;
+	char *name;
+	int namelen;
+};
+
+static int
+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen,
+		 loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino,
+						  ctx);
+
+	a->called++;
+	if (a->ino != ino)
+		return 0;
+
+	memcpy(a->name, name, namelen);
+	a->namelen = namelen;
+	a->found = 1;
+	return 1;
+}
+
+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
+				     struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry, *parent;
+	struct file *file;
+	struct inode *dir;
+	struct find_name_by_ino arg = {
+		.ctx = {
+			.actor = find_name_by_ino
+		}
+	};
+	int err;
+
+	parent = path->dentry;
+	if (nsi_lock)
+		si_read_unlock(parent->d_sb);
+	file = vfsub_dentry_open(path, au_dir_roflags);
+	dentry = (void *)file;
+	if (IS_ERR(file))
+		goto out;
+
+	dentry = ERR_PTR(-ENOMEM);
+	arg.name = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!arg.name))
+		goto out_file;
+	arg.ino = ino;
+	arg.found = 0;
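+	/* iterate the dir repeatedly until the target ino is found or EOF */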
+	do {
+		arg.called = 0;
+		/* smp_mb(); */
+		err = vfsub_iterate_dir(file, &arg.ctx);
+	} while (!err && !arg.found && arg.called);
+	dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_name;
+	/* instead of ENOENT */
+	dentry = ERR_PTR(-ESTALE);
+	if (!arg.found)
+		goto out_name;
+
+	/* do not call vfsub_lkup_one() */
+	dir = d_inode(parent);
+	dentry = vfsub_lookup_one_len_unlocked(arg.name, parent, arg.namelen);
+	AuTraceErrPtr(dentry);
+	if (IS_ERR(dentry))
+		goto out_name;
+	AuDebugOn(au_test_anon(dentry));
+	if (unlikely(d_really_is_negative(dentry))) {
+		dput(dentry);
+		dentry = ERR_PTR(-ENOENT);
+	}
+
+out_name:
+	free_page((unsigned long)arg.name);
+out_file:
+	fput(file);
+out:
+	if (unlikely(nsi_lock
+		     && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
+		if (!IS_ERR(dentry)) {
+			dput(dentry);
+			dentry = ERR_PTR(-ESTALE);
+		}
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
+					ino_t dir_ino,
+					struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry;
+	struct path path;
+
+	if (dir_ino != AUFS_ROOT_INO) {
+		path.dentry = decode_by_ino(sb, dir_ino, 0);
+		dentry = path.dentry;
+		if (!path.dentry || IS_ERR(path.dentry))
+			goto out;
+		AuDebugOn(au_test_anon(path.dentry));
+	} else
+		path.dentry = dget(sb->s_root);
+
+	path.mnt = au_mnt_get(sb);
+	dentry = au_lkup_by_ino(&path, ino, nsi_lock);
+	path_put(&path);
+
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int h_acceptable(void *expv, struct dentry *dentry)
+{
+	return 1;
+}
+
+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
+			   char *buf, int len, struct super_block *sb)
+{
+	char *p;
+	int n;
+	struct path path;
+
+	p = d_path(h_rootpath, buf, len);
+	if (IS_ERR(p))
+		goto out;
+	n = strlen(p);
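+	/* 'n': length of the branch root path, stripped off below */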
+
+	path.mnt = h_rootpath->mnt;
+	path.dentry = h_parent;
+	p = d_path(&path, buf, len);
+	if (IS_ERR(p))
+		goto out;
+	if (n != 1)
+		p += n;
+
+	path.mnt = au_mnt_get(sb);
+	path.dentry = sb->s_root;
+	p = d_path(&path, buf, len - strlen(p));
+	mntput(path.mnt);
+	if (IS_ERR(p))
+		goto out;
+	if (n != 1)
+		p[strlen(p)] = '/';
+
+out:
+	AuTraceErrPtr(p);
+	return p;
+}
+
+static
+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
+			      int fh_len, struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry, *h_parent, *root;
+	struct super_block *h_sb;
+	char *pathname, *p;
+	struct vfsmount *h_mnt;
+	struct au_branch *br;
+	int err;
+	struct path path;
+
+	br = au_sbr(sb, nsi_lock->bindex);
+	h_mnt = au_br_mnt(br);
+	h_sb = h_mnt->mnt_sb;
+	/* todo: call lower fh_to_dentry()? fh_to_parent()? */
+	lockdep_off();
+	h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
+				      fh_len - Fh_tail, fh[Fh_h_type],
+				      h_acceptable, /*context*/NULL);
+	lockdep_on();
+	dentry = h_parent;
+	if (unlikely(!h_parent || IS_ERR(h_parent))) {
+		AuWarn1("%s decode_fh failed, %ld\n",
+			au_sbtype(h_sb), PTR_ERR(h_parent));
+		goto out;
+	}
+	dentry = NULL;
+	if (unlikely(au_test_anon(h_parent))) {
+		AuWarn1("%s decode_fh returned a disconnected dentry\n",
+			au_sbtype(h_sb));
+		goto out_h_parent;
+	}
+
+	dentry = ERR_PTR(-ENOMEM);
+	pathname = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!pathname))
+		goto out_h_parent;
+
+	root = sb->s_root;
+	path.mnt = h_mnt;
+	di_read_lock_parent(root, !AuLock_IR);
+	path.dentry = au_h_dptr(root, nsi_lock->bindex);
+	di_read_unlock(root, !AuLock_IR);
+	p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
+	dentry = (void *)p;
+	if (IS_ERR(p))
+		goto out_pathname;
+
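+	/* drop si_rwsem across the path lookup; re-acquired at out_relock */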
+	si_read_unlock(sb);
+	err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+	dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_relock;
+
+	dentry = ERR_PTR(-ENOENT);
+	AuDebugOn(au_test_anon(path.dentry));
+	if (unlikely(d_really_is_negative(path.dentry)))
+		goto out_path;
+
+	if (ino != d_inode(path.dentry)->i_ino)
+		dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
+	else
+		dentry = dget(path.dentry);
+
+out_path:
+	path_put(&path);
+out_relock:
+	if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
+		if (!IS_ERR(dentry)) {
+			dput(dentry);
+			dentry = ERR_PTR(-ESTALE);
+		}
+out_pathname:
+	free_page((unsigned long)pathname);
+out_h_parent:
+	dput(h_parent);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *
+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
+		  int fh_type)
+{
+	struct dentry *dentry;
+	__u32 *fh = fid->raw;
+	struct au_branch *br;
+	ino_t ino, dir_ino;
+	struct au_nfsd_si_lock nsi_lock = {
+		.force_lock	= 0
+	};
+
+	dentry = ERR_PTR(-ESTALE);
+	/* it should never happen, but the file handle is unreliable */
+	if (unlikely(fh_len < Fh_tail))
+		goto out;
+	nsi_lock.sigen = fh[Fh_sigen];
+	nsi_lock.br_id = fh[Fh_br_id];
+
+	/* branch id may be wrapped around */
+	br = NULL;
+	if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
+		goto out;
+	nsi_lock.force_lock = 1;
+
+	/* is this inode still cached? */
+	ino = decode_ino(fh + Fh_ino);
+	/* it should never happen */
+	if (unlikely(ino == AUFS_ROOT_INO))
+		goto out_unlock;
+
+	dir_ino = decode_ino(fh + Fh_dir_ino);
+	dentry = decode_by_ino(sb, ino, dir_ino);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (dentry)
+		goto accept;
+
+	/* is the parent dir cached? */
+	br = au_sbr(sb, nsi_lock.bindex);
+	au_lcnt_inc(&br->br_nfiles);
+	dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (dentry)
+		goto accept;
+
+	/* lookup path */
+	dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (unlikely(!dentry))
+		/* todo?: make it ESTALE */
+		goto out_unlock;
+
+accept:
+	if (!au_digen_test(dentry, au_sigen(sb))
+	    && d_inode(dentry)->i_generation == fh[Fh_igen])
+		goto out_unlock; /* success */
+
+	dput(dentry);
+	dentry = ERR_PTR(-ESTALE);
+out_unlock:
+	if (br)
+		au_lcnt_dec(&br->br_nfiles);
+	si_read_unlock(sb);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+#if 0 /* reserved for future use */
+/* support subtreecheck option */
+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+					int fh_len, int fh_type)
+{
+	struct dentry *parent;
+	__u32 *fh = fid->raw;
+	ino_t dir_ino;
+
+	dir_ino = decode_ino(fh + Fh_dir_ino);
+	parent = decode_by_ino(sb, dir_ino, 0);
+	if (IS_ERR(parent))
+		goto out;
+	if (!parent)
+		parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
+					dir_ino, fh, fh_len);
+
+out:
+	AuTraceErrPtr(parent);
+	return parent;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+			  struct inode *dir)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb, *h_sb;
+	struct dentry *dentry, *parent, *h_parent;
+	struct inode *h_dir;
+	struct au_branch *br;
+
+	err = -ENOSPC;
+	if (unlikely(*max_len <= Fh_tail)) {
+		AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
+		goto out;
+	}
+
+	err = FILEID_ROOT;
+	if (inode->i_ino == AUFS_ROOT_INO) {
+		AuDebugOn(inode->i_ino != AUFS_ROOT_INO);
+		goto out;
+	}
+
+	h_parent = NULL;
+	sb = inode->i_sb;
+	err = si_read_lock(sb, AuLock_FLUSH);
+	if (unlikely(err))
+		goto out;
+
+#ifdef CONFIG_AUFS_DEBUG
+	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+		AuWarn1("NFS-exporting requires xino\n");
+#endif
+	err = -EIO;
+	parent = NULL;
+	ii_read_lock_child(inode);
+	bindex = au_ibtop(inode);
+	if (!dir) {
+		dentry = d_find_any_alias(inode);
+		if (unlikely(!dentry))
+			goto out_unlock;
+		AuDebugOn(au_test_anon(dentry));
+		parent = dget_parent(dentry);
+		dput(dentry);
+		if (unlikely(!parent))
+			goto out_unlock;
+		if (d_really_is_positive(parent))
+			dir = d_inode(parent);
+	}
+
+	ii_read_lock_parent(dir);
+	h_dir = au_h_iptr(dir, bindex);
+	ii_read_unlock(dir);
+	if (unlikely(!h_dir))
+		goto out_parent;
+	h_parent = d_find_any_alias(h_dir);
+	if (unlikely(!h_parent))
+		goto out_hparent;
+
+	err = -EPERM;
+	br = au_sbr(sb, bindex);
+	h_sb = au_br_sb(br);
+	if (unlikely(!h_sb->s_export_op)) {
+		AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
+		goto out_hparent;
+	}
+
+	fh[Fh_br_id] = br->br_id;
+	fh[Fh_sigen] = au_sigen(sb);
+	encode_ino(fh + Fh_ino, inode->i_ino);
+	encode_ino(fh + Fh_dir_ino, dir->i_ino);
+	fh[Fh_igen] = inode->i_generation;
+
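+	/* the branch fs encodes its own handle after the aufs fields */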
+	*max_len -= Fh_tail;
+	fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
+					   max_len,
+					   /*connectable or subtreecheck*/0);
+	err = fh[Fh_h_type];
+	*max_len += Fh_tail;
+	/* todo: macros? */
+	if (err != FILEID_INVALID)
+		err = 99;
+	else
+		AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
+
+out_hparent:
+	dput(h_parent);
+out_parent:
+	dput(parent);
+out_unlock:
+	ii_read_unlock(inode);
+	si_read_unlock(sb);
+out:
+	if (unlikely(err < 0))
+		err = FILEID_INVALID;
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_commit_metadata(struct inode *inode)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb;
+	struct inode *h_inode;
+	int (*f)(struct inode *inode);
+
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	ii_write_lock_child(inode);
+	bindex = au_ibtop(inode);
+	AuDebugOn(bindex < 0);
+	h_inode = au_h_iptr(inode, bindex);
+
+	f = h_inode->i_sb->s_export_op->commit_metadata;
+	if (f)
+		err = f(h_inode);
+	else {
+		struct writeback_control wbc = {
+			.sync_mode	= WB_SYNC_ALL,
+			.nr_to_write	= 0 /* metadata only */
+		};
+
+		err = sync_inode(h_inode, &wbc);
+	}
+
+	au_cpup_attr_timesizes(inode);
+	ii_write_unlock(inode);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct export_operations aufs_export_op = {
+	.fh_to_dentry		= aufs_fh_to_dentry,
+	/* .fh_to_parent	= aufs_fh_to_parent, */
+	.encode_fh		= aufs_encode_fh,
+	.commit_metadata	= aufs_commit_metadata
+};
+
+void au_export_init(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+	__u32 u;
+
+	BUILD_BUG_ON_MSG(IS_BUILTIN(CONFIG_AUFS_FS)
+			 && IS_MODULE(CONFIG_EXPORTFS),
+			 AUFS_NAME ": unsupported configuration "
+			 "CONFIG_EXPORTFS=m and CONFIG_AUFS_FS=y");
+
+	sb->s_export_op = &aufs_export_op;
+	sbinfo = au_sbi(sb);
+	sbinfo->si_xigen = NULL;
+	get_random_bytes(&u, sizeof(u));
+	BUILD_BUG_ON(sizeof(u) != sizeof(int));
+	atomic_set(&sbinfo->si_xigen_next, u);
+}
diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c
new file mode 100644
index 00000000000..9894e2124bd
--- /dev/null
+++ b/fs/aufs/f_op.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file and vm operations
+ */
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct dentry *dentry, *h_dentry;
+	struct au_finfo *finfo;
+	struct inode *h_inode;
+
+	FiMustWriteLock(file);
+
+	err = 0;
+	dentry = file->f_path.dentry;
+	AuDebugOn(IS_ERR_OR_NULL(dentry));
+	finfo = au_fi(file);
+	memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
+	atomic_set(&finfo->fi_mmapped, 0);
+	bindex = au_dbtop(dentry);
+	if (!h_file) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+		if (unlikely(err))
+			goto out;
+		h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+		if (IS_ERR(h_file)) {
+			err = PTR_ERR(h_file);
+			goto out;
+		}
+	} else {
+		h_dentry = h_file->f_path.dentry;
+		err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+		if (unlikely(err))
+			goto out;
+		/* br ref is already inc-ed */
+	}
+
+	if ((flags & __O_TMPFILE)
+	    && !(flags & O_EXCL)) {
+		h_inode = file_inode(h_file);
+		spin_lock(&h_inode->i_lock);
+		h_inode->i_state |= I_LINKABLE;
+		spin_unlock(&h_inode->i_lock);
+	}
+	au_set_fbtop(file, bindex);
+	au_set_h_fptr(file, bindex, h_file);
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+
+out:
+	return err;
+}
+
+static int aufs_open_nondir(struct inode *inode __maybe_unused,
+			    struct file *file)
+{
+	int err;
+	struct super_block *sb;
+	struct au_do_open_args args = {
+		.open	= au_do_open_nondir
+	};
+
+	AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n",
+	      file, vfsub_file_flags(file), file->f_mode);
+
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	err = au_do_open(file, &args);
+	si_read_unlock(sb);
+	return err;
+}
+
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
+{
+	struct au_finfo *finfo;
+	aufs_bindex_t bindex;
+
+	finfo = au_fi(file);
+	au_hbl_del(&finfo->fi_hlist,
+		   &au_sbi(file->f_path.dentry->d_sb)->si_files);
+	bindex = finfo->fi_btop;
+	if (bindex >= 0)
+		au_set_h_fptr(file, bindex, NULL);
+
+	au_finfo_fin(file);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
+{
+	int err;
+	struct file *h_file;
+
+	err = 0;
+	h_file = au_hf_top(file);
+	if (h_file)
+		err = vfsub_flush(h_file, id);
+	return err;
+}
+
+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
+{
+	return au_do_flush(file, id, au_do_flush_nondir);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * read and write functions acquire [fdi]_rwsem once, but release them before
+ * acquiring mmap_sem, in order to avoid a race condition with mmap(2).
+ * Releasing these aufs rwsems should be safe: since si_rwsem is kept, no
+ * branch-management and no harmful copy-up can happen. Copy-up may still
+ * happen in the read functions after [fdi]_rwsem are released, but it should
+ * be harmless.
+ */
+
+/* Callers should call au_read_post() or fput() in the end */
+struct file *au_read_pre(struct file *file, int keep_fi, unsigned int lsc)
+{
+	struct file *h_file;
+	int err;
+
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0, lsc);
+	if (!err) {
+		di_read_unlock(file->f_path.dentry, AuLock_IR);
+		h_file = au_hf_top(file);
+		get_file(h_file);
+		if (!keep_fi)
+			fi_read_unlock(file);
+	} else
+		h_file = ERR_PTR(err);
+
+	return h_file;
+}
+
+static void au_read_post(struct inode *inode, struct file *h_file)
+{
+	/* update without a lock; I don't think it is a problem */
+	fsstack_copy_attr_atime(inode, file_inode(h_file));
+	fput(h_file);
+}
+
+struct au_write_pre {
+	/* input */
+	unsigned int lsc;
+
+	/* output */
+	blkcnt_t blks;
+	aufs_bindex_t btop;
+};
+
+/*
+ * returns with iinfo write-locked
+ * callers should call au_write_post(), or iinfo_write_unlock() + fput(), at
+ * the end
+ */
+static struct file *au_write_pre(struct file *file, int do_ready,
+				 struct au_write_pre *wpre)
+{
+	struct file *h_file;
+	struct dentry *dentry;
+	int err;
+	unsigned int lsc;
+	struct au_pin pin;
+
+	lsc = 0;
+	if (wpre)
+		lsc = wpre->lsc;
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1, lsc);
+	h_file = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_path.dentry;
+	if (do_ready) {
+		err = au_ready_to_write(file, -1, &pin);
+		if (unlikely(err)) {
+			h_file = ERR_PTR(err);
+			di_write_unlock(dentry);
+			goto out_fi;
+		}
+	}
+
+	di_downgrade_lock(dentry, /*flags*/0);
+	if (wpre)
+		wpre->btop = au_fbtop(file);
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	if (wpre)
+		wpre->blks = file_inode(h_file)->i_blocks;
+	if (do_ready)
+		au_unpin(&pin);
+	di_read_unlock(dentry, /*flags*/0);
+
+out_fi:
+	fi_write_unlock(file);
+out:
+	return h_file;
+}
+
+static void au_write_post(struct inode *inode, struct file *h_file,
+			  struct au_write_pre *wpre, ssize_t written)
+{
+	struct inode *h_inode;
+
+	au_cpup_attr_timesizes(inode);
+	AuDebugOn(au_ibtop(inode) != wpre->btop);
+	h_inode = file_inode(h_file);
+	inode->i_mode = h_inode->i_mode;
+	ii_write_unlock(inode);
+	/* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */
+	if (written > 0)
+		au_fhsm_wrote(inode->i_sb, wpre->btop,
+			      /*force*/h_inode->i_blocks > wpre->blks);
+	fput(h_file);
+}
+
+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
+			 loff_t *ppos)
+{
+	ssize_t err;
+	struct inode *inode;
+	struct file *h_file;
+	struct super_block *sb;
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	/* filedata may be obsoleted by a concurrent copyup; not a problem */
+	err = vfsub_read_u(h_file, buf, count, ppos);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	au_read_post(inode, h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/*
+ * todo: very ugly
+ * it safely acquires both i_mutex and si_rwsem for read.
+ * if the plink maintenance mode continues forever (which is the real problem),
+ * this may loop forever.
+ */
+static void au_mtx_and_read_lock(struct inode *inode)
+{
+	int err;
+	struct super_block *sb = inode->i_sb;
+
+	while (1) {
+		inode_lock(inode);
+		err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+		if (!err)
+			break;
+		inode_unlock(inode);
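+		/* plink maintenance is running; wait for it and then retry */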
+		si_read_lock(sb, AuLock_NOPLMW);
+		si_read_unlock(sb);
+	}
+}
+
+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
+			  size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	struct au_write_pre wpre;
+	struct inode *inode;
+	struct file *h_file;
+	char __user *buf = (char __user *)ubuf;
+
+	inode = file_inode(file);
+	au_mtx_and_read_lock(inode);
+
+	wpre.lsc = 0;
+	h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = vfsub_write_u(h_file, buf, count, ppos);
+	au_write_post(inode, h_file, &wpre, err);
+
+out:
+	si_read_unlock(inode->i_sb);
+	inode_unlock(inode);
+	return err;
+}
+
+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio,
+			  struct iov_iter *iov_iter)
+{
+	ssize_t err;
+	struct file *file;
+	ssize_t (*iter)(struct kiocb *, struct iov_iter *);
+
+	err = security_file_permission(h_file, rw);
+	if (unlikely(err))
+		goto out;
+
+	err = -ENOSYS;	/* the branch doesn't have its ->(read|write)_iter() */
+	iter = NULL;
+	if (rw == MAY_READ)
+		iter = h_file->f_op->read_iter;
+	else if (rw == MAY_WRITE)
+		iter = h_file->f_op->write_iter;
+
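+	/* temporarily redirect the kiocb to the branch file */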
+	file = kio->ki_filp;
+	kio->ki_filp = h_file;
+	if (iter) {
+		lockdep_off();
+		err = iter(kio, iov_iter);
+		lockdep_on();
+	} else
+		/* currently there is no such fs */
+		WARN_ON_ONCE(1);
+	kio->ki_filp = file;
+
+out:
+	return err;
+}
+
+static ssize_t aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+	ssize_t err;
+	struct file *file, *h_file;
+	struct inode *inode;
+	struct super_block *sb;
+
+	file = kio->ki_filp;
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/1, /*lsc*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	if (au_test_loopback_kthread()) {
+		au_warn_loopback(h_file->f_path.dentry->d_sb);
+		if (file->f_mapping != h_file->f_mapping) {
+			file->f_mapping = h_file->f_mapping;
+			smp_mb(); /* unnecessary? */
+		}
+	}
+	fi_read_unlock(file);
+
+	err = au_do_iter(h_file, MAY_READ, kio, iov_iter);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	au_read_post(inode, h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+	ssize_t err;
+	struct au_write_pre wpre;
+	struct inode *inode;
+	struct file *file, *h_file;
+
+	file = kio->ki_filp;
+	inode = file_inode(file);
+	au_mtx_and_read_lock(inode);
+
+	wpre.lsc = 0;
+	h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter);
+	au_write_post(inode, h_file, &wpre, err);
+
+out:
+	si_read_unlock(inode->i_sb);
+	inode_unlock(inode);
+	return err;
+}
+
+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
+				struct pipe_inode_info *pipe, size_t len,
+				unsigned int flags)
+{
+	ssize_t err;
+	struct file *h_file;
+	struct inode *inode;
+	struct super_block *sb;
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	au_read_post(inode, h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+static ssize_t
+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
+		  size_t len, unsigned int flags)
+{
+	ssize_t err;
+	struct au_write_pre wpre;
+	struct inode *inode;
+	struct file *h_file;
+
+	inode = file_inode(file);
+	au_mtx_and_read_lock(inode);
+
+	wpre.lsc = 0;
+	h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
+	au_write_post(inode, h_file, &wpre, err);
+
+out:
+	si_read_unlock(inode->i_sb);
+	inode_unlock(inode);
+	return err;
+}
+
+static long aufs_fallocate(struct file *file, int mode, loff_t offset,
+			   loff_t len)
+{
+	long err;
+	struct au_write_pre wpre;
+	struct inode *inode;
+	struct file *h_file;
+
+	inode = file_inode(file);
+	au_mtx_and_read_lock(inode);
+
+	wpre.lsc = 0;
+	h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	lockdep_off();
+	err = vfs_fallocate(h_file, mode, offset, len);
+	lockdep_on();
+	au_write_post(inode, h_file, &wpre, /*written*/1);
+
+out:
+	si_read_unlock(inode->i_sb);
+	inode_unlock(inode);
+	return err;
+}
+
+static ssize_t aufs_copy_file_range(struct file *src, loff_t src_pos,
+				    struct file *dst, loff_t dst_pos,
+				    size_t len, unsigned int flags)
+{
+	ssize_t err;
+	struct au_write_pre wpre;
+	enum { SRC, DST };
+	struct {
+		struct inode *inode;
+		struct file *h_file;
+		struct super_block *h_sb;
+	} a[2];
+#define a_src	a[SRC]
+#define a_dst	a[DST]
+
+	err = -EINVAL;
+	a_src.inode = file_inode(src);
+	if (unlikely(!S_ISREG(a_src.inode->i_mode)))
+		goto out;
+	a_dst.inode = file_inode(dst);
+	if (unlikely(!S_ISREG(a_dst.inode->i_mode)))
+		goto out;
+
+	au_mtx_and_read_lock(a_dst.inode);
+	/*
+	 * in order to match the order in di_write_lock2_{child,parent}(),
+	 * use f_path.dentry for this comparison.
+	 */
+	if (src->f_path.dentry < dst->f_path.dentry) {
+		a_src.h_file = au_read_pre(src, /*keep_fi*/1, AuLsc_FI_1);
+		err = PTR_ERR(a_src.h_file);
+		if (IS_ERR(a_src.h_file))
+			goto out_si;
+
+		wpre.lsc = AuLsc_FI_2;
+		a_dst.h_file = au_write_pre(dst, /*do_ready*/1, &wpre);
+		err = PTR_ERR(a_dst.h_file);
+		if (IS_ERR(a_dst.h_file)) {
+			au_read_post(a_src.inode, a_src.h_file);
+			goto out_si;
+		}
+	} else {
+		wpre.lsc = AuLsc_FI_1;
+		a_dst.h_file = au_write_pre(dst, /*do_ready*/1, &wpre);
+		err = PTR_ERR(a_dst.h_file);
+		if (IS_ERR(a_dst.h_file))
+			goto out_si;
+
+		a_src.h_file = au_read_pre(src, /*keep_fi*/1, AuLsc_FI_2);
+		err = PTR_ERR(a_src.h_file);
+		if (IS_ERR(a_src.h_file)) {
+			au_write_post(a_dst.inode, a_dst.h_file, &wpre,
+				      /*written*/0);
+			goto out_si;
+		}
+	}
+
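+	/* a direct copy requires both lower files on the same branch fs */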
+	err = -EXDEV;
+	a_src.h_sb = file_inode(a_src.h_file)->i_sb;
+	a_dst.h_sb = file_inode(a_dst.h_file)->i_sb;
+	if (unlikely(a_src.h_sb != a_dst.h_sb)) {
+		AuDbgFile(src);
+		AuDbgFile(dst);
+		goto out_file;
+	}
+
+	err = vfsub_copy_file_range(a_src.h_file, src_pos, a_dst.h_file,
+				    dst_pos, len, flags);
+
+out_file:
+	au_write_post(a_dst.inode, a_dst.h_file, &wpre, err);
+	fi_read_unlock(src);
+	au_read_post(a_src.inode, a_src.h_file);
+out_si:
+	si_read_unlock(a_dst.inode->i_sb);
+	inode_unlock(a_dst.inode);
+out:
+	return err;
+#undef a_src
+#undef a_dst
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * The locking order around current->mmap_sem.
+ * - in most and regular cases
+ *   file I/O syscall -- aufs_read() or something
+ *	-- si_rwsem for read -- mmap_sem
+ *	(Note that [fdi]i_rwsem are released before mmap_sem).
+ * - in mmap case
+ *   mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
+ * This AB-BA order is definitely bad, but is not a problem since "si_rwsem for
+ * read" allows multiple processes to acquire it and [fdi]i_rwsem are not held
+ * in file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
+ * It means that when aufs acquires si_rwsem for write, the process should never
+ * acquire mmap_sem.
+ *
+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not a
+ * problem either, since a directory can never be mmap-ed.
+ * The same reasoning applies to aufs_readlink() too.
+ */
+
+#if 0 /* stop calling security_file_mmap() */
+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
+#define AuConv_VM_PROT(f, b)	_calc_vm_trans(f, VM_##b, PROT_##b)
+
+static unsigned long au_arch_prot_conv(unsigned long flags)
+{
+	/* currently ppc64 only */
+#ifdef CONFIG_PPC64
+	/* cf. linux/arch/powerpc/include/asm/mman.h */
+	AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
+	return AuConv_VM_PROT(flags, SAO);
+#else
+	AuDebugOn(arch_calc_vm_prot_bits(-1));
+	return 0;
+#endif
+}
+
+static unsigned long au_prot_conv(unsigned long flags)
+{
+	return AuConv_VM_PROT(flags, READ)
+		| AuConv_VM_PROT(flags, WRITE)
+		| AuConv_VM_PROT(flags, EXEC)
+		| au_arch_prot_conv(flags);
+}
+
+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
+#define AuConv_VM_MAP(f, b)	_calc_vm_trans(f, VM_##b, MAP_##b)
+
+static unsigned long au_flag_conv(unsigned long flags)
+{
+	return AuConv_VM_MAP(flags, GROWSDOWN)
+		| AuConv_VM_MAP(flags, DENYWRITE)
+		| AuConv_VM_MAP(flags, LOCKED);
+}
+#endif
+
+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int err;
+	const unsigned char wlock
+		= (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
+	struct super_block *sb;
+	struct file *h_file;
+	struct inode *inode;
+
+	AuDbgVmRegion(file, vma);
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	lockdep_off();
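+	/* see the note above on the mmap_sem/si_rwsem ordering */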
+	si_read_lock(sb, AuLock_NOPLMW);
+
+	h_file = au_write_pre(file, wlock, /*wpre*/NULL);
+	lockdep_on();
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = 0;
+	au_set_mmapped(file);
+	au_vm_file_reset(vma, h_file);
+	/*
+	 * we cannot call security_mmap_file() here since it may acquire
+	 * mmap_sem or i_mutex.
+	 *
+	 * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags),
+	 *			 au_flag_conv(vma->vm_flags));
+	 */
+	if (!err)
+		err = call_mmap(h_file, vma);
+	if (!err) {
+		au_vm_prfile_set(vma, file);
+		fsstack_copy_attr_atime(inode, file_inode(h_file));
+		goto out_fput; /* success */
+	}
+	au_unset_mmapped(file);
+	au_vm_file_reset(vma, file);
+
+out_fput:
+	lockdep_off();
+	ii_write_unlock(inode);
+	lockdep_on();
+	fput(h_file);
+out:
+	lockdep_off();
+	si_read_unlock(sb);
+	lockdep_on();
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
+			     int datasync)
+{
+	int err;
+	struct au_write_pre wpre;
+	struct inode *inode;
+	struct file *h_file;
+
+	err = 0; /* -EBADF; */ /* posix? */
+	if (unlikely(!(file->f_mode & FMODE_WRITE)))
+		goto out;
+
+	inode = file_inode(file);
+	au_mtx_and_read_lock(inode);
+
+	wpre.lsc = 0;
+	h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out_unlock;
+
+	err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+	au_write_post(inode, h_file, &wpre, /*written*/0);
+
+out_unlock:
+	si_read_unlock(inode->i_sb);
+	inode_unlock(inode);
+out:
+	return err;
+}
+
+static int aufs_fasync(int fd, struct file *file, int flag)
+{
+	int err;
+	struct file *h_file;
+	struct super_block *sb;
+
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	if (h_file->f_op->fasync)
+		err = h_file->f_op->fasync(fd, h_file, flag);
+	fput(h_file); /* instead of au_read_post() */
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+static int aufs_setfl(struct file *file, unsigned long arg)
+{
+	int err;
+	struct file *h_file;
+	struct super_block *sb;
+
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	/* stop calling h_file->fasync */
+	arg |= vfsub_file_flags(file) & FASYNC;
+	err = setfl(/*unused fd*/-1, h_file, arg);
+	fput(h_file); /* instead of au_read_post() */
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no one supports this operation, currently */
+#if 0 /* reserved for future use */
+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
+			     size_t len, loff_t *pos, int more)
+{
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_file_fop = {
+	.owner		= THIS_MODULE,
+
+	.llseek		= default_llseek,
+
+	.read		= aufs_read,
+	.write		= aufs_write,
+	.read_iter	= aufs_read_iter,
+	.write_iter	= aufs_write_iter,
+
+#ifdef CONFIG_AUFS_POLL
+	.poll		= aufs_poll,
+#endif
+	.unlocked_ioctl	= aufs_ioctl_nondir,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= aufs_compat_ioctl_nondir,
+#endif
+	.mmap		= aufs_mmap,
+	.open		= aufs_open_nondir,
+	.flush		= aufs_flush_nondir,
+	.release	= aufs_release_nondir,
+	.fsync		= aufs_fsync_nondir,
+	.fasync		= aufs_fasync,
+	/* .sendpage	= aufs_sendpage, */
+	.setfl		= aufs_setfl,
+	.splice_write	= aufs_splice_write,
+	.splice_read	= aufs_splice_read,
+#if 0 /* reserved for future use */
+	.aio_splice_write = aufs_aio_splice_write,
+	.aio_splice_read  = aufs_aio_splice_read,
+#endif
+	.fallocate	= aufs_fallocate,
+	.copy_file_range = aufs_copy_file_range
+};
diff --git a/fs/aufs/fhsm.c b/fs/aufs/fhsm.c
new file mode 100644
index 00000000000..9cef93b4299
--- /dev/null
+++ b/fs/aufs/fhsm.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * File-based Hierarchy Storage Management
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
+	AuDebugOn(!fhsm);
+	return fhsm->fhsm_bottom;
+}
+
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex)
+{
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
+	AuDebugOn(!fhsm);
+	fhsm->fhsm_bottom = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br)
+{
+	struct au_br_fhsm *bf;
+
+	bf = br->br_fhsm;
+	MtxMustLock(&bf->bf_lock);
+
+	return !bf->bf_readable
+		|| time_after(jiffies,
+			      bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_fhsm_notify(struct super_block *sb, int val)
+{
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
+	if (au_fhsm_pid(fhsm)
+	    && atomic_read(&fhsm->fhsm_readable) != -1) {
+		atomic_set(&fhsm->fhsm_readable, val);
+		if (val)
+			wake_up(&fhsm->fhsm_wqh);
+	}
+}
+
+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex,
+			struct aufs_stfs *rstfs, int do_lock, int do_notify)
+{
+	int err;
+	struct au_branch *br;
+	struct au_br_fhsm *bf;
+
+	br = au_sbr(sb, bindex);
+	AuDebugOn(au_br_rdonly(br));
+	bf = br->br_fhsm;
+	AuDebugOn(!bf);
+
+	if (do_lock)
+		mutex_lock(&bf->bf_lock);
+	else
+		MtxMustLock(&bf->bf_lock);
+
+	/* sb->s_root for NFS is unreliable */
+	err = au_br_stfs(br, &bf->bf_stfs);
+	if (unlikely(err)) {
+		AuErr1("FHSM failed (%d), b%d, ignored.\n", err, bindex);
+		goto out;
+	}
+
+	bf->bf_jiffy = jiffies;
+	bf->bf_readable = 1;
+	if (do_notify)
+		au_fhsm_notify(sb, /*val*/1);
+	if (rstfs)
+		*rstfs = bf->bf_stfs;
+
+out:
+	if (do_lock)
+		mutex_unlock(&bf->bf_lock);
+	au_fhsm_notify(sb, /*val*/1);
+
+	return err;
+}
+
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+	struct au_branch *br;
+	struct au_br_fhsm *bf;
+
+	AuDbg("b%d, force %d\n", bindex, force);
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
+	if (!au_ftest_si(sbinfo, FHSM)
+	    || fhsm->fhsm_bottom == bindex)
+		return;
+
+	br = au_sbr(sb, bindex);
+	bf = br->br_fhsm;
+	AuDebugOn(!bf);
+	mutex_lock(&bf->bf_lock);
+	if (force
+	    || au_fhsm_pid(fhsm)
+	    || au_fhsm_test_jiffy(sbinfo, br))
+		err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0,
+				  /*do_notify*/1);
+	mutex_unlock(&bf->bf_lock);
+}
+
+void au_fhsm_wrote_all(struct super_block *sb, int force)
+{
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	/* exclude the bottom */
+	bbot = au_fhsm_bottom(sb);
+	for (bindex = 0; bindex < bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_fhsm(br->br_perm))
+			au_fhsm_wrote(sb, bindex, force);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+static __poll_t au_fhsm_poll(struct file *file, struct poll_table_struct *wait)
+{
+	__poll_t mask;
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	mask = 0;
+	sbinfo = file->private_data;
+	fhsm = &sbinfo->si_fhsm;
+	poll_wait(file, &fhsm->fhsm_wqh, wait);
+	if (atomic_read(&fhsm->fhsm_readable))
+		mask = EPOLLIN /* | EPOLLRDNORM */;
+
+	if (!mask)
+		AuDbg("mask 0x%x\n", mask);
+	return mask;
+}
+
+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr,
+			      struct aufs_stfs *stfs, __s16 brid)
+{
+	int err;
+
+	err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs));
+	if (!err)
+		err = __put_user(brid, &stbr->brid);
+	if (unlikely(err))
+		err = -EFAULT;
+
+	return err;
+}
+
+static ssize_t au_fhsm_do_read(struct super_block *sb,
+			       struct aufs_stbr __user *stbr, size_t count)
+{
+	ssize_t err;
+	int nstbr;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+	struct au_br_fhsm *bf;
+
+	/* except the bottom branch */
+	err = 0;
+	nstbr = 0;
+	bbot = au_fhsm_bottom(sb);
+	for (bindex = 0; !err && bindex < bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (!au_br_fhsm(br->br_perm))
+			continue;
+
+		bf = br->br_fhsm;
+		mutex_lock(&bf->bf_lock);
+		if (bf->bf_readable) {
+			err = -EFAULT;
+			if (count >= sizeof(*stbr))
+				err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs,
+							  br->br_id);
+			if (!err) {
+				bf->bf_readable = 0;
+				count -= sizeof(*stbr);
+				nstbr++;
+			}
+		}
+		mutex_unlock(&bf->bf_lock);
+	}
+	if (!err)
+		err = sizeof(*stbr) * nstbr;
+
+	return err;
+}
+
+static ssize_t au_fhsm_read(struct file *file, char __user *buf, size_t count,
+			   loff_t *pos)
+{
+	ssize_t err;
+	int readable;
+	aufs_bindex_t nfhsm, bindex, bbot;
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+	struct au_branch *br;
+	struct super_block *sb;
+
+	err = 0;
+	sbinfo = file->private_data;
+	fhsm = &sbinfo->si_fhsm;
+need_data:
+	spin_lock_irq(&fhsm->fhsm_wqh.lock);
+	if (!atomic_read(&fhsm->fhsm_readable)) {
+		if (vfsub_file_flags(file) & O_NONBLOCK)
+			err = -EAGAIN;
+		else
+			err = wait_event_interruptible_locked_irq
+				(fhsm->fhsm_wqh,
+				 atomic_read(&fhsm->fhsm_readable));
+	}
+	spin_unlock_irq(&fhsm->fhsm_wqh.lock);
+	if (unlikely(err))
+		goto out;
+
+	/* sb may already be dead */
+	au_rw_read_lock(&sbinfo->si_rwsem);
+	readable = atomic_read(&fhsm->fhsm_readable);
+	if (readable > 0) {
+		sb = sbinfo->si_sb;
+		AuDebugOn(!sb);
+		/* exclude the bottom branch */
+		nfhsm = 0;
+		bbot = au_fhsm_bottom(sb);
+		for (bindex = 0; bindex < bbot; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (au_br_fhsm(br->br_perm))
+				nfhsm++;
+		}
+		err = -EMSGSIZE;
+		if (nfhsm * sizeof(struct aufs_stbr) <= count) {
+			atomic_set(&fhsm->fhsm_readable, 0);
+			err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf,
+					     count);
+		}
+	}
+	au_rw_read_unlock(&sbinfo->si_rwsem);
+	if (!readable)
+		goto need_data;
+
+out:
+	return err;
+}
+
+static int au_fhsm_release(struct inode *inode, struct file *file)
+{
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	/* sb may already be dead */
+	sbinfo = file->private_data;
+	fhsm = &sbinfo->si_fhsm;
+	spin_lock(&fhsm->fhsm_spin);
+	fhsm->fhsm_pid = 0;
+	spin_unlock(&fhsm->fhsm_spin);
+	kobject_put(&sbinfo->si_kobj);
+
+	return 0;
+}
+
+static const struct file_operations au_fhsm_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+	.read		= au_fhsm_read,
+	.poll		= au_fhsm_poll,
+	.release	= au_fhsm_release
+};
+
+int au_fhsm_fd(struct super_block *sb, int oflags)
+{
+	int err, fd;
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+
+	err = -EPERM;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = -EINVAL;
+	if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK)))
+		goto out;
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
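+	/* only a single process may own the FHSM fd at a time */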
+	spin_lock(&fhsm->fhsm_spin);
+	if (!fhsm->fhsm_pid)
+		fhsm->fhsm_pid = current->pid;
+	else
+		err = -EBUSY;
+	spin_unlock(&fhsm->fhsm_spin);
+	if (unlikely(err))
+		goto out;
+
+	oflags |= O_RDONLY;
+	/* oflags |= FMODE_NONOTIFY; */
+	fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags);
+	err = fd;
+	if (unlikely(fd < 0))
+		goto out_pid;
+
+	/* succeed regardless of the 'fhsm' status */
+	kobject_get(&sbinfo->si_kobj);
+	si_noflush_read_lock(sb);
+	if (au_ftest_si(sbinfo, FHSM))
+		au_fhsm_wrote_all(sb, /*force*/0);
+	si_read_unlock(sb);
+	goto out; /* success */
+
+out_pid:
+	spin_lock(&fhsm->fhsm_spin);
+	fhsm->fhsm_pid = 0;
+	spin_unlock(&fhsm->fhsm_spin);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_fhsm_br_alloc(struct au_branch *br)
+{
+	int err;
+
+	err = 0;
+	br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS);
+	if (br->br_fhsm)
+		au_br_fhsm_init(br->br_fhsm);
+	else
+		err = -ENOMEM;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_fhsm_fin(struct super_block *sb)
+{
+	au_fhsm_notify(sb, /*val*/-1);
+}
+
+void au_fhsm_init(struct au_sbinfo *sbinfo)
+{
+	struct au_fhsm *fhsm;
+
+	fhsm = &sbinfo->si_fhsm;
+	spin_lock_init(&fhsm->fhsm_spin);
+	init_waitqueue_head(&fhsm->fhsm_wqh);
+	atomic_set(&fhsm->fhsm_readable, 0);
+	fhsm->fhsm_expire
+		= msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC);
+	fhsm->fhsm_bottom = -1;
+}
+
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec)
+{
+	sbinfo->si_fhsm.fhsm_expire
+		= msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo)
+{
+	unsigned int u;
+
+	if (!au_ftest_si(sbinfo, FHSM))
+		return;
+
+	u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC;
+	if (u != AUFS_FHSM_CACHE_DEF_SEC)
+		seq_printf(seq, ",fhsm_sec=%u", u);
+}
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
new file mode 100644
index 00000000000..b0075b57d8b
--- /dev/null
+++ b/fs/aufs/file.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * handling file/dir, and address_space operation
+ */
+
+#ifdef CONFIG_AUFS_DEBUG
+#include <linux/migrate.h>
+#endif
+#include <linux/pagemap.h>
+#include "aufs.h"
+
+/* drop flags for writing */
+unsigned int au_file_roflags(unsigned int flags)
+{
+	flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
+	flags |= O_RDONLY | O_NOATIME;
+	return flags;
+}
+
+/* common functions to regular file and dir */
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+		       struct file *file, int force_wr)
+{
+	struct file *h_file;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct path h_path;
+	int err;
+
+	/* a race condition can happen between open and unlink/rmdir */
+	h_file = ERR_PTR(-ENOENT);
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (au_test_nfsd() && (!h_dentry || d_is_negative(h_dentry)))
+		goto out;
+	h_inode = d_inode(h_dentry);
+	spin_lock(&h_dentry->d_lock);
+	err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
+		/* || !d_inode(dentry)->i_nlink */
+		;
+	spin_unlock(&h_dentry->d_lock);
+	if (unlikely(err))
+		goto out;
+
+	sb = dentry->d_sb;
+	br = au_sbr(sb, bindex);
+	err = au_br_test_oflag(flags, br);
+	h_file = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	/* drop flags for writing */
+	if (au_test_ro(sb, bindex, d_inode(dentry))) {
+		if (force_wr && !(flags & O_WRONLY))
+			force_wr = 0;
+		flags = au_file_roflags(flags);
+		if (force_wr) {
+			h_file = ERR_PTR(-EROFS);
+			flags = au_file_roflags(flags);
+			if (unlikely(vfsub_native_ro(h_inode)
+				     || IS_APPEND(h_inode)))
+				goto out;
+			flags &= ~O_ACCMODE;
+			flags |= O_WRONLY;
+		}
+	}
+	flags &= ~O_CREAT;
+	au_lcnt_inc(&br->br_nfiles);
+	h_path.dentry = h_dentry;
+	h_path.mnt = au_br_mnt(br);
+	h_file = vfsub_dentry_open(&h_path, flags);
+	if (IS_ERR(h_file))
+		goto out_br;
+
+	if (flags & __FMODE_EXEC) {
+		err = deny_write_access(h_file);
+		if (unlikely(err)) {
+			fput(h_file);
+			h_file = ERR_PTR(err);
+			goto out_br;
+		}
+	}
+	fsnotify_open(h_file);
+	goto out; /* success */
+
+out_br:
+	au_lcnt_dec(&br->br_nfiles);
+out:
+	return h_file;
+}
+
+static int au_cmoo(struct dentry *dentry)
+{
+	int err, cmoo, matched;
+	unsigned int udba;
+	struct path h_path;
+	struct au_pin pin;
+	struct au_cp_generic cpg = {
+		.dentry	= dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= -1,
+		.pin	= &pin,
+		.flags	= AuCpup_DTIME | AuCpup_HOPEN
+	};
+	struct inode *delegated;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct au_fhsm *fhsm;
+	pid_t pid;
+	struct au_branch *br;
+	struct dentry *parent;
+	struct au_hinode *hdir;
+
+	DiMustWriteLock(dentry);
+	IiMustWriteLock(d_inode(dentry));
+
+	err = 0;
+	if (IS_ROOT(dentry))
+		goto out;
+	cpg.bsrc = au_dbtop(dentry);
+	if (!cpg.bsrc)
+		goto out;
+
+	sb = dentry->d_sb;
+	sbinfo = au_sbi(sb);
+	fhsm = &sbinfo->si_fhsm;
+	pid = au_fhsm_pid(fhsm);
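+	/* skip cmoo for the FHSM daemon itself or its child */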
+	rcu_read_lock();
+	matched = (pid
+		   && (current->pid == pid
+		       || rcu_dereference(current->real_parent)->pid == pid));
+	rcu_read_unlock();
+	if (matched)
+		goto out;
+
+	br = au_sbr(sb, cpg.bsrc);
+	cmoo = au_br_cmoo(br->br_perm);
+	if (!cmoo)
+		goto out;
+	if (!d_is_reg(dentry))
+		cmoo &= AuBrAttr_COO_ALL;
+	if (!cmoo)
+		goto out;
+
+	parent = dget_parent(dentry);
+	di_write_lock_parent(parent);
+	err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
+	cpg.bdst = err;
+	if (unlikely(err < 0)) {
+		err = 0;	/* there is no upper writable branch */
+		goto out_dgrade;
+	}
+	AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);
+
+	/* do not respect the coo attrib for the target branch */
+	err = au_cpup_dirs(dentry, cpg.bdst);
+	if (unlikely(err))
+		goto out_dgrade;
+
+	di_downgrade_lock(parent, AuLock_IR);
+	udba = au_opt_udba(sb);
+	err = au_pin(&pin, dentry, cpg.bdst, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out_parent;
+
+	err = au_sio_cpup_simple(&cpg);
+	au_unpin(&pin);
+	if (unlikely(err))
+		goto out_parent;
+	if (!(cmoo & AuBrWAttr_MOO))
+		goto out_parent; /* success */
+
+	err = au_pin(&pin, dentry, cpg.bsrc, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out_parent;
+
+	h_path.mnt = au_br_mnt(br);
+	h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
+	hdir = au_hi(d_inode(parent), cpg.bsrc);
+	delegated = NULL;
+	err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
+	au_unpin(&pin);
+	/* todo: keep h_dentry or not? */
+	if (unlikely(err == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal unlink\n");
+		iput(delegated);
+	}
+	if (unlikely(err)) {
+		pr_err("unlink %pd after coo failed (%d), ignored\n",
+		       dentry, err);
+		err = 0;
+	}
+	goto out_parent; /* success */
+
+out_dgrade:
+	di_downgrade_lock(parent, AuLock_IR);
+out_parent:
+	di_read_unlock(parent, AuLock_IR);
+	dput(parent);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_do_open(struct file *file, struct au_do_open_args *args)
+{
+	int err, aopen = args->aopen;
+	struct dentry *dentry;
+	struct au_finfo *finfo;
+
+	if (!aopen)
+		err = au_finfo_init(file, args->fidir);
+	else {
+		lockdep_off();
+		err = au_finfo_init(file, args->fidir);
+		lockdep_on();
+	}
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_path.dentry;
+	AuDebugOn(IS_ERR_OR_NULL(dentry));
+	di_write_lock_child(dentry);
+	err = au_cmoo(dentry);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (!err) {
+		if (!aopen)
+			err = args->open(file, vfsub_file_flags(file), NULL);
+		else {
+			lockdep_off();
+			err = args->open(file, vfsub_file_flags(file),
+					 args->h_file);
+			lockdep_on();
+		}
+	}
+	di_read_unlock(dentry, AuLock_IR);
+
+	finfo = au_fi(file);
+	if (!err) {
+		finfo->fi_file = file;
+		au_hbl_add(&finfo->fi_hlist,
+			   &au_sbi(file->f_path.dentry->d_sb)->si_files);
+	}
+	if (!aopen)
+		fi_write_unlock(file);
+	else {
+		lockdep_off();
+		fi_write_unlock(file);
+		lockdep_on();
+	}
+	if (unlikely(err)) {
+		finfo->fi_hdir = NULL;
+		au_finfo_fin(file);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_reopen_nondir(struct file *file)
+{
+	int err;
+	aufs_bindex_t btop;
+	struct dentry *dentry;
+	struct au_branch *br;
+	struct file *h_file, *h_file_tmp;
+
+	dentry = file->f_path.dentry;
+	btop = au_dbtop(dentry);
+	br = au_sbr(dentry->d_sb, btop);
+	h_file_tmp = NULL;
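+	/* keep the old top h_file, if any, to revert on failure */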
+	if (au_fbtop(file) == btop) {
+		h_file = au_hf_top(file);
+		if (file->f_mode == h_file->f_mode)
+			return 0; /* success */
+		h_file_tmp = h_file;
+		get_file(h_file_tmp);
+		au_lcnt_inc(&br->br_nfiles);
+		au_set_h_fptr(file, btop, NULL);
+	}
+	AuDebugOn(au_fi(file)->fi_hdir);
+	/*
+	 * this can happen:
+	 * the file exists on both the rw and ro branches
+	 * open --> dbtop and fbtop are both 0
+	 * prepend a new rw branch; the old "rw" becomes ro
+	 * remove rw/file
+	 * delete the top branch; the old "rw" becomes rw again
+	 *	--> dbtop is 1, fbtop is still 0
+	 * write --> fbtop is 0 but dbtop is 1
+	 */
+	/* AuDebugOn(au_fbtop(file) < btop); */
+
+	h_file = au_h_open(dentry, btop, vfsub_file_flags(file) & ~O_TRUNC,
+			   file, /*force_wr*/0);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file)) {
+		if (h_file_tmp) {
+			/* revert */
+			au_set_h_fptr(file, btop, h_file_tmp);
+			h_file_tmp = NULL;
+		}
+		goto out; /* todo: close all? */
+	}
+
+	err = 0;
+	au_set_fbtop(file, btop);
+	au_set_h_fptr(file, btop, h_file);
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+
+out:
+	if (h_file_tmp) {
+		fput(h_file_tmp);
+		au_lcnt_dec(&br->br_nfiles);
+	}
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
+			struct dentry *hi_wh)
+{
+	int err;
+	aufs_bindex_t btop;
+	struct au_dinfo *dinfo;
+	struct dentry *h_dentry;
+	struct au_hdentry *hdp;
+
+	dinfo = au_di(file->f_path.dentry);
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	btop = dinfo->di_btop;
+	dinfo->di_btop = btgt;
+	hdp = au_hdentry(dinfo, btgt);
+	h_dentry = hdp->hd_dentry;
+	hdp->hd_dentry = hi_wh;
+	err = au_reopen_nondir(file);
+	hdp->hd_dentry = h_dentry;
+	dinfo->di_btop = btop;
+
+	return err;
+}
+
+static int au_ready_to_write_wh(struct file *file, loff_t len,
+				aufs_bindex_t bcpup, struct au_pin *pin)
+{
+	int err;
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry, *hi_wh;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_path.dentry,
+		.bdst	= bcpup,
+		.bsrc	= -1,
+		.len	= len,
+		.pin	= pin
+	};
+
+	au_update_dbtop(cpg.dentry);
+	inode = d_inode(cpg.dentry);
+	h_inode = NULL;
+	if (au_dbtop(cpg.dentry) <= bcpup
+	    && au_dbbot(cpg.dentry) >= bcpup) {
+		h_dentry = au_h_dptr(cpg.dentry, bcpup);
+		if (h_dentry && d_is_positive(h_dentry))
+			h_inode = d_inode(h_dentry);
+	}
+	hi_wh = au_hi_wh(inode, bcpup);
+	if (!hi_wh && !h_inode)
+		err = au_sio_cpup_wh(&cpg, file);
+	else
+		/* already copied-up after unlink */
+		err = au_reopen_wh(file, bcpup, hi_wh);
+
+	if (!err
+	    && (inode->i_nlink > 1
+		|| (inode->i_state & I_LINKABLE))
+	    && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
+		au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));
+
+	return err;
+}
+
+/*
+ * prepare the @file for writing.
+ */
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
+{
+	int err;
+	aufs_bindex_t dbtop;
+	struct dentry *parent;
+	struct inode *inode;
+	struct super_block *sb;
+	struct file *h_file;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_path.dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= len,
+		.pin	= pin,
+		.flags	= AuCpup_DTIME
+	};
+
+	sb = cpg.dentry->d_sb;
+	inode = d_inode(cpg.dentry);
+	cpg.bsrc = au_fbtop(file);
+	err = au_test_ro(sb, cpg.bsrc, inode);
+	if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
+		err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
+			     /*flags*/0);
+		goto out;
+	}
+
+	/* need to cpup or reopen */
+	parent = dget_parent(cpg.dentry);
+	di_write_lock_parent(parent);
+	err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+	cpg.bdst = err;
+	if (unlikely(err < 0))
+		goto out_dgrade;
+	err = 0;
+
+	if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
+		err = au_cpup_dirs(cpg.dentry, cpg.bdst);
+		if (unlikely(err))
+			goto out_dgrade;
+	}
+
+	err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out_dgrade;
+
+	dbtop = au_dbtop(cpg.dentry);
+	if (dbtop <= cpg.bdst)
+		cpg.bsrc = cpg.bdst;
+
+	if (dbtop <= cpg.bdst		/* just reopen */
+	    || !d_unhashed(cpg.dentry)	/* copyup and reopen */
+		) {
+		h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0);
+		if (IS_ERR(h_file))
+			err = PTR_ERR(h_file);
+		else {
+			di_downgrade_lock(parent, AuLock_IR);
+			if (dbtop > cpg.bdst)
+				err = au_sio_cpup_simple(&cpg);
+			if (!err)
+				err = au_reopen_nondir(file);
+			au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
+		}
+	} else {			/* copyup as wh and reopen */
+		/*
+		 * since writable hfsplus branch is not supported,
+		 * h_open_pre/post() are unnecessary.
+		 */
+		err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
+		di_downgrade_lock(parent, AuLock_IR);
+	}
+
+	if (!err) {
+		au_pin_set_parent_lflag(pin, /*lflag*/0);
+		goto out_dput; /* success */
+	}
+	au_unpin(pin);
+	goto out_unlock;
+
+out_dgrade:
+	di_downgrade_lock(parent, AuLock_IR);
+out_unlock:
+	di_read_unlock(parent, AuLock_IR);
+out_dput:
+	dput(parent);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
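+/*
+ * common ->flush() helper: take the sb/file/inode read locks, call the
+ * type-specific @flush, and copy the timestamps/size from the lower inode
+ * back to the aufs inode.
+ */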
+int au_do_flush(struct file *file, fl_owner_t id,
+		int (*flush)(struct file *file, fl_owner_t id))
+{
+	int err;
+	struct super_block *sb;
+	struct inode *inode;
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	si_noflush_read_lock(sb);
+	fi_read_lock(file);
+	ii_read_lock_child(inode);
+
+	err = flush(file, id);
+	au_cpup_attr_timesizes(inode);
+
+	ii_read_unlock(inode);
+	fi_read_unlock(file);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
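+/*
+ * the aufs inode's top branch no longer matches the branch this file was
+ * opened on; copy the file up when necessary, or reopen it on the entry
+ * already copied-up after an unlink (hi_wh), clearing *need_reopen in the
+ * latter case.
+ */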
+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
+{
+	int err;
+	struct au_pin pin;
+	struct au_finfo *finfo;
+	struct dentry *parent, *hi_wh;
+	struct inode *inode;
+	struct super_block *sb;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_path.dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= -1,
+		.pin	= &pin,
+		.flags	= AuCpup_DTIME
+	};
+
+	FiMustWriteLock(file);
+
+	err = 0;
+	finfo = au_fi(file);
+	sb = cpg.dentry->d_sb;
+	inode = d_inode(cpg.dentry);
+	cpg.bdst = au_ibtop(inode);
+	if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
+		goto out;
+
+	parent = dget_parent(cpg.dentry);
+	if (au_test_ro(sb, cpg.bdst, inode)) {
+		di_read_lock_parent(parent, !AuLock_IR);
+		err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+		cpg.bdst = err;
+		di_read_unlock(parent, !AuLock_IR);
+		if (unlikely(err < 0))
+			goto out_parent;
+		err = 0;
+	}
+
+	di_read_lock_parent(parent, AuLock_IR);
+	hi_wh = au_hi_wh(inode, cpg.bdst);
+	if (!S_ISDIR(inode->i_mode)
+	    && au_opt_test(au_mntflags(sb), PLINK)
+	    && au_plink_test(inode)
+	    && !d_unhashed(cpg.dentry)
+	    && cpg.bdst < au_dbtop(cpg.dentry)) {
+		err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
+		if (unlikely(err))
+			goto out_unlock;
+
+		/* always superio. */
+		err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+		if (!err) {
+			err = au_sio_cpup_simple(&cpg);
+			au_unpin(&pin);
+		}
+	} else if (hi_wh) {
+		/* already copied-up after unlink */
+		err = au_reopen_wh(file, cpg.bdst, hi_wh);
+		*need_reopen = 0;
+	}
+
+out_unlock:
+	di_read_unlock(parent, AuLock_IR);
+out_parent:
+	dput(parent);
+out:
+	return err;
+}
+
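+/*
+ * after a branch add/del, re-order the lower file array of an opened dir so
+ * that each entry sits at its branch's current index, drop the entries whose
+ * branch is gone, and recompute fi_btop and fd_bbot.
+ */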
+static void au_do_refresh_dir(struct file *file)
+{
+	aufs_bindex_t bindex, bbot, new_bindex, brid;
+	struct au_hfile *p, tmp, *q;
+	struct au_finfo *finfo;
+	struct super_block *sb;
+	struct au_fidir *fidir;
+
+	FiMustWriteLock(file);
+
+	sb = file->f_path.dentry->d_sb;
+	finfo = au_fi(file);
+	fidir = finfo->fi_hdir;
+	AuDebugOn(!fidir);
+	p = fidir->fd_hfile + finfo->fi_btop;
+	brid = p->hf_br->br_id;
+	bbot = fidir->fd_bbot;
+	for (bindex = finfo->fi_btop; bindex <= bbot; bindex++, p++) {
+		if (!p->hf_file)
+			continue;
+
+		new_bindex = au_br_index(sb, p->hf_br->br_id);
+		if (new_bindex == bindex)
+			continue;
+		if (new_bindex < 0) {
+			au_set_h_fptr(file, bindex, NULL);
+			continue;
+		}
+
+		/* swap the two lower file entries, and loop again */
+		q = fidir->fd_hfile + new_bindex;
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hf_file) {
+			bindex--;
+			p--;
+		}
+	}
+
+	p = fidir->fd_hfile;
+	if (!au_test_mmapped(file) && !d_unlinked(file->f_path.dentry)) {
+		bbot = au_sbbot(sb);
+		for (finfo->fi_btop = 0; finfo->fi_btop <= bbot;
+		     finfo->fi_btop++, p++)
+			if (p->hf_file) {
+				if (file_inode(p->hf_file))
+					break;
+				au_hfput(p, /*execed*/0);
+			}
+	} else {
+		bbot = au_br_index(sb, brid);
+		for (finfo->fi_btop = 0; finfo->fi_btop < bbot;
+		     finfo->fi_btop++, p++)
+			if (p->hf_file)
+				au_hfput(p, /*execed*/0);
+		bbot = au_sbbot(sb);
+	}
+
+	p = fidir->fd_hfile + bbot;
+	for (fidir->fd_bbot = bbot; fidir->fd_bbot >= finfo->fi_btop;
+	     fidir->fd_bbot--, p--)
+		if (p->hf_file) {
+			if (file_inode(p->hf_file))
+				break;
+			au_hfput(p, /*execed*/0);
+		}
+	AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
+}
+
+/*
+ * after branch manipulation, refresh the file.
+ */
+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
+{
+	int err, need_reopen, nbr;
+	aufs_bindex_t bbot, bindex;
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct au_finfo *finfo;
+	struct au_hfile *hfile;
+
+	dentry = file->f_path.dentry;
+	sb = dentry->d_sb;
+	nbr = au_sbbot(sb) + 1;
+	finfo = au_fi(file);
+	if (!finfo->fi_hdir) {
+		hfile = &finfo->fi_htop;
+		AuDebugOn(!hfile->hf_file);
+		bindex = au_br_index(sb, hfile->hf_br->br_id);
+		AuDebugOn(bindex < 0);
+		if (bindex != finfo->fi_btop)
+			au_set_fbtop(file, bindex);
+	} else {
+		err = au_fidir_realloc(finfo, nbr, /*may_shrink*/0);
+		if (unlikely(err))
+			goto out;
+		au_do_refresh_dir(file);
+	}
+
+	err = 0;
+	need_reopen = 1;
+	if (!au_test_mmapped(file))
+		err = au_file_refresh_by_inode(file, &need_reopen);
+	if (finfo->fi_hdir)
+		/* harmless if err */
+		au_fidir_realloc(finfo, nbr, /*may_shrink*/1);
+	if (!err && need_reopen && !d_unlinked(dentry))
+		err = reopen(file);
+	if (!err) {
+		au_update_figen(file);
+		goto out; /* success */
+	}
+
+	/* error, close all lower files */
+	if (finfo->fi_hdir) {
+		bbot = au_fbbot_dir(file);
+		for (bindex = au_fbtop(file); bindex <= bbot; bindex++)
+			au_set_h_fptr(file, bindex, NULL);
+	}
+
+out:
+	return err;
+}
+
+/* common function to regular file and dir */
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+			  int wlock, unsigned int fi_lsc)
+{
+	int err;
+	unsigned int sigen, figen;
+	aufs_bindex_t btop;
+	unsigned char pseudo_link;
+	struct dentry *dentry;
+	struct inode *inode;
+
+	err = 0;
+	dentry = file->f_path.dentry;
+	inode = d_inode(dentry);
+	sigen = au_sigen(dentry->d_sb);
+	fi_write_lock_nested(file, fi_lsc);
+	figen = au_figen(file);
+	if (!fi_lsc)
+		di_write_lock_child(dentry);
+	else
+		di_write_lock_child2(dentry);
+	btop = au_dbtop(dentry);
+	pseudo_link = (btop != au_ibtop(inode));
+	if (sigen == figen && !pseudo_link && au_fbtop(file) == btop) {
+		if (!wlock) {
+			di_downgrade_lock(dentry, AuLock_IR);
+			fi_downgrade_lock(file);
+		}
+		goto out; /* success */
+	}
+
+	AuDbg("sigen %d, figen %d\n", sigen, figen);
+	if (au_digen_test(dentry, sigen)) {
+		err = au_reval_dpath(dentry, sigen);
+		AuDebugOn(!err && au_digen_test(dentry, sigen));
+	}
+
+	if (!err)
+		err = refresh_file(file, reopen);
+	if (!err) {
+		if (!wlock) {
+			di_downgrade_lock(dentry, AuLock_IR);
+			fi_downgrade_lock(file);
+		}
+	} else {
+		di_write_unlock(dentry);
+		fi_write_unlock(file);
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* cf. aufs_nopage() */
+/* for madvise(2) */
+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
+{
+	unlock_page(page);
+	return 0;
+}
+
+/* it will never be called, but necessary to support O_DIRECT */
+static ssize_t aufs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{ BUG(); return 0; }
+
+/* they will never be called. */
+#ifdef CONFIG_AUFS_DEBUG
+static int aufs_write_begin(struct file *file, struct address_space *mapping,
+			    loff_t pos, unsigned len, unsigned flags,
+			    struct page **pagep, void **fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_write_end(struct file *file, struct address_space *mapping,
+			  loff_t pos, unsigned len, unsigned copied,
+			  struct page *page, void *fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
+{ AuUnsupport(); return 0; }
+
+static int aufs_set_page_dirty(struct page *page)
+{ AuUnsupport(); return 0; }
+static void aufs_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length)
+{ AuUnsupport(); }
+static int aufs_releasepage(struct page *page, gfp_t gfp)
+{ AuUnsupport(); return 0; }
+#if 0 /* called by memory compaction regardless of the file */
+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
+			    struct page *page, enum migrate_mode mode)
+{ AuUnsupport(); return 0; }
+#endif
+static bool aufs_isolate_page(struct page *page, isolate_mode_t mode)
+{ AuUnsupport(); return true; }
+static void aufs_putback_page(struct page *page)
+{ AuUnsupport(); }
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+				      unsigned long from,
+				      unsigned long count)
+{ AuUnsupport(); return 0; }
+static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
+				    bool *writeback)
+{ AuUnsupport(); }
+static int aufs_error_remove_page(struct address_space *mapping,
+				  struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
+			      sector_t *span)
+{ AuUnsupport(); return 0; }
+static void aufs_swap_deactivate(struct file *file)
+{ AuUnsupport(); }
+#endif /* CONFIG_AUFS_DEBUG */
+
+const struct address_space_operations aufs_aop = {
+	.readpage		= aufs_readpage,
+	.direct_IO		= aufs_direct_IO,
+#ifdef CONFIG_AUFS_DEBUG
+	.writepage		= aufs_writepage,
+	/* no writepages, because of writepage */
+	.set_page_dirty		= aufs_set_page_dirty,
+	/* no readpages, because of readpage */
+	.write_begin		= aufs_write_begin,
+	.write_end		= aufs_write_end,
+	/* no bmap, no block device */
+	.invalidatepage		= aufs_invalidatepage,
+	.releasepage		= aufs_releasepage,
+	/* is fallback_migrate_page ok? */
+	/* .migratepage		= aufs_migratepage, */
+	.isolate_page		= aufs_isolate_page,
+	.putback_page		= aufs_putback_page,
+	.launder_page		= aufs_launder_page,
+	.is_partially_uptodate	= aufs_is_partially_uptodate,
+	.is_dirty_writeback	= aufs_is_dirty_writeback,
+	.error_remove_page	= aufs_error_remove_page,
+	.swap_activate		= aufs_swap_activate,
+	.swap_deactivate	= aufs_swap_deactivate
+#endif /* CONFIG_AUFS_DEBUG */
+};
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
new file mode 100644
index 00000000000..d124d9c8216
--- /dev/null
+++ b/fs/aufs/file.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file operations
+ */
+
+#ifndef __AUFS_FILE_H__
+#define __AUFS_FILE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm_types.h>
+#include <linux/poll.h>
+#include "rwsem.h"
+
+struct au_branch;
+struct au_hfile {
+	struct file		*hf_file;
+	struct au_branch	*hf_br;
+};
+
+struct au_vdir;
+struct au_fidir {
+	aufs_bindex_t		fd_bbot;
+	aufs_bindex_t		fd_nent;
+	struct au_vdir		*fd_vdir_cache;
+	struct au_hfile		fd_hfile[];
+};
+
+static inline int au_fidir_sz(int nent)
+{
+	AuDebugOn(nent < 0);
+	return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
+}
+
+struct au_finfo {
+	atomic_t		fi_generation;
+
+	struct au_rwsem		fi_rwsem;
+	aufs_bindex_t		fi_btop;
+
+	/* do not union them */
+	struct {				/* for non-dir */
+		struct au_hfile			fi_htop;
+		atomic_t			fi_mmapped;
+	};
+	struct au_fidir		*fi_hdir;	/* for dir only */
+
+	struct hlist_bl_node	fi_hlist;
+	struct file		*fi_file;	/* very ugly */
+	struct rcu_head		rcu;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* file.c */
+extern const struct address_space_operations aufs_aop;
+unsigned int au_file_roflags(unsigned int flags);
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+		       struct file *file, int force_wr);
+struct au_do_open_args {
+	int		aopen;
+	int		(*open)(struct file *file, int flags,
+				struct file *h_file);
+	struct au_fidir	*fidir;
+	struct file	*h_file;
+};
+int au_do_open(struct file *file, struct au_do_open_args *args);
+int au_reopen_nondir(struct file *file);
+struct au_pin;
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+			  int wlock, unsigned int fi_lsc);
+int au_do_flush(struct file *file, fl_owner_t id,
+		int (*flush)(struct file *file, fl_owner_t id));
+
+/* poll.c */
+#ifdef CONFIG_AUFS_POLL
+__poll_t aufs_poll(struct file *file, struct poll_table_struct *pt);
+#endif
+
+#ifdef CONFIG_AUFS_BR_HFSPLUS
+/* hfsplus.c */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+			   int force_wr);
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+		    struct file *h_file);
+#else
+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry,
+       aufs_bindex_t bindex, int force_wr)
+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
+	   struct file *h_file);
+#endif
+
+/* f_op.c */
+extern const struct file_operations aufs_file_fop;
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file);
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
+struct file *au_read_pre(struct file *file, int keep_fi, unsigned int lsc);
+
+/* finfo.c */
+void au_hfput(struct au_hfile *hf, int execed);
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
+		   struct file *h_file);
+
+void au_update_figen(struct file *file);
+struct au_fidir *au_fidir_alloc(struct super_block *sb);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink);
+
+void au_fi_init_once(void *_fi);
+void au_finfo_fin(struct file *file);
+int au_finfo_init(struct file *file, struct au_fidir *fidir);
+
+/* ioctl.c */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+			   unsigned long arg);
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+			      unsigned long arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_finfo *au_fi(struct file *file)
+{
+	return file->private_data;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define fi_read_lock(f)	au_rw_read_lock(&au_fi(f)->fi_rwsem)
+#define fi_write_lock(f)	au_rw_write_lock(&au_fi(f)->fi_rwsem)
+#define fi_read_trylock(f)	au_rw_read_trylock(&au_fi(f)->fi_rwsem)
+#define fi_write_trylock(f)	au_rw_write_trylock(&au_fi(f)->fi_rwsem)
+/*
+#define fi_read_trylock_nested(f) \
+	au_rw_read_trylock_nested(&au_fi(f)->fi_rwsem)
+#define fi_write_trylock_nested(f) \
+	au_rw_write_trylock_nested(&au_fi(f)->fi_rwsem)
+*/
+
+#define fi_read_unlock(f)	au_rw_read_unlock(&au_fi(f)->fi_rwsem)
+#define fi_write_unlock(f)	au_rw_write_unlock(&au_fi(f)->fi_rwsem)
+#define fi_downgrade_lock(f)	au_rw_dgrade_lock(&au_fi(f)->fi_rwsem)
+
+/* lock subclass for finfo */
+enum {
+	AuLsc_FI_1,
+	AuLsc_FI_2
+};
+
+static inline void fi_read_lock_nested(struct file *f, unsigned int lsc)
+{
+	au_rw_read_lock_nested(&au_fi(f)->fi_rwsem, lsc);
+}
+
+static inline void fi_write_lock_nested(struct file *f, unsigned int lsc)
+{
+	au_rw_write_lock_nested(&au_fi(f)->fi_rwsem, lsc);
+}
+
+/*
+ * fi_read_lock_1, fi_write_lock_1,
+ * fi_read_lock_2, fi_write_lock_2
+ */
+#define AuReadLockFunc(name) \
+static inline void fi_read_lock_##name(struct file *f) \
+{ fi_read_lock_nested(f, AuLsc_FI_##name); }
+
+#define AuWriteLockFunc(name) \
+static inline void fi_write_lock_##name(struct file *f) \
+{ fi_write_lock_nested(f, AuLsc_FI_##name); }
+
+#define AuRWLockFuncs(name) \
+	AuReadLockFunc(name) \
+	AuWriteLockFunc(name)
+
+AuRWLockFuncs(1);
+AuRWLockFuncs(2);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define FiMustNoWaiters(f)	AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
+#define FiMustAnyLock(f)	AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
+#define FiMustWriteLock(f)	AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: hard/soft set? */
+static inline aufs_bindex_t au_fbtop(struct file *file)
+{
+	FiMustAnyLock(file);
+	return au_fi(file)->fi_btop;
+}
+
+static inline aufs_bindex_t au_fbbot_dir(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_bbot;
+}
+
+static inline struct au_vdir *au_fvdir_cache(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_vdir_cache;
+}
+
+static inline void au_set_fbtop(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustWriteLock(file);
+	au_fi(file)->fi_btop = bindex;
+}
+
+static inline void au_set_fbbot_dir(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustWriteLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	au_fi(file)->fi_hdir->fd_bbot = bindex;
+}
+
+static inline void au_set_fvdir_cache(struct file *file,
+				      struct au_vdir *vdir_cache)
+{
+	FiMustWriteLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
+}
+
+static inline struct file *au_hf_top(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_htop.hf_file;
+}
+
+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
+}
+
+/* todo: memory barrier? */
+static inline unsigned int au_figen(struct file *f)
+{
+	return atomic_read(&au_fi(f)->fi_generation);
+}
+
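+/*
+ * fi_mmapped counts the mmap users of this file; zero means "not mmapped"
+ * (cf. au_test_mmapped()), so skip the value zero when the counter wraps.
+ */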
+static inline void au_set_mmapped(struct file *f)
+{
+	if (atomic_inc_return(&au_fi(f)->fi_mmapped))
+		return;
+	pr_warn("fi_mmapped wrapped around\n");
+	while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
+		;
+}
+
+static inline void au_unset_mmapped(struct file *f)
+{
+	atomic_dec(&au_fi(f)->fi_mmapped);
+}
+
+static inline int au_test_mmapped(struct file *f)
+{
+	return atomic_read(&au_fi(f)->fi_mmapped);
+}
+
+/* customize vma->vm_file */
+
+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
+				       struct file *file)
+{
+	struct file *f;
+
+	f = vma->vm_file;
+	get_file(file);
+	vma->vm_file = file;
+	fput(f);
+}
+
+#ifdef CONFIG_MMU
+#define AuDbgVmRegion(file, vma) do {} while (0)
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+				    struct file *file)
+{
+	au_do_vm_file_reset(vma, file);
+}
+#else
+#define AuDbgVmRegion(file, vma) \
+	AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+				    struct file *file)
+{
+	struct file *f;
+
+	au_do_vm_file_reset(vma, file);
+	f = vma->vm_region->vm_file;
+	get_file(file);
+	vma->vm_region->vm_file = file;
+	fput(f);
+}
+#endif /* CONFIG_MMU */
+
+/* handle vma->vm_prfile */
+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
+				    struct file *file)
+{
+	get_file(file);
+	vma->vm_prfile = file;
+#ifndef CONFIG_MMU
+	get_file(file);
+	vma->vm_region->vm_prfile = file;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FILE_H__ */
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
new file mode 100644
index 00000000000..25077670a50
--- /dev/null
+++ b/fs/aufs/finfo.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * file private data
+ */
+
+#include "aufs.h"
+
+void au_hfput(struct au_hfile *hf, int execed)
+{
+	if (execed)
+		allow_write_access(hf->hf_file);
+	fput(hf->hf_file);
+	hf->hf_file = NULL;
+	au_lcnt_dec(&hf->hf_br->br_nfiles);
+	hf->hf_br = NULL;
+}
+
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
+{
+	struct au_finfo *finfo = au_fi(file);
+	struct au_hfile *hf;
+	struct au_fidir *fidir;
+
+	fidir = finfo->fi_hdir;
+	if (!fidir) {
+		AuDebugOn(finfo->fi_btop != bindex);
+		hf = &finfo->fi_htop;
+	} else
+		hf = fidir->fd_hfile + bindex;
+
+	if (hf && hf->hf_file)
+		au_hfput(hf, vfsub_file_execed(file));
+	if (val) {
+		FiMustWriteLock(file);
+		AuDebugOn(IS_ERR_OR_NULL(file->f_path.dentry));
+		hf->hf_file = val;
+		hf->hf_br = au_sbr(file->f_path.dentry->d_sb, bindex);
+	}
+}
+
+void au_update_figen(struct file *file)
+{
+	atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_path.dentry));
+	/* smp_mb(); */ /* atomic_set */
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_fidir *au_fidir_alloc(struct super_block *sb)
+{
+	struct au_fidir *fidir;
+	int nbr;
+
+	nbr = au_sbbot(sb) + 1;
+	if (nbr < 2)
+		nbr = 2; /* initially allocate for 2 branches */
+	fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
+	if (fidir) {
+		fidir->fd_bbot = -1;
+		fidir->fd_nent = nbr;
+	}
+
+	return fidir;
+}
+
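+/* resize the lower file array of an opened dir to @nbr entries */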
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink)
+{
+	int err;
+	struct au_fidir *fidir, *p;
+
+	AuRwMustWriteLock(&finfo->fi_rwsem);
+	fidir = finfo->fi_hdir;
+	AuDebugOn(!fidir);
+
+	err = -ENOMEM;
+	p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
+			 GFP_NOFS, may_shrink);
+	if (p) {
+		p->fd_nent = nbr;
+		finfo->fi_hdir = p;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_finfo_fin(struct file *file)
+{
+	struct au_finfo *finfo;
+
+	au_lcnt_dec(&au_sbi(file->f_path.dentry->d_sb)->si_nfiles);
+
+	finfo = au_fi(file);
+	AuDebugOn(finfo->fi_hdir);
+	AuRwDestroy(&finfo->fi_rwsem);
+	au_cache_free_finfo(finfo);
+}
+
+void au_fi_init_once(void *_finfo)
+{
+	struct au_finfo *finfo = _finfo;
+
+	au_rw_init(&finfo->fi_rwsem);
+}
+
+int au_finfo_init(struct file *file, struct au_fidir *fidir)
+{
+	int err;
+	struct au_finfo *finfo;
+	struct dentry *dentry;
+
+	err = -ENOMEM;
+	dentry = file->f_path.dentry;
+	finfo = au_cache_alloc_finfo();
+	if (unlikely(!finfo))
+		goto out;
+
+	err = 0;
+	au_lcnt_inc(&au_sbi(dentry->d_sb)->si_nfiles);
+	au_rw_write_lock(&finfo->fi_rwsem);
+	finfo->fi_btop = -1;
+	finfo->fi_hdir = fidir;
+	atomic_set(&finfo->fi_generation, au_digen(dentry));
+	/* smp_mb(); */ /* atomic_set */
+
+	file->private_data = finfo;
+
+out:
+	return err;
+}
diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h
new file mode 100644
index 00000000000..af4bc6c0dd4
--- /dev/null
+++ b/fs/aufs/fstype.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * judging filesystem type
+ */
+
+#ifndef __AUFS_FSTYPE_H__
+#define __AUFS_FSTYPE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/nfs_fs.h>
+#include <linux/romfs_fs.h>
+
+static inline int au_test_aufs(struct super_block *sb)
+{
+	return sb->s_magic == AUFS_SUPER_MAGIC;
+}
+
+static inline const char *au_sbtype(struct super_block *sb)
+{
+	return sb->s_type->name;
+}
+
+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ISO9660_FS)
+	return sb->s_magic == ISOFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ROMFS_FS)
+	return sb->s_magic == ROMFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_CRAMFS)
+	return sb->s_magic == CRAMFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_NFS_FS)
+	return sb->s_magic == NFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_FUSE_FS)
+	return sb->s_magic == FUSE_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_XFS_FS)
+	return sb->s_magic == XFS_SB_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_TMPFS
+	return sb->s_magic == TMPFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_ECRYPT_FS)
+	return !strcmp(au_sbtype(sb), "ecryptfs");
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ramfs(struct super_block *sb)
+{
+	return sb->s_magic == RAMFS_MAGIC;
+}
+
+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_UBIFS_FS)
+	return sb->s_magic == UBIFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_PROC_FS
+	return sb->s_magic == PROC_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SYSFS
+	return sb->s_magic == SYSFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+	return sb->s_magic == CONFIGFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_minix(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_MINIX_FS)
+	return sb->s_magic == MINIX3_SUPER_MAGIC
+		|| sb->s_magic == MINIX2_SUPER_MAGIC
+		|| sb->s_magic == MINIX2_SUPER_MAGIC2
+		|| sb->s_magic == MINIX_SUPER_MAGIC
+		|| sb->s_magic == MINIX_SUPER_MAGIC2;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_fat(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_FAT_FS)
+	return sb->s_magic == MSDOS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_msdos(struct super_block *sb)
+{
+	return au_test_fat(sb);
+}
+
+static inline int au_test_vfat(struct super_block *sb)
+{
+	return au_test_fat(sb);
+}
+
+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SECURITYFS
+	return sb->s_magic == SECURITYFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_SQUASHFS)
+	return sb->s_magic == SQUASHFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_BTRFS_FS)
+	return sb->s_magic == BTRFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_XENFS)
+	return sb->s_magic == XENFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_DEBUG_FS
+	return sb->s_magic == DEBUGFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_NILFS)
+	return sb->s_magic == NILFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
+{
+#if IS_ENABLED(CONFIG_HFSPLUS_FS)
+	return sb->s_magic == HFSPLUS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * they can't be an aufs branch.
+ */
+static inline int au_test_fs_unsuppoted(struct super_block *sb)
+{
+	return
+#ifndef CONFIG_AUFS_BR_RAMFS
+		au_test_ramfs(sb) ||
+#endif
+		au_test_procfs(sb)
+		|| au_test_sysfs(sb)
+		|| au_test_configfs(sb)
+		|| au_test_debugfs(sb)
+		|| au_test_securityfs(sb)
+		|| au_test_xenfs(sb)
+		|| au_test_ecryptfs(sb)
+		/* || !strcmp(au_sbtype(sb), "unionfs") */
+		|| au_test_aufs(sb); /* will be supported in the next version */
+}
+
+static inline int au_test_fs_remote(struct super_block *sb)
+{
+	return !au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+		&& !au_test_ramfs(sb)
+#endif
+		&& !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Note: the functions below were written after reading ->getattr() in all
+ * filesystems under linux/fs, which means this survey has to be redone on
+ * every update...
+ */
+
+/*
+ * some filesystems require getattr() to refresh the inode attributes before
+ * they are referenced.
+ * in most cases, we can rely on the inode attributes of NFS (or any remote fs)
+ * and leave the work to d_revalidate().
+ */
+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
+{
+	return au_test_nfs(sb)
+		|| au_test_fuse(sb)
+		/* || au_test_btrfs(sb) */	/* untested */
+		;
+}
+
+/*
+ * filesystems which don't maintain i_size or i_blocks.
+ */
+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
+{
+	return au_test_xfs(sb)
+		|| au_test_btrfs(sb)
+		|| au_test_ubifs(sb)
+		|| au_test_hfsplus(sb)	/* maintained, but incorrect */
+		/* || au_test_minix(sb) */	/* untested */
+		;
+}
+
+/*
+ * filesystems which don't store the correct value in some of their inode
+ * attributes.
+ */
+static inline int au_test_fs_bad_iattr(struct super_block *sb)
+{
+	return au_test_fs_bad_iattr_size(sb)
+		|| au_test_fat(sb)
+		|| au_test_msdos(sb)
+		|| au_test_vfat(sb);
+}
+
+/* they don't check i_nlink in link(2) */
+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
+{
+	return au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+		|| au_test_ramfs(sb)
+#endif
+		|| au_test_ubifs(sb)
+		|| au_test_hfsplus(sb);
+}
+
+/*
+ * filesystems which set S_NOATIME and S_NOCMTIME.
+ */
+static inline int au_test_fs_notime(struct super_block *sb)
+{
+	return au_test_nfs(sb)
+		|| au_test_fuse(sb)
+		|| au_test_ubifs(sb)
+		;
+}
+
+/* temporary support for inode #1 in cramfs */
+static inline int au_test_fs_unique_ino(struct inode *inode)
+{
+	if (au_test_cramfs(inode->i_sb))
+		return inode->i_ino != 1;
+	return 1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * the filesystem where the xino files are placed must support i/o after
+ * unlink and must maintain i_size and i_blocks.
+ */
+static inline int au_test_fs_bad_xino(struct super_block *sb)
+{
+	return au_test_fs_remote(sb)
+		|| au_test_fs_bad_iattr_size(sb)
+		/* don't want unnecessary work for xino */
+		|| au_test_aufs(sb)
+		|| au_test_ecryptfs(sb)
+		|| au_test_nilfs(sb);
+}
+
+static inline int au_test_fs_trunc_xino(struct super_block *sb)
+{
+	return au_test_tmpfs(sb)
+		|| au_test_ramfs(sb);
+}
+
+/*
+ * test if the @sb is real-readonly.
+ */
+static inline int au_test_fs_rr(struct super_block *sb)
+{
+	return au_test_squashfs(sb)
+		|| au_test_iso9660(sb)
+		|| au_test_cramfs(sb)
+		|| au_test_romfs(sb);
+}
+
+/*
+ * test if @inode is on NFS mounted with the 'noacl' option.
+ * NFS always sets SB_POSIXACL regardless of its mount option 'noacl'.
+ */
+static inline int au_test_nfs_noacl(struct inode *inode)
+{
+	return au_test_nfs(inode->i_sb)
+		/* && IS_POSIXACL(inode) */
+		&& !nfs_server_capable(inode, NFS_CAP_ACLS);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FSTYPE_H__ */
diff --git a/fs/aufs/hbl.h b/fs/aufs/hbl.h
new file mode 100644
index 00000000000..4d5016bccc5
--- /dev/null
+++ b/fs/aufs/hbl.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * helpers for hlist_bl.h
+ */
+
+#ifndef __AUFS_HBL_H__
+#define __AUFS_HBL_H__
+
+#ifdef __KERNEL__
+
+#include <linux/list_bl.h>
+
+static inline void au_hbl_add(struct hlist_bl_node *node,
+			      struct hlist_bl_head *hbl)
+{
+	hlist_bl_lock(hbl);
+	hlist_bl_add_head(node, hbl);
+	hlist_bl_unlock(hbl);
+}
+
+static inline void au_hbl_del(struct hlist_bl_node *node,
+			      struct hlist_bl_head *hbl)
+{
+	hlist_bl_lock(hbl);
+	hlist_bl_del(node);
+	hlist_bl_unlock(hbl);
+}
+
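+/* plain iteration helper; it takes no lock itself (cf. au_hbl_count() below) */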
+#define au_hbl_for_each(pos, head)					\
+	for (pos = hlist_bl_first(head);				\
+	     pos;							\
+	     pos = pos->next)
+
+static inline unsigned long au_hbl_count(struct hlist_bl_head *hbl)
+{
+	unsigned long cnt;
+	struct hlist_bl_node *pos;
+
+	cnt = 0;
+	hlist_bl_lock(hbl);
+	au_hbl_for_each(pos, hbl)
+		cnt++;
+	hlist_bl_unlock(hbl);
+	return cnt;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_HBL_H__ */
diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c
new file mode 100644
index 00000000000..cb4eeb1e606
--- /dev/null
+++ b/fs/aufs/hfsnotify.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * fsnotify for the lower directories
+ */
+
+#include "aufs.h"
+
+/* FS_IN_IGNORED is unnecessary */
+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
+				 | FS_CREATE | FS_EVENT_ON_CHILD);
+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
+
+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
+{
+	struct au_hnotify *hn = container_of(mark, struct au_hnotify,
+					     hn_mark);
+	/* AuDbg("here\n"); */
+	au_cache_free_hnotify(hn);
+	smp_mb__before_atomic(); /* for atomic64_dec */
+	if (atomic64_dec_and_test(&au_hfsn_ifree))
+		wake_up(&au_hfsn_wq);
+}
+
+static int au_hfsn_alloc(struct au_hinode *hinode)
+{
+	int err;
+	struct au_hnotify *hn;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct fsnotify_mark *mark;
+	aufs_bindex_t bindex;
+
+	hn = hinode->hi_notify;
+	sb = hn->hn_aufs_inode->i_sb;
+	bindex = au_br_index(sb, hinode->hi_id);
+	br = au_sbr(sb, bindex);
+	AuDebugOn(!br->br_hfsn);
+
+	mark = &hn->hn_mark;
+	fsnotify_init_mark(mark, br->br_hfsn->hfsn_group);
+	mark->mask = AuHfsnMask;
+	/*
+	 * on a rename or rmdir detected by udba, aufs assigns a new inode to
+	 * the known h_inode, so specify 1 to allow duplicates.
+	 */
+	lockdep_off();
+	err = fsnotify_add_inode_mark(mark, hinode->hi_inode, /*allow_dups*/1);
+	lockdep_on();
+
+	return err;
+}
+
+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
+{
+	struct fsnotify_mark *mark;
+	unsigned long long ull;
+	struct fsnotify_group *group;
+
+	ull = atomic64_inc_return(&au_hfsn_ifree);
+	BUG_ON(!ull);
+
+	mark = &hn->hn_mark;
+	spin_lock(&mark->lock);
+	group = mark->group;
+	fsnotify_get_group(group);
+	spin_unlock(&mark->lock);
+	lockdep_off();
+	fsnotify_destroy_mark(mark, group);
+	fsnotify_put_mark(mark);
+	fsnotify_put_group(group);
+	lockdep_on();
+
+	/* hn is freed later by au_hfsn_free_mark(), not by the caller */
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
+{
+	struct fsnotify_mark *mark;
+
+	mark = &hinode->hi_notify->hn_mark;
+	spin_lock(&mark->lock);
+	if (do_set) {
+		AuDebugOn(mark->mask & AuHfsnMask);
+		mark->mask |= AuHfsnMask;
+	} else {
+		AuDebugOn(!(mark->mask & AuHfsnMask));
+		mark->mask &= ~AuHfsnMask;
+	}
+	spin_unlock(&mark->lock);
+	/* fsnotify_recalc_inode_mask(hinode->hi_inode); */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* #define AuDbgHnotify */
+#ifdef AuDbgHnotify
+static char *au_hfsn_name(u32 mask)
+{
+#ifdef CONFIG_AUFS_DEBUG
+#define test_ret(flag)				\
+	do {					\
+		if (mask & flag)		\
+			return #flag;		\
+	} while (0)
+	test_ret(FS_ACCESS);
+	test_ret(FS_MODIFY);
+	test_ret(FS_ATTRIB);
+	test_ret(FS_CLOSE_WRITE);
+	test_ret(FS_CLOSE_NOWRITE);
+	test_ret(FS_OPEN);
+	test_ret(FS_MOVED_FROM);
+	test_ret(FS_MOVED_TO);
+	test_ret(FS_CREATE);
+	test_ret(FS_DELETE);
+	test_ret(FS_DELETE_SELF);
+	test_ret(FS_MOVE_SELF);
+	test_ret(FS_UNMOUNT);
+	test_ret(FS_Q_OVERFLOW);
+	test_ret(FS_IN_IGNORED);
+	test_ret(FS_ISDIR);
+	test_ret(FS_IN_ONESHOT);
+	test_ret(FS_EVENT_ON_CHILD);
+	return "";
+#undef test_ret
+#else
+	return "??";
+#endif
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_free_group(struct fsnotify_group *group)
+{
+	struct au_br_hfsnotify *hfsn = group->private;
+
+	/* AuDbg("here\n"); */
+	au_kfree_try_rcu(hfsn);
+}
+
+static int au_hfsn_handle_event(struct fsnotify_group *group,
+				struct inode *inode,
+				u32 mask, const void *data, int data_type,
+				const struct qstr *file_name, u32 cookie,
+				struct fsnotify_iter_info *iter_info)
+{
+	int err;
+	struct au_hnotify *hnotify;
+	struct inode *h_dir, *h_inode;
+	struct fsnotify_mark *inode_mark;
+
+	AuDebugOn(data_type != FSNOTIFY_EVENT_INODE);
+
+	err = 0;
+	/* if FS_UNMOUNT happens, there must be another bug */
+	AuDebugOn(mask & FS_UNMOUNT);
+	if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
+		goto out;
+
+	h_dir = inode;
+	h_inode = NULL;
+#ifdef AuDbgHnotify
+	au_debug_on();
+	if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
+	    || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
+		AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
+		      h_dir->i_ino, mask, au_hfsn_name(mask),
+		      AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
+		/* WARN_ON(1); */
+	}
+	au_debug_off();
+#endif
+
+	inode_mark = fsnotify_iter_inode_mark(iter_info);
+	AuDebugOn(!inode_mark);
+	hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
+	err = au_hnotify(h_dir, hnotify, mask, file_name, h_inode);
+
+out:
+	return err;
+}
+
+static struct fsnotify_ops au_hfsn_ops = {
+	.handle_event		= au_hfsn_handle_event,
+	.free_group_priv	= au_hfsn_free_group,
+	.free_mark		= au_hfsn_free_mark
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin_br(struct au_branch *br)
+{
+	struct au_br_hfsnotify *hfsn;
+
+	hfsn = br->br_hfsn;
+	if (hfsn) {
+		lockdep_off();
+		fsnotify_put_group(hfsn->hfsn_group);
+		lockdep_on();
+	}
+}
+
+static int au_hfsn_init_br(struct au_branch *br, int perm)
+{
+	int err;
+	struct fsnotify_group *group;
+	struct au_br_hfsnotify *hfsn;
+
+	err = 0;
+	br->br_hfsn = NULL;
+	if (!au_br_hnotifyable(perm))
+		goto out;
+
+	err = -ENOMEM;
+	hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS);
+	if (unlikely(!hfsn))
+		goto out;
+
+	err = 0;
+	group = fsnotify_alloc_group(&au_hfsn_ops);
+	if (IS_ERR(group)) {
+		err = PTR_ERR(group);
+		pr_err("fsnotify_alloc_group() failed, %d\n", err);
+		goto out_hfsn;
+	}
+
+	group->private = hfsn;
+	hfsn->hfsn_group = group;
+	br->br_hfsn = hfsn;
+	goto out; /* success */
+
+out_hfsn:
+	au_kfree_try_rcu(hfsn);
+out:
+	return err;
+}
+
+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+	int err;
+
+	err = 0;
+	if (!br->br_hfsn)
+		err = au_hfsn_init_br(br, perm);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
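+/*
+ * wait until every pending ->free_mark callback has run, ie. au_hfsn_ifree
+ * drops to zero
+ */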
+static void au_hfsn_fin(void)
+{
+	AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
+	wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
+}
+
+const struct au_hnotify_op au_hnotify_op = {
+	.ctl		= au_hfsn_ctl,
+	.alloc		= au_hfsn_alloc,
+	.free		= au_hfsn_free,
+
+	.fin		= au_hfsn_fin,
+
+	.reset_br	= au_hfsn_reset_br,
+	.fin_br		= au_hfsn_fin_br,
+	.init_br	= au_hfsn_init_br
+};
diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c
new file mode 100644
index 00000000000..d250f019bed
--- /dev/null
+++ b/fs/aufs/hfsplus.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * special support for filesystems which acquire an inode mutex
+ * when finally closing a file, e.g. hfsplus.
+ *
+ * The trick is simple and stupid: open the file once more before the really
+ * necessary open, to tell hfsplus that this is not the final close.
+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
+ * and au_h_open_post() after releasing it.
+ */
+
+#include "aufs.h"
+
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+			   int force_wr)
+{
+	struct file *h_file;
+	struct dentry *h_dentry;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	AuDebugOn(!h_dentry);
+	AuDebugOn(d_is_negative(h_dentry));
+
+	h_file = NULL;
+	if (au_test_hfsplus(h_dentry->d_sb)
+	    && d_is_reg(h_dentry))
+		h_file = au_h_open(dentry, bindex,
+				   O_RDONLY | O_NOATIME | O_LARGEFILE,
+				   /*file*/NULL, force_wr);
+	return h_file;
+}
+
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+		    struct file *h_file)
+{
+	struct au_branch *br;
+
+	if (h_file) {
+		fput(h_file);
+		br = au_sbr(dentry->d_sb, bindex);
+		au_lcnt_dec(&br->br_nfiles);
+	}
+}
diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c
new file mode 100644
index 00000000000..231edd1b475
--- /dev/null
+++ b/fs/aufs/hnotify.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * abstraction for being notified of direct changes on the lower directories
+ */
+
+/* #include <linux/iversion.h> */
+#include "aufs.h"
+
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
+{
+	int err;
+	struct au_hnotify *hn;
+
+	err = -ENOMEM;
+	hn = au_cache_alloc_hnotify();
+	if (hn) {
+		hn->hn_aufs_inode = inode;
+		hinode->hi_notify = hn;
+		err = au_hnotify_op.alloc(hinode);
+		AuTraceErr(err);
+		if (unlikely(err)) {
+			hinode->hi_notify = NULL;
+			au_cache_free_hnotify(hn);
+			/*
+			 * The upper dir was removed by udba, but a dir with
+			 * the same name remains. In this case, aufs assigns a
+			 * new inode number and sets the monitor again.
+			 * For the lower dir, the old monitor is still in place.
+			 */
+			if (err == -EEXIST)
+				err = 0;
+		}
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+void au_hn_free(struct au_hinode *hinode)
+{
+	struct au_hnotify *hn;
+
+	hn = hinode->hi_notify;
+	if (hn) {
+		hinode->hi_notify = NULL;
+		if (au_hnotify_op.free(hinode, hn))
+			au_cache_free_hnotify(hn);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_hn_ctl(struct au_hinode *hinode, int do_set)
+{
+	if (hinode->hi_notify)
+		au_hnotify_op.ctl(hinode, do_set);
+}
+
+void au_hn_reset(struct inode *inode, unsigned int flags)
+{
+	aufs_bindex_t bindex, bbot;
+	struct inode *hi;
+	struct dentry *iwhdentry;
+
+	bbot = au_ibbot(inode);
+	for (bindex = au_ibtop(inode); bindex <= bbot; bindex++) {
+		hi = au_h_iptr(inode, bindex);
+		if (!hi)
+			continue;
+
+		/* inode_lock_nested(hi, AuLsc_I_CHILD); */
+		iwhdentry = au_hi_wh(inode, bindex);
+		if (iwhdentry)
+			dget(iwhdentry);
+		au_igrab(hi);
+		au_set_h_iptr(inode, bindex, NULL, 0);
+		au_set_h_iptr(inode, bindex, au_igrab(hi),
+			      flags & ~AuHi_XINO);
+		iput(hi);
+		dput(iwhdentry);
+		/* inode_unlock(hi); */
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
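+/*
+ * the known lower inode was unlinked or replaced; store ino 0 for its inode
+ * number on every branch of this aufs inode, so that the stale xino
+ * translation is dropped.
+ */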
+static int hn_xino(struct inode *inode, struct inode *h_inode)
+{
+	int err;
+	aufs_bindex_t bindex, bbot, bfound, btop;
+	struct inode *h_i;
+
+	err = 0;
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("branch root dir was changed\n");
+		goto out;
+	}
+
+	bfound = -1;
+	bbot = au_ibbot(inode);
+	btop = au_ibtop(inode);
+#if 0 /* reserved for future use */
+	if (bindex == bbot) {
+		/* keep this ino in rename case */
+		goto out;
+	}
+#endif
+	for (bindex = btop; bindex <= bbot; bindex++)
+		if (au_h_iptr(inode, bindex) == h_inode) {
+			bfound = bindex;
+			break;
+		}
+	if (bfound < 0)
+		goto out;
+
+	for (bindex = btop; bindex <= bbot; bindex++) {
+		h_i = au_h_iptr(inode, bindex);
+		if (!h_i)
+			continue;
+
+		err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
+		/* ignore this error */
+		/* bad action? */
+	}
+
+	/* the children's inode numbers will be broken */
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int hn_gen_tree(struct dentry *dentry)
+{
+	int err, i, j, ndentry;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			struct dentry *d;
+
+			d = dentries[j];
+			if (IS_ROOT(d))
+				continue;
+
+			au_digen_dec(d);
+			if (d_really_is_positive(d))
+				/* todo: reset children xino?
+				   cached children only? */
+				au_iigen_dec(d_inode(d));
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+/*
+ * return 0 if processed.
+ */
+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
+			   const unsigned int isdir)
+{
+	int err;
+	struct dentry *d;
+	struct qstr *dname;
+
+	err = 1;
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("branch root dir was changed\n");
+		err = 0;
+		goto out;
+	}
+
+	if (!isdir) {
+		AuDebugOn(!name);
+		au_iigen_dec(inode);
+		spin_lock(&inode->i_lock);
+		hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+			spin_lock(&d->d_lock);
+			dname = &d->d_name;
+			if (dname->len != nlen
+			    && memcmp(dname->name, name, nlen)) {
+				spin_unlock(&d->d_lock);
+				continue;
+			}
+			err = 0;
+			au_digen_dec(d);
+			spin_unlock(&d->d_lock);
+			break;
+		}
+		spin_unlock(&inode->i_lock);
+	} else {
+		au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
+		d = d_find_any_alias(inode);
+		if (!d) {
+			au_iigen_dec(inode);
+			goto out;
+		}
+
+		spin_lock(&d->d_lock);
+		dname = &d->d_name;
+		if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
+			spin_unlock(&d->d_lock);
+			err = hn_gen_tree(d);
+			spin_lock(&d->d_lock);
+		}
+		spin_unlock(&d->d_lock);
+		dput(d);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
+{
+	int err;
+
+	if (IS_ROOT(dentry)) {
+		pr_warn("branch root dir was changed\n");
+		return 0;
+	}
+
+	err = 0;
+	if (!isdir) {
+		au_digen_dec(dentry);
+		if (d_really_is_positive(dentry))
+			au_iigen_dec(d_inode(dentry));
+	} else {
+		au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
+		if (d_really_is_positive(dentry))
+			err = hn_gen_tree(dentry);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hnotify job flags */
+#define AuHnJob_XINO0		1
+#define AuHnJob_GEN		(1 << 1)
+#define AuHnJob_DIRENT		(1 << 2)
+#define AuHnJob_ISDIR		(1 << 3)
+#define AuHnJob_TRYXINO0	(1 << 4)
+#define AuHnJob_MNTPNT		(1 << 5)
+#define au_ftest_hnjob(flags, name)	((flags) & AuHnJob_##name)
+#define au_fset_hnjob(flags, name) \
+	do { (flags) |= AuHnJob_##name; } while (0)
+#define au_fclr_hnjob(flags, name) \
+	do { (flags) &= ~AuHnJob_##name; } while (0)
+
+enum {
+	AuHn_CHILD,
+	AuHn_PARENT,
+	AuHnLast
+};
+
+struct au_hnotify_args {
+	struct inode *h_dir, *dir, *h_child_inode;
+	u32 mask;
+	unsigned int flags[AuHnLast];
+	unsigned int h_child_nlen;
+	char h_child_name[];
+};
+
+struct hn_job_args {
+	unsigned int flags;
+	struct inode *inode, *h_inode, *dir, *h_dir;
+	struct dentry *dentry;
+	char *h_name;
+	int h_nlen;
+};
+
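+/* perform the jobs requested by a->flags upon a single aufs inode/dentry */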
+static int hn_job(struct hn_job_args *a)
+{
+	const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
+	int e;
+
+	/* reset xino */
+	if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
+		hn_xino(a->inode, a->h_inode); /* ignore this error */
+
+	if (au_ftest_hnjob(a->flags, TRYXINO0)
+	    && a->inode
+	    && a->h_inode) {
+		inode_lock_shared_nested(a->h_inode, AuLsc_I_CHILD);
+		if (!a->h_inode->i_nlink
+		    && !(a->h_inode->i_state & I_LINKABLE))
+			hn_xino(a->inode, a->h_inode); /* ignore this error */
+		inode_unlock_shared(a->h_inode);
+	}
+
+	/* make the generation obsolete */
+	if (au_ftest_hnjob(a->flags, GEN)) {
+		e = -1;
+		if (a->inode)
+			e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
+					      isdir);
+		if (e && a->dentry)
+			hn_gen_by_name(a->dentry, isdir);
+		/* ignore this error */
+	}
+
+	/* make dir entries obsolete */
+	if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
+		struct au_vdir *vdir;
+
+		vdir = au_ivdir(a->inode);
+		if (vdir)
+			vdir->vd_jiffy = 0;
+		/* IMustLock(a->inode); */
+		/* inode_inc_iversion(a->inode); */
+	}
+
+	/* can do nothing but warn */
+	if (au_ftest_hnjob(a->flags, MNTPNT)
+	    && a->dentry
+	    && d_mountpoint(a->dentry))
+		pr_warn("mount-point %pd is removed or renamed\n", a->dentry);
+
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
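+/*
+ * find the cached child dentry of @dir whose name matches, decrement its
+ * generation, and return it di-write-locked (or NULL when it is not cached).
+ */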
+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
+					   struct inode *dir)
+{
+	struct dentry *dentry, *d, *parent;
+	struct qstr *dname;
+
+	parent = d_find_any_alias(dir);
+	if (!parent)
+		return NULL;
+
+	dentry = NULL;
+	spin_lock(&parent->d_lock);
+	list_for_each_entry(d, &parent->d_subdirs, d_child) {
+		/* AuDbg("%pd\n", d); */
+		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+		dname = &d->d_name;
+		if (dname->len != nlen || memcmp(dname->name, name, nlen))
+			goto cont_unlock;
+		if (au_di(d))
+			au_digen_dec(d);
+		else
+			goto cont_unlock;
+		if (au_dcount(d) > 0) {
+			dentry = dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			break;
+		}
+
+cont_unlock:
+		spin_unlock(&d->d_lock);
+	}
+	spin_unlock(&parent->d_lock);
+	dput(parent);
+
+	if (dentry)
+		di_write_lock_child(dentry);
+
+	return dentry;
+}
+
+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
+					 aufs_bindex_t bindex, ino_t h_ino)
+{
+	struct inode *inode;
+	ino_t ino;
+	int err;
+
+	inode = NULL;
+	err = au_xino_read(sb, bindex, h_ino, &ino);
+	if (!err && ino)
+		inode = ilookup(sb, ino);
+	if (!inode)
+		goto out;
+
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("wrong root branch\n");
+		iput(inode);
+		inode = NULL;
+		goto out;
+	}
+
+	ii_write_lock_child(inode);
+
+out:
+	return inode;
+}
+
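+/*
+ * bottom half, executed from the aufs workqueue: find the branch which fired
+ * the event, look up the affected child dentry/inode, run hn_job() for the
+ * child and then for the parent dir, and finally release the references taken
+ * in au_hnotify().
+ */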
+static void au_hn_bh(void *_args)
+{
+	struct au_hnotify_args *a = _args;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bbot, bfound;
+	unsigned char xino, try_iput;
+	int err;
+	struct inode *inode;
+	ino_t h_ino;
+	struct hn_job_args args;
+	struct dentry *dentry;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(!_args);
+	AuDebugOn(!a->h_dir);
+	AuDebugOn(!a->dir);
+	AuDebugOn(!a->mask);
+	AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
+	      a->mask, a->dir->i_ino, a->h_dir->i_ino,
+	      a->h_child_inode ? a->h_child_inode->i_ino : 0);
+
+	inode = NULL;
+	dentry = NULL;
+	/*
+	 * do not lock a->dir->i_mutex here,
+	 * because d_revalidate() may cause a deadlock.
+	 */
+	sb = a->dir->i_sb;
+	AuDebugOn(!sb);
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!sbinfo);
+	si_write_lock(sb, AuLock_NOPLMW);
+
+	if (au_opt_test(sbinfo->si_mntflags, DIRREN))
+		switch (a->mask & FS_EVENTS_POSS_ON_CHILD) {
+		case FS_MOVED_FROM:
+		case FS_MOVED_TO:
+			AuWarn1("DIRREN with UDBA may not work correctly "
+				"for the direct rename(2)\n");
+		}
+
+	ii_read_lock_parent(a->dir);
+	bfound = -1;
+	bbot = au_ibbot(a->dir);
+	for (bindex = au_ibtop(a->dir); bindex <= bbot; bindex++)
+		if (au_h_iptr(a->dir, bindex) == a->h_dir) {
+			bfound = bindex;
+			break;
+		}
+	ii_read_unlock(a->dir);
+	if (unlikely(bfound < 0))
+		goto out;
+
+	xino = !!au_opt_test(au_mntflags(sb), XINO);
+	h_ino = 0;
+	if (a->h_child_inode)
+		h_ino = a->h_child_inode->i_ino;
+
+	if (a->h_child_nlen
+	    && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
+		dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
+					      a->dir);
+	try_iput = 0;
+	if (dentry && d_really_is_positive(dentry))
+		inode = d_inode(dentry);
+	if (xino && !inode && h_ino
+	    && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
+		inode = lookup_wlock_by_ino(sb, bfound, h_ino);
+		try_iput = 1;
+	}
+
+	args.flags = a->flags[AuHn_CHILD];
+	args.dentry = dentry;
+	args.inode = inode;
+	args.h_inode = a->h_child_inode;
+	args.dir = a->dir;
+	args.h_dir = a->h_dir;
+	args.h_name = a->h_child_name;
+	args.h_nlen = a->h_child_nlen;
+	err = hn_job(&args);
+	if (dentry) {
+		if (au_di(dentry))
+			di_write_unlock(dentry);
+		dput(dentry);
+	}
+	if (inode && try_iput) {
+		ii_write_unlock(inode);
+		iput(inode);
+	}
+
+	ii_write_lock_parent(a->dir);
+	args.flags = a->flags[AuHn_PARENT];
+	args.dentry = NULL;
+	args.inode = a->dir;
+	args.h_inode = a->h_dir;
+	args.dir = NULL;
+	args.h_dir = NULL;
+	args.h_name = NULL;
+	args.h_nlen = 0;
+	err = hn_job(&args);
+	ii_write_unlock(a->dir);
+
+out:
+	iput(a->h_child_inode);
+	iput(a->h_dir);
+	iput(a->dir);
+	si_write_unlock(sb);
+	au_nwt_done(&sbinfo->si_nowait);
+	au_kfree_rcu(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+	       const struct qstr *h_child_qstr, struct inode *h_child_inode)
+{
+	int err, len;
+	unsigned int flags[AuHnLast], f;
+	unsigned char isdir, isroot, wh;
+	struct inode *dir;
+	struct au_hnotify_args *args;
+	char *p, *h_child_name;
+
+	err = 0;
+	AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
+	dir = igrab(hnotify->hn_aufs_inode);
+	if (!dir)
+		goto out;
+
+	isroot = (dir->i_ino == AUFS_ROOT_INO);
+	wh = 0;
+	h_child_name = (void *)h_child_qstr->name;
+	len = h_child_qstr->len;
+	if (h_child_name) {
+		if (len > AUFS_WH_PFX_LEN
+		    && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+			h_child_name += AUFS_WH_PFX_LEN;
+			len -= AUFS_WH_PFX_LEN;
+			wh = 1;
+		}
+	}
+
+	isdir = 0;
+	if (h_child_inode)
+		isdir = !!S_ISDIR(h_child_inode->i_mode);
+	flags[AuHn_PARENT] = AuHnJob_ISDIR;
+	flags[AuHn_CHILD] = 0;
+	if (isdir)
+		flags[AuHn_CHILD] = AuHnJob_ISDIR;
+	au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
+	au_fset_hnjob(flags[AuHn_CHILD], GEN);
+	switch (mask & ALL_FSNOTIFY_DIRENT_EVENTS) {
+	case FS_MOVED_FROM:
+	case FS_MOVED_TO:
+		au_fset_hnjob(flags[AuHn_CHILD], XINO0);
+		au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+		/*FALLTHROUGH*/
+	case FS_CREATE:
+		AuDebugOn(!h_child_name);
+		break;
+
+	case FS_DELETE:
+		/*
+		 * aufs will never be able to get this child inode.
+		 * revalidation should be done in d_revalidate()
+		 * by checking i_nlink, i_generation or d_unhashed().
+		 */
+		AuDebugOn(!h_child_name);
+		au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
+		au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+		break;
+
+	default:
+		AuDebugOn(1);
+	}
+
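+	/* the reported inode belongs to the whiteout itself, not the child */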
+	if (wh)
+		h_child_inode = NULL;
+
+	err = -ENOMEM;
+	/* iput() and kfree() will be called in au_hnotify() */
+	args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
+	if (unlikely(!args)) {
+		AuErr1("no memory\n");
+		iput(dir);
+		goto out;
+	}
+	args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
+	args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
+	args->mask = mask;
+	args->dir = dir;
+	args->h_dir = igrab(h_dir);
+	if (h_child_inode)
+		h_child_inode = igrab(h_child_inode); /* can be NULL */
+	args->h_child_inode = h_child_inode;
+	args->h_child_nlen = len;
+	if (len) {
+		p = (void *)args;
+		p += sizeof(*args);
+		memcpy(p, h_child_name, len);
+		p[len] = 0;
+	}
+
+	/* NFS fires the event for a silly-renamed file from a kworker */
+	f = 0;
+	if (!dir->i_nlink
+	    || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE)))
+		f = AuWkq_NEST;
+	err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+	if (unlikely(err)) {
+		pr_err("wkq %d\n", err);
+		iput(args->h_child_inode);
+		iput(args->h_dir);
+		iput(args->dir);
+		au_kfree_rcu(args);
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+	int err;
+
+	AuDebugOn(!(udba & AuOptMask_UDBA));
+
+	err = 0;
+	if (au_hnotify_op.reset_br)
+		err = au_hnotify_op.reset_br(udba, br, perm);
+
+	return err;
+}
+
+int au_hnotify_init_br(struct au_branch *br, int perm)
+{
+	int err;
+
+	err = 0;
+	if (au_hnotify_op.init_br)
+		err = au_hnotify_op.init_br(br, perm);
+
+	return err;
+}
+
+void au_hnotify_fin_br(struct au_branch *br)
+{
+	if (au_hnotify_op.fin_br)
+		au_hnotify_op.fin_br(br);
+}
+
+static void au_hn_destroy_cache(void)
+{
+	kmem_cache_destroy(au_cache[AuCache_HNOTIFY]);
+	au_cache[AuCache_HNOTIFY] = NULL;
+}
+
+int __init au_hnotify_init(void)
+{
+	int err;
+
+	err = -ENOMEM;
+	au_cache[AuCache_HNOTIFY] = AuCache(au_hnotify);
+	if (au_cache[AuCache_HNOTIFY]) {
+		err = 0;
+		if (au_hnotify_op.init)
+			err = au_hnotify_op.init();
+		if (unlikely(err))
+			au_hn_destroy_cache();
+	}
+	AuTraceErr(err);
+	return err;
+}
+
+void au_hnotify_fin(void)
+{
+	if (au_hnotify_op.fin)
+		au_hnotify_op.fin();
+
+	/* cf. au_cache_fin() */
+	if (au_cache[AuCache_HNOTIFY])
+		au_hn_destroy_cache();
+}
diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c
new file mode 100644
index 00000000000..2d09f80153b
--- /dev/null
+++ b/fs/aufs/i_op.c
@@ -0,0 +1,1502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (except add/del/rename)
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/fs_stack.h>
+#include <linux/iversion.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+static int h_permission(struct inode *h_inode, int mask,
+			struct path *h_path, int brperm)
+{
+	int err;
+	const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
+	err = -EPERM;
+	if (write_mask && IS_IMMUTABLE(h_inode))
+		goto out;
+
+	err = -EACCES;
+	if (((mask & MAY_EXEC)
+	     && S_ISREG(h_inode->i_mode)
+	     && (path_noexec(h_path)
+		 || !(h_inode->i_mode & 0111))))
+		goto out;
+
+	/*
+	 * - skip the lower fs test in the case of write to ro branch.
+	 * - nfs dir permission write check is optimized, but a policy for
+	 *   link/rename requires a real check.
+	 * - nfs always sets SB_POSIXACL regardless of its mount option 'noacl.'
+	 *   in this case, generic_permission() returns -EOPNOTSUPP.
+	 */
+	if ((write_mask && !au_br_writable(brperm))
+	    || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
+		&& write_mask && !(mask & MAY_READ))
+	    || !h_inode->i_op->permission) {
+		/* AuLabel(generic_permission); */
+		/* AuDbg("get_acl %ps\n", h_inode->i_op->get_acl); */
+		err = generic_permission(h_inode, mask);
+		if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode))
+			err = h_inode->i_op->permission(h_inode, mask);
+		AuTraceErr(err);
+	} else {
+		/* AuLabel(h_inode->permission); */
+		err = h_inode->i_op->permission(h_inode, mask);
+		AuTraceErr(err);
+	}
+
+	if (!err)
+		err = devcgroup_inode_permission(h_inode, mask);
+	if (!err)
+		err = security_inode_permission(h_inode, mask);
+
+out:
+	return err;
+}
+
+static int aufs_permission(struct inode *inode, int mask)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	const unsigned char isdir = !!S_ISDIR(inode->i_mode),
+		write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+	struct inode *h_inode;
+	struct super_block *sb;
+	struct au_branch *br;
+
+	/* todo: support rcu-walk? */
+	if (mask & MAY_NOT_BLOCK)
+		return -ECHILD;
+
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	ii_read_lock_child(inode);
+#if 0 /* reserved for future use */
+	/*
+	 * This test may be rather 'too much' since it is essentially done
+	 * in aufs_lookup().  Theoretically it is possible that the inode
+	 * generation doesn't match the superblock's here.  But it isn't a
+	 * big deal I suppose.
+	 */
+	err = au_iigen_test(inode, au_sigen(sb));
+	if (unlikely(err))
+		goto out;
+#endif
+
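+	/*
+	 * for non-dirs, write access, or the dirperm1 option, checking the
+	 * top branch is enough; otherwise every lower dir is checked below.
+	 */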
+	if (!isdir
+	    || write_mask
+	    || au_opt_test(au_mntflags(sb), DIRPERM1)) {
+		err = au_busy_or_stale();
+		h_inode = au_h_iptr(inode, au_ibtop(inode));
+		if (unlikely(!h_inode
+			     || (h_inode->i_mode & S_IFMT)
+			     != (inode->i_mode & S_IFMT)))
+			goto out;
+
+		err = 0;
+		bindex = au_ibtop(inode);
+		br = au_sbr(sb, bindex);
+		err = h_permission(h_inode, mask, &br->br_path, br->br_perm);
+		if (write_mask
+		    && !err
+		    && !special_file(h_inode->i_mode)) {
+			/* test whether the upper writable branch exists */
+			err = -EROFS;
+			for (; bindex >= 0; bindex--)
+				if (!au_br_rdonly(au_sbr(sb, bindex))) {
+					err = 0;
+					break;
+				}
+		}
+		goto out;
+	}
+
+	/* non-write to dir */
+	err = 0;
+	bbot = au_ibbot(inode);
+	for (bindex = au_ibtop(inode); !err && bindex <= bbot; bindex++) {
+		h_inode = au_h_iptr(inode, bindex);
+		if (h_inode) {
+			err = au_busy_or_stale();
+			if (unlikely(!S_ISDIR(h_inode->i_mode)))
+				break;
+
+			br = au_sbr(sb, bindex);
+			err = h_permission(h_inode, mask, &br->br_path,
+					   br->br_perm);
+		}
+	}
+
+out:
+	ii_read_unlock(inode);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct dentry *ret, *parent;
+	struct inode *inode;
+	struct super_block *sb;
+	int err, npositive;
+
+	IMustLock(dir);
+
+	/* todo: support rcu-walk? */
+	ret = ERR_PTR(-ECHILD);
+	if (flags & LOOKUP_RCU)
+		goto out;
+
+	ret = ERR_PTR(-ENAMETOOLONG);
+	if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		goto out;
+
+	sb = dir->i_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	ret = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	err = au_di_init(dentry);
+	ret = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_si;
+
+	inode = NULL;
+	npositive = 0; /* suppress a warning */
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_read_lock_parent(parent, AuLock_IR);
+	err = au_alive_dir(parent);
+	if (!err)
+		err = au_digen_test(parent, au_sigen(sb));
+	if (!err) {
+		/* regardless of LOOKUP_CREATE, always ALLOW_NEG */
+		npositive = au_lkup_dentry(dentry, au_dbtop(parent),
+					   AuLkup_ALLOW_NEG);
+		err = npositive;
+	}
+	di_read_unlock(parent, AuLock_IR);
+	ret = ERR_PTR(err);
+	if (unlikely(err < 0))
+		goto out_unlock;
+
+	if (npositive) {
+		inode = au_new_inode(dentry, /*must_new*/0);
+		if (IS_ERR(inode)) {
+			ret = (void *)inode;
+			inode = NULL;
+			goto out_unlock;
+		}
+	}
+
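+	/* keep an extra reference since d_splice_alias() consumes one */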
+	if (inode)
+		atomic_inc(&inode->i_count);
+	ret = d_splice_alias(inode, dentry);
+#if 0 /* reserved for future use */
+	if (unlikely(d_need_lookup(dentry))) {
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+		spin_unlock(&dentry->d_lock);
+	} else
+#endif
+	if (inode) {
+		if (!IS_ERR(ret)) {
+			iput(inode);
+			if (ret && ret != dentry)
+				ii_write_unlock(inode);
+		} else {
+			ii_write_unlock(inode);
+			iput(inode);
+			inode = NULL;
+		}
+	}
+
+out_unlock:
+	di_write_unlock(dentry);
+out_si:
+	si_read_unlock(sb);
+out:
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * very dirty and complicated aufs ->atomic_open().
+ * aufs_atomic_open()
+ * + au_aopen_or_create()
+ *   + add_simple()
+ *     + vfsub_atomic_open()
+ *       + branch fs ->atomic_open()
+ *	   may call the actual 'open' for h_file
+ *       + inc br_nfiles only if opened
+ * + au_aopen_no_open() or au_aopen_do_open()
+ *
+ * au_aopen_do_open()
+ * + finish_open()
+ *   + au_do_aopen()
+ *     + au_do_open() the body of all 'open'
+ *       + au_do_open_nondir()
+ *	   set the passed h_file
+ *
+ * au_aopen_no_open()
+ * + finish_no_open()
+ */
+
+struct aopen_node {
+	struct hlist_bl_node hblist;
+	struct file *file, *h_file;
+};
+
+static int au_do_aopen(struct inode *inode, struct file *file)
+{
+	struct hlist_bl_head *aopen;
+	struct hlist_bl_node *pos;
+	struct aopen_node *node;
+	struct au_do_open_args args = {
+		.aopen	= 1,
+		.open	= au_do_open_nondir
+	};
+
+	aopen = &au_sbi(inode->i_sb)->si_aopen;
+	hlist_bl_lock(aopen);
+	hlist_bl_for_each_entry(node, pos, aopen, hblist)
+		if (node->file == file) {
+			args.h_file = node->h_file;
+			break;
+		}
+	hlist_bl_unlock(aopen);
+	/* AuDebugOn(!args.h_file); */
+
+	return au_do_open(file, &args);
+}
+
+static int au_aopen_do_open(struct file *file, struct dentry *dentry,
+			    struct aopen_node *aopen_node)
+{
+	int err;
+	struct hlist_bl_head *aopen;
+
+	AuLabel(here);
+	aopen = &au_sbi(dentry->d_sb)->si_aopen;
+	au_hbl_add(&aopen_node->hblist, aopen);
+	err = finish_open(file, dentry, au_do_aopen);
+	au_hbl_del(&aopen_node->hblist, aopen);
+	/* AuDbgFile(file); */
+	AuDbg("%pd%s%s\n", dentry,
+	      (file->f_mode & FMODE_CREATED) ? " created" : "",
+	      (file->f_mode & FMODE_OPENED) ? " opened" : "");
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_aopen_no_open(struct file *file, struct dentry *dentry)
+{
+	int err;
+
+	AuLabel(here);
+	dget(dentry);
+	err = finish_no_open(file, dentry);
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry,
+			    struct file *file, unsigned int open_flag,
+			    umode_t create_mode)
+{
+	int err, did_open;
+	unsigned int lkup_flags;
+	aufs_bindex_t bindex;
+	struct super_block *sb;
+	struct dentry *parent, *d;
+	struct vfsub_aopen_args args = {
+		.open_flag	= open_flag,
+		.create_mode	= create_mode
+	};
+	struct aopen_node aopen_node = {
+		.file	= file
+	};
+
+	IMustLock(dir);
+	AuDbg("open_flag 0%o\n", open_flag);
+	AuDbgDentry(dentry);
+
+	err = 0;
+	if (!au_di(dentry)) {
+		lkup_flags = LOOKUP_OPEN;
+		if (open_flag & O_CREAT)
+			lkup_flags |= LOOKUP_CREATE;
+		d = aufs_lookup(dir, dentry, lkup_flags);
+		if (IS_ERR(d)) {
+			err = PTR_ERR(d);
+			AuTraceErr(err);
+			goto out;
+		} else if (d) {
+			/*
+			 * an obsolete dentry was found.
+			 * another error will be returned later.
+			 */
+			d_drop(d);
+			AuDbgDentry(d);
+			dput(d);
+		}
+		AuDbgDentry(dentry);
+	}
+
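+	/*
+	 * if the dentry is already positive, unhashed/unlinked, or no
+	 * creation was requested, let the VFS take the ordinary open path.
+	 */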
+	if (d_is_positive(dentry)
+	    || d_unhashed(dentry)
+	    || d_unlinked(dentry)
+	    || !(open_flag & O_CREAT)) {
+		err = au_aopen_no_open(file, dentry);
+		goto out; /* success */
+	}
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+	if (unlikely(err))
+		goto out;
+
+	sb = dentry->d_sb;
+	parent = dentry->d_parent;	/* dir is locked */
+	di_write_lock_parent(parent);
+	err = au_lkup_dentry(dentry, /*btop*/0, AuLkup_ALLOW_NEG);
+	if (unlikely(err < 0))
+		goto out_parent;
+
+	AuDbgDentry(dentry);
+	if (d_is_positive(dentry)) {
+		err = au_aopen_no_open(file, dentry);
+		goto out_parent; /* success */
+	}
+
+	args.file = alloc_empty_file(file->f_flags, current_cred());
+	err = PTR_ERR(args.file);
+	if (IS_ERR(args.file))
+		goto out_parent;
+
+	bindex = au_dbtop(dentry);
+	err = au_aopen_or_create(dir, dentry, &args);
+	AuTraceErr(err);
+	AuDbgFile(args.file);
+	file->f_mode = args.file->f_mode & ~FMODE_OPENED;
+	did_open = !!(args.file->f_mode & FMODE_OPENED);
+	if (!did_open) {
+		fput(args.file);
+		args.file = NULL;
+	}
+	di_write_unlock(parent);
+	di_write_unlock(dentry);
+	if (unlikely(err < 0)) {
+		if (args.file)
+			fput(args.file);
+		goto out_sb;
+	}
+
+	if (!did_open)
+		err = au_aopen_no_open(file, dentry);
+	else {
+		aopen_node.h_file = args.file;
+		err = au_aopen_do_open(file, dentry, &aopen_node);
+	}
+	if (unlikely(err < 0)) {
+		if (args.file)
+			fput(args.file);
+		if (did_open)
+			au_lcnt_dec(&args.br->br_nfiles);
+	}
+	goto out_sb; /* success */
+
+out_parent:
+	di_write_unlock(parent);
+	di_write_unlock(dentry);
+out_sb:
+	si_read_unlock(sb);
+out:
+	AuTraceErr(err);
+	AuDbgFile(file);
+	return err;
+}
+
+
+/* ---------------------------------------------------------------------- */
+
+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
+			  const unsigned char add_entry, aufs_bindex_t bcpup,
+			  aufs_bindex_t btop)
+{
+	int err;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+
+	if (add_entry)
+		IMustLock(d_inode(parent));
+	else
+		di_write_lock_parent(parent);
+
+	err = 0;
+	if (!au_h_dptr(parent, bcpup)) {
+		if (btop > bcpup)
+			err = au_cpup_dirs(dentry, bcpup);
+		else if (btop < bcpup)
+			err = au_cpdown_dirs(dentry, bcpup);
+		else
+			BUG();
+	}
+	if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) {
+		h_parent = au_h_dptr(parent, bcpup);
+		h_dir = d_inode(h_parent);
+		inode_lock_shared_nested(h_dir, AuLsc_I_PARENT);
+		err = au_lkup_neg(dentry, bcpup, /*wh*/0);
+		/* todo: no unlock here */
+		inode_unlock_shared(h_dir);
+
+		AuDbg("bcpup %d\n", bcpup);
+		if (!err) {
+			if (d_really_is_negative(dentry))
+				au_set_h_dptr(dentry, btop, NULL);
+			au_update_dbrange(dentry, /*do_put_zero*/0);
+		}
+	}
+
+	if (!add_entry)
+		di_write_unlock(parent);
+	if (!err)
+		err = bcpup; /* success */
+
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * decide the branch and the parent dir where we will create a new entry.
+ * returns new bindex or an error.
+ * copyup the parent dir if needed.
+ */
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+	      struct au_wr_dir_args *args)
+{
+	int err;
+	unsigned int flags;
+	aufs_bindex_t bcpup, btop, src_btop;
+	const unsigned char add_entry
+		= au_ftest_wrdir(args->flags, ADD_ENTRY)
+		| au_ftest_wrdir(args->flags, TMPFILE);
+	struct super_block *sb;
+	struct dentry *parent;
+	struct au_sbinfo *sbinfo;
+
+	sb = dentry->d_sb;
+	sbinfo = au_sbi(sb);
+	parent = dget_parent(dentry);
+	btop = au_dbtop(dentry);
+	bcpup = btop;
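+	/*
+	 * pick the target branch: a caller-forced branch wins, then the
+	 * source dentry's branch, then the create policy; fall back to the
+	 * copyup policy when the chosen branch is not writable.
+	 */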
+	if (args->force_btgt < 0) {
+		if (src_dentry) {
+			src_btop = au_dbtop(src_dentry);
+			if (src_btop < btop)
+				bcpup = src_btop;
+		} else if (add_entry) {
+			flags = 0;
+			if (au_ftest_wrdir(args->flags, ISDIR))
+				au_fset_wbr(flags, DIR);
+			err = AuWbrCreate(sbinfo, dentry, flags);
+			bcpup = err;
+		}
+
+		if (bcpup < 0 || au_test_ro(sb, bcpup, d_inode(dentry))) {
+			if (add_entry)
+				err = AuWbrCopyup(sbinfo, dentry);
+			else {
+				if (!IS_ROOT(dentry)) {
+					di_read_lock_parent(parent, !AuLock_IR);
+					err = AuWbrCopyup(sbinfo, dentry);
+					di_read_unlock(parent, !AuLock_IR);
+				} else
+					err = AuWbrCopyup(sbinfo, dentry);
+			}
+			bcpup = err;
+			if (unlikely(err < 0))
+				goto out;
+		}
+	} else {
+		bcpup = args->force_btgt;
+		AuDebugOn(au_test_ro(sb, bcpup, d_inode(dentry)));
+	}
+
+	AuDbg("btop %d, bcpup %d\n", btop, bcpup);
+	err = bcpup;
+	if (bcpup == btop)
+		goto out; /* success */
+
+	/* copyup the new parent into the branch we process */
+	err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, btop);
+	if (err >= 0) {
+		if (d_really_is_negative(dentry)) {
+			au_set_h_dptr(dentry, btop, NULL);
+			au_set_dbtop(dentry, bcpup);
+			au_set_dbbot(dentry, bcpup);
+		}
+		AuDebugOn(add_entry
+			  && !au_ftest_wrdir(args->flags, TMPFILE)
+			  && !au_h_dptr(dentry, bcpup));
+	}
+
+out:
+	dput(parent);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_pin_hdir_unlock(struct au_pin *p)
+{
+	if (p->hdir)
+		au_hn_inode_unlock(p->hdir);
+}
+
+int au_pin_hdir_lock(struct au_pin *p)
+{
+	int err;
+
+	err = 0;
+	if (!p->hdir)
+		goto out;
+
+	/* even if an error happens later, keep this lock */
+	au_hn_inode_lock_nested(p->hdir, p->lsc_hi);
+
+	err = -EBUSY;
+	if (unlikely(p->hdir->hi_inode != d_inode(p->h_parent)))
+		goto out;
+
+	err = 0;
+	if (p->h_dentry)
+		err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
+				  p->h_parent, p->br);
+
+out:
+	return err;
+}
+
+int au_pin_hdir_relock(struct au_pin *p)
+{
+	int err, i;
+	struct inode *h_i;
+	struct dentry *h_d[] = {
+		p->h_dentry,
+		p->h_parent
+	};
+
+	err = au_pin_hdir_lock(p);
+	if (unlikely(err))
+		goto out;
+
+	for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) {
+		if (!h_d[i])
+			continue;
+		if (d_is_positive(h_d[i])) {
+			h_i = d_inode(h_d[i]);
+			err = !h_i->i_nlink;
+		}
+	}
+
+out:
+	return err;
+}
+
+static void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
+{
+	atomic_long_set(&p->hdir->hi_inode->i_rwsem.owner, (long)task);
+}
+
+void au_pin_hdir_acquire_nest(struct au_pin *p)
+{
+	if (p->hdir) {
+		rwsem_acquire_nest(&p->hdir->hi_inode->i_rwsem.dep_map,
+				   p->lsc_hi, 0, NULL, _RET_IP_);
+		au_pin_hdir_set_owner(p, current);
+	}
+}
+
+void au_pin_hdir_release(struct au_pin *p)
+{
+	if (p->hdir) {
+		au_pin_hdir_set_owner(p, p->task);
+		rwsem_release(&p->hdir->hi_inode->i_rwsem.dep_map, _RET_IP_);
+	}
+}
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin)
+{
+	if (pin && pin->parent)
+		return au_h_dptr(pin->parent, pin->bindex);
+	return NULL;
+}
+
+void au_unpin(struct au_pin *p)
+{
+	if (p->hdir)
+		au_pin_hdir_unlock(p);
+	if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
+		vfsub_mnt_drop_write(p->h_mnt);
+	if (!p->hdir)
+		return;
+
+	if (!au_ftest_pin(p->flags, DI_LOCKED))
+		di_read_unlock(p->parent, AuLock_IR);
+	iput(p->hdir->hi_inode);
+	dput(p->parent);
+	p->parent = NULL;
+	p->hdir = NULL;
+	p->h_mnt = NULL;
+	/* do not clear p->task */
+}
+
+int au_do_pin(struct au_pin *p)
+{
+	int err;
+	struct super_block *sb;
+	struct inode *h_dir;
+
+	err = 0;
+	sb = p->dentry->d_sb;
+	p->br = au_sbr(sb, p->bindex);
+	if (IS_ROOT(p->dentry)) {
+		if (au_ftest_pin(p->flags, MNT_WRITE)) {
+			p->h_mnt = au_br_mnt(p->br);
+			err = vfsub_mnt_want_write(p->h_mnt);
+			if (unlikely(err)) {
+				au_fclr_pin(p->flags, MNT_WRITE);
+				goto out_err;
+			}
+		}
+		goto out;
+	}
+
+	p->h_dentry = NULL;
+	if (p->bindex <= au_dbbot(p->dentry))
+		p->h_dentry = au_h_dptr(p->dentry, p->bindex);
+
+	p->parent = dget_parent(p->dentry);
+	if (!au_ftest_pin(p->flags, DI_LOCKED))
+		di_read_lock(p->parent, AuLock_IR, p->lsc_di);
+
+	h_dir = NULL;
+	p->h_parent = au_h_dptr(p->parent, p->bindex);
+	p->hdir = au_hi(d_inode(p->parent), p->bindex);
+	if (p->hdir)
+		h_dir = p->hdir->hi_inode;
+
+	/*
+	 * in the udba case, or if DI_LOCKED is not set,
+	 * p->parent may be different
+	 * and h_parent can be NULL.
+	 */
+	if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
+		err = -EBUSY;
+		if (!au_ftest_pin(p->flags, DI_LOCKED))
+			di_read_unlock(p->parent, AuLock_IR);
+		dput(p->parent);
+		p->parent = NULL;
+		goto out_err;
+	}
+
+	if (au_ftest_pin(p->flags, MNT_WRITE)) {
+		p->h_mnt = au_br_mnt(p->br);
+		err = vfsub_mnt_want_write(p->h_mnt);
+		if (unlikely(err)) {
+			au_fclr_pin(p->flags, MNT_WRITE);
+			if (!au_ftest_pin(p->flags, DI_LOCKED))
+				di_read_unlock(p->parent, AuLock_IR);
+			dput(p->parent);
+			p->parent = NULL;
+			goto out_err;
+		}
+	}
+
+	au_igrab(h_dir);
+	err = au_pin_hdir_lock(p);
+	if (!err)
+		goto out; /* success */
+
+	au_unpin(p);
+
+out_err:
+	pr_err("err %d\n", err);
+	err = au_busy_or_stale();
+out:
+	return err;
+}
+
+void au_pin_init(struct au_pin *p, struct dentry *dentry,
+		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+		 unsigned int udba, unsigned char flags)
+{
+	p->dentry = dentry;
+	p->udba = udba;
+	p->lsc_di = lsc_di;
+	p->lsc_hi = lsc_hi;
+	p->flags = flags;
+	p->bindex = bindex;
+
+	p->parent = NULL;
+	p->hdir = NULL;
+	p->h_mnt = NULL;
+
+	p->h_dentry = NULL;
+	p->h_parent = NULL;
+	p->br = NULL;
+	p->task = current;
+}
+
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+	   unsigned int udba, unsigned char flags)
+{
+	au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
+		    udba, flags);
+	return au_do_pin(pin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->setattr() and ->getattr() are called in various cases.
+ * chmod, stat: dentry is revalidated.
+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be
+ *		  unhashed.
+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
+ */
+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct dentry *parent;
+
+	err = 0;
+	if (au_digen_test(dentry, sigen)) {
+		parent = dget_parent(dentry);
+		di_read_lock_parent(parent, AuLock_IR);
+		err = au_refresh_dentry(dentry, parent);
+		di_read_unlock(parent, AuLock_IR);
+		dput(parent);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+		     struct au_icpup_args *a)
+{
+	int err;
+	loff_t sz;
+	aufs_bindex_t btop, ibtop;
+	struct dentry *hi_wh, *parent;
+	struct inode *inode;
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= 0
+	};
+
+	if (d_is_dir(dentry))
+		au_fset_wrdir(wr_dir_args.flags, ISDIR);
+	/* plink or hi_wh() case */
+	btop = au_dbtop(dentry);
+	inode = d_inode(dentry);
+	ibtop = au_ibtop(inode);
+	if (btop != ibtop && !au_test_ro(inode->i_sb, ibtop, inode))
+		wr_dir_args.force_btgt = ibtop;
+	err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+	if (unlikely(err < 0))
+		goto out;
+	a->btgt = err;
+	if (err != btop)
+		au_fset_icpup(a->flags, DID_CPUP);
+
+	err = 0;
+	a->pin_flags = AuPin_MNT_WRITE;
+	parent = NULL;
+	if (!IS_ROOT(dentry)) {
+		au_fset_pin(a->pin_flags, DI_LOCKED);
+		parent = dget_parent(dentry);
+		di_write_lock_parent(parent);
+	}
+
+	err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
+	if (unlikely(err))
+		goto out_parent;
+
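+	/* when truncating, remember the smaller size to limit the copy-up */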
+	sz = -1;
+	a->h_path.dentry = au_h_dptr(dentry, btop);
+	a->h_inode = d_inode(a->h_path.dentry);
+	if (ia && (ia->ia_valid & ATTR_SIZE)) {
+		inode_lock_shared_nested(a->h_inode, AuLsc_I_CHILD);
+		if (ia->ia_size < i_size_read(a->h_inode))
+			sz = ia->ia_size;
+		inode_unlock_shared(a->h_inode);
+	}
+
+	hi_wh = NULL;
+	if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
+		hi_wh = au_hi_wh(inode, a->btgt);
+		if (!hi_wh) {
+			struct au_cp_generic cpg = {
+				.dentry	= dentry,
+				.bdst	= a->btgt,
+				.bsrc	= -1,
+				.len	= sz,
+				.pin	= &a->pin
+			};
+			err = au_sio_cpup_wh(&cpg, /*file*/NULL);
+			if (unlikely(err))
+				goto out_unlock;
+			hi_wh = au_hi_wh(inode, a->btgt);
+			/* todo: revalidate hi_wh? */
+		}
+	}
+
+	if (parent) {
+		au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
+		di_downgrade_lock(parent, AuLock_IR);
+		dput(parent);
+		parent = NULL;
+	}
+	if (!au_ftest_icpup(a->flags, DID_CPUP))
+		goto out; /* success */
+
+	if (!d_unhashed(dentry)) {
+		struct au_cp_generic cpg = {
+			.dentry	= dentry,
+			.bdst	= a->btgt,
+			.bsrc	= btop,
+			.len	= sz,
+			.pin	= &a->pin,
+			.flags	= AuCpup_DTIME | AuCpup_HOPEN
+		};
+		err = au_sio_cpup_simple(&cpg);
+		if (!err)
+			a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+	} else if (!hi_wh)
+		a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+	else
+		a->h_path.dentry = hi_wh; /* do not dget here */
+
+out_unlock:
+	a->h_inode = d_inode(a->h_path.dentry);
+	if (!err)
+		goto out; /* success */
+	au_unpin(&a->pin);
+out_parent:
+	if (parent) {
+		di_write_unlock(parent);
+		dput(parent);
+	}
+out:
+	if (!err)
+		inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+	return err;
+}
+
+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+	int err;
+	struct inode *inode, *delegated;
+	struct super_block *sb;
+	struct file *file;
+	struct au_icpup_args *a;
+
+	inode = d_inode(dentry);
+	IMustLock(inode);
+
+	err = setattr_prepare(dentry, ia);
+	if (unlikely(err))
+		goto out;
+
+	err = -ENOMEM;
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+		ia->ia_valid &= ~ATTR_MODE;
+
+	file = NULL;
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_kfree;
+
+	if (ia->ia_valid & ATTR_FILE) {
+		/* currently ftruncate(2) only */
+		AuDebugOn(!d_is_reg(dentry));
+		file = ia->ia_file;
+		err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1,
+					    /*fi_lsc*/0);
+		if (unlikely(err))
+			goto out_si;
+		ia->ia_file = au_hf_top(file);
+		a->udba = AuOpt_UDBA_NONE;
+	} else {
+		/* fchmod() doesn't pass ia_file */
+		a->udba = au_opt_udba(sb);
+		di_write_lock_child(dentry);
+		/* no d_unlinked(), to set UDBA_NONE for root */
+		if (d_unhashed(dentry))
+			a->udba = AuOpt_UDBA_NONE;
+		if (a->udba != AuOpt_UDBA_NONE) {
+			AuDebugOn(IS_ROOT(dentry));
+			err = au_reval_for_attr(dentry, au_sigen(sb));
+			if (unlikely(err))
+				goto out_dentry;
+		}
+	}
+
+	err = au_pin_and_icpup(dentry, ia, a);
+	if (unlikely(err < 0))
+		goto out_dentry;
+	if (au_ftest_icpup(a->flags, DID_CPUP)) {
+		ia->ia_file = NULL;
+		ia->ia_valid &= ~ATTR_FILE;
+	}
+
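+	/* run the path-based security hooks against the branch path */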
+	a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
+	if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
+	    == (ATTR_MODE | ATTR_CTIME)) {
+		err = security_path_chmod(&a->h_path, ia->ia_mode);
+		if (unlikely(err))
+			goto out_unlock;
+	} else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
+		   && (ia->ia_valid & ATTR_CTIME)) {
+		err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
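+	/*
+	 * a size change goes through vfsub_trunc(); everything else is a
+	 * notify_change() on the branch, retried on NFSv4 delegation break.
+	 */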
+	if (ia->ia_valid & ATTR_SIZE) {
+		struct file *f;
+
+		if (ia->ia_size < i_size_read(inode))
+			/* unmap only */
+			truncate_setsize(inode, ia->ia_size);
+
+		f = NULL;
+		if (ia->ia_valid & ATTR_FILE)
+			f = ia->ia_file;
+		inode_unlock(a->h_inode);
+		err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
+		inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+	} else {
+		delegated = NULL;
+		while (1) {
+			err = vfsub_notify_change(&a->h_path, ia, &delegated);
+			if (delegated) {
+				err = break_deleg_wait(&delegated);
+				if (!err)
+					continue;
+			}
+			break;
+		}
+	}
+	/*
+	 * regardless of the aufs 'acl' option setting.
+	 * why don't all acl-aware fs call this func from their ->setattr()?
+	 */
+	if (!err && (ia->ia_valid & ATTR_MODE))
+		err = vfsub_acl_chmod(a->h_inode, ia->ia_mode);
+	if (!err)
+		au_cpup_attr_changeable(inode);
+
+out_unlock:
+	inode_unlock(a->h_inode);
+	au_unpin(&a->pin);
+	if (unlikely(err))
+		au_update_dbtop(dentry);
+out_dentry:
+	di_write_unlock(dentry);
+	if (file) {
+		fi_write_unlock(file);
+		ia->ia_file = file;
+		ia->ia_valid |= ATTR_FILE;
+	}
+out_si:
+	si_read_unlock(sb);
+out_kfree:
+	au_kfree_rcu(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+static int au_h_path_to_set_attr(struct dentry *dentry,
+				 struct au_icpup_args *a, struct path *h_path)
+{
+	int err;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	a->udba = au_opt_udba(sb);
+	/* no d_unlinked(), to set UDBA_NONE for root */
+	if (d_unhashed(dentry))
+		a->udba = AuOpt_UDBA_NONE;
+	if (a->udba != AuOpt_UDBA_NONE) {
+		AuDebugOn(IS_ROOT(dentry));
+		err = au_reval_for_attr(dentry, au_sigen(sb));
+		if (unlikely(err))
+			goto out;
+	}
+	err = au_pin_and_icpup(dentry, /*ia*/NULL, a);
+	if (unlikely(err < 0))
+		goto out;
+
+	h_path->dentry = a->h_path.dentry;
+	h_path->mnt = au_sbr_mnt(sb, a->btgt);
+
+out:
+	return err;
+}
+
+ssize_t au_sxattr(struct dentry *dentry, struct inode *inode,
+		  struct au_sxattr *arg)
+{
+	int err;
+	struct path h_path;
+	struct super_block *sb;
+	struct au_icpup_args *a;
+	struct inode *h_inode;
+
+	IMustLock(inode);
+
+	err = -ENOMEM;
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_kfree;
+
+	h_path.dentry = NULL;	/* silence gcc */
+	di_write_lock_child(dentry);
+	err = au_h_path_to_set_attr(dentry, a, &h_path);
+	if (unlikely(err))
+		goto out_di;
+
+	inode_unlock(a->h_inode);
+	switch (arg->type) {
+	case AU_XATTR_SET:
+		AuDebugOn(d_is_negative(h_path.dentry));
+		err = vfsub_setxattr(h_path.dentry,
+				     arg->u.set.name, arg->u.set.value,
+				     arg->u.set.size, arg->u.set.flags);
+		break;
+	case AU_ACL_SET:
+		err = -EOPNOTSUPP;
+		h_inode = d_inode(h_path.dentry);
+		if (h_inode->i_op->set_acl)
+			/* this will call posix_acl_update_mode */
+			err = h_inode->i_op->set_acl(h_inode,
+						     arg->u.acl_set.acl,
+						     arg->u.acl_set.type);
+		break;
+	}
+	if (!err)
+		au_cpup_attr_timesizes(inode);
+
+	au_unpin(&a->pin);
+	if (unlikely(err))
+		au_update_dbtop(dentry);
+
+out_di:
+	di_write_unlock(dentry);
+	si_read_unlock(sb);
+out_kfree:
+	au_kfree_rcu(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+#endif
+
+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
+			     unsigned int nlink)
+{
+	unsigned int n;
+
+	inode->i_mode = st->mode;
+	/* don't i_[ug]id_write() here */
+	inode->i_uid = st->uid;
+	inode->i_gid = st->gid;
+	inode->i_atime = st->atime;
+	inode->i_mtime = st->mtime;
+	inode->i_ctime = st->ctime;
+
+	au_cpup_attr_nlink(inode, /*force*/0);
+	if (S_ISDIR(inode->i_mode)) {
+		n = inode->i_nlink;
+		n -= nlink;
+		n += st->nlink;
+		smp_mb(); /* for i_nlink */
+		/* 0 can happen */
+		set_nlink(inode, n);
+	}
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks = st->blocks;
+	i_size_write(inode, st->size);
+	spin_unlock(&inode->i_lock);
+}
+
+/*
+ * common routine for aufs_getattr() and au_getxattr().
+ * returns zero or negative (an error).
+ * @dentry will be read-locked on success.
+ */
+int au_h_path_getattr(struct dentry *dentry, struct inode *inode, int force,
+		      struct path *h_path, int locked)
+{
+	int err;
+	unsigned int mnt_flags, sigen;
+	unsigned char udba_none;
+	aufs_bindex_t bindex;
+	struct super_block *sb, *h_sb;
+
+	h_path->mnt = NULL;
+	h_path->dentry = NULL;
+
+	err = 0;
+	sb = dentry->d_sb;
+	mnt_flags = au_mntflags(sb);
+	udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
+	if (unlikely(locked))
+		goto body; /* skip locking dinfo */
+
+	/* support fstat(2) */
+	if (!d_unlinked(dentry) && !udba_none) {
+		sigen = au_sigen(sb);
+		err = au_digen_test(dentry, sigen);
+		if (!err) {
+			di_read_lock_child(dentry, AuLock_IR);
+			err = au_dbrange_test(dentry);
+			if (unlikely(err)) {
+				di_read_unlock(dentry, AuLock_IR);
+				goto out;
+			}
+		} else {
+			AuDebugOn(IS_ROOT(dentry));
+			di_write_lock_child(dentry);
+			err = au_dbrange_test(dentry);
+			if (!err)
+				err = au_reval_for_attr(dentry, sigen);
+			if (!err)
+				di_downgrade_lock(dentry, AuLock_IR);
+			else {
+				di_write_unlock(dentry);
+				goto out;
+			}
+		}
+	} else
+		di_read_lock_child(dentry, AuLock_IR);
+
+body:
+	if (!inode) {
+		inode = d_inode(dentry);
+		if (unlikely(!inode))
+			goto out;
+	}
+	bindex = au_ibtop(inode);
+	h_path->mnt = au_sbr_mnt(sb, bindex);
+	h_sb = h_path->mnt->mnt_sb;
+	if (!force
+	    && !au_test_fs_bad_iattr(h_sb)
+	    && udba_none)
+		goto out; /* success */
+
+	if (au_dbtop(dentry) == bindex)
+		h_path->dentry = au_h_dptr(dentry, bindex);
+	else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
+		h_path->dentry = au_plink_lkup(inode, bindex);
+		if (IS_ERR(h_path->dentry))
+			/* pretending success */
+			h_path->dentry = NULL;
+		else
+			dput(h_path->dentry);
+	}
+
+out:
+	return err;
+}
+
+static int aufs_getattr(const struct path *path, struct kstat *st,
+			u32 request, unsigned int query)
+{
+	int err;
+	unsigned char positive;
+	struct path h_path;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct super_block *sb;
+
+	dentry = path->dentry;
+	inode = d_inode(dentry);
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out;
+	err = au_h_path_getattr(dentry, /*inode*/NULL, /*force*/0, &h_path,
+				/*locked*/0);
+	if (unlikely(err))
+		goto out_si;
+	if (unlikely(!h_path.dentry))
+		/* illegally overlapped or something */
+		goto out_fill; /* pretending success */
+
+	positive = d_is_positive(h_path.dentry);
+	if (positive)
+		/* no vfsub version */
+		err = vfs_getattr(&h_path, st, request, query);
+	if (!err) {
+		if (positive)
+			au_refresh_iattr(inode, st,
+					 d_inode(h_path.dentry)->i_nlink);
+		goto out_fill; /* success */
+	}
+	AuTraceErr(err);
+	goto out_di;
+
+out_fill:
+	generic_fillattr(inode, st);
+out_di:
+	di_read_unlock(dentry, AuLock_IR);
+out_si:
+	si_read_unlock(sb);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const char *aufs_get_link(struct dentry *dentry, struct inode *inode,
+				 struct delayed_call *done)
+{
+	const char *ret;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	int err;
+	aufs_bindex_t bindex;
+
+	ret = NULL; /* suppress a warning */
+	err = -ECHILD;
+	if (!dentry)
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+	if (unlikely(err))
+		goto out;
+
+	err = au_d_hashed_positive(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+
+	err = -EINVAL;
+	inode = d_inode(dentry);
+	bindex = au_ibtop(inode);
+	h_inode = au_h_iptr(inode, bindex);
+	if (unlikely(!h_inode->i_op->get_link))
+		goto out_unlock;
+
+	err = -EBUSY;
+	h_dentry = NULL;
+	if (au_dbtop(dentry) <= bindex) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry)
+			dget(h_dentry);
+	}
+	if (!h_dentry) {
+		h_dentry = d_find_any_alias(h_inode);
+		if (IS_ERR(h_dentry)) {
+			err = PTR_ERR(h_dentry);
+			goto out_unlock;
+		}
+	}
+	if (unlikely(!h_dentry))
+		goto out_unlock;
+
+	err = 0;
+	AuDbg("%ps\n", h_inode->i_op->get_link);
+	AuDbgDentry(h_dentry);
+	ret = vfs_get_link(h_dentry, done);
+	dput(h_dentry);
+	if (IS_ERR(ret))
+		err = PTR_ERR(ret);
+
+out_unlock:
+	aufs_read_unlock(dentry, AuLock_IR);
+out:
+	if (unlikely(err))
+		ret = ERR_PTR(err);
+	AuTraceErrPtr(ret);
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_is_special(struct inode *inode)
+{
+	return (inode->i_mode & (S_IFBLK | S_IFCHR | S_IFIFO | S_IFSOCK));
+}
+
+static int aufs_update_time(struct inode *inode, struct timespec64 *ts,
+			    int flags)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb;
+	struct inode *h_inode;
+	struct vfsmount *h_mnt;
+
+	sb = inode->i_sb;
+	WARN_ONCE((flags & S_ATIME) && !IS_NOATIME(inode),
+		  "unexpected s_flags 0x%lx", sb->s_flags);
+
+	/* mmap_sem might be acquired already, cf. aufs_mmap() */
+	lockdep_off();
+	si_read_lock(sb, AuLock_FLUSH);
+	ii_write_lock_child(inode);
+
+	err = 0;
+	bindex = au_ibtop(inode);
+	h_inode = au_h_iptr(inode, bindex);
+	if (!au_test_ro(sb, bindex, inode)) {
+		h_mnt = au_sbr_mnt(sb, bindex);
+		err = vfsub_mnt_want_write(h_mnt);
+		if (!err) {
+			err = vfsub_update_time(h_inode, ts, flags);
+			vfsub_mnt_drop_write(h_mnt);
+		}
+	} else if (au_is_special(h_inode)) {
+		/*
+		 * Never copy-up here.
+		 * These special files may already be opened and used for
+		 * communicating. If we copied them up, then the communication
+		 * would be corrupted.
+		 */
+		AuWarn1("timestamps for i%lu are ignored "
+			"since it is on readonly branch (hi%lu).\n",
+			inode->i_ino, h_inode->i_ino);
+	} else if (flags & ~S_ATIME) {
+		err = -EIO;
+		AuIOErr1("unexpected flags 0x%x\n", flags);
+		AuDebugOn(1);
+	}
+
+	if (!err)
+		au_cpup_attr_timesizes(inode);
+	ii_write_unlock(inode);
+	si_read_unlock(sb);
+	lockdep_on();
+
+	if (!err && (flags & S_VERSION))
+		inode_inc_iversion(inode);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no getattr version will be set by module.c:aufs_init() */
+struct inode_operations aufs_iop_nogetattr[AuIop_Last],
+	aufs_iop[] = {
+	[AuIop_SYMLINK] = {
+		.permission	= aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+		.get_acl	= aufs_get_acl,
+		.set_acl	= aufs_set_acl, /* unsupported for symlinks? */
+#endif
+
+		.setattr	= aufs_setattr,
+		.getattr	= aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+		.listxattr	= aufs_listxattr,
+#endif
+
+		.get_link	= aufs_get_link,
+
+		/* .update_time	= aufs_update_time */
+	},
+	[AuIop_DIR] = {
+		.create		= aufs_create,
+		.lookup		= aufs_lookup,
+		.link		= aufs_link,
+		.unlink		= aufs_unlink,
+		.symlink	= aufs_symlink,
+		.mkdir		= aufs_mkdir,
+		.rmdir		= aufs_rmdir,
+		.mknod		= aufs_mknod,
+		.rename		= aufs_rename,
+
+		.permission	= aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+		.get_acl	= aufs_get_acl,
+		.set_acl	= aufs_set_acl,
+#endif
+
+		.setattr	= aufs_setattr,
+		.getattr	= aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+		.listxattr	= aufs_listxattr,
+#endif
+
+		.update_time	= aufs_update_time,
+		.atomic_open	= aufs_atomic_open,
+		.tmpfile	= aufs_tmpfile
+	},
+	[AuIop_OTHER] = {
+		.permission	= aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+		.get_acl	= aufs_get_acl,
+		.set_acl	= aufs_set_acl,
+#endif
+
+		.setattr	= aufs_setattr,
+		.getattr	= aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+		.listxattr	= aufs_listxattr,
+#endif
+
+		.update_time	= aufs_update_time
+	}
+};
diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c
new file mode 100644
index 00000000000..80f2cfeeaaa
--- /dev/null
+++ b/fs/aufs/i_op_add.c
@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (add entry)
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+/*
+ * final procedure of adding a new entry, except link(2).
+ * remove whiteout, instantiate, copyup the parent dir's times and size
+ * and update version.
+ * if it failed, re-create the removed whiteout.
+ */
+static int epilog(struct inode *dir, aufs_bindex_t bindex,
+		  struct dentry *wh_dentry, struct dentry *dentry)
+{
+	int err, rerr;
+	aufs_bindex_t bwh;
+	struct path h_path;
+	struct super_block *sb;
+	struct inode *inode, *h_dir;
+	struct dentry *wh;
+
+	bwh = -1;
+	sb = dir->i_sb;
+	if (wh_dentry) {
+		h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+		IMustLock(h_dir);
+		AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
+		bwh = au_dbwh(dentry);
+		h_path.dentry = wh_dentry;
+		h_path.mnt = au_sbr_mnt(sb, bindex);
+		err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
+					  dentry);
+		if (unlikely(err))
+			goto out;
+	}
+
+	inode = au_new_inode(dentry, /*must_new*/1);
+	if (!IS_ERR(inode)) {
+		d_instantiate(dentry, inode);
+		dir = d_inode(dentry->d_parent); /* dir inode is locked */
+		IMustLock(dir);
+		au_dir_ts(dir, bindex);
+		inode_inc_iversion(dir);
+		au_fhsm_wrote(sb, bindex, /*force*/0);
+		return 0; /* success */
+	}
+
+	err = PTR_ERR(inode);
+	if (!wh_dentry)
+		goto out;
+
+	/* revert */
+	/* dir inode is locked */
+	wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
+	rerr = PTR_ERR(wh);
+	if (IS_ERR(wh)) {
+		AuIOErr("%pd reverting whiteout failed(%d, %d)\n",
+			dentry, err, rerr);
+		err = -EIO;
+	} else
+		dput(wh);
+
+out:
+	return err;
+}
+
+static int au_d_may_add(struct dentry *dentry)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(d_unhashed(dentry)))
+		err = -ENOENT;
+	if (unlikely(d_really_is_positive(dentry)))
+		err = -EEXIST;
+	return err;
+}
+
+/*
+ * simple tests for the adding inode operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir)
+{
+	int err;
+	umode_t h_mode;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	err = -ENAMETOOLONG;
+	if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		goto out;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (d_really_is_negative(dentry)) {
+		err = -EEXIST;
+		if (unlikely(d_is_positive(h_dentry)))
+			goto out;
+	} else {
+		/* rename(2) case */
+		err = -EIO;
+		if (unlikely(d_is_negative(h_dentry)))
+			goto out;
+		h_inode = d_inode(h_dentry);
+		if (unlikely(!h_inode->i_nlink))
+			goto out;
+
+		h_mode = h_inode->i_mode;
+		if (!isdir) {
+			err = -EISDIR;
+			if (unlikely(S_ISDIR(h_mode)))
+				goto out;
+		} else if (unlikely(!S_ISDIR(h_mode))) {
+			err = -ENOTDIR;
+			goto out;
+		}
+	}
+
+	err = 0;
+	/* expected parent dir is locked */
+	if (unlikely(h_parent != h_dentry->d_parent))
+		err = -EIO;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * initial procedure of adding a new entry.
+ * prepare a writable branch and the parent dir, lock it,
+ * and look up the whiteout for the new entry.
+ */
+static struct dentry*
+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
+		  struct dentry *src_dentry, struct au_pin *pin,
+		  struct au_wr_dir_args *wr_dir_args)
+{
+	struct dentry *wh_dentry, *h_parent;
+	struct super_block *sb;
+	struct au_branch *br;
+	int err;
+	unsigned int udba;
+	aufs_bindex_t bcpup;
+
+	AuDbg("%pd\n", dentry);
+
+	err = au_wr_dir(dentry, src_dentry, wr_dir_args);
+	bcpup = err;
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err < 0))
+		goto out;
+
+	sb = dentry->d_sb;
+	udba = au_opt_udba(sb);
+	err = au_pin(pin, dentry, bcpup, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	h_parent = au_pinned_h_parent(pin);
+	if (udba != AuOpt_UDBA_NONE
+	    && au_dbtop(dentry) == bcpup)
+		err = au_may_add(dentry, bcpup, h_parent,
+				 au_ftest_wrdir(wr_dir_args->flags, ISDIR));
+	else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		err = -ENAMETOOLONG;
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_unpin;
+
+	br = au_sbr(sb, bcpup);
+	if (dt) {
+		struct path tmp = {
+			.dentry	= h_parent,
+			.mnt	= au_br_mnt(br)
+		};
+		au_dtime_store(dt, au_pinned_parent(pin), &tmp);
+	}
+
+	wh_dentry = NULL;
+	if (bcpup != au_dbwh(dentry))
+		goto out; /* success */
+
+	/*
+	 * ENAMETOOLONG here means that if we allowed creating such a name,
+	 * it could not be removed in the future. So we don't allow such a
+	 * name here and we don't handle ENAMETOOLONG differently here.
+	 */
+	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+
+out_unpin:
+	if (IS_ERR(wh_dentry))
+		au_unpin(pin);
+out:
+	return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+enum { Mknod, Symlink, Creat };
+struct simple_arg {
+	int type;
+	union {
+		struct {
+			umode_t			mode;
+			bool			want_excl;
+			bool			try_aopen;
+			struct vfsub_aopen_args	*aopen;
+		} c;
+		struct {
+			const char *symname;
+		} s;
+		struct {
+			umode_t mode;
+			dev_t dev;
+		} m;
+	} u;
+};
+
+static int add_simple(struct inode *dir, struct dentry *dentry,
+		      struct simple_arg *arg)
+{
+	int err, rerr;
+	aufs_bindex_t btop;
+	unsigned char created;
+	const unsigned char try_aopen
+		= (arg->type == Creat && arg->u.c.try_aopen);
+	struct vfsub_aopen_args *aopen = arg->u.c.aopen;
+	struct dentry *wh_dentry, *parent;
+	struct inode *h_dir;
+	struct super_block *sb;
+	struct au_branch *br;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+		struct path h_path;
+		struct au_wr_dir_args wr_dir_args;
+	} *a;
+
+	AuDbg("%pd\n", dentry);
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+	a->wr_dir_args.force_btgt = -1;
+	a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	if (!try_aopen) {
+		err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+		if (unlikely(err))
+			goto out_free;
+	}
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	if (!try_aopen)
+		di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+				      &a->pin, &a->wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	btop = au_dbtop(dentry);
+	sb = dentry->d_sb;
+	br = au_sbr(sb, btop);
+	a->h_path.dentry = au_h_dptr(dentry, btop);
+	a->h_path.mnt = au_br_mnt(br);
+	h_dir = au_pinned_h_dir(&a->pin);
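+	/* perform the actual creation on the chosen writable branch */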
+	switch (arg->type) {
+	case Creat:
+		if (!try_aopen || !h_dir->i_op->atomic_open) {
+			err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode,
+					   arg->u.c.want_excl);
+			created = !err;
+			if (!err && try_aopen)
+				aopen->file->f_mode |= FMODE_CREATED;
+		} else {
+			aopen->br = br;
+			err = vfsub_atomic_open(h_dir, a->h_path.dentry, aopen);
+			AuDbg("err %d\n", err);
+			AuDbgFile(aopen->file);
+			created = err >= 0
+				&& !!(aopen->file->f_mode & FMODE_CREATED);
+		}
+		break;
+	case Symlink:
+		err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
+		created = !err;
+		break;
+	case Mknod:
+		err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
+				  arg->u.m.dev);
+		created = !err;
+		break;
+	default:
+		BUG();
+	}
+	if (unlikely(err < 0))
+		goto out_unpin;
+
+	err = epilog(dir, btop, wh_dentry, dentry);
+	if (!err)
+		goto out_unpin; /* success */
+
+	/* revert */
+	if (created /* && d_is_positive(a->h_path.dentry) */) {
+		/* no delegation since it is just created */
+		rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL,
+				    /*force*/0);
+		if (rerr) {
+			AuIOErr("%pd revert failure(%d, %d)\n",
+				dentry, err, rerr);
+			err = -EIO;
+		}
+		au_dtime_revert(&a->dt);
+	}
+	if (try_aopen && h_dir->i_op->atomic_open
+	    && (aopen->file->f_mode & FMODE_OPENED))
+		/* aopen->file is still opened */
+		au_lcnt_dec(&aopen->br->br_nfiles);
+
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+out_parent:
+	if (!try_aopen)
+		di_write_unlock(parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbtop(dentry);
+		d_drop(dentry);
+	}
+	if (!try_aopen)
+		aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	au_kfree_rcu(a);
+out:
+	return err;
+}
+
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+	       dev_t dev)
+{
+	struct simple_arg arg = {
+		.type = Mknod,
+		.u.m = {
+			.mode	= mode,
+			.dev	= dev
+		}
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct simple_arg arg = {
+		.type = Symlink,
+		.u.s.symname = symname
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool want_excl)
+{
+	struct simple_arg arg = {
+		.type = Creat,
+		.u.c = {
+			.mode		= mode,
+			.want_excl	= want_excl
+		}
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+		       struct vfsub_aopen_args *aopen_args)
+{
+	struct simple_arg arg = {
+		.type = Creat,
+		.u.c = {
+			.mode		= aopen_args->create_mode,
+			.want_excl	= aopen_args->open_flag & O_EXCL,
+			.try_aopen	= true,
+			.aopen		= aopen_args
+		}
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb;
+	struct dentry *parent, *h_parent, *h_dentry;
+	struct inode *h_dir, *inode;
+	struct vfsmount *h_mnt;
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= AuWrDir_TMPFILE
+	};
+
+	/* copy-up may happen */
+	inode_lock(dir);
+
+	sb = dir->i_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out;
+
+	err = au_di_init(dentry);
+	if (unlikely(err))
+		goto out_si;
+
+	err = -EBUSY;
+	parent = d_find_any_alias(dir);
+	AuDebugOn(!parent);
+	di_write_lock_parent(parent);
+	if (unlikely(d_inode(parent) != dir))
+		goto out_parent;
+
+	err = au_digen_test(parent, au_sigen(sb));
+	if (unlikely(err))
+		goto out_parent;
+
+	bindex = au_dbtop(parent);
+	au_set_dbtop(dentry, bindex);
+	au_set_dbbot(dentry, bindex);
+	err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+	bindex = err;
+	if (unlikely(err < 0))
+		goto out_parent;
+
+	err = -EOPNOTSUPP;
+	h_dir = au_h_iptr(dir, bindex);
+	if (unlikely(!h_dir->i_op->tmpfile))
+		goto out_parent;
+
+	h_mnt = au_sbr_mnt(sb, bindex);
+	err = vfsub_mnt_want_write(h_mnt);
+	if (unlikely(err))
+		goto out_parent;
+
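+	/* create the anonymous file directly on the chosen writable branch */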
+	h_parent = au_h_dptr(parent, bindex);
+	h_dentry = vfs_tmpfile(h_parent, mode, /*open_flag*/0);
+	if (IS_ERR(h_dentry)) {
+		err = PTR_ERR(h_dentry);
+		goto out_mnt;
+	}
+
+	au_set_dbtop(dentry, bindex);
+	au_set_dbbot(dentry, bindex);
+	au_set_h_dptr(dentry, bindex, dget(h_dentry));
+	inode = au_new_inode(dentry, /*must_new*/1);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		au_set_h_dptr(dentry, bindex, NULL);
+		au_set_dbtop(dentry, -1);
+		au_set_dbbot(dentry, -1);
+	} else {
+		if (!inode->i_nlink)
+			set_nlink(inode, 1);
+		d_tmpfile(dentry, inode);
+		au_di(dentry)->di_tmpfile = 1;
+
+		/* update without i_mutex */
+		if (au_ibtop(dir) == au_dbtop(dentry))
+			au_cpup_attr_timesizes(dir);
+	}
+	dput(h_dentry);
+
+out_mnt:
+	vfsub_mnt_drop_write(h_mnt);
+out_parent:
+	di_write_unlock(parent);
+	dput(parent);
+	di_write_unlock(dentry);
+	if (unlikely(err)) {
+		au_di_fin(dentry);
+		dentry->d_fsdata = NULL;
+	}
+out_si:
+	si_read_unlock(sb);
+out:
+	inode_unlock(dir);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_link_args {
+	aufs_bindex_t bdst, bsrc;
+	struct au_pin pin;
+	struct path h_path;
+	struct dentry *src_parent, *parent;
+};
+
+static int au_cpup_before_link(struct dentry *src_dentry,
+			       struct au_link_args *a)
+{
+	int err;
+	struct dentry *h_src_dentry;
+	struct au_cp_generic cpg = {
+		.dentry	= src_dentry,
+		.bdst	= a->bdst,
+		.bsrc	= a->bsrc,
+		.len	= -1,
+		.pin	= &a->pin,
+		.flags	= AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
+	};
+
+	di_read_lock_parent(a->src_parent, AuLock_IR);
+	err = au_test_and_cpup_dirs(src_dentry, a->bdst);
+	if (unlikely(err))
+		goto out;
+
+	h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
+	err = au_pin(&a->pin, src_dentry, a->bdst,
+		     au_opt_udba(src_dentry->d_sb),
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out;
+
+	err = au_sio_cpup_simple(&cpg);
+	au_unpin(&a->pin);
+
+out:
+	di_read_unlock(a->src_parent, AuLock_IR);
+	return err;
+}
+
+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
+			   struct au_link_args *a)
+{
+	int err;
+	unsigned char plink;
+	aufs_bindex_t bbot;
+	struct dentry *h_src_dentry;
+	struct inode *h_inode, *inode, *delegated;
+	struct super_block *sb;
+	struct file *h_file;
+
+	plink = 0;
+	h_inode = NULL;
+	sb = src_dentry->d_sb;
+	inode = d_inode(src_dentry);
+	if (au_ibtop(inode) <= a->bdst)
+		h_inode = au_h_iptr(inode, a->bdst);
+	if (!h_inode || !h_inode->i_nlink) {
+		/* copyup src_dentry under the name of dentry. */
+		bbot = au_dbbot(dentry);
+		if (bbot < a->bsrc)
+			au_set_dbbot(dentry, a->bsrc);
+		au_set_h_dptr(dentry, a->bsrc,
+			      dget(au_h_dptr(src_dentry, a->bsrc)));
+		dget(a->h_path.dentry);
+		au_set_h_dptr(dentry, a->bdst, NULL);
+		AuDbg("temporary d_inode...\n");
+		spin_lock(&dentry->d_lock);
+		dentry->d_inode = d_inode(src_dentry); /* tmp */
+		spin_unlock(&dentry->d_lock);
+		h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0);
+		if (IS_ERR(h_file))
+			err = PTR_ERR(h_file);
+		else {
+			struct au_cp_generic cpg = {
+				.dentry	= dentry,
+				.bdst	= a->bdst,
+				.bsrc	= -1,
+				.len	= -1,
+				.pin	= &a->pin,
+				.flags	= AuCpup_KEEPLINO
+			};
+			err = au_sio_cpup_simple(&cpg);
+			au_h_open_post(dentry, a->bsrc, h_file);
+			if (!err) {
+				dput(a->h_path.dentry);
+				a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+			} else
+				au_set_h_dptr(dentry, a->bdst,
+					      a->h_path.dentry);
+		}
+		spin_lock(&dentry->d_lock);
+		dentry->d_inode = NULL; /* restore */
+		spin_unlock(&dentry->d_lock);
+		AuDbg("temporary d_inode...done\n");
+		au_set_h_dptr(dentry, a->bsrc, NULL);
+		au_set_dbbot(dentry, bbot);
+	} else {
+		/* the inode of src_dentry already exists on a.bdst branch */
+		h_src_dentry = d_find_alias(h_inode);
+		if (!h_src_dentry && au_plink_test(inode)) {
+			plink = 1;
+			h_src_dentry = au_plink_lkup(inode, a->bdst);
+			err = PTR_ERR(h_src_dentry);
+			if (IS_ERR(h_src_dentry))
+				goto out;
+
+			if (unlikely(d_is_negative(h_src_dentry))) {
+				dput(h_src_dentry);
+				h_src_dentry = NULL;
+			}
+
+		}
+		if (h_src_dentry) {
+			delegated = NULL;
+			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+					 &a->h_path, &delegated);
+			if (unlikely(err == -EWOULDBLOCK)) {
+				pr_warn("cannot retry for NFSv4 delegation"
+					" for an internal link\n");
+				iput(delegated);
+			}
+			dput(h_src_dentry);
+		} else {
+			AuIOErr("no dentry found for hi%lu on b%d\n",
+				h_inode->i_ino, a->bdst);
+			err = -EIO;
+		}
+	}
+
+	if (!err && !plink)
+		au_plink_append(inode, a->bdst, a->h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+	      struct dentry *dentry)
+{
+	int err, rerr;
+	struct au_dtime dt;
+	struct au_link_args *a;
+	struct dentry *wh_dentry, *h_src_dentry;
+	struct inode *inode, *delegated;
+	struct super_block *sb;
+	struct au_wr_dir_args wr_dir_args = {
+		/* .force_btgt	= -1, */
+		.flags		= AuWrDir_ADD_ENTRY
+	};
+
+	IMustLock(dir);
+	inode = d_inode(src_dentry);
+	IMustLock(inode);
+
+	err = -ENOMEM;
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	a->parent = dentry->d_parent; /* dir inode is locked */
+	err = aufs_read_and_write_lock2(dentry, src_dentry,
+					AuLock_NOPLM | AuLock_GEN);
+	if (unlikely(err))
+		goto out_kfree;
+	err = au_d_linkable(src_dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+
+	a->src_parent = dget_parent(src_dentry);
+	wr_dir_args.force_btgt = au_ibtop(inode);
+
+	di_write_lock_parent(a->parent);
+	wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
+				      &wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	err = 0;
+	sb = dentry->d_sb;
+	a->bdst = au_dbtop(dentry);
+	a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+	a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
+	a->bsrc = au_ibtop(inode);
+	h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+	if (!h_src_dentry && au_di(src_dentry)->di_tmpfile)
+		h_src_dentry = dget(au_hi_wh(inode, a->bsrc));
+	if (!h_src_dentry) {
+		a->bsrc = au_dbtop(src_dentry);
+		h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+		AuDebugOn(!h_src_dentry);
+	} else if (IS_ERR(h_src_dentry)) {
+		err = PTR_ERR(h_src_dentry);
+		goto out_parent;
+	}
+
+	/*
+	 * aufs doesn't touch the credential so
+	 * security_dentry_create_files_as() is unnecessary.
+	 */
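+	/*
+	 * with plink enabled and the source on a lower branch, either copy it
+	 * up or link to an alias already on the target branch, recording a
+	 * pseudo-link; without plink, copy the source up to the target branch
+	 * when necessary and then link(2) natively there.
+	 */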
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		if (a->bdst < a->bsrc
+		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
+			err = au_cpup_or_link(src_dentry, dentry, a);
+		else {
+			delegated = NULL;
+			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+					 &a->h_path, &delegated);
+			if (unlikely(err == -EWOULDBLOCK)) {
+				pr_warn("cannot retry for NFSv4 delegation"
+					" for an internal link\n");
+				iput(delegated);
+			}
+		}
+		dput(h_src_dentry);
+	} else {
+		/*
+		 * copyup src_dentry to the branch we process,
+		 * and then link(2) to it.
+		 */
+		dput(h_src_dentry);
+		if (a->bdst < a->bsrc
+		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
+			au_unpin(&a->pin);
+			di_write_unlock(a->parent);
+			err = au_cpup_before_link(src_dentry, a);
+			di_write_lock_parent(a->parent);
+			if (!err)
+				err = au_pin(&a->pin, dentry, a->bdst,
+					     au_opt_udba(sb),
+					     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+			if (unlikely(err))
+				goto out_wh;
+		}
+		if (!err) {
+			h_src_dentry = au_h_dptr(src_dentry, a->bdst);
+			err = -ENOENT;
+			if (h_src_dentry && d_is_positive(h_src_dentry)) {
+				delegated = NULL;
+				err = vfsub_link(h_src_dentry,
+						 au_pinned_h_dir(&a->pin),
+						 &a->h_path, &delegated);
+				if (unlikely(err == -EWOULDBLOCK)) {
+					pr_warn("cannot retry"
+						" for NFSv4 delegation"
+						" for an internal link\n");
+					iput(delegated);
+				}
+			}
+		}
+	}
+	if (unlikely(err))
+		goto out_unpin;
+
+	if (wh_dentry) {
+		a->h_path.dentry = wh_dentry;
+		err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
+					  dentry);
+		if (unlikely(err))
+			goto out_revert;
+	}
+
+	au_dir_ts(dir, a->bdst);
+	inode_inc_iversion(dir);
+	inc_nlink(inode);
+	inode->i_ctime = dir->i_ctime;
+	d_instantiate(dentry, au_igrab(inode));
+	if (d_unhashed(a->h_path.dentry))
+		/* some filesystem calls d_drop() */
+		d_drop(dentry);
+	/* some filesystems consume an inode even for a hardlink */
+	au_fhsm_wrote(sb, a->bdst, /*force*/0);
+	goto out_unpin; /* success */
+
+out_revert:
+	/* no delegation since it is just created */
+	rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path,
+			    /*delegated*/NULL, /*force*/0);
+	if (unlikely(rerr)) {
+		AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr);
+		err = -EIO;
+	}
+	au_dtime_revert(&dt);
+out_unpin:
+	au_unpin(&a->pin);
+out_wh:
+	dput(wh_dentry);
+out_parent:
+	di_write_unlock(a->parent);
+	dput(a->src_parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbtop(dentry);
+		d_drop(dentry);
+	}
+	aufs_read_and_write_unlock2(dentry, src_dentry);
+out_kfree:
+	au_kfree_rcu(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
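+/*
+ * mkdir(2) entry point.
+ * create the dir on the writable branch, then make it opaque when a whiteout
+ * existed or the always-diropq option is set; both steps are reverted on
+ * failure.
+ */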
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	int err, rerr;
+	aufs_bindex_t bindex;
+	unsigned char diropq;
+	struct path h_path;
+	struct dentry *wh_dentry, *parent, *opq_dentry;
+	struct inode *h_inode;
+	struct super_block *sb;
+	struct {
+		struct au_pin pin;
+		struct au_dtime dt;
+	} *a; /* reduce the stack usage */
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
+	};
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+				      &a->pin, &wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	sb = dentry->d_sb;
+	bindex = au_dbtop(dentry);
+	h_path.dentry = au_h_dptr(dentry, bindex);
+	h_path.mnt = au_sbr_mnt(sb, bindex);
+	err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
+	if (unlikely(err))
+		goto out_unpin;
+
+	/* make the dir opaque */
+	diropq = 0;
+	h_inode = d_inode(h_path.dentry);
+	if (wh_dentry
+	    || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
+		inode_lock_nested(h_inode, AuLsc_I_CHILD);
+		opq_dentry = au_diropq_create(dentry, bindex);
+		inode_unlock(h_inode);
+		err = PTR_ERR(opq_dentry);
+		if (IS_ERR(opq_dentry))
+			goto out_dir;
+		dput(opq_dentry);
+		diropq = 1;
+	}
+
+	err = epilog(dir, bindex, wh_dentry, dentry);
+	if (!err) {
+		inc_nlink(dir);
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	if (diropq) {
+		AuLabel(revert opq);
+		inode_lock_nested(h_inode, AuLsc_I_CHILD);
+		rerr = au_diropq_remove(dentry, bindex);
+		inode_unlock(h_inode);
+		if (rerr) {
+			AuIOErr("%pd reverting diropq failed(%d, %d)\n",
+				dentry, err, rerr);
+			err = -EIO;
+		}
+	}
+
+out_dir:
+	AuLabel(revert dir);
+	rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
+	if (rerr) {
+		AuIOErr("%pd reverting dir failed(%d, %d)\n",
+			dentry, err, rerr);
+		err = -EIO;
+	}
+	au_dtime_revert(&a->dt);
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+out_parent:
+	di_write_unlock(parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbtop(dentry);
+		d_drop(dentry);
+	}
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	au_kfree_rcu(a);
+out:
+	return err;
+}
diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c
new file mode 100644
index 00000000000..9beba2ba8db
--- /dev/null
+++ b/fs/aufs/i_op_del.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations (del entry)
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+/*
+ * decide if a new whiteout for @dentry is necessary or not.
+ * when it is necessary, prepare the parent dir on the upper branch whose
+ * branch index is @bcpup for the creation. the actual creation of the
+ * whiteout will be done by the caller.
+ * return value:
+ * 0: wh is unnecessary
+ * plus: wh is necessary
+ * minus: error
+ */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
+{
+	int need_wh, err;
+	aufs_bindex_t btop;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	btop = au_dbtop(dentry);
+	if (*bcpup < 0) {
+		*bcpup = btop;
+		if (au_test_ro(sb, btop, d_inode(dentry))) {
+			err = AuWbrCopyup(au_sbi(sb), dentry);
+			*bcpup = err;
+			if (unlikely(err < 0))
+				goto out;
+		}
+	} else
+		AuDebugOn(btop < *bcpup
+			  || au_test_ro(sb, *bcpup, d_inode(dentry)));
+	AuDbg("bcpup %d, btop %d\n", *bcpup, btop);
+
+	if (*bcpup != btop) {
+		err = au_cpup_dirs(dentry, *bcpup);
+		if (unlikely(err))
+			goto out;
+		need_wh = 1;
+	} else {
+		struct au_dinfo *dinfo, *tmp;
+
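+		/*
+		 * probe the lower branches with a temporary dinfo so that the
+		 * real one is left untouched; a positive lower entry means the
+		 * new whiteout will be necessary.
+		 */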
+		need_wh = -ENOMEM;
+		dinfo = au_di(dentry);
+		tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+		if (tmp) {
+			au_di_cp(tmp, dinfo);
+			au_di_swap(tmp, dinfo);
+			/* returns the number of positive dentries */
+			need_wh = au_lkup_dentry(dentry, btop + 1,
+						 /* AuLkup_IGNORE_PERM */ 0);
+			au_di_swap(tmp, dinfo);
+			au_rw_write_unlock(&tmp->di_rwsem);
+			au_di_free(tmp);
+		}
+	}
+	AuDbg("need_wh %d\n", need_wh);
+	err = need_wh;
+
+out:
+	return err;
+}
+
+/*
+ * simple tests for the del-entry operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir)
+{
+	int err;
+	umode_t h_mode;
+	struct dentry *h_dentry, *h_latest;
+	struct inode *h_inode;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (d_really_is_positive(dentry)) {
+		err = -ENOENT;
+		if (unlikely(d_is_negative(h_dentry)))
+			goto out;
+		h_inode = d_inode(h_dentry);
+		if (unlikely(!h_inode->i_nlink))
+			goto out;
+
+		h_mode = h_inode->i_mode;
+		if (!isdir) {
+			err = -EISDIR;
+			if (unlikely(S_ISDIR(h_mode)))
+				goto out;
+		} else if (unlikely(!S_ISDIR(h_mode))) {
+			err = -ENOTDIR;
+			goto out;
+		}
+	} else {
+		/* rename(2) case */
+		err = -EIO;
+		if (unlikely(d_is_positive(h_dentry)))
+			goto out;
+	}
+
+	err = -ENOENT;
+	/* expected parent dir is locked */
+	if (unlikely(h_parent != h_dentry->d_parent))
+		goto out;
+	err = 0;
+
+	/*
+	 * rmdir of a dir may break the consistency on some filesystems.
+	 * let's try the heavier test.
+	 */
+	err = -EACCES;
+	if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)
+		     && au_test_h_perm(d_inode(h_parent),
+				       MAY_EXEC | MAY_WRITE)))
+		goto out;
+
+	h_latest = au_sio_lkup_one(&dentry->d_name, h_parent);
+	err = -EIO;
+	if (IS_ERR(h_latest))
+		goto out;
+	if (h_latest == h_dentry)
+		err = 0;
+	dput(h_latest);
+
+out:
+	return err;
+}
+
+/*
+ * decide the branch where we operate for @dentry. the branch index will be set
+ * @rbcpup. after deciding it, 'pin' it and store the timestamps of the parent
+ * dir for reverting.
+ * when a new whiteout is necessary, create it.
+ */
+static struct dentry*
+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
+		    struct au_dtime *dt, struct au_pin *pin)
+{
+	struct dentry *wh_dentry;
+	struct super_block *sb;
+	struct path h_path;
+	int err, need_wh;
+	unsigned int udba;
+	aufs_bindex_t bcpup;
+
+	need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
+	wh_dentry = ERR_PTR(need_wh);
+	if (unlikely(need_wh < 0))
+		goto out;
+
+	sb = dentry->d_sb;
+	udba = au_opt_udba(sb);
+	bcpup = *rbcpup;
+	err = au_pin(pin, dentry, bcpup, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	h_path.dentry = au_pinned_h_parent(pin);
+	if (udba != AuOpt_UDBA_NONE
+	    && au_dbtop(dentry) == bcpup) {
+		err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
+		wh_dentry = ERR_PTR(err);
+		if (unlikely(err))
+			goto out_unpin;
+	}
+
+	h_path.mnt = au_sbr_mnt(sb, bcpup);
+	au_dtime_store(dt, au_pinned_parent(pin), &h_path);
+	wh_dentry = NULL;
+	if (!need_wh)
+		goto out; /* success, no need to create whiteout */
+
+	wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_unpin;
+
+	/* returns with the parent locked and wh_dentry dget-ed */
+	goto out; /* success */
+
+out_unpin:
+	au_unpin(pin);
+out:
+	return wh_dentry;
+}
+
+/*
+ * when removing a dir, rename it to a unique temporary whiteout-ed name first
+ * in order to be revertible and save time for removing many child whiteouts
+ * under the dir.
+ * returns 1 when there are too many child whiteouts and the caller should
+ * remove them asynchronously. returns 0 when the number of children is small
+ * enough to remove now, or when the branch fs is a remote fs.
+ * otherwise returns an error.
+ */
+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
+			   struct au_nhash *whlist, struct inode *dir)
+{
+	int rmdir_later, err, dirwh;
+	struct dentry *h_dentry;
+	struct super_block *sb;
+	struct inode *inode;
+
+	sb = dentry->d_sb;
+	SiMustAnyLock(sb);
+	h_dentry = au_h_dptr(dentry, bindex);
+	err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
+	if (unlikely(err))
+		goto out;
+
+	/* stop monitoring */
+	inode = d_inode(dentry);
+	au_hn_free(au_hi(inode, bindex));
+
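+	/*
+	 * on a local branch, defer the real rmdir to the caller's workqueue
+	 * when the dir holds more whiteouts than the si_dirwh threshold.
+	 */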
+	if (!au_test_fs_remote(h_dentry->d_sb)) {
+		dirwh = au_sbi(sb)->si_dirwh;
+		rmdir_later = (dirwh <= 1);
+		if (!rmdir_later)
+			rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
+							      dirwh);
+		if (rmdir_later)
+			return rmdir_later;
+	}
+
+	err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
+	if (unlikely(err)) {
+		AuIOErr("rmdir %pd, b%d failed, %d. ignored\n",
+			h_dentry, bindex, err);
+		err = 0;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * final procedure for deleting an entry.
+ * maintain the dentry and the inode attributes.
+ */
+static void epilog(struct inode *dir, struct dentry *dentry,
+		   aufs_bindex_t bindex)
+{
+	struct inode *inode;
+
+	inode = d_inode(dentry);
+	d_drop(dentry);
+	inode->i_ctime = dir->i_ctime;
+
+	au_dir_ts(dir, bindex);
+	inode_inc_iversion(dir);
+}
+
+/*
+ * when an error happened, remove the created whiteout and revert everything.
+ */
+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
+		     aufs_bindex_t bwh, struct dentry *wh_dentry,
+		     struct dentry *dentry, struct au_dtime *dt)
+{
+	int rerr;
+	struct path h_path = {
+		.dentry	= wh_dentry,
+		.mnt	= au_sbr_mnt(dir->i_sb, bindex)
+	};
+
+	rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
+	if (!rerr) {
+		au_set_dbwh(dentry, bwh);
+		au_dtime_revert(dt);
+		return 0;
+	}
+
+	AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr);
+	return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bwh, bindex, btop;
+	struct inode *inode, *h_dir, *delegated;
+	struct dentry *parent, *wh_dentry;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+		struct path h_path;
+	} *a;
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_d_hashed_positive(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	inode = d_inode(dentry);
+	IMustLock(inode);
+	err = -EISDIR;
+	if (unlikely(d_is_dir(dentry)))
+		goto out_unlock; /* possible? */
+
+	btop = au_dbtop(dentry);
+	bwh = au_dbwh(dentry);
+	bindex = -1;
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
+					&a->pin);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	a->h_path.mnt = au_sbr_mnt(dentry->d_sb, btop);
+	a->h_path.dentry = au_h_dptr(dentry, btop);
+	dget(a->h_path.dentry);
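+	/*
+	 * when the victim lives on the branch we operate on, unlink it there;
+	 * otherwise the whiteout created above alone hides the lower entry.
+	 */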
+	if (bindex == btop) {
+		h_dir = au_pinned_h_dir(&a->pin);
+		delegated = NULL;
+		err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal unlink\n");
+			iput(delegated);
+		}
+	} else {
+		/* dir inode is locked */
+		h_dir = d_inode(wh_dentry->d_parent);
+		IMustLock(h_dir);
+		err = 0;
+	}
+
+	if (!err) {
+		vfsub_drop_nlink(inode);
+		epilog(dir, dentry, bindex);
+
+		/* update target timestamps */
+		if (bindex == btop) {
+			vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
+			/*ignore*/
+			inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+		} else
+			/* todo: this timestamp may be reverted later */
+			inode->i_ctime = h_dir->i_ctime;
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	if (wh_dentry) {
+		int rerr;
+
+		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+				 &a->dt);
+		if (rerr)
+			err = rerr;
+	}
+
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+	dput(a->h_path.dentry);
+out_parent:
+	di_write_unlock(parent);
+out_unlock:
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	au_kfree_rcu(a);
+out:
+	return err;
+}
+
+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int err, rmdir_later;
+	aufs_bindex_t bwh, bindex, btop;
+	struct inode *inode;
+	struct dentry *parent, *wh_dentry, *h_dentry;
+	struct au_whtmp_rmdir *args;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+	} *a;
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_alive_dir(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	inode = d_inode(dentry);
+	IMustLock(inode);
+	err = -ENOTDIR;
+	if (unlikely(!d_is_dir(dentry)))
+		goto out_unlock; /* possible? */
+
+	err = -ENOMEM;
+	args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
+	if (unlikely(!args))
+		goto out_unlock;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	err = au_test_empty(dentry, &args->whlist);
+	if (unlikely(err))
+		goto out_parent;
+
+	btop = au_dbtop(dentry);
+	bwh = au_dbwh(dentry);
+	bindex = -1;
+	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
+					&a->pin);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	h_dentry = au_h_dptr(dentry, btop);
+	dget(h_dentry);
+	rmdir_later = 0;
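+	/*
+	 * when the dir lives on the branch we operate on, rename it to a
+	 * temporary whiteout-ed name and remove it, possibly asynchronously;
+	 * otherwise the whiteout created above alone hides the lower dir.
+	 */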
+	if (bindex == btop) {
+		err = renwh_and_rmdir(dentry, btop, &args->whlist, dir);
+		if (err > 0) {
+			rmdir_later = err;
+			err = 0;
+		}
+	} else {
+		/* stop monitoring */
+		au_hn_free(au_hi(inode, btop));
+
+		/* dir inode is locked */
+		IMustLock(d_inode(wh_dentry->d_parent));
+		err = 0;
+	}
+
+	if (!err) {
+		vfsub_dead_dir(inode);
+		au_set_dbdiropq(dentry, -1);
+		epilog(dir, dentry, bindex);
+
+		if (rmdir_later) {
+			au_whtmp_kick_rmdir(dir, btop, h_dentry, args);
+			args = NULL;
+		}
+
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	AuLabel(revert);
+	if (wh_dentry) {
+		int rerr;
+
+		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+				 &a->dt);
+		if (rerr)
+			err = rerr;
+	}
+
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+	dput(h_dentry);
+out_parent:
+	di_write_unlock(parent);
+	if (args)
+		au_whtmp_rmdir_free(args);
+out_unlock:
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	au_kfree_rcu(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c
new file mode 100644
index 00000000000..435fa50811f
--- /dev/null
+++ b/fs/aufs/i_op_ren.c
@@ -0,0 +1,1250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operation (rename entry)
+ * todo: this is a crazy monster
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+enum { AuSRC, AuDST, AuSrcDst };
+enum { AuPARENT, AuCHILD, AuParentChild };
+
+#define AuRen_ISDIR_SRC		1
+#define AuRen_ISDIR_DST		(1 << 1)
+#define AuRen_ISSAMEDIR		(1 << 2)
+#define AuRen_WHSRC		(1 << 3)
+#define AuRen_WHDST		(1 << 4)
+#define AuRen_MNT_WRITE		(1 << 5)
+#define AuRen_DT_DSTDIR		(1 << 6)
+#define AuRen_DIROPQ_SRC	(1 << 7)
+#define AuRen_DIROPQ_DST	(1 << 8)
+#define AuRen_DIRREN		(1 << 9)
+#define AuRen_DROPPED_SRC	(1 << 10)
+#define AuRen_DROPPED_DST	(1 << 11)
+#define au_ftest_ren(flags, name)	((flags) & AuRen_##name)
+#define au_fset_ren(flags, name) \
+	do { (flags) |= AuRen_##name; } while (0)
+#define au_fclr_ren(flags, name) \
+	do { (flags) &= ~AuRen_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuRen_DIRREN
+#define AuRen_DIRREN		0
+#endif
+
+struct au_ren_args {
+	struct {
+		struct dentry *dentry, *h_dentry, *parent, *h_parent,
+			*wh_dentry;
+		struct inode *dir, *inode;
+		struct au_hinode *hdir, *hinode;
+		struct au_dtime dt[AuParentChild];
+		aufs_bindex_t btop, bdiropq;
+	} sd[AuSrcDst];
+
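+/*
+ * shorthand accessors: src_* and dst_* below simply index sd[AuSRC] and
+ * sd[AuDST] so that the rename code reads as two symmetric halves.
+ */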
+#define src_dentry	sd[AuSRC].dentry
+#define src_dir		sd[AuSRC].dir
+#define src_inode	sd[AuSRC].inode
+#define src_h_dentry	sd[AuSRC].h_dentry
+#define src_parent	sd[AuSRC].parent
+#define src_h_parent	sd[AuSRC].h_parent
+#define src_wh_dentry	sd[AuSRC].wh_dentry
+#define src_hdir	sd[AuSRC].hdir
+#define src_hinode	sd[AuSRC].hinode
+#define src_h_dir	sd[AuSRC].hdir->hi_inode
+#define src_dt		sd[AuSRC].dt
+#define src_btop	sd[AuSRC].btop
+#define src_bdiropq	sd[AuSRC].bdiropq
+
+#define dst_dentry	sd[AuDST].dentry
+#define dst_dir		sd[AuDST].dir
+#define dst_inode	sd[AuDST].inode
+#define dst_h_dentry	sd[AuDST].h_dentry
+#define dst_parent	sd[AuDST].parent
+#define dst_h_parent	sd[AuDST].h_parent
+#define dst_wh_dentry	sd[AuDST].wh_dentry
+#define dst_hdir	sd[AuDST].hdir
+#define dst_hinode	sd[AuDST].hinode
+#define dst_h_dir	sd[AuDST].hdir->hi_inode
+#define dst_dt		sd[AuDST].dt
+#define dst_btop	sd[AuDST].btop
+#define dst_bdiropq	sd[AuDST].bdiropq
+
+	struct dentry *h_trap;
+	struct au_branch *br;
+	struct path h_path;
+	struct au_nhash whlist;
+	aufs_bindex_t btgt, src_bwh;
+
+	struct {
+		unsigned short auren_flags;
+		unsigned char flags;	/* syscall parameter */
+		unsigned char exchange;
+	} __packed;
+
+	struct au_whtmp_rmdir *thargs;
+	struct dentry *h_dst;
+	struct au_hinode *h_root;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * functions for reverting.
+ * when an error happens in a single rename system call, we should revert
+ * everything as if nothing had happened.
+ * we don't need to revert the copied-up/down parent dirs since they are
+ * harmless.
+ */
+
+#define RevertFailure(fmt, ...) do { \
+	AuIOErr("revert failure: " fmt " (%d, %d)\n", \
+		##__VA_ARGS__, err, rerr); \
+	err = -EIO; \
+} while (0)
+
+static void au_ren_do_rev_diropq(int err, struct au_ren_args *a, int idx)
+{
+	int rerr;
+	struct dentry *d;
+#define src_or_dst(member) a->sd[idx].member
+
+	d = src_or_dst(dentry); /* {src,dst}_dentry */
+	au_hn_inode_lock_nested(src_or_dst(hinode), AuLsc_I_CHILD);
+	rerr = au_diropq_remove(d, a->btgt);
+	au_hn_inode_unlock(src_or_dst(hinode));
+	au_set_dbdiropq(d, src_or_dst(bdiropq));
+	if (rerr)
+		RevertFailure("remove diropq %pd", d);
+
+#undef src_or_dst
+}
+
+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
+{
+	if (au_ftest_ren(a->auren_flags, DIROPQ_SRC))
+		au_ren_do_rev_diropq(err, a, AuSRC);
+	if (au_ftest_ren(a->auren_flags, DIROPQ_DST))
+		au_ren_do_rev_diropq(err, a, AuDST);
+}
+
+static void au_ren_rev_rename(int err, struct au_ren_args *a)
+{
+	int rerr;
+	struct inode *delegated;
+
+	a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name,
+					  a->src_h_parent);
+	rerr = PTR_ERR(a->h_path.dentry);
+	if (IS_ERR(a->h_path.dentry)) {
+		RevertFailure("lkup one %pd", a->src_dentry);
+		return;
+	}
+
+	delegated = NULL;
+	rerr = vfsub_rename(a->dst_h_dir,
+			    au_h_dptr(a->src_dentry, a->btgt),
+			    a->src_h_dir, &a->h_path, &delegated, a->flags);
+	if (unlikely(rerr == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal rename\n");
+		iput(delegated);
+	}
+	d_drop(a->h_path.dentry);
+	dput(a->h_path.dentry);
+	/* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
+	if (rerr)
+		RevertFailure("rename %pd", a->src_dentry);
+}
+
+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
+{
+	int rerr;
+	struct inode *delegated;
+
+	a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name,
+					  a->dst_h_parent);
+	rerr = PTR_ERR(a->h_path.dentry);
+	if (IS_ERR(a->h_path.dentry)) {
+		RevertFailure("lkup one %pd", a->dst_dentry);
+		return;
+	}
+	if (d_is_positive(a->h_path.dentry)) {
+		d_drop(a->h_path.dentry);
+		dput(a->h_path.dentry);
+		return;
+	}
+
+	delegated = NULL;
+	rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path,
+			    &delegated, a->flags);
+	if (unlikely(rerr == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal rename\n");
+		iput(delegated);
+	}
+	d_drop(a->h_path.dentry);
+	dput(a->h_path.dentry);
+	if (!rerr)
+		au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
+	else
+		RevertFailure("rename %pd", a->h_dst);
+}
+
+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	a->h_path.dentry = a->src_wh_dentry;
+	rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
+	au_set_dbwh(a->src_dentry, a->src_bwh);
+	if (rerr)
+		RevertFailure("unlink %pd", a->src_wh_dentry);
+}
+#undef RevertFailure
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * when we have to copyup the renaming entry, do it with the rename-target name
+ * in order to minimize the cost (the later actual rename is unnecessary).
+ * otherwise rename it on the target branch.
+ */
+static int au_ren_or_cpup(struct au_ren_args *a)
+{
+	int err;
+	struct dentry *d;
+	struct inode *delegated;
+
+	d = a->src_dentry;
+	if (au_dbtop(d) == a->btgt) {
+		a->h_path.dentry = a->dst_h_dentry;
+		AuDebugOn(au_dbtop(d) != a->btgt);
+		delegated = NULL;
+		err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
+				   a->dst_h_dir, &a->h_path, &delegated,
+				   a->flags);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal rename\n");
+			iput(delegated);
+		}
+	} else
+		BUG();
+
+	if (!err && a->h_dst)
+		/* it will be set to dinfo later */
+		dget(a->h_dst);
+
+	return err;
+}
+
+/* cf. aufs_rmdir() */
+static int au_ren_del_whtmp(struct au_ren_args *a)
+{
+	int err;
+	struct inode *dir;
+
+	dir = a->dst_dir;
+	SiMustAnyLock(dir->i_sb);
+	if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
+				     au_sbi(dir->i_sb)->si_dirwh)
+	    || au_test_fs_remote(a->h_dst->d_sb)) {
+		err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
+		if (unlikely(err))
+			pr_warn("failed removing whtmp dir %pd (%d), "
+				"ignored.\n", a->h_dst, err);
+	} else {
+		au_nhash_wh_free(&a->thargs->whlist);
+		a->thargs->whlist = a->whlist;
+		a->whlist.nh_num = 0;
+		au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
+		dput(a->h_dst);
+		a->thargs = NULL;
+	}
+
+	return 0;
+}
+
+/* make it an 'opaque' dir. */
+static int au_ren_do_diropq(struct au_ren_args *a, int idx)
+{
+	int err;
+	struct dentry *d, *diropq;
+#define src_or_dst(member) a->sd[idx].member
+
+	err = 0;
+	d = src_or_dst(dentry); /* {src,dst}_dentry */
+	src_or_dst(bdiropq) = au_dbdiropq(d);
+	src_or_dst(hinode) = au_hi(src_or_dst(inode), a->btgt);
+	au_hn_inode_lock_nested(src_or_dst(hinode), AuLsc_I_CHILD);
+	diropq = au_diropq_create(d, a->btgt);
+	au_hn_inode_unlock(src_or_dst(hinode));
+	if (IS_ERR(diropq))
+		err = PTR_ERR(diropq);
+	else
+		dput(diropq);
+
+#undef src_or_dst
+	return err;
+}
+
+static int au_ren_diropq(struct au_ren_args *a)
+{
+	int err;
+	unsigned char always;
+	struct dentry *d;
+
+	err = 0;
+	d = a->dst_dentry; /* already renamed on the branch */
+	always = !!au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ);
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC)
+	    && !au_ftest_ren(a->auren_flags, DIRREN)
+	    && a->btgt != au_dbdiropq(a->src_dentry)
+	    && (a->dst_wh_dentry
+		|| a->btgt <= au_dbdiropq(d)
+		/* hide the lower to keep xino */
+		/* the lowers may not be dirs, but we hide them anyway */
+		|| a->btgt < au_dbbot(d)
+		|| always)) {
+		AuDbg("here\n");
+		err = au_ren_do_diropq(a, AuSRC);
+		if (unlikely(err))
+			goto out;
+		au_fset_ren(a->auren_flags, DIROPQ_SRC);
+	}
+	if (!a->exchange)
+		goto out; /* success */
+
+	d = a->src_dentry; /* already renamed on the branch */
+	if (au_ftest_ren(a->auren_flags, ISDIR_DST)
+	    && a->btgt != au_dbdiropq(a->dst_dentry)
+	    && (a->btgt < au_dbdiropq(d)
+		|| a->btgt < au_dbbot(d)
+		|| always)) {
+		AuDbgDentry(a->src_dentry);
+		AuDbgDentry(a->dst_dentry);
+		err = au_ren_do_diropq(a, AuDST);
+		if (unlikely(err))
+			goto out_rev_src;
+		au_fset_ren(a->auren_flags, DIROPQ_DST);
+	}
+	goto out; /* success */
+
+out_rev_src:
+	AuDbg("err %d, reverting src\n", err);
+	au_ren_rev_diropq(err, a);
+out:
+	return err;
+}
+
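+/*
+ * the core sequence: prepare whtmp and the whiteouts, rename on the lower
+ * branch, make the dir opaque, update the timestamps, and finally drop the
+ * dst whiteout and whtmp. every step is reverted in reverse order when a
+ * later one fails.
+ */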
+static int do_rename(struct au_ren_args *a)
+{
+	int err;
+	struct dentry *d, *h_d;
+
+	if (!a->exchange) {
+		/* prepare workqueue args for asynchronous rmdir */
+		h_d = a->dst_h_dentry;
+		if (au_ftest_ren(a->auren_flags, ISDIR_DST)
+		    /* && !au_ftest_ren(a->auren_flags, DIRREN) */
+		    && d_is_positive(h_d)) {
+			err = -ENOMEM;
+			a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb,
+							 GFP_NOFS);
+			if (unlikely(!a->thargs))
+				goto out;
+			a->h_dst = dget(h_d);
+		}
+
+		/* create whiteout for src_dentry */
+		if (au_ftest_ren(a->auren_flags, WHSRC)) {
+			a->src_bwh = au_dbwh(a->src_dentry);
+			AuDebugOn(a->src_bwh >= 0);
+			a->src_wh_dentry = au_wh_create(a->src_dentry, a->btgt,
+							a->src_h_parent);
+			err = PTR_ERR(a->src_wh_dentry);
+			if (IS_ERR(a->src_wh_dentry))
+				goto out_thargs;
+		}
+
+		/* lookup whiteout for dentry */
+		if (au_ftest_ren(a->auren_flags, WHDST)) {
+			h_d = au_wh_lkup(a->dst_h_parent,
+					 &a->dst_dentry->d_name, a->br);
+			err = PTR_ERR(h_d);
+			if (IS_ERR(h_d))
+				goto out_whsrc;
+			if (d_is_negative(h_d))
+				dput(h_d);
+			else
+				a->dst_wh_dentry = h_d;
+		}
+
+		/* rename dentry to tmpwh */
+		if (a->thargs) {
+			err = au_whtmp_ren(a->dst_h_dentry, a->br);
+			if (unlikely(err))
+				goto out_whdst;
+
+			d = a->dst_dentry;
+			au_set_h_dptr(d, a->btgt, NULL);
+			err = au_lkup_neg(d, a->btgt, /*wh*/0);
+			if (unlikely(err))
+				goto out_whtmp;
+			a->dst_h_dentry = au_h_dptr(d, a->btgt);
+		}
+	}
+
+	BUG_ON(d_is_positive(a->dst_h_dentry) && a->src_btop != a->btgt);
+#if 0 /* debugging */
+	BUG_ON(!au_ftest_ren(a->auren_flags, DIRREN)
+	       && d_is_positive(a->dst_h_dentry)
+	       && a->src_btop != a->btgt);
+#endif
+
+	/* rename by vfs_rename or cpup */
+	err = au_ren_or_cpup(a);
+	if (unlikely(err))
+		/* leave the copied-up one */
+		goto out_whtmp;
+
+	/* make dir opaque */
+	err = au_ren_diropq(a);
+	if (unlikely(err))
+		goto out_rename;
+
+	/* update target timestamps */
+	if (a->exchange) {
+		AuDebugOn(au_dbtop(a->dst_dentry) != a->btgt);
+		a->h_path.dentry = au_h_dptr(a->dst_dentry, a->btgt);
+		vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+		a->dst_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+	}
+	AuDebugOn(au_dbtop(a->src_dentry) != a->btgt);
+	a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
+	vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+	a->src_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+
+	if (!a->exchange) {
+		/* remove whiteout for dentry */
+		if (a->dst_wh_dentry) {
+			a->h_path.dentry = a->dst_wh_dentry;
+			err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
+						  a->dst_dentry);
+			if (unlikely(err))
+				goto out_diropq;
+		}
+
+		/* remove whtmp */
+		if (a->thargs)
+			au_ren_del_whtmp(a); /* ignore this error */
+
+		au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0);
+	}
+	err = 0;
+	goto out_success;
+
+out_diropq:
+	au_ren_rev_diropq(err, a);
+out_rename:
+	au_ren_rev_rename(err, a);
+	dput(a->h_dst);
+out_whtmp:
+	if (a->thargs)
+		au_ren_rev_whtmp(err, a);
+out_whdst:
+	dput(a->dst_wh_dentry);
+	a->dst_wh_dentry = NULL;
+out_whsrc:
+	if (a->src_wh_dentry)
+		au_ren_rev_whsrc(err, a);
+out_success:
+	dput(a->src_wh_dentry);
+	dput(a->dst_wh_dentry);
+out_thargs:
+	if (a->thargs) {
+		dput(a->h_dst);
+		au_whtmp_rmdir_free(a->thargs);
+		a->thargs = NULL;
+	}
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if the @dentry dir can be a rename destination or not.
+ * success means it is a logically empty dir.
+ */
+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
+{
+	return au_test_empty(dentry, whlist);
+}
+
+/*
+ * test if the @a->src_dentry dir can be a rename source or not.
+ * if it can, return 0.
+ * success means either
+ * - it is a logically empty dir, or
+ * - it exists on the writable branch and, unless DIRREN is on, has no
+ *   children (including whiteouts) on the lower branches.
+ */
+static int may_rename_srcdir(struct au_ren_args *a)
+{
+	int err;
+	unsigned int rdhash;
+	aufs_bindex_t btop, btgt;
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	dentry = a->src_dentry;
+	sb = dentry->d_sb;
+	sbinfo = au_sbi(sb);
+	if (au_opt_test(sbinfo->si_mntflags, DIRREN))
+		au_fset_ren(a->auren_flags, DIRREN);
+
+	btgt = a->btgt;
+	btop = au_dbtop(dentry);
+	if (btop != btgt) {
+		struct au_nhash whlist;
+
+		SiMustAnyLock(sb);
+		rdhash = sbinfo->si_rdhash;
+		if (!rdhash)
+			rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
+							   dentry));
+		err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+		if (unlikely(err))
+			goto out;
+		err = au_test_empty(dentry, &whlist);
+		au_nhash_wh_free(&whlist);
+		goto out;
+	}
+
+	if (btop == au_dbtaildir(dentry))
+		return 0; /* success */
+
+	err = au_test_empty_lower(dentry);
+
+out:
+	if (err == -ENOTEMPTY) {
+		if (au_ftest_ren(a->auren_flags, DIRREN)) {
+			err = 0;
+		} else {
+			AuWarn1("renaming a dir which has child(ren) on "
+				"multiple branches is not supported\n");
+			err = -EXDEV;
+		}
+	}
+	return err;
+}
+
+/* side effect: sets whlist and h_dentry */
+static int au_ren_may_dir(struct au_ren_args *a)
+{
+	int err;
+	unsigned int rdhash;
+	struct dentry *d;
+
+	d = a->dst_dentry;
+	SiMustAnyLock(d->d_sb);
+
+	err = 0;
+	if (au_ftest_ren(a->auren_flags, ISDIR_DST) && a->dst_inode) {
+		rdhash = au_sbi(d->d_sb)->si_rdhash;
+		if (!rdhash)
+			rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
+		err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
+		if (unlikely(err))
+			goto out;
+
+		if (!a->exchange) {
+			au_set_dbtop(d, a->dst_btop);
+			err = may_rename_dstdir(d, &a->whlist);
+			au_set_dbtop(d, a->btgt);
+		} else
+			err = may_rename_srcdir(a);
+	}
+	a->dst_h_dentry = au_h_dptr(d, au_dbtop(d));
+	if (unlikely(err))
+		goto out;
+
+	d = a->src_dentry;
+	a->src_h_dentry = au_h_dptr(d, au_dbtop(d));
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC)) {
+		err = may_rename_srcdir(a);
+		if (unlikely(err)) {
+			au_nhash_wh_free(&a->whlist);
+			a->whlist.nh_num = 0;
+		}
+	}
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * simple tests for rename.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+static int au_may_ren(struct au_ren_args *a)
+{
+	int err, isdir;
+	struct inode *h_inode;
+
+	if (a->src_btop == a->btgt) {
+		err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
+				 au_ftest_ren(a->auren_flags, ISDIR_SRC));
+		if (unlikely(err))
+			goto out;
+		err = -EINVAL;
+		if (unlikely(a->src_h_dentry == a->h_trap))
+			goto out;
+	}
+
+	err = 0;
+	if (a->dst_btop != a->btgt)
+		goto out;
+
+	err = -ENOTEMPTY;
+	if (unlikely(a->dst_h_dentry == a->h_trap))
+		goto out;
+
+	err = -EIO;
+	isdir = !!au_ftest_ren(a->auren_flags, ISDIR_DST);
+	if (d_really_is_negative(a->dst_dentry)) {
+		if (d_is_negative(a->dst_h_dentry))
+			err = au_may_add(a->dst_dentry, a->btgt,
+					 a->dst_h_parent, isdir);
+	} else {
+		if (unlikely(d_is_negative(a->dst_h_dentry)))
+			goto out;
+		h_inode = d_inode(a->dst_h_dentry);
+		if (h_inode->i_nlink)
+			err = au_may_del(a->dst_dentry, a->btgt,
+					 a->dst_h_parent, isdir);
+	}
+
+out:
+	if (unlikely(err == -ENOENT || err == -EEXIST))
+		err = -EIO;
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * locking order
+ * (VFS)
+ * - src_dir and dir by lock_rename()
+ * - inode if exists
+ * (aufs)
+ * - lock all
+ *   + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
+ *     + si_read_lock
+ *     + di_write_lock2_child()
+ *       + di_write_lock_child()
+ *	   + ii_write_lock_child()
+ *       + di_write_lock_child2()
+ *	   + ii_write_lock_child2()
+ *     + src_parent and parent
+ *       + di_write_lock_parent()
+ *	   + ii_write_lock_parent()
+ *       + di_write_lock_parent2()
+ *	   + ii_write_lock_parent2()
+ *   + lower src_dir and dir by vfsub_lock_rename()
+ *   + verify every relationship between child and parent. if any of
+ *     them fails, unlock all and return -EBUSY.
+ */
+static void au_ren_unlock(struct au_ren_args *a)
+{
+	vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
+			    a->dst_h_parent, a->dst_hdir);
+	if (au_ftest_ren(a->auren_flags, DIRREN)
+	    && a->h_root)
+		au_hn_inode_unlock(a->h_root);
+	if (au_ftest_ren(a->auren_flags, MNT_WRITE))
+		vfsub_mnt_drop_write(au_br_mnt(a->br));
+}
+
+static int au_ren_lock(struct au_ren_args *a)
+{
+	int err;
+	unsigned int udba;
+
+	err = 0;
+	a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
+	a->src_hdir = au_hi(a->src_dir, a->btgt);
+	a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
+	a->dst_hdir = au_hi(a->dst_dir, a->btgt);
+
+	err = vfsub_mnt_want_write(au_br_mnt(a->br));
+	if (unlikely(err))
+		goto out;
+	au_fset_ren(a->auren_flags, MNT_WRITE);
+	if (au_ftest_ren(a->auren_flags, DIRREN)) {
+		struct dentry *root;
+		struct inode *dir;
+
+		/*
+		 * sbinfo is already locked, so this ii_read_lock is
+		 * unnecessary. but our debugging feature checks it.
+		 */
+		root = a->src_inode->i_sb->s_root;
+		if (root != a->src_parent && root != a->dst_parent) {
+			dir = d_inode(root);
+			ii_read_lock_parent3(dir);
+			a->h_root = au_hi(dir, a->btgt);
+			ii_read_unlock(dir);
+			au_hn_inode_lock_nested(a->h_root, AuLsc_I_PARENT3);
+		}
+	}
+	a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
+				      a->dst_h_parent, a->dst_hdir);
+	udba = au_opt_udba(a->src_dentry->d_sb);
+	if (unlikely(a->src_hdir->hi_inode != d_inode(a->src_h_parent)
+		     || a->dst_hdir->hi_inode != d_inode(a->dst_h_parent)))
+		err = au_busy_or_stale();
+	if (!err && au_dbtop(a->src_dentry) == a->btgt)
+		err = au_h_verify(a->src_h_dentry, udba,
+				  d_inode(a->src_h_parent), a->src_h_parent,
+				  a->br);
+	if (!err && au_dbtop(a->dst_dentry) == a->btgt)
+		err = au_h_verify(a->dst_h_dentry, udba,
+				  d_inode(a->dst_h_parent), a->dst_h_parent,
+				  a->br);
+	if (!err)
+		goto out; /* success */
+
+	err = au_busy_or_stale();
+	au_ren_unlock(a);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_ren_refresh_dir(struct au_ren_args *a)
+{
+	struct inode *dir;
+
+	dir = a->dst_dir;
+	inode_inc_iversion(dir);
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC)) {
+		/* is this updating defined in POSIX? */
+		au_cpup_attr_timesizes(a->src_inode);
+		au_cpup_attr_nlink(dir, /*force*/1);
+	}
+	au_dir_ts(dir, a->btgt);
+
+	if (a->exchange) {
+		dir = a->src_dir;
+		inode_inc_iversion(dir);
+		if (au_ftest_ren(a->auren_flags, ISDIR_DST)) {
+			/* is this updating defined in POSIX? */
+			au_cpup_attr_timesizes(a->dst_inode);
+			au_cpup_attr_nlink(dir, /*force*/1);
+		}
+		au_dir_ts(dir, a->btgt);
+	}
+
+	if (au_ftest_ren(a->auren_flags, ISSAMEDIR))
+		return;
+
+	dir = a->src_dir;
+	inode_inc_iversion(dir);
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC))
+		au_cpup_attr_nlink(dir, /*force*/1);
+	au_dir_ts(dir, a->btgt);
+}
+
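+/* refresh the aufs dentries/inodes of both ends after the lower rename */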
+static void au_ren_refresh(struct au_ren_args *a)
+{
+	aufs_bindex_t bbot, bindex;
+	struct dentry *d, *h_d;
+	struct inode *i, *h_i;
+	struct super_block *sb;
+
+	d = a->dst_dentry;
+	d_drop(d);
+	if (a->h_dst)
+		/* already dget-ed by au_ren_or_cpup() */
+		au_set_h_dptr(d, a->btgt, a->h_dst);
+
+	i = a->dst_inode;
+	if (i) {
+		if (!a->exchange) {
+			if (!au_ftest_ren(a->auren_flags, ISDIR_DST))
+				vfsub_drop_nlink(i);
+			else {
+				vfsub_dead_dir(i);
+				au_cpup_attr_timesizes(i);
+			}
+			au_update_dbrange(d, /*do_put_zero*/1);
+		} else
+			au_cpup_attr_nlink(i, /*force*/1);
+	} else {
+		bbot = a->btgt;
+		for (bindex = au_dbtop(d); bindex < bbot; bindex++)
+			au_set_h_dptr(d, bindex, NULL);
+		bbot = au_dbbot(d);
+		for (bindex = a->btgt + 1; bindex <= bbot; bindex++)
+			au_set_h_dptr(d, bindex, NULL);
+		au_update_dbrange(d, /*do_put_zero*/0);
+	}
+
+	if (a->exchange
+	    || au_ftest_ren(a->auren_flags, DIRREN)) {
+		d_drop(a->src_dentry);
+		if (au_ftest_ren(a->auren_flags, DIRREN))
+			au_set_dbwh(a->src_dentry, -1);
+		return;
+	}
+
+	d = a->src_dentry;
+	au_set_dbwh(d, -1);
+	bbot = au_dbbot(d);
+	for (bindex = a->btgt + 1; bindex <= bbot; bindex++) {
+		h_d = au_h_dptr(d, bindex);
+		if (h_d)
+			au_set_h_dptr(d, bindex, NULL);
+	}
+	au_set_dbbot(d, a->btgt);
+
+	sb = d->d_sb;
+	i = a->src_inode;
+	if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
+		return; /* success */
+
+	bbot = au_ibbot(i);
+	for (bindex = a->btgt + 1; bindex <= bbot; bindex++) {
+		h_i = au_h_iptr(i, bindex);
+		if (h_i) {
+			au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
+			/* ignore this error */
+			au_set_h_iptr(i, bindex, NULL, 0);
+		}
+	}
+	au_set_ibbot(i, a->btgt);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* mainly for link(2) and rename(2) */
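+/*
+ * returns -1 when @btgt is not usable: the branch is read-only, or an upper
+ * diropq/whiteout would hide the entry created there.
+ */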
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
+{
+	aufs_bindex_t bdiropq, bwh;
+	struct dentry *parent;
+	struct au_branch *br;
+
+	parent = dentry->d_parent;
+	IMustLock(d_inode(parent)); /* dir is locked */
+
+	bdiropq = au_dbdiropq(parent);
+	bwh = au_dbwh(dentry);
+	br = au_sbr(dentry->d_sb, btgt);
+	if (au_br_rdonly(br)
+	    || (0 <= bdiropq && bdiropq < btgt)
+	    || (0 <= bwh && bwh < btgt))
+		btgt = -1;
+
+	AuDbg("btgt %d\n", btgt);
+	return btgt;
+}
+
+/* sets src_btop, dst_btop and btgt */
+static int au_ren_wbr(struct au_ren_args *a)
+{
+	int err;
+	struct au_wr_dir_args wr_dir_args = {
+		/* .force_btgt	= -1, */
+		.flags		= AuWrDir_ADD_ENTRY
+	};
+
+	a->src_btop = au_dbtop(a->src_dentry);
+	a->dst_btop = au_dbtop(a->dst_dentry);
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC)
+	    || au_ftest_ren(a->auren_flags, ISDIR_DST))
+		au_fset_wrdir(wr_dir_args.flags, ISDIR);
+	wr_dir_args.force_btgt = a->src_btop;
+	if (a->dst_inode && a->dst_btop < a->src_btop)
+		wr_dir_args.force_btgt = a->dst_btop;
+	wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
+	err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
+	a->btgt = err;
+	if (a->exchange)
+		au_update_dbtop(a->dst_dentry);
+
+	return err;
+}
+
+static void au_ren_dt(struct au_ren_args *a)
+{
+	a->h_path.dentry = a->src_h_parent;
+	au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
+	if (!au_ftest_ren(a->auren_flags, ISSAMEDIR)) {
+		a->h_path.dentry = a->dst_h_parent;
+		au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
+	}
+
+	au_fclr_ren(a->auren_flags, DT_DSTDIR);
+	if (!au_ftest_ren(a->auren_flags, ISDIR_SRC)
+	    && !a->exchange)
+		return;
+
+	a->h_path.dentry = a->src_h_dentry;
+	au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
+	if (d_is_positive(a->dst_h_dentry)) {
+		au_fset_ren(a->auren_flags, DT_DSTDIR);
+		a->h_path.dentry = a->dst_h_dentry;
+		au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
+	}
+}
+
+static void au_ren_rev_dt(int err, struct au_ren_args *a)
+{
+	struct dentry *h_d;
+	struct inode *h_inode;
+
+	au_dtime_revert(a->src_dt + AuPARENT);
+	if (!au_ftest_ren(a->auren_flags, ISSAMEDIR))
+		au_dtime_revert(a->dst_dt + AuPARENT);
+
+	if (au_ftest_ren(a->auren_flags, ISDIR_SRC) && err != -EIO) {
+		h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
+		h_inode = d_inode(h_d);
+		inode_lock_nested(h_inode, AuLsc_I_CHILD);
+		au_dtime_revert(a->src_dt + AuCHILD);
+		inode_unlock(h_inode);
+
+		if (au_ftest_ren(a->auren_flags, DT_DSTDIR)) {
+			h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
+			h_inode = d_inode(h_d);
+			inode_lock_nested(h_inode, AuLsc_I_CHILD);
+			au_dtime_revert(a->dst_dt + AuCHILD);
+			inode_unlock(h_inode);
+		}
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
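+/*
+ * rename(2) entry point.
+ * decide the target branch, copy up the parents and both ends as needed, lock
+ * the lower dirs, create the necessary whiteouts, perform the lower rename
+ * (or exchange), and finally refresh the aufs dentries and dirs. every step
+ * is revertible on failure.
+ */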
+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
+		struct inode *_dst_dir, struct dentry *_dst_dentry,
+		unsigned int _flags)
+{
+	int err, lock_flags;
+	void *rev;
+	/* reduce stack space */
+	struct au_ren_args *a;
+	struct au_pin pin;
+
+	AuDbg("%pd, %pd, 0x%x\n", _src_dentry, _dst_dentry, _flags);
+	IMustLock(_src_dir);
+	IMustLock(_dst_dir);
+
+	err = -EINVAL;
+	if (unlikely(_flags & RENAME_WHITEOUT))
+		goto out;
+
+	err = -ENOMEM;
+	BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	a->flags = _flags;
+	BUILD_BUG_ON(sizeof(a->exchange) == sizeof(u8)
+		     && RENAME_EXCHANGE > U8_MAX);
+	a->exchange = _flags & RENAME_EXCHANGE;
+	a->src_dir = _src_dir;
+	a->src_dentry = _src_dentry;
+	a->src_inode = NULL;
+	if (d_really_is_positive(a->src_dentry))
+		a->src_inode = d_inode(a->src_dentry);
+	a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
+	a->dst_dir = _dst_dir;
+	a->dst_dentry = _dst_dentry;
+	a->dst_inode = NULL;
+	if (d_really_is_positive(a->dst_dentry))
+		a->dst_inode = d_inode(a->dst_dentry);
+	a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
+	if (a->dst_inode) {
+		/*
+		 * if EXCHANGE && src is non-dir && dst is dir,
+		 * dst is not locked.
+		 */
+		/* IMustLock(a->dst_inode); */
+		au_igrab(a->dst_inode);
+	}
+
+	err = -ENOTDIR;
+	lock_flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
+	if (d_is_dir(a->src_dentry)) {
+		au_fset_ren(a->auren_flags, ISDIR_SRC);
+		if (unlikely(!a->exchange
+			     && d_really_is_positive(a->dst_dentry)
+			     && !d_is_dir(a->dst_dentry)))
+			goto out_free;
+		lock_flags |= AuLock_DIRS;
+	}
+	if (a->dst_inode && d_is_dir(a->dst_dentry)) {
+		au_fset_ren(a->auren_flags, ISDIR_DST);
+		if (unlikely(!a->exchange
+			     && d_really_is_positive(a->src_dentry)
+			     && !d_is_dir(a->src_dentry)))
+			goto out_free;
+		lock_flags |= AuLock_DIRS;
+	}
+	err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+					lock_flags);
+	if (unlikely(err))
+		goto out_free;
+
+	err = au_d_hashed_positive(a->src_dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	err = -ENOENT;
+	if (a->dst_inode) {
+		/*
+		 * if it is a dir, the VFS unhashes it before calling this
+		 * function, which means we cannot rely upon d_unhashed().
+		 */
+		if (unlikely(!a->dst_inode->i_nlink))
+			goto out_unlock;
+		if (!au_ftest_ren(a->auren_flags, ISDIR_DST)) {
+			err = au_d_hashed_positive(a->dst_dentry);
+			if (unlikely(err && !a->exchange))
+				goto out_unlock;
+		} else if (unlikely(IS_DEADDIR(a->dst_inode)))
+			goto out_unlock;
+	} else if (unlikely(d_unhashed(a->dst_dentry)))
+		goto out_unlock;
+
+	/*
+	 * is it possible?
+	 * yes, it happened (in linux-3.3-rcN) but I don't know why.
+	 * there may exist a problem somewhere else.
+	 */
+	err = -EINVAL;
+	if (unlikely(d_inode(a->dst_parent) == d_inode(a->src_dentry)))
+		goto out_unlock;
+
+	au_fset_ren(a->auren_flags, ISSAMEDIR); /* temporary */
+	di_write_lock_parent(a->dst_parent);
+
+	/* which branch we process */
+	err = au_ren_wbr(a);
+	if (unlikely(err < 0))
+		goto out_parent;
+	a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
+	a->h_path.mnt = au_br_mnt(a->br);
+
+	/* are they available to be renamed */
+	err = au_ren_may_dir(a);
+	if (unlikely(err))
+		goto out_children;
+
+	/* prepare the writable parent dir on the same branch */
+	if (a->dst_btop == a->btgt) {
+		au_fset_ren(a->auren_flags, WHDST);
+	} else {
+		err = au_cpup_dirs(a->dst_dentry, a->btgt);
+		if (unlikely(err))
+			goto out_children;
+	}
+
+	err = 0;
+	if (!a->exchange) {
+		if (a->src_dir != a->dst_dir) {
+			/*
+			 * this temporary unlock is safe,
+			 * because both dirs' ->i_mutex are held.
+			 */
+			di_write_unlock(a->dst_parent);
+			di_write_lock_parent(a->src_parent);
+			err = au_wr_dir_need_wh(a->src_dentry,
+						au_ftest_ren(a->auren_flags,
+							     ISDIR_SRC),
+						&a->btgt);
+			di_write_unlock(a->src_parent);
+			di_write_lock2_parent(a->src_parent, a->dst_parent,
+					      /*isdir*/1);
+			au_fclr_ren(a->auren_flags, ISSAMEDIR);
+		} else
+			err = au_wr_dir_need_wh(a->src_dentry,
+						au_ftest_ren(a->auren_flags,
+							     ISDIR_SRC),
+						&a->btgt);
+	}
+	if (unlikely(err < 0))
+		goto out_children;
+	if (err)
+		au_fset_ren(a->auren_flags, WHSRC);
+
+	/* cpup src */
+	if (a->src_btop != a->btgt) {
+		err = au_pin(&pin, a->src_dentry, a->btgt,
+			     au_opt_udba(a->src_dentry->d_sb),
+			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+		if (!err) {
+			struct au_cp_generic cpg = {
+				.dentry	= a->src_dentry,
+				.bdst	= a->btgt,
+				.bsrc	= a->src_btop,
+				.len	= -1,
+				.pin	= &pin,
+				.flags	= AuCpup_DTIME | AuCpup_HOPEN
+			};
+			AuDebugOn(au_dbtop(a->src_dentry) != a->src_btop);
+			err = au_sio_cpup_simple(&cpg);
+			au_unpin(&pin);
+		}
+		if (unlikely(err))
+			goto out_children;
+		a->src_btop = a->btgt;
+		a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
+		if (!a->exchange)
+			au_fset_ren(a->auren_flags, WHSRC);
+	}
+
+	/* cpup dst */
+	if (a->exchange && a->dst_inode
+	    && a->dst_btop != a->btgt) {
+		err = au_pin(&pin, a->dst_dentry, a->btgt,
+			     au_opt_udba(a->dst_dentry->d_sb),
+			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+		if (!err) {
+			struct au_cp_generic cpg = {
+				.dentry	= a->dst_dentry,
+				.bdst	= a->btgt,
+				.bsrc	= a->dst_btop,
+				.len	= -1,
+				.pin	= &pin,
+				.flags	= AuCpup_DTIME | AuCpup_HOPEN
+			};
+			err = au_sio_cpup_simple(&cpg);
+			au_unpin(&pin);
+		}
+		if (unlikely(err))
+			goto out_children;
+		a->dst_btop = a->btgt;
+		a->dst_h_dentry = au_h_dptr(a->dst_dentry, a->btgt);
+	}
+
+	/* lock them all */
+	err = au_ren_lock(a);
+	if (unlikely(err))
+		/* leave the copied-up one */
+		goto out_children;
+
+	if (!a->exchange) {
+		if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
+			err = au_may_ren(a);
+		else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
+			err = -ENAMETOOLONG;
+		if (unlikely(err))
+			goto out_hdir;
+	}
+
+	/* store timestamps to be revertible */
+	au_ren_dt(a);
+
+	/* store dirren info */
+	if (au_ftest_ren(a->auren_flags, DIRREN)) {
+		err = au_dr_rename(a->src_dentry, a->btgt,
+				   &a->dst_dentry->d_name, &rev);
+		AuTraceErr(err);
+		if (unlikely(err))
+			goto out_dt;
+	}
+
+	/* here we go */
+	err = do_rename(a);
+	if (unlikely(err))
+		goto out_dirren;
+
+	if (au_ftest_ren(a->auren_flags, DIRREN))
+		au_dr_rename_fin(a->src_dentry, a->btgt, rev);
+
+	/* update dir attributes */
+	au_ren_refresh_dir(a);
+
+	/* dput/iput all lower dentries */
+	au_ren_refresh(a);
+
+	goto out_hdir; /* success */
+
+out_dirren:
+	if (au_ftest_ren(a->auren_flags, DIRREN))
+		au_dr_rename_rev(a->src_dentry, a->btgt, rev);
+out_dt:
+	au_ren_rev_dt(err, a);
+out_hdir:
+	au_ren_unlock(a);
+out_children:
+	au_nhash_wh_free(&a->whlist);
+	if (err && a->dst_inode && a->dst_btop != a->btgt) {
+		AuDbg("btop %d, btgt %d\n", a->dst_btop, a->btgt);
+		au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
+		au_set_dbtop(a->dst_dentry, a->dst_btop);
+	}
+out_parent:
+	if (!err) {
+		if (d_unhashed(a->src_dentry))
+			au_fset_ren(a->auren_flags, DROPPED_SRC);
+		if (d_unhashed(a->dst_dentry))
+			au_fset_ren(a->auren_flags, DROPPED_DST);
+		if (!a->exchange)
+			d_move(a->src_dentry, a->dst_dentry);
+		else {
+			d_exchange(a->src_dentry, a->dst_dentry);
+			if (au_ftest_ren(a->auren_flags, DROPPED_DST))
+				d_drop(a->dst_dentry);
+		}
+		if (au_ftest_ren(a->auren_flags, DROPPED_SRC))
+			d_drop(a->src_dentry);
+	} else {
+		au_update_dbtop(a->dst_dentry);
+		if (!a->dst_inode)
+			d_drop(a->dst_dentry);
+	}
+	if (au_ftest_ren(a->auren_flags, ISSAMEDIR))
+		di_write_unlock(a->dst_parent);
+	else
+		di_write_unlock2(a->src_parent, a->dst_parent);
+out_unlock:
+	aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
+out_free:
+	iput(a->dst_inode);
+	if (a->thargs)
+		au_whtmp_rmdir_free(a->thargs);
+	au_kfree_rcu(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
new file mode 100644
index 00000000000..016db39451c
--- /dev/null
+++ b/fs/aufs/iinfo.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode private data
+ */
+
+#include "aufs.h"
+
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
+{
+	struct inode *h_inode;
+	struct au_hinode *hinode;
+
+	IiMustAnyLock(inode);
+
+	hinode = au_hinode(au_ii(inode), bindex);
+	h_inode = hinode->hi_inode;
+	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+	return h_inode;
+}
+
+/* todo: hard/soft set? */
+void au_hiput(struct au_hinode *hinode)
+{
+	au_hn_free(hinode);
+	dput(hinode->hi_whdentry);
+	iput(hinode->hi_inode);
+}
+
+unsigned int au_hi_flags(struct inode *inode, int isdir)
+{
+	unsigned int flags;
+	const unsigned int mnt_flags = au_mntflags(inode->i_sb);
+
+	flags = 0;
+	if (au_opt_test(mnt_flags, XINO))
+		au_fset_hi(flags, XINO);
+	if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
+		au_fset_hi(flags, HNOTIFY);
+	return flags;
+}
+
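+/*
+ * set or replace the lower inode for @bindex, updating the xino mapping and
+ * the hnotify watch as @flags require.
+ */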
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+		   struct inode *h_inode, unsigned int flags)
+{
+	struct au_hinode *hinode;
+	struct inode *hi;
+	struct au_iinfo *iinfo = au_ii(inode);
+
+	IiMustWriteLock(inode);
+
+	hinode = au_hinode(iinfo, bindex);
+	hi = hinode->hi_inode;
+	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+
+	if (hi)
+		au_hiput(hinode);
+	hinode->hi_inode = h_inode;
+	if (h_inode) {
+		int err;
+		struct super_block *sb = inode->i_sb;
+		struct au_branch *br;
+
+		AuDebugOn(inode->i_mode
+			  && (h_inode->i_mode & S_IFMT)
+			  != (inode->i_mode & S_IFMT));
+		if (bindex == iinfo->ii_btop)
+			au_cpup_igen(inode, h_inode);
+		br = au_sbr(sb, bindex);
+		hinode->hi_id = br->br_id;
+		if (au_ftest_hi(flags, XINO)) {
+			err = au_xino_write(sb, bindex, h_inode->i_ino,
+					    inode->i_ino);
+			if (unlikely(err))
+				AuIOErr1("failed au_xino_write() %d\n", err);
+		}
+
+		if (au_ftest_hi(flags, HNOTIFY)
+		    && au_br_hnotifyable(br->br_perm)) {
+			err = au_hn_alloc(hinode, inode);
+			if (unlikely(err))
+				AuIOErr1("au_hn_alloc() %d\n", err);
+		}
+	}
+}
+
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+		  struct dentry *h_wh)
+{
+	struct au_hinode *hinode;
+
+	IiMustWriteLock(inode);
+
+	hinode = au_hinode(au_ii(inode), bindex);
+	AuDebugOn(hinode->hi_whdentry);
+	hinode->hi_whdentry = h_wh;
+}
+
+void au_update_iigen(struct inode *inode, int half)
+{
+	struct au_iinfo *iinfo;
+	struct au_iigen *iigen;
+	unsigned int sigen;
+
+	sigen = au_sigen(inode->i_sb);
+	iinfo = au_ii(inode);
+	iigen = &iinfo->ii_generation;
+	spin_lock(&iigen->ig_spin);
+	iigen->ig_generation = sigen;
+	if (half)
+		au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
+	else
+		au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
+	spin_unlock(&iigen->ig_spin);
+}
+
+/* it may be called at remount time, too */
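+/*
+ * recompute ii_btop/ii_bbot from the lower inodes currently set; when
+ * do_put_zero is given, first drop the lower inodes whose link count has
+ * reached zero.
+ */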
+void au_update_ibrange(struct inode *inode, int do_put_zero)
+{
+	struct au_iinfo *iinfo;
+	aufs_bindex_t bindex, bbot;
+
+	AuDebugOn(au_is_bad_inode(inode));
+	IiMustWriteLock(inode);
+
+	iinfo = au_ii(inode);
+	if (do_put_zero && iinfo->ii_btop >= 0) {
+		for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot;
+		     bindex++) {
+			struct inode *h_i;
+
+			h_i = au_hinode(iinfo, bindex)->hi_inode;
+			if (h_i
+			    && !h_i->i_nlink
+			    && !(h_i->i_state & I_LINKABLE))
+				au_set_h_iptr(inode, bindex, NULL, 0);
+		}
+	}
+
+	iinfo->ii_btop = -1;
+	iinfo->ii_bbot = -1;
+	bbot = au_sbbot(inode->i_sb);
+	for (bindex = 0; bindex <= bbot; bindex++)
+		if (au_hinode(iinfo, bindex)->hi_inode) {
+			iinfo->ii_btop = bindex;
+			break;
+		}
+	if (iinfo->ii_btop >= 0)
+		for (bindex = bbot; bindex >= iinfo->ii_btop; bindex--)
+			if (au_hinode(iinfo, bindex)->hi_inode) {
+				iinfo->ii_bbot = bindex;
+				break;
+			}
+	AuDebugOn(iinfo->ii_btop > iinfo->ii_bbot);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_icntnr_init_once(void *_c)
+{
+	struct au_icntnr *c = _c;
+	struct au_iinfo *iinfo = &c->iinfo;
+
+	spin_lock_init(&iinfo->ii_generation.ig_spin);
+	au_rw_init(&iinfo->ii_rwsem);
+	inode_init_once(&c->vfs_inode);
+}
+
+void au_hinode_init(struct au_hinode *hinode)
+{
+	hinode->hi_inode = NULL;
+	hinode->hi_id = -1;
+	au_hn_init(hinode);
+	hinode->hi_whdentry = NULL;
+}
+
+int au_iinfo_init(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct super_block *sb;
+	struct au_hinode *hi;
+	int nbr, i;
+
+	sb = inode->i_sb;
+	iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+	nbr = au_sbbot(sb) + 1;
+	if (unlikely(nbr <= 0))
+		nbr = 1;
+	hi = kmalloc_array(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
+	if (hi) {
+		au_lcnt_inc(&au_sbi(sb)->si_ninodes);
+
+		iinfo->ii_hinode = hi;
+		for (i = 0; i < nbr; i++, hi++)
+			au_hinode_init(hi);
+
+		iinfo->ii_generation.ig_generation = au_sigen(sb);
+		iinfo->ii_btop = -1;
+		iinfo->ii_bbot = -1;
+		iinfo->ii_vdir = NULL;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink)
+{
+	int err, i;
+	struct au_hinode *hip;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	err = -ENOMEM;
+	hip = au_krealloc(iinfo->ii_hinode, sizeof(*hip) * nbr, GFP_NOFS,
+			  may_shrink);
+	if (hip) {
+		iinfo->ii_hinode = hip;
+		i = iinfo->ii_bbot + 1;
+		hip += i;
+		for (; i < nbr; i++, hip++)
+			au_hinode_init(hip);
+		err = 0;
+	}
+
+	return err;
+}
+
+void au_iinfo_fin(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct au_hinode *hi;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bbot;
+	const unsigned char unlinked = !inode->i_nlink;
+
+	AuDebugOn(au_is_bad_inode(inode));
+
+	sb = inode->i_sb;
+	au_lcnt_dec(&au_sbi(sb)->si_ninodes);
+	if (si_pid_test(sb))
+		au_xino_delete_inode(inode, unlinked);
+	else {
+		/*
+		 * it is safe to hide the dependency between sbinfo and
+		 * sb->s_umount.
+		 */
+		lockdep_off();
+		si_noflush_read_lock(sb);
+		au_xino_delete_inode(inode, unlinked);
+		si_read_unlock(sb);
+		lockdep_on();
+	}
+
+	iinfo = au_ii(inode);
+	if (iinfo->ii_vdir)
+		au_vdir_free(iinfo->ii_vdir);
+
+	bindex = iinfo->ii_btop;
+	if (bindex >= 0) {
+		hi = au_hinode(iinfo, bindex);
+		bbot = iinfo->ii_bbot;
+		while (bindex++ <= bbot) {
+			if (hi->hi_inode)
+				au_hiput(hi);
+			hi++;
+		}
+	}
+	au_kfree_rcu(iinfo->ii_hinode);
+	AuRwDestroy(&iinfo->ii_rwsem);
+}
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
new file mode 100644
index 00000000000..4de1b7507c5
--- /dev/null
+++ b/fs/aufs/inode.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode functions
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+struct inode *au_igrab(struct inode *inode)
+{
+	if (inode) {
+		AuDebugOn(!atomic_read(&inode->i_count));
+		ihold(inode);
+	}
+	return inode;
+}
+
+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
+{
+	au_cpup_attr_all(inode, /*force*/0);
+	au_update_iigen(inode, /*half*/1);
+	if (do_version)
+		inode_inc_iversion(inode);
+}
+
+static int au_ii_refresh(struct inode *inode, int *update)
+{
+	int err, e, nbr;
+	umode_t type;
+	aufs_bindex_t bindex, new_bindex;
+	struct super_block *sb;
+	struct au_iinfo *iinfo;
+	struct au_hinode *p, *q, tmp;
+
+	AuDebugOn(au_is_bad_inode(inode));
+	IiMustWriteLock(inode);
+
+	*update = 0;
+	sb = inode->i_sb;
+	nbr = au_sbbot(sb) + 1;
+	type = inode->i_mode & S_IFMT;
+	iinfo = au_ii(inode);
+	err = au_hinode_realloc(iinfo, nbr, /*may_shrink*/0);
+	if (unlikely(err))
+		goto out;
+
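+	/*
+	 * the branch configuration may have changed; move each lower inode to
+	 * the slot matching its current branch index, and drop the ones whose
+	 * branch has been removed.
+	 */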
+	AuDebugOn(iinfo->ii_btop < 0);
+	p = au_hinode(iinfo, iinfo->ii_btop);
+	for (bindex = iinfo->ii_btop; bindex <= iinfo->ii_bbot;
+	     bindex++, p++) {
+		if (!p->hi_inode)
+			continue;
+
+		AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
+		new_bindex = au_br_index(sb, p->hi_id);
+		if (new_bindex == bindex)
+			continue;
+
+		if (new_bindex < 0) {
+			*update = 1;
+			au_hiput(p);
+			p->hi_inode = NULL;
+			continue;
+		}
+
+		if (new_bindex < iinfo->ii_btop)
+			iinfo->ii_btop = new_bindex;
+		if (iinfo->ii_bbot < new_bindex)
+			iinfo->ii_bbot = new_bindex;
+		/* swap the two lower inodes, and loop again */
+		q = au_hinode(iinfo, new_bindex);
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hi_inode) {
+			bindex--;
+			p--;
+		}
+	}
+	au_update_ibrange(inode, /*do_put_zero*/0);
+	au_hinode_realloc(iinfo, nbr, /*may_shrink*/1); /* harmless if err */
+	e = au_dy_irefresh(inode);
+	if (unlikely(e && !err))
+		err = e;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+void au_refresh_iop(struct inode *inode, int force_getattr)
+{
+	int type;
+	struct au_sbinfo *sbi = au_sbi(inode->i_sb);
+	const struct inode_operations *iop
+		= force_getattr ? aufs_iop : sbi->si_iop_array;
+
+	if (inode->i_op == iop)
+		return;
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFDIR:
+		type = AuIop_DIR;
+		break;
+	case S_IFLNK:
+		type = AuIop_SYMLINK;
+		break;
+	default:
+		type = AuIop_OTHER;
+		break;
+	}
+
+	inode->i_op = iop + type;
+	/* unnecessary smp_wmb() */
+}
+
+int au_refresh_hinode_self(struct inode *inode)
+{
+	int err, update;
+
+	err = au_ii_refresh(inode, &update);
+	if (!err)
+		au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
+
+	AuTraceErr(err);
+	return err;
+}
+
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
+{
+	int err, e, update;
+	unsigned int flags;
+	umode_t mode;
+	aufs_bindex_t bindex, bbot;
+	unsigned char isdir;
+	struct au_hinode *p;
+	struct au_iinfo *iinfo;
+
+	err = au_ii_refresh(inode, &update);
+	if (unlikely(err))
+		goto out;
+
+	update = 0;
+	iinfo = au_ii(inode);
+	p = au_hinode(iinfo, iinfo->ii_btop);
+	mode = (inode->i_mode & S_IFMT);
+	isdir = S_ISDIR(mode);
+	flags = au_hi_flags(inode, isdir);
+	bbot = au_dbbot(dentry);
+	for (bindex = au_dbtop(dentry); bindex <= bbot; bindex++) {
+		struct inode *h_i, *h_inode;
+		struct dentry *h_d;
+
+		h_d = au_h_dptr(dentry, bindex);
+		if (!h_d || d_is_negative(h_d))
+			continue;
+
+		h_inode = d_inode(h_d);
+		AuDebugOn(mode != (h_inode->i_mode & S_IFMT));
+		if (iinfo->ii_btop <= bindex && bindex <= iinfo->ii_bbot) {
+			h_i = au_h_iptr(inode, bindex);
+			if (h_i) {
+				if (h_i == h_inode)
+					continue;
+				err = -EIO;
+				break;
+			}
+		}
+		if (bindex < iinfo->ii_btop)
+			iinfo->ii_btop = bindex;
+		if (iinfo->ii_bbot < bindex)
+			iinfo->ii_bbot = bindex;
+		au_set_h_iptr(inode, bindex, au_igrab(h_inode), flags);
+		update = 1;
+	}
+	au_update_ibrange(inode, /*do_put_zero*/0);
+	e = au_dy_irefresh(inode);
+	if (unlikely(e && !err))
+		err = e;
+	if (!err)
+		au_refresh_hinode_attr(inode, update && isdir);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int set_inode(struct inode *inode, struct dentry *dentry)
+{
+	int err;
+	unsigned int flags;
+	umode_t mode;
+	aufs_bindex_t bindex, btop, btail;
+	unsigned char isdir;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct au_iinfo *iinfo;
+	const struct inode_operations *iop;
+
+	IiMustWriteLock(inode);
+
+	err = 0;
+	isdir = 0;
+	iop = au_sbi(inode->i_sb)->si_iop_array;
+	btop = au_dbtop(dentry);
+	h_dentry = au_h_dptr(dentry, btop);
+	h_inode = d_inode(h_dentry);
+	mode = h_inode->i_mode;
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		btail = au_dbtail(dentry);
+		inode->i_op = iop + AuIop_OTHER;
+		inode->i_fop = &aufs_file_fop;
+		err = au_dy_iaop(inode, btop, h_inode);
+		if (unlikely(err))
+			goto out;
+		break;
+	case S_IFDIR:
+		isdir = 1;
+		btail = au_dbtaildir(dentry);
+		inode->i_op = iop + AuIop_DIR;
+		inode->i_fop = &aufs_dir_fop;
+		break;
+	case S_IFLNK:
+		btail = au_dbtail(dentry);
+		inode->i_op = iop + AuIop_SYMLINK;
+		break;
+	case S_IFBLK:
+	case S_IFCHR:
+	case S_IFIFO:
+	case S_IFSOCK:
+		btail = au_dbtail(dentry);
+		inode->i_op = iop + AuIop_OTHER;
+		init_special_inode(inode, mode, h_inode->i_rdev);
+		break;
+	default:
+		AuIOErr("Unknown file type 0%o\n", mode);
+		err = -EIO;
+		goto out;
+	}
+
+	/* do not set hnotify for whiteouted dirs (SHWH mode) */
+	flags = au_hi_flags(inode, isdir);
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
+	    && au_ftest_hi(flags, HNOTIFY)
+	    && dentry->d_name.len > AUFS_WH_PFX_LEN
+	    && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
+		au_fclr_hi(flags, HNOTIFY);
+	iinfo = au_ii(inode);
+	iinfo->ii_btop = btop;
+	iinfo->ii_bbot = btail;
+	for (bindex = btop; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry)
+			au_set_h_iptr(inode, bindex,
+				      au_igrab(d_inode(h_dentry)), flags);
+	}
+	au_cpup_attr_all(inode, /*force*/1);
+	/*
+	 * to force aufs_get_acl() to be called every time,
+	 * do not call cache_no_acl() for aufs inodes.
+	 */
+
+out:
+	return err;
+}
+
+/*
+ * returns with iinfo write-locked only when zero is returned
+ * negative: errno
+ * zero: success, matched
+ * positive: no error, but unmatched
+ */
+static int reval_inode(struct inode *inode, struct dentry *dentry)
+{
+	int err;
+	unsigned int gen, igflags;
+	aufs_bindex_t bindex, bbot;
+	struct inode *h_inode, *h_dinode;
+	struct dentry *h_dentry;
+
+	/*
+	 * before this function is called, if aufs holds any iinfo lock, it
+	 * must be the parent dir's lock only.
+	 * such a case can happen with UDBA and an obsoleted inode number.
+	 */
+	err = -EIO;
+	if (unlikely(inode->i_ino == parent_ino(dentry)))
+		goto out;
+
+	err = 1;
+	ii_write_lock_new_child(inode);
+	h_dentry = au_h_dptr(dentry, au_dbtop(dentry));
+	h_dinode = d_inode(h_dentry);
+	bbot = au_ibbot(inode);
+	for (bindex = au_ibtop(inode); bindex <= bbot; bindex++) {
+		h_inode = au_h_iptr(inode, bindex);
+		if (!h_inode || h_inode != h_dinode)
+			continue;
+
+		err = 0;
+		gen = au_iigen(inode, &igflags);
+		if (gen == au_digen(dentry)
+		    && !au_ig_ftest(igflags, HALF_REFRESHED))
+			break;
+
+		/* fully refresh inode using dentry */
+		err = au_refresh_hinode(inode, dentry);
+		if (!err)
+			au_update_iigen(inode, /*half*/0);
+		break;
+	}
+
+	if (unlikely(err))
+		ii_write_unlock(inode);
+out:
+	return err;
+}
+
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+	   unsigned int d_type, ino_t *ino)
+{
+	int err, idx;
+	const int isnondir = d_type != DT_DIR;
+
+	/* protect the inode number of hardlinks from a race condition */
+	if (isnondir) {
+		err = au_xinondir_enter(sb, bindex, h_ino, &idx);
+		if (unlikely(err))
+			goto out;
+	}
+
+	err = au_xino_read(sb, bindex, h_ino, ino);
+	if (unlikely(err))
+		goto out_xinondir;
+
+	if (!*ino) {
+		err = -EIO;
+		*ino = au_xino_new_ino(sb);
+		if (unlikely(!*ino))
+			goto out_xinondir;
+		err = au_xino_write(sb, bindex, h_ino, *ino);
+		if (unlikely(err))
+			goto out_xinondir;
+	}
+
+out_xinondir:
+	if (isnondir && idx >= 0)
+		au_xinondir_leave(sb, bindex, h_ino, idx);
+out:
+	return err;
+}
+
+/* on success, returns with iinfo write-locked */
+/* todo: return with unlocked? */
+struct inode *au_new_inode(struct dentry *dentry, int must_new)
+{
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry;
+	struct super_block *sb;
+	ino_t h_ino, ino;
+	int err, idx, hlinked;
+	aufs_bindex_t btop;
+
+	sb = dentry->d_sb;
+	btop = au_dbtop(dentry);
+	h_dentry = au_h_dptr(dentry, btop);
+	h_inode = d_inode(h_dentry);
+	h_ino = h_inode->i_ino;
+	hlinked = !d_is_dir(h_dentry) && h_inode->i_nlink > 1;
+
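+	/*
+	 * if the aufs inode found via xino turns out to be stale (it no
+	 * longer refers to this lower inode), the mapping is cleared near the
+	 * bottom of this function and we retry from here with a fresh number.
+	 */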
+new_ino:
+	/*
+	 * stop the race between hardlinks under different parents.
+	 */
+	if (hlinked) {
+		err = au_xinondir_enter(sb, btop, h_ino, &idx);
+		inode = ERR_PTR(err);
+		if (unlikely(err))
+			goto out;
+	}
+
+	err = au_xino_read(sb, btop, h_ino, &ino);
+	inode = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_xinondir;
+
+	if (!ino) {
+		ino = au_xino_new_ino(sb);
+		if (unlikely(!ino)) {
+			inode = ERR_PTR(-EIO);
+			goto out_xinondir;
+		}
+	}
+
+	AuDbg("i%lu\n", (unsigned long)ino);
+	inode = au_iget_locked(sb, ino);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_xinondir;
+
+	AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
+	if (inode->i_state & I_NEW) {
+		ii_write_lock_new_child(inode);
+		err = set_inode(inode, dentry);
+		if (!err) {
+			unlock_new_inode(inode);
+			goto out_xinondir; /* success */
+		}
+
+		/*
+		 * iget_failed() calls iput(), but we still need to call
+		 * ii_write_unlock() after it.  as a dirty hack, bump i_count
+		 * so that the inode survives until then; the extra reference
+		 * is dropped by the final iput() at out_iput.
+		 */
+		atomic_inc(&inode->i_count);
+		iget_failed(inode);
+		ii_write_unlock(inode);
+		au_xino_write(sb, btop, h_ino, /*ino*/0);
+		/* ignore this error */
+		goto out_iput;
+	} else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
+		/*
+		 * horrible race condition between lookup, readdir and copyup
+		 * (or something).
+		 */
+		if (hlinked && idx >= 0)
+			au_xinondir_leave(sb, btop, h_ino, idx);
+		err = reval_inode(inode, dentry);
+		if (unlikely(err < 0)) {
+			hlinked = 0;
+			goto out_iput;
+		}
+		if (!err)
+			goto out; /* success */
+		else if (hlinked && idx >= 0) {
+			err = au_xinondir_enter(sb, btop, h_ino, &idx);
+			if (unlikely(err)) {
+				iput(inode);
+				inode = ERR_PTR(err);
+				goto out;
+			}
+		}
+	}
+
+	if (unlikely(au_test_fs_unique_ino(h_inode)))
+		AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
+			" b%d, %s, %pd, hi%lu, i%lu.\n",
+			btop, au_sbtype(h_dentry->d_sb), dentry,
+			(unsigned long)h_ino, (unsigned long)ino);
+	ino = 0;
+	err = au_xino_write(sb, btop, h_ino, /*ino*/0);
+	if (!err) {
+		iput(inode);
+		if (hlinked && idx >= 0)
+			au_xinondir_leave(sb, btop, h_ino, idx);
+		goto new_ino;
+	}
+
+out_iput:
+	iput(inode);
+	inode = ERR_PTR(err);
+out_xinondir:
+	if (hlinked && idx >= 0)
+		au_xinondir_leave(sb, btop, h_ino, idx);
+out:
+	return inode;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+	       struct inode *inode)
+{
+	int err;
+	struct inode *hi;
+
+	err = au_br_rdonly(au_sbr(sb, bindex));
+
+	/* a pseudo-linked inode may be out of the branch range after a flush */
+	if (!err
+	    && inode
+	    && au_ibtop(inode) <= bindex
+	    && bindex <= au_ibbot(inode)) {
+		/*
+		 * permission check is unnecessary since vfsub routine
+		 * will be called later
+		 */
+		hi = au_h_iptr(inode, bindex);
+		if (hi)
+			err = IS_IMMUTABLE(hi) ? -EROFS : 0;
+	}
+
+	return err;
+}
+
+int au_test_h_perm(struct inode *h_inode, int mask)
+{
+	if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
+		return 0;
+	return inode_permission(h_inode, mask);
+}
+
+int au_test_h_perm_sio(struct inode *h_inode, int mask)
+{
+	if (au_test_nfs(h_inode->i_sb)
+	    && (mask & MAY_WRITE)
+	    && S_ISDIR(h_inode->i_mode))
+		mask |= MAY_READ; /* force permission check */
+	return au_test_h_perm(h_inode, mask);
+}
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
new file mode 100644
index 00000000000..af32a233200
--- /dev/null
+++ b/fs/aufs/inode.h
@@ -0,0 +1,698 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * inode operations
+ */
+
+#ifndef __AUFS_INODE_H__
+#define __AUFS_INODE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fsnotify.h>
+#include "rwsem.h"
+
+struct vfsmount;
+
+struct au_hnotify {
+#ifdef CONFIG_AUFS_HNOTIFY
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	/* never use fsnotify_add_vfsmount_mark() */
+	struct fsnotify_mark		hn_mark;
+#endif
+	struct inode		*hn_aufs_inode;	/* no get/put */
+	struct rcu_head		rcu;
+#endif
+} ____cacheline_aligned_in_smp;
+
+struct au_hinode {
+	struct inode		*hi_inode;
+	aufs_bindex_t		hi_id;
+#ifdef CONFIG_AUFS_HNOTIFY
+	struct au_hnotify	*hi_notify;
+#endif
+
+	/* reference to the copied-up whiteout with get/put */
+	struct dentry		*hi_whdentry;
+};
+
+/* ig_flags */
+#define AuIG_HALF_REFRESHED		1
+#define au_ig_ftest(flags, name)	((flags) & AuIG_##name)
+#define au_ig_fset(flags, name) \
+	do { (flags) |= AuIG_##name; } while (0)
+#define au_ig_fclr(flags, name) \
+	do { (flags) &= ~AuIG_##name; } while (0)
+
+struct au_iigen {
+	spinlock_t	ig_spin;
+	__u32		ig_generation, ig_flags;
+};
+
+struct au_vdir;
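+/*
+ * aufs inode private data; ii_btop/ii_bbot are the indices of the
+ * topmost/bottommost branches on which this inode has a lower inode.
+ */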
+struct au_iinfo {
+	struct au_iigen		ii_generation;
+	struct super_block	*ii_hsb1;	/* no get/put */
+
+	struct au_rwsem		ii_rwsem;
+	aufs_bindex_t		ii_btop, ii_bbot;
+	__u32			ii_higen;
+	struct au_hinode	*ii_hinode;
+	struct au_vdir		*ii_vdir;
+};
+
+struct au_icntnr {
+	struct au_iinfo		iinfo;
+	struct inode		vfs_inode;
+	struct hlist_bl_node	plink;
+	struct rcu_head		rcu;
+} ____cacheline_aligned_in_smp;
+
+/* au_pin flags */
+#define AuPin_DI_LOCKED		1
+#define AuPin_MNT_WRITE		(1 << 1)
+#define au_ftest_pin(flags, name)	((flags) & AuPin_##name)
+#define au_fset_pin(flags, name) \
+	do { (flags) |= AuPin_##name; } while (0)
+#define au_fclr_pin(flags, name) \
+	do { (flags) &= ~AuPin_##name; } while (0)
+
+struct au_pin {
+	/* input */
+	struct dentry *dentry;
+	unsigned int udba;
+	unsigned char lsc_di, lsc_hi, flags;
+	aufs_bindex_t bindex;
+
+	/* output */
+	struct dentry *parent;
+	struct au_hinode *hdir;
+	struct vfsmount *h_mnt;
+
+	/* temporary unlock/relock for copyup */
+	struct dentry *h_dentry, *h_parent;
+	struct au_branch *br;
+	struct task_struct *task;
+};
+
+void au_pin_hdir_unlock(struct au_pin *p);
+int au_pin_hdir_lock(struct au_pin *p);
+int au_pin_hdir_relock(struct au_pin *p);
+void au_pin_hdir_acquire_nest(struct au_pin *p);
+void au_pin_hdir_release(struct au_pin *p);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_iinfo *au_ii(struct inode *inode)
+{
+	BUG_ON(is_bad_inode(inode));
+	return &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode.c */
+struct inode *au_igrab(struct inode *inode);
+void au_refresh_iop(struct inode *inode, int force_getattr);
+int au_refresh_hinode_self(struct inode *inode);
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+	   unsigned int d_type, ino_t *ino);
+struct inode *au_new_inode(struct dentry *dentry, int must_new);
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+	       struct inode *inode);
+int au_test_h_perm(struct inode *h_inode, int mask);
+int au_test_h_perm_sio(struct inode *h_inode, int mask);
+
+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
+			    ino_t h_ino, unsigned int d_type, ino_t *ino)
+{
+#ifdef CONFIG_AUFS_SHWH
+	return au_ino(sb, bindex, h_ino, d_type, ino);
+#else
+	return 0;
+#endif
+}
+
+/* i_op.c */
+enum {
+	AuIop_SYMLINK,
+	AuIop_DIR,
+	AuIop_OTHER,
+	AuIop_Last
+};
+extern struct inode_operations aufs_iop[AuIop_Last], /* not const */
+	aufs_iop_nogetattr[AuIop_Last];
+
+/* au_wr_dir flags */
+#define AuWrDir_ADD_ENTRY	1
+#define AuWrDir_ISDIR		(1 << 1)
+#define AuWrDir_TMPFILE		(1 << 2)
+#define au_ftest_wrdir(flags, name)	((flags) & AuWrDir_##name)
+#define au_fset_wrdir(flags, name) \
+	do { (flags) |= AuWrDir_##name; } while (0)
+#define au_fclr_wrdir(flags, name) \
+	do { (flags) &= ~AuWrDir_##name; } while (0)
+
+struct au_wr_dir_args {
+	aufs_bindex_t force_btgt;
+	unsigned char flags;
+};
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+	      struct au_wr_dir_args *args);
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin);
+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
+		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+		 unsigned int udba, unsigned char flags);
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+	   unsigned int udba, unsigned char flags) __must_check;
+int au_do_pin(struct au_pin *pin) __must_check;
+void au_unpin(struct au_pin *pin);
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen);
+
+#define AuIcpup_DID_CPUP	1
+#define au_ftest_icpup(flags, name)	((flags) & AuIcpup_##name)
+#define au_fset_icpup(flags, name) \
+	do { (flags) |= AuIcpup_##name; } while (0)
+#define au_fclr_icpup(flags, name) \
+	do { (flags) &= ~AuIcpup_##name; } while (0)
+
+struct au_icpup_args {
+	unsigned char flags;
+	unsigned char pin_flags;
+	aufs_bindex_t btgt;
+	unsigned int udba;
+	struct au_pin pin;
+	struct path h_path;
+	struct inode *h_inode;
+};
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+		     struct au_icpup_args *a);
+
+int au_h_path_getattr(struct dentry *dentry, struct inode *inode, int force,
+		      struct path *h_path, int locked);
+
+/* i_op_add.c */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir);
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+	       dev_t dev);
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool want_excl);
+struct vfsub_aopen_args;
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+		       struct vfsub_aopen_args *args);
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode);
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+	      struct dentry *dentry);
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+/* i_op_del.c */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir);
+int aufs_unlink(struct inode *dir, struct dentry *dentry);
+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
+
+/* i_op_ren.c */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
+		struct inode *dir, struct dentry *dentry,
+		unsigned int flags);
+
+/* iinfo.c */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
+void au_hiput(struct au_hinode *hinode);
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+		  struct dentry *h_wh);
+unsigned int au_hi_flags(struct inode *inode, int isdir);
+
+/* hinode flags */
+#define AuHi_XINO	1
+#define AuHi_HNOTIFY	(1 << 1)
+#define au_ftest_hi(flags, name)	((flags) & AuHi_##name)
+#define au_fset_hi(flags, name) \
+	do { (flags) |= AuHi_##name; } while (0)
+#define au_fclr_hi(flags, name) \
+	do { (flags) &= ~AuHi_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuHi_HNOTIFY
+#define AuHi_HNOTIFY	0
+#endif
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+		   struct inode *h_inode, unsigned int flags);
+
+void au_update_iigen(struct inode *inode, int half);
+void au_update_ibrange(struct inode *inode, int do_put_zero);
+
+void au_icntnr_init_once(void *_c);
+void au_hinode_init(struct au_hinode *hinode);
+int au_iinfo_init(struct inode *inode);
+void au_iinfo_fin(struct inode *inode);
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink);
+
+#ifdef CONFIG_PROC_FS
+/* plink.c */
+int au_plink_maint(struct super_block *sb, int flags);
+struct au_sbinfo;
+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
+int au_plink_maint_enter(struct super_block *sb);
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb);
+#else
+AuStubVoid(au_plink_list, struct super_block *sb)
+#endif
+int au_plink_test(struct inode *inode);
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+		     struct dentry *h_dentry);
+void au_plink_put(struct super_block *sb, int verbose);
+void au_plink_clean(struct super_block *sb, int verbose);
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
+#else
+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
+AuStubVoid(au_plink_list, struct super_block *sb);
+AuStubInt0(au_plink_test, struct inode *inode);
+AuStub(struct dentry *, au_plink_lkup, return NULL,
+       struct inode *inode, aufs_bindex_t bindex);
+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
+	   struct dentry *h_dentry);
+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_AUFS_XATTR
+/* xattr.c */
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+		  unsigned int verbose);
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size);
+void au_xattr_init(struct super_block *sb);
+#else
+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src,
+	   int ignore_flags, unsigned int verbose);
+AuStubVoid(au_xattr_init, struct super_block *sb);
+#endif
+
+#ifdef CONFIG_FS_POSIX_ACL
+struct posix_acl *aufs_get_acl(struct inode *inode, int type);
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+#endif
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+enum {
+	AU_XATTR_SET,
+	AU_ACL_SET
+};
+
+struct au_sxattr {
+	int type;
+	union {
+		struct {
+			const char	*name;
+			const void	*value;
+			size_t		size;
+			int		flags;
+		} set;
+		struct {
+			struct posix_acl *acl;
+			int		type;
+		} acl_set;
+	} u;
+};
+ssize_t au_sxattr(struct dentry *dentry, struct inode *inode,
+		  struct au_sxattr *arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for iinfo */
+enum {
+	AuLsc_II_CHILD,		/* child first */
+	AuLsc_II_CHILD2,	/* rename(2), link(2), and cpup at hnotify */
+	AuLsc_II_CHILD3,	/* copyup dirs */
+	AuLsc_II_PARENT,	/* see AuLsc_I_PARENT in vfsub.h */
+	AuLsc_II_PARENT2,
+	AuLsc_II_PARENT3,	/* copyup dirs */
+	AuLsc_II_NEW_CHILD
+};
+
+/*
+ * ii_read_lock_child, ii_write_lock_child,
+ * ii_read_lock_child2, ii_write_lock_child2,
+ * ii_read_lock_child3, ii_write_lock_child3,
+ * ii_read_lock_parent, ii_write_lock_parent,
+ * ii_read_lock_parent2, ii_write_lock_parent2,
+ * ii_read_lock_parent3, ii_write_lock_parent3,
+ * ii_read_lock_new_child, ii_write_lock_new_child,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void ii_read_lock_##name(struct inode *i) \
+{ \
+	au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void ii_write_lock_##name(struct inode *i) \
+{ \
+	au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuRWLockFuncs(name, lsc) \
+	AuReadLockFunc(name, lsc) \
+	AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+AuRWLockFuncs(new_child, NEW_CHILD);
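+/*
+ * for instance, AuRWLockFuncs(child, CHILD) above generates
+ *	static inline void ii_read_lock_child(struct inode *i)
+ *	{ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_CHILD); }
+ * and the corresponding ii_write_lock_child().
+ */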
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define ii_read_unlock(i)	au_rw_read_unlock(&au_ii(i)->ii_rwsem)
+#define ii_write_unlock(i)	au_rw_write_unlock(&au_ii(i)->ii_rwsem)
+#define ii_downgrade_lock(i)	au_rw_dgrade_lock(&au_ii(i)->ii_rwsem)
+
+#define IiMustNoWaiters(i)	AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
+#define IiMustAnyLock(i)	AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
+#define IiMustWriteLock(i)	AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+static inline void au_icntnr_init(struct au_icntnr *c)
+{
+#ifdef CONFIG_AUFS_DEBUG
+	c->vfs_inode.i_mode = 0;
+#endif
+}
+
+static inline unsigned int au_iigen(struct inode *inode, unsigned int *igflags)
+{
+	unsigned int gen;
+	struct au_iinfo *iinfo;
+	struct au_iigen *iigen;
+
+	iinfo = au_ii(inode);
+	iigen = &iinfo->ii_generation;
+	spin_lock(&iigen->ig_spin);
+	if (igflags)
+		*igflags = iigen->ig_flags;
+	gen = iigen->ig_generation;
+	spin_unlock(&iigen->ig_spin);
+
+	return gen;
+}
+
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
+{
+	struct au_iinfo *iinfo;
+
+	iinfo = au_ii(inode);
+	AuRwMustAnyLock(&iinfo->ii_rwsem);
+	return !(iinfo->ii_hsb1 == h_inode->i_sb
+		 && iinfo->ii_higen == h_inode->i_generation);
+}
+
+static inline void au_iigen_dec(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct au_iigen *iigen;
+
+	iinfo = au_ii(inode);
+	iigen = &iinfo->ii_generation;
+	spin_lock(&iigen->ig_spin);
+	iigen->ig_generation--;
+	spin_unlock(&iigen->ig_spin);
+}
+
+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(inode && au_iigen(inode, NULL) != sigen))
+		err = -EIO;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_hinode *au_hinode(struct au_iinfo *iinfo,
+					  aufs_bindex_t bindex)
+{
+	return iinfo->ii_hinode + bindex;
+}
+
+static inline int au_is_bad_inode(struct inode *inode)
+{
+	return !!(is_bad_inode(inode) || !au_hinode(au_ii(inode), 0));
+}
+
+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
+					aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_hinode(au_ii(inode), bindex)->hi_id;
+}
+
+static inline aufs_bindex_t au_ibtop(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_btop;
+}
+
+static inline aufs_bindex_t au_ibbot(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_bbot;
+}
+
+static inline struct au_vdir *au_ivdir(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_vdir;
+}
+
+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_hinode(au_ii(inode), bindex)->hi_whdentry;
+}
+
+static inline void au_set_ibtop(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_btop = bindex;
+}
+
+static inline void au_set_ibbot(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_bbot = bindex;
+}
+
+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_vdir = vdir;
+}
+
+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_hinode(au_ii(inode), bindex);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
+{
+	if (pin)
+		return pin->parent;
+	return NULL;
+}
+
+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
+{
+	if (pin && pin->hdir)
+		return pin->hdir->hi_inode;
+	return NULL;
+}
+
+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
+{
+	if (pin)
+		return pin->hdir;
+	return NULL;
+}
+
+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
+{
+	if (pin)
+		pin->dentry = dentry;
+}
+
+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
+					   unsigned char lflag)
+{
+	if (pin) {
+		if (lflag)
+			au_fset_pin(pin->flags, DI_LOCKED);
+		else
+			au_fclr_pin(pin->flags, DI_LOCKED);
+	}
+}
+
+#if 0 /* reserved */
+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
+{
+	if (pin) {
+		dput(pin->parent);
+		pin->parent = dget(parent);
+	}
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+#ifdef CONFIG_AUFS_HNOTIFY
+struct au_hnotify_op {
+	void (*ctl)(struct au_hinode *hinode, int do_set);
+	int (*alloc)(struct au_hinode *hinode);
+
+	/*
+	 * if it returns true, the caller should free hinode->hi_notify,
+	 * otherwise ->free() frees it.
+	 */
+	int (*free)(struct au_hinode *hinode,
+		    struct au_hnotify *hn) __must_check;
+
+	void (*fin)(void);
+	int (*init)(void);
+
+	int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
+	void (*fin_br)(struct au_branch *br);
+	int (*init_br)(struct au_branch *br, int perm);
+};
+
+/* hnotify.c */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
+void au_hn_free(struct au_hinode *hinode);
+void au_hn_ctl(struct au_hinode *hinode, int do_set);
+void au_hn_reset(struct inode *inode, unsigned int flags);
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+	       const struct qstr *h_child_qstr, struct inode *h_child_inode);
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
+int au_hnotify_init_br(struct au_branch *br, int perm);
+void au_hnotify_fin_br(struct au_branch *br);
+int __init au_hnotify_init(void);
+void au_hnotify_fin(void);
+
+/* hfsnotify.c */
+extern const struct au_hnotify_op au_hnotify_op;
+
+static inline
+void au_hn_init(struct au_hinode *hinode)
+{
+	hinode->hi_notify = NULL;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+	return hinode->hi_notify;
+}
+
+#else
+AuStub(int, au_hn_alloc, return -EOPNOTSUPP,
+       struct au_hinode *hinode __maybe_unused,
+       struct inode *inode __maybe_unused)
+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode)
+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
+	   int do_set __maybe_unused)
+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
+	   unsigned int flags __maybe_unused)
+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
+	   struct au_branch *br __maybe_unused,
+	   int perm __maybe_unused)
+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
+	   int perm __maybe_unused)
+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
+AuStubInt0(__init au_hnotify_init, void)
+AuStubVoid(au_hnotify_fin, void)
+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+static inline void au_hn_suspend(struct au_hinode *hdir)
+{
+	au_hn_ctl(hdir, /*do_set*/0);
+}
+
+static inline void au_hn_resume(struct au_hinode *hdir)
+{
+	au_hn_ctl(hdir, /*do_set*/1);
+}
+
+static inline void au_hn_inode_lock(struct au_hinode *hdir)
+{
+	inode_lock(hdir->hi_inode);
+	au_hn_suspend(hdir);
+}
+
+static inline void au_hn_inode_lock_nested(struct au_hinode *hdir,
+					  unsigned int sc __maybe_unused)
+{
+	inode_lock_nested(hdir->hi_inode, sc);
+	au_hn_suspend(hdir);
+}
+
+#if 0 /* unused */
+#include "vfsub.h"
+static inline void au_hn_inode_lock_shared_nested(struct au_hinode *hdir,
+						  unsigned int sc)
+{
+	inode_lock_shared_nested(hdir->hi_inode, sc);
+	au_hn_suspend(hdir);
+}
+#endif
+
+static inline void au_hn_inode_unlock(struct au_hinode *hdir)
+{
+	au_hn_resume(hdir);
+	inode_unlock(hdir->hi_inode);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_INODE_H__ */
diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c
new file mode 100644
index 00000000000..ae4f73386b9
--- /dev/null
+++ b/fs/aufs/ioctl.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * ioctl
+ * plink-management and readdir in userspace.
+ * assist the pathconf(3) wrapper library.
+ * move-down
+ * File-based Hierarchical Storage Management.
+ */
+
+#include <linux/compat.h>
+#include <linux/file.h>
+#include "aufs.h"
+
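+/*
+ * find a writable branch (the one specified by arg, or the first writable
+ * one when no argument is given), open its root dir and return the new
+ * file descriptor.
+ */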
+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
+{
+	int err, fd;
+	aufs_bindex_t wbi, bindex, bbot;
+	struct file *h_file;
+	struct super_block *sb;
+	struct dentry *root;
+	struct au_branch *br;
+	struct aufs_wbr_fd wbrfd = {
+		.oflags	= au_dir_roflags,
+		.brid	= -1
+	};
+	const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
+		| O_NOATIME | O_CLOEXEC;
+
+	AuDebugOn(wbrfd.oflags & ~valid);
+
+	if (arg) {
+		err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
+		if (unlikely(err)) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		err = -EINVAL;
+		AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
+		wbrfd.oflags |= au_dir_roflags;
+		AuDbg("0%o\n", wbrfd.oflags);
+		if (unlikely(wbrfd.oflags & ~valid))
+			goto out;
+	}
+
+	fd = get_unused_fd_flags(0);
+	err = fd;
+	if (unlikely(fd < 0))
+		goto out;
+
+	h_file = ERR_PTR(-EINVAL);
+	wbi = 0;
+	br = NULL;
+	sb = path->dentry->d_sb;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_IR);
+	bbot = au_sbbot(sb);
+	if (wbrfd.brid >= 0) {
+		wbi = au_br_index(sb, wbrfd.brid);
+		if (unlikely(wbi < 0 || wbi > bbot))
+			goto out_unlock;
+	}
+
+	h_file = ERR_PTR(-ENOENT);
+	br = au_sbr(sb, wbi);
+	if (!au_br_writable(br->br_perm)) {
+		if (arg)
+			goto out_unlock;
+
+		bindex = wbi + 1;
+		wbi = -1;
+		for (; bindex <= bbot; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (au_br_writable(br->br_perm)) {
+				wbi = bindex;
+				br = au_sbr(sb, wbi);
+				break;
+			}
+		}
+	}
+	AuDbg("wbi %d\n", wbi);
+	if (wbi >= 0)
+		h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
+				   /*force_wr*/0);
+
+out_unlock:
+	aufs_read_unlock(root, AuLock_IR);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out_fd;
+
+	au_lcnt_dec(&br->br_nfiles); /* cf. au_h_open() */
+	fd_install(fd, h_file);
+	err = fd;
+	goto out; /* success */
+
+out_fd:
+	put_unused_fd(fd);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err;
+	struct dentry *dentry;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ioctl(file, cmd, arg);
+		break;
+
+	case AUFS_CTL_WBR_FD:
+		err = au_wbr_fd(&file->f_path, (void __user *)arg);
+		break;
+
+	case AUFS_CTL_IBUSY:
+		err = au_ibusy_ioctl(file, arg);
+		break;
+
+	case AUFS_CTL_BRINFO:
+		err = au_brinfo_ioctl(file, arg);
+		break;
+
+	case AUFS_CTL_FHSM_FD:
+		dentry = file->f_path.dentry;
+		if (IS_ROOT(dentry))
+			err = au_fhsm_fd(dentry->d_sb, arg);
+		else
+			err = -ENOTTY;
+		break;
+
+	default:
+		/* do not call the lower */
+		AuDbg("0x%x\n", cmd);
+		err = -ENOTTY;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err;
+
+	switch (cmd) {
+	case AUFS_CTL_MVDOWN:
+		err = au_mvdown(file->f_path.dentry, (void __user *)arg);
+		break;
+
+	case AUFS_CTL_WBR_FD:
+		err = au_wbr_fd(&file->f_path, (void __user *)arg);
+		break;
+
+	default:
+		/* do not call the lower */
+		AuDbg("0x%x\n", cmd);
+		err = -ENOTTY;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	long err;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_compat_ioctl(file, cmd, arg);
+		break;
+
+	case AUFS_CTL_IBUSY:
+		err = au_ibusy_compat_ioctl(file, arg);
+		break;
+
+	case AUFS_CTL_BRINFO:
+		err = au_brinfo_compat_ioctl(file, arg);
+		break;
+
+	default:
+		err = aufs_ioctl_dir(file, cmd, arg);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
diff --git a/fs/aufs/lcnt.h b/fs/aufs/lcnt.h
new file mode 100644
index 00000000000..8afcabe5582
--- /dev/null
+++ b/fs/aufs/lcnt.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * simple long counter wrapper
+ */
+
+#ifndef __AUFS_LCNT_H__
+#define __AUFS_LCNT_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+#define AuLCntATOMIC	1
+#define AuLCntPCPUCNT	2
+/*
+ * why does percpu_refcount require extra synchronize_rcu()s in
+ * au_br_do_free()?
+ */
+#define AuLCntPCPUREF	3
+
+/* #define AuLCntChosen	AuLCntATOMIC */
+#define AuLCntChosen	AuLCntPCPUCNT
+/* #define AuLCntChosen	AuLCntPCPUREF */
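+/*
+ * whichever backend is chosen above, callers see the same tiny API, e.g.
+ *	au_lcnt_t cnt;
+ *	au_lcnt_init(&cnt, NULL);
+ *	au_lcnt_inc(&cnt);
+ *	n = au_lcnt_read(&cnt, 1);
+ *	au_lcnt_dec(&cnt);
+ *	au_lcnt_fin(&cnt, 0);
+ */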
+
+#if AuLCntChosen == AuLCntATOMIC
+#include <linux/atomic.h>
+
+typedef atomic_long_t au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, void *release __maybe_unused)
+{
+	atomic_long_set(cnt, 0);
+	return 0;
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+	/* empty */
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt __maybe_unused,
+			       int do_sync __maybe_unused)
+{
+	/* empty */
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+	atomic_long_inc(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+	atomic_long_dec(cnt);
+}
+
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev __maybe_unused)
+{
+	return atomic_long_read(cnt);
+}
+#endif
+
+#if AuLCntChosen == AuLCntPCPUCNT
+#include <linux/percpu_counter.h>
+
+typedef struct percpu_counter au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, void *release __maybe_unused)
+{
+	return percpu_counter_init(cnt, 0, GFP_NOFS);
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+	/* empty */
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt, int do_sync __maybe_unused)
+{
+	percpu_counter_destroy(cnt);
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+	percpu_counter_inc(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+	percpu_counter_dec(cnt);
+}
+
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev __maybe_unused)
+{
+	s64 n;
+
+	n = percpu_counter_sum(cnt);
+	BUG_ON(n < 0);
+	if (LONG_MAX != LLONG_MAX
+	    && n > LONG_MAX)
+		AuWarn1("%s\n", "wrap-around");
+
+	return n;
+}
+#endif
+
+#if AuLCntChosen == AuLCntPCPUREF
+#include <linux/percpu-refcount.h>
+
+typedef struct percpu_ref au_lcnt_t;
+
+static inline int au_lcnt_init(au_lcnt_t *cnt, percpu_ref_func_t *release)
+{
+	if (!release)
+		release = percpu_ref_exit;
+	return percpu_ref_init(cnt, release, /*percpu mode*/0, GFP_NOFS);
+}
+
+static inline void au_lcnt_wait_for_fin(au_lcnt_t *cnt __maybe_unused)
+{
+	synchronize_rcu();
+}
+
+static inline void au_lcnt_fin(au_lcnt_t *cnt, int do_sync)
+{
+	percpu_ref_kill(cnt);
+	if (do_sync)
+		au_lcnt_wait_for_fin(cnt);
+}
+
+static inline void au_lcnt_inc(au_lcnt_t *cnt)
+{
+	percpu_ref_get(cnt);
+}
+
+static inline void au_lcnt_dec(au_lcnt_t *cnt)
+{
+	percpu_ref_put(cnt);
+}
+
+/*
+ * avoid calling this function as much as possible.
+ */
+static inline long au_lcnt_read(au_lcnt_t *cnt, int do_rev)
+{
+	long l;
+
+	percpu_ref_switch_to_atomic_sync(cnt);
+	l = atomic_long_read(&cnt->count);
+	if (do_rev)
+		percpu_ref_switch_to_percpu(cnt);
+
+	/* percpu_ref is initialized to 1 instead of 0 */
+	return l - 1;
+}
+#endif
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuLCntZero(val) do {			\
+	long l = val;				\
+	if (l)					\
+		AuDbg("%s = %ld\n", #val, l);	\
+} while (0)
+#else
+#define AuLCntZero(val)		do {} while (0)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LCNT_H__ */
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
new file mode 100644
index 00000000000..63732e93868
--- /dev/null
+++ b/fs/aufs/loop.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * support for loopback block device as a branch
+ */
+
+#include "aufs.h"
+
+/* loop_backing_file() is added into drivers/block/loop.c */
+static struct file *(*backing_file_func)(struct super_block *sb);
+
+/*
+ * test if the branch being added is backed by a loopback device whose
+ * backing file resides inside this aufs mount.
+ */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
+{
+	struct super_block *h_sb;
+	struct file *backing_file;
+
+	if (unlikely(!backing_file_func)) {
+		/* don't load "loop" module here */
+		backing_file_func = symbol_get(loop_backing_file);
+		if (unlikely(!backing_file_func))
+			/* "loop" module is not loaded */
+			return 0;
+	}
+
+	h_sb = h_adding->d_sb;
+	backing_file = backing_file_func(h_sb);
+	if (!backing_file)
+		return 0;
+
+	h_adding = backing_file->f_path.dentry;
+	/*
+	 * h_adding can be a local NFS mount; in that case aufs cannot
+	 * detect the loop.
+	 */
+	if (unlikely(h_adding->d_sb == sb))
+		return 1;
+	return !!au_test_subdir(h_adding, sb->s_root);
+}
+
+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
+int au_test_loopback_kthread(void)
+{
+	int ret;
+	struct task_struct *tsk = current;
+	char c, comm[sizeof(tsk->comm)];
+
+	ret = 0;
+	if (tsk->flags & PF_KTHREAD) {
+		get_task_comm(comm, tsk);
+		c = comm[4];
+		ret = ('0' <= c && c <= '9'
+		       && !strncmp(comm, "loop", 4));
+	}
+
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define au_warn_loopback_step	16
+static int au_warn_loopback_nelem = au_warn_loopback_step;
+static unsigned long *au_warn_loopback_array;
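+/*
+ * remember the s_magic of every lower fs we have already warned about, so
+ * that the message below is printed only once per filesystem type.
+ */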
+
+void au_warn_loopback(struct super_block *h_sb)
+{
+	int i, new_nelem;
+	unsigned long *a, magic;
+	static DEFINE_SPINLOCK(spin);
+
+	magic = h_sb->s_magic;
+	spin_lock(&spin);
+	a = au_warn_loopback_array;
+	for (i = 0; i < au_warn_loopback_nelem && a[i]; i++)
+		if (a[i] == magic) {
+			spin_unlock(&spin);
+			return;
+		}
+
+	/* h_sb is new to us, print it */
+	if (i < au_warn_loopback_nelem) {
+		a[i] = magic;
+		goto pr;
+	}
+
+	/* expand the array */
+	new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
+	a = au_kzrealloc(au_warn_loopback_array,
+			 au_warn_loopback_nelem * sizeof(unsigned long),
+			 new_nelem * sizeof(unsigned long), GFP_ATOMIC,
+			 /*may_shrink*/0);
+	if (a) {
+		au_warn_loopback_nelem = new_nelem;
+		au_warn_loopback_array = a;
+		a[i] = magic;
+		goto pr;
+	}
+
+	spin_unlock(&spin);
+	AuWarn1("realloc failed, ignored\n");
+	return;
+
+pr:
+	spin_unlock(&spin);
+	pr_warn("you may want to try another patch for loopback file "
+		"on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
+}
+
+int au_loopback_init(void)
+{
+	int err;
+	struct super_block *sb __maybe_unused;
+
+	BUILD_BUG_ON(sizeof(sb->s_magic) != sizeof(*au_warn_loopback_array));
+
+	err = 0;
+	au_warn_loopback_array = kcalloc(au_warn_loopback_step,
+					 sizeof(unsigned long), GFP_NOFS);
+	if (unlikely(!au_warn_loopback_array))
+		err = -ENOMEM;
+
+	return err;
+}
+
+void au_loopback_fin(void)
+{
+	if (backing_file_func)
+		symbol_put(loop_backing_file);
+	au_kfree_try_rcu(au_warn_loopback_array);
+}
diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h
new file mode 100644
index 00000000000..65c38bbb5d5
--- /dev/null
+++ b/fs/aufs/loop.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * support for loopback mount as a branch
+ */
+
+#ifndef __AUFS_LOOP_H__
+#define __AUFS_LOOP_H__
+
+#ifdef __KERNEL__
+
+struct dentry;
+struct super_block;
+
+#ifdef CONFIG_AUFS_BDEV_LOOP
+/* drivers/block/loop.c */
+struct file *loop_backing_file(struct super_block *sb);
+
+/* loop.c */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
+int au_test_loopback_kthread(void);
+void au_warn_loopback(struct super_block *h_sb);
+
+int au_loopback_init(void);
+void au_loopback_fin(void);
+#else
+AuStub(struct file *, loop_backing_file, return NULL, struct super_block *sb)
+
+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
+	   struct dentry *h_adding)
+AuStubInt0(au_test_loopback_kthread, void)
+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
+
+AuStubInt0(au_loopback_init, void)
+AuStubVoid(au_loopback_fin, void)
+#endif /* CONFIG_AUFS_BDEV_LOOP */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LOOP_H__ */
diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk
new file mode 100644
index 00000000000..7bc9eef3ffe
--- /dev/null
+++ b/fs/aufs/magic.mk
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+
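+# these lower filesystems keep their magic numbers in fs-private sources
+# (see the paths below), so replicate them here as compiler flags whenever
+# the corresponding filesystem is enabled.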
+# defined in ${srctree}/fs/fuse/inode.c
+# tristate
+ifdef CONFIG_FUSE_FS
+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
+endif
+
+# defined in ${srctree}/fs/xfs/xfs_sb.h
+# tristate
+ifdef CONFIG_XFS_FS
+ccflags-y += -DXFS_SB_MAGIC=0x58465342
+endif
+
+# defined in ${srctree}/fs/configfs/mount.c
+# tristate
+ifdef CONFIG_CONFIGFS_FS
+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
+endif
+
+# defined in ${srctree}/fs/ubifs/ubifs.h
+# tristate
+ifdef CONFIG_UBIFS_FS
+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
+endif
+
+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
+# tristate
+ifdef CONFIG_HFSPLUS_FS
+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
+endif
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
new file mode 100644
index 00000000000..7245197a6cb
--- /dev/null
+++ b/fs/aufs/module.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * module global variables and operations
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+/* shrinkable realloc */
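+/*
+ * when the new size falls into a smaller kmalloc bucket and may_shrink is
+ * set, allocate a smaller object and copy, since krealloc() alone may keep
+ * the oversized allocation.
+ */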
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink)
+{
+	size_t sz;
+	int diff;
+
+	sz = 0;
+	diff = -1;
+	if (p) {
+#if 0 /* unused */
+		if (!new_sz) {
+			au_kfree_rcu(p);
+			p = NULL;
+			goto out;
+		}
+#else
+		AuDebugOn(!new_sz);
+#endif
+		sz = ksize(p);
+		diff = au_kmidx_sub(sz, new_sz);
+	}
+	if (sz && !diff)
+		goto out;
+
+	if (sz < new_sz)
+		/* expand or SLOB */
+		p = krealloc(p, new_sz, gfp);
+	else if (new_sz < sz && may_shrink) {
+		/* shrink */
+		void *q;
+
+		q = kmalloc(new_sz, gfp);
+		if (q) {
+			if (p) {
+				memcpy(q, p, new_sz);
+				au_kfree_try_rcu(p);
+			}
+			p = q;
+		} else
+			p = NULL;
+	}
+
+out:
+	return p;
+}
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+		   int may_shrink)
+{
+	p = au_krealloc(p, new_sz, gfp, may_shrink);
+	if (p && new_sz > nused)
+		memset(p + nused, 0, new_sz - nused);
+	return p;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * aufs caches
+ */
+struct kmem_cache *au_cache[AuCache_Last];
+
+static void au_cache_fin(void)
+{
+	int i;
+
+	/*
+	 * make sure all delayed RCU-freed inodes are flushed before we
+	 * destroy the caches.
+	 */
+	rcu_barrier();
+
+	/* excluding AuCache_HNOTIFY */
+	BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
+	for (i = 0; i < AuCache_HNOTIFY; i++) {
+		kmem_cache_destroy(au_cache[i]);
+		au_cache[i] = NULL;
+	}
+}
+
+static int __init au_cache_init(void)
+{
+	au_cache[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
+	if (au_cache[AuCache_DINFO])
+		/* SLAB_DESTROY_BY_RCU */
+		au_cache[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
+						       au_icntnr_init_once);
+	if (au_cache[AuCache_ICNTNR])
+		au_cache[AuCache_FINFO] = AuCacheCtor(au_finfo,
+						      au_fi_init_once);
+	if (au_cache[AuCache_FINFO])
+		au_cache[AuCache_VDIR] = AuCache(au_vdir);
+	if (au_cache[AuCache_VDIR])
+		au_cache[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
+	if (au_cache[AuCache_DEHSTR])
+		return 0;
+
+	au_cache_fin();
+	return -ENOMEM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dir_roflags;
+
+#ifdef CONFIG_AUFS_SBILIST
+/*
+ * iterate_supers_type() doesn't protect us from
+ * remounting (branch management)
+ */
+struct hlist_bl_head au_sbilist;
+#endif
+
+/*
+ * functions for module interface.
+ */
+MODULE_LICENSE("GPL");
+/* MODULE_LICENSE("GPL v2"); */
+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
+MODULE_DESCRIPTION(AUFS_NAME
+	" -- Advanced multi layered unification filesystem");
+MODULE_VERSION(AUFS_VERSION);
+MODULE_ALIAS_FS(AUFS_NAME);
+
+/* this module parameter has no meaning when SYSFS is disabled */
+int sysaufs_brs = 1;
+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
+module_param_named(brs, sysaufs_brs, int, 0444);
+
+/* this module parameter has no meaning when USER_NS is disabled */
+bool au_userns;
+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns");
+module_param_named(allow_userns, au_userns, bool, 0444);
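+
+/*
+ * For reference (hypothetical usage): both parameters are read-only (0444),
+ * so they can only be set at load time, e.g. "modprobe aufs brs=1
+ * allow_userns=1", and then appear under /sys/module/aufs/parameters/.
+ */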
+
+/* ---------------------------------------------------------------------- */
+
+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
+
+int au_seq_path(struct seq_file *seq, struct path *path)
+{
+	int err;
+
+	err = seq_path(seq, path, au_esc_chars);
+	if (err >= 0)
+		err = 0;
+	else
+		err = -ENOMEM;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int __init aufs_init(void)
+{
+	int err, i;
+	char *p;
+
+	p = au_esc_chars;
+	for (i = 1; i <= ' '; i++)
+		*p++ = i;
+	*p++ = '\\';
+	*p++ = '\x7f';
+	*p = 0;
+
+	au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
+
+	memcpy(aufs_iop_nogetattr, aufs_iop, sizeof(aufs_iop));
+	for (i = 0; i < AuIop_Last; i++)
+		aufs_iop_nogetattr[i].getattr = NULL;
+
+	memset(au_cache, 0, sizeof(au_cache));	/* including hnotify */
+
+	au_sbilist_init();
+	sysaufs_brs_init();
+	au_debug_init();
+	au_dy_init();
+	err = sysaufs_init();
+	if (unlikely(err))
+		goto out;
+	err = dbgaufs_init();
+	if (unlikely(err))
+		goto out_sysaufs;
+	err = au_procfs_init();
+	if (unlikely(err))
+		goto out_dbgaufs;
+	err = au_wkq_init();
+	if (unlikely(err))
+		goto out_procfs;
+	err = au_loopback_init();
+	if (unlikely(err))
+		goto out_wkq;
+	err = au_hnotify_init();
+	if (unlikely(err))
+		goto out_loopback;
+	err = au_sysrq_init();
+	if (unlikely(err))
+		goto out_hin;
+	err = au_cache_init();
+	if (unlikely(err))
+		goto out_sysrq;
+
+	aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0;
+	err = register_filesystem(&aufs_fs_type);
+	if (unlikely(err))
+		goto out_cache;
+
+	/* since we define pr_fmt, call printk directly */
+	printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
+	goto out; /* success */
+
+out_cache:
+	au_cache_fin();
+out_sysrq:
+	au_sysrq_fin();
+out_hin:
+	au_hnotify_fin();
+out_loopback:
+	au_loopback_fin();
+out_wkq:
+	au_wkq_fin();
+out_procfs:
+	au_procfs_fin();
+out_dbgaufs:
+	dbgaufs_fin();
+out_sysaufs:
+	sysaufs_fin();
+	au_dy_fin();
+out:
+	return err;
+}
+
+static void __exit aufs_exit(void)
+{
+	unregister_filesystem(&aufs_fs_type);
+	au_cache_fin();
+	au_sysrq_fin();
+	au_hnotify_fin();
+	au_loopback_fin();
+	au_wkq_fin();
+	au_procfs_fin();
+	dbgaufs_fin();
+	sysaufs_fin();
+	au_dy_fin();
+}
+
+module_init(aufs_init);
+module_exit(aufs_exit);
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
new file mode 100644
index 00000000000..d6a76788d2f
--- /dev/null
+++ b/fs/aufs/module.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * module initialization and module-global
+ */
+
+#ifndef __AUFS_MODULE_H__
+#define __AUFS_MODULE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/slab.h>
+#include "debug.h"
+#include "dentry.h"
+#include "dir.h"
+#include "file.h"
+#include "inode.h"
+
+struct path;
+struct seq_file;
+
+/* module parameters */
+extern int sysaufs_brs;
+extern bool au_userns;
+
+/* ---------------------------------------------------------------------- */
+
+extern int au_dir_roflags;
+
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink);
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+		   int may_shrink);
+
+/*
+ * Comparing the size of the object with sizeof(struct rcu_head)
+ * case 1: object is always larger
+ *	--> au_kfree_rcu() or au_kfree_do_rcu()
+ * case 2: object is always smaller
+ *	--> au_kfree_small()
+ * case 3: object can be any size
+ *	--> au_kfree_try_rcu()
+ */
+
+static inline void au_kfree_do_rcu(const void *p)
+{
+	struct {
+		struct rcu_head rcu;
+	} *a = (void *)p;
+
+	kfree_rcu(a, rcu);
+}
+
+#define au_kfree_rcu(_p) do {						\
+		typeof(_p) p = (_p);					\
+		BUILD_BUG_ON(sizeof(*p) < sizeof(struct rcu_head));	\
+		if (p)							\
+			au_kfree_do_rcu(p);				\
+	} while (0)
+
+#define au_kfree_do_sz_test(sz)	(sz >= sizeof(struct rcu_head))
+#define au_kfree_sz_test(p)	(p && au_kfree_do_sz_test(ksize(p)))
+
+static inline void au_kfree_try_rcu(const void *p)
+{
+	if (!p)
+		return;
+	if (au_kfree_sz_test(p))
+		au_kfree_do_rcu(p);
+	else
+		kfree(p);
+}
+
+static inline void au_kfree_small(const void *p)
+{
+	if (!p)
+		return;
+	AuDebugOn(au_kfree_sz_test(p));
+	kfree(p);
+}
+
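+#if 0 /* illustrative sketch only, not part of aufs */
+/*
+ * Choosing a free helper per the comment above.  'struct au_example' is
+ * hypothetical; since it embeds struct rcu_head it is always large enough
+ * for case 1, so au_kfree_rcu() applies.  For a buffer whose size is not
+ * known at compile time, au_kfree_try_rcu() would be the safe choice.
+ */
+struct au_example {
+	struct rcu_head rcu;
+	int val;
+};
+
+static inline void au_example_free(struct au_example *p)
+{
+	au_kfree_rcu(p);
+}
+#endif
+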
+static inline int au_kmidx_sub(size_t sz, size_t new_sz)
+{
+#ifndef CONFIG_SLOB
+	return kmalloc_index(sz) - kmalloc_index(new_sz);
+#else
+	return -1; /* SLOB is untested */
+#endif
+}
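+
+/*
+ * note: au_krealloc() uses the kmalloc-index difference above to skip
+ * reallocating when the old and new sizes fall into the same kmalloc
+ * bucket (sz && !diff), so only a real bucket change costs a copy.
+ */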
+
+int au_seq_path(struct seq_file *seq, struct path *path);
+
+#ifdef CONFIG_PROC_FS
+/* procfs.c */
+int __init au_procfs_init(void);
+void au_procfs_fin(void);
+#else
+AuStubInt0(au_procfs_init, void);
+AuStubVoid(au_procfs_fin, void);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* kmem cache */
+enum {
+	AuCache_DINFO,
+	AuCache_ICNTNR,
+	AuCache_FINFO,
+	AuCache_VDIR,
+	AuCache_DEHSTR,
+	AuCache_HNOTIFY, /* must be last */
+	AuCache_Last
+};
+
+extern struct kmem_cache *au_cache[AuCache_Last];
+
+#define AuCacheFlags		(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
+#define AuCache(type)		KMEM_CACHE(type, AuCacheFlags)
+#define AuCacheCtor(type, ctor)	\
+	kmem_cache_create(#type, sizeof(struct type), \
+			  __alignof__(struct type), AuCacheFlags, ctor)
+
+#define AuCacheFuncs(name, index)					\
+	static inline struct au_##name *au_cache_alloc_##name(void)	\
+	{ return kmem_cache_alloc(au_cache[AuCache_##index], GFP_NOFS); } \
+	static inline void au_cache_free_##name##_norcu(struct au_##name *p) \
+	{ kmem_cache_free(au_cache[AuCache_##index], p); }		\
+									\
+	static inline void au_cache_free_##name##_rcu_cb(struct rcu_head *rcu) \
+	{ void *p = rcu;						\
+		p -= offsetof(struct au_##name, rcu);			\
+		kmem_cache_free(au_cache[AuCache_##index], p); }	\
+	static inline void au_cache_free_##name##_rcu(struct au_##name *p) \
+	{ BUILD_BUG_ON(sizeof(struct au_##name) < sizeof(struct rcu_head)); \
+		call_rcu(&p->rcu, au_cache_free_##name##_rcu_cb); }	\
+									\
+	static inline void au_cache_free_##name(struct au_##name *p)	\
+	{ /* au_cache_free_##name##_norcu(p); */			\
+		au_cache_free_##name##_rcu(p); }
+
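+/*
+ * for reference: AuCacheFuncs(dinfo, DINFO) below expands to
+ * au_cache_alloc_dinfo(), au_cache_free_dinfo_norcu(),
+ * au_cache_free_dinfo_rcu_cb(), au_cache_free_dinfo_rcu() and
+ * au_cache_free_dinfo(), all operating on au_cache[AuCache_DINFO].
+ */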
+AuCacheFuncs(dinfo, DINFO);
+AuCacheFuncs(icntnr, ICNTNR);
+AuCacheFuncs(finfo, FINFO);
+AuCacheFuncs(vdir, VDIR);
+AuCacheFuncs(vdir_dehstr, DEHSTR);
+#ifdef CONFIG_AUFS_HNOTIFY
+AuCacheFuncs(hnotify, HNOTIFY);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_MODULE_H__ */
diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c
new file mode 100644
index 00000000000..c39a39e52eb
--- /dev/null
+++ b/fs/aufs/mvdown.c
@@ -0,0 +1,706 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * move-down, opposite of copy-up
+ */
+
+#include "aufs.h"
+
+struct au_mvd_args {
+	struct {
+		struct super_block *h_sb;
+		struct dentry *h_parent;
+		struct au_hinode *hdir;
+		struct inode *h_dir, *h_inode;
+		struct au_pin pin;
+	} info[AUFS_MVDOWN_NARRAY];
+
+	struct aufs_mvdown mvdown;
+	struct dentry *dentry, *parent;
+	struct inode *inode, *dir;
+	struct super_block *sb;
+	aufs_bindex_t bopq, bwh, bfound;
+	unsigned char rename_lock;
+};
+
+#define mvd_errno		mvdown.au_errno
+#define mvd_bsrc		mvdown.stbr[AUFS_MVDOWN_UPPER].bindex
+#define mvd_src_brid		mvdown.stbr[AUFS_MVDOWN_UPPER].brid
+#define mvd_bdst		mvdown.stbr[AUFS_MVDOWN_LOWER].bindex
+#define mvd_dst_brid		mvdown.stbr[AUFS_MVDOWN_LOWER].brid
+
+#define mvd_h_src_sb		info[AUFS_MVDOWN_UPPER].h_sb
+#define mvd_h_src_parent	info[AUFS_MVDOWN_UPPER].h_parent
+#define mvd_hdir_src		info[AUFS_MVDOWN_UPPER].hdir
+#define mvd_h_src_dir		info[AUFS_MVDOWN_UPPER].h_dir
+#define mvd_h_src_inode		info[AUFS_MVDOWN_UPPER].h_inode
+#define mvd_pin_src		info[AUFS_MVDOWN_UPPER].pin
+
+#define mvd_h_dst_sb		info[AUFS_MVDOWN_LOWER].h_sb
+#define mvd_h_dst_parent	info[AUFS_MVDOWN_LOWER].h_parent
+#define mvd_hdir_dst		info[AUFS_MVDOWN_LOWER].hdir
+#define mvd_h_dst_dir		info[AUFS_MVDOWN_LOWER].h_dir
+#define mvd_h_dst_inode		info[AUFS_MVDOWN_LOWER].h_inode
+#define mvd_pin_dst		info[AUFS_MVDOWN_LOWER].pin
+
+#define AU_MVD_PR(flag, ...) do {			\
+		if (flag)				\
+			pr_err(__VA_ARGS__);		\
+	} while (0)
+
+static int find_lower_writable(struct au_mvd_args *a)
+{
+	struct super_block *sb;
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	sb = a->sb;
+	bindex = a->mvd_bsrc;
+	bbot = au_sbbot(sb);
+	if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER)
+		for (bindex++; bindex <= bbot; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (au_br_fhsm(br->br_perm)
+			    && !sb_rdonly(au_br_sb(br)))
+				return bindex;
+		}
+	else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER))
+		for (bindex++; bindex <= bbot; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (!au_br_rdonly(br))
+				return bindex;
+		}
+	else
+		for (bindex++; bindex <= bbot; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (!sb_rdonly(au_br_sb(br))) {
+				if (au_br_rdonly(br))
+					a->mvdown.flags
+						|= AUFS_MVDOWN_ROLOWER_R;
+				return bindex;
+			}
+		}
+
+	return -1;
+}
+
+/* make the parent dir on bdst */
+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+
+	err = 0;
+	a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc);
+	a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst);
+	a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc);
+	a->mvd_h_dst_parent = NULL;
+	if (au_dbbot(a->parent) >= a->mvd_bdst)
+		a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+	if (!a->mvd_h_dst_parent) {
+		err = au_cpdown_dirs(a->dentry, a->mvd_bdst);
+		if (unlikely(err)) {
+			AU_MVD_PR(dmsg, "cpdown_dirs failed\n");
+			goto out;
+		}
+		a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* lock them all */
+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct dentry *h_trap;
+
+	a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc);
+	a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst);
+	err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst,
+		     au_opt_udba(a->sb),
+		     AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+	AuTraceErr(err);
+	if (unlikely(err)) {
+		AU_MVD_PR(dmsg, "pin_dst failed\n");
+		goto out;
+	}
+
+	if (a->mvd_h_src_sb != a->mvd_h_dst_sb) {
+		a->rename_lock = 0;
+		au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+			    AuLsc_DI_PARENT, AuLsc_I_PARENT3,
+			    au_opt_udba(a->sb),
+			    AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+		err = au_do_pin(&a->mvd_pin_src);
+		AuTraceErr(err);
+		a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+		if (unlikely(err)) {
+			AU_MVD_PR(dmsg, "pin_src failed\n");
+			goto out_dst;
+		}
+		goto out; /* success */
+	}
+
+	a->rename_lock = 1;
+	au_pin_hdir_unlock(&a->mvd_pin_dst);
+	err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+		     au_opt_udba(a->sb),
+		     AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+	AuTraceErr(err);
+	a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+	if (unlikely(err)) {
+		AU_MVD_PR(dmsg, "pin_src failed\n");
+		au_pin_hdir_lock(&a->mvd_pin_dst);
+		goto out_dst;
+	}
+	au_pin_hdir_unlock(&a->mvd_pin_src);
+	h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+				   a->mvd_h_dst_parent, a->mvd_hdir_dst);
+	if (h_trap) {
+		err = (h_trap != a->mvd_h_src_parent);
+		if (err)
+			err = (h_trap != a->mvd_h_dst_parent);
+	}
+	BUG_ON(err); /* it should never happen */
+	if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) {
+		err = -EBUSY;
+		AuTraceErr(err);
+		vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+				    a->mvd_h_dst_parent, a->mvd_hdir_dst);
+		au_pin_hdir_lock(&a->mvd_pin_src);
+		au_unpin(&a->mvd_pin_src);
+		au_pin_hdir_lock(&a->mvd_pin_dst);
+		goto out_dst;
+	}
+	goto out; /* success */
+
+out_dst:
+	au_unpin(&a->mvd_pin_dst);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	if (!a->rename_lock)
+		au_unpin(&a->mvd_pin_src);
+	else {
+		vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+				    a->mvd_h_dst_parent, a->mvd_hdir_dst);
+		au_pin_hdir_lock(&a->mvd_pin_src);
+		au_unpin(&a->mvd_pin_src);
+		au_pin_hdir_lock(&a->mvd_pin_dst);
+	}
+	au_unpin(&a->mvd_pin_dst);
+}
+
+/* copy-down the file */
+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct au_cp_generic cpg = {
+		.dentry	= a->dentry,
+		.bdst	= a->mvd_bdst,
+		.bsrc	= a->mvd_bsrc,
+		.len	= -1,
+		.pin	= &a->mvd_pin_dst,
+		.flags	= AuCpup_DTIME | AuCpup_HOPEN
+	};
+
+	AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst);
+	if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+		au_fset_cpup(cpg.flags, OVERWRITE);
+	if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER)
+		au_fset_cpup(cpg.flags, RWDST);
+	err = au_sio_cpdown_simple(&cpg);
+	if (unlikely(err))
+		AU_MVD_PR(dmsg, "cpdown failed\n");
+
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * unlink the whiteout on bdst if it exists; it may have been created by UDBA
+ * while we were sleeping
+ */
+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct path h_path;
+	struct au_branch *br;
+	struct inode *delegated;
+
+	br = au_sbr(a->sb, a->mvd_bdst);
+	h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry)) {
+		AU_MVD_PR(dmsg, "wh_lkup failed\n");
+		goto out;
+	}
+
+	err = 0;
+	if (d_is_positive(h_path.dentry)) {
+		h_path.mnt = au_br_mnt(br);
+		delegated = NULL;
+		err = vfsub_unlink(d_inode(a->mvd_h_dst_parent), &h_path,
+				   &delegated, /*force*/0);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal unlink\n");
+			iput(delegated);
+		}
+		if (unlikely(err))
+			AU_MVD_PR(dmsg, "wh_unlink failed\n");
+	}
+	dput(h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * unlink the topmost h_dentry
+ */
+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct path h_path;
+	struct inode *delegated;
+
+	h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc);
+	h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc);
+	delegated = NULL;
+	err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0);
+	if (unlikely(err == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal unlink\n");
+		iput(delegated);
+	}
+	if (unlikely(err))
+		AU_MVD_PR(dmsg, "unlink failed\n");
+
+	AuTraceErr(err);
+	return err;
+}
+
+/* Since mvdown succeeded, we ignore any error from this function */
+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct au_branch *br;
+
+	a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED;
+	br = au_sbr(a->sb, a->mvd_bsrc);
+	err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs);
+	if (!err) {
+		br = au_sbr(a->sb, a->mvd_bdst);
+		a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id;
+		err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs);
+	}
+	if (!err)
+		a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED;
+	else
+		AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err);
+}
+
+/*
+ * copy-down the file and unlink the bsrc file.
+ * - unlink the bdst whiteout if it exists
+ * - copy-down the file (with whtmp name and rename)
+ * - unlink the bsrc file
+ */
+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+
+	err = au_do_mkdir(dmsg, a);
+	if (!err)
+		err = au_do_lock(dmsg, a);
+	if (unlikely(err))
+		goto out;
+
+	/*
+	 * do not revert the activities we made on bdst since they should be
+	 * harmless in aufs.
+	 */
+
+	err = au_do_cpdown(dmsg, a);
+	if (!err)
+		err = au_do_unlink_wh(dmsg, a);
+	if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER))
+		err = au_do_unlink(dmsg, a);
+	if (unlikely(err))
+		goto out_unlock;
+
+	AuDbg("%pd2, 0x%x, %d --> %d\n",
+	      a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst);
+	if (find_lower_writable(a) < 0)
+		a->mvdown.flags |= AUFS_MVDOWN_BOTTOM;
+
+	if (a->mvdown.flags & AUFS_MVDOWN_STFS)
+		au_do_stfs(dmsg, a);
+
+	/* maintain internal array */
+	if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) {
+		au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL);
+		au_set_dbtop(a->dentry, a->mvd_bdst);
+		au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0);
+		au_set_ibtop(a->inode, a->mvd_bdst);
+	} else {
+		/* hide the lower */
+		au_set_h_dptr(a->dentry, a->mvd_bdst, NULL);
+		au_set_dbbot(a->dentry, a->mvd_bsrc);
+		au_set_h_iptr(a->inode, a->mvd_bdst, NULL, /*flags*/0);
+		au_set_ibbot(a->inode, a->mvd_bsrc);
+	}
+	if (au_dbbot(a->dentry) < a->mvd_bdst)
+		au_set_dbbot(a->dentry, a->mvd_bdst);
+	if (au_ibbot(a->inode) < a->mvd_bdst)
+		au_set_ibbot(a->inode, a->mvd_bdst);
+
+out_unlock:
+	au_do_unlock(dmsg, a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* make sure the file is idle */
+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err, plinked;
+
+	err = 0;
+	plinked = !!au_opt_test(au_mntflags(a->sb), PLINK);
+	if (au_dbtop(a->dentry) == a->mvd_bsrc
+	    && au_dcount(a->dentry) == 1
+	    && atomic_read(&a->inode->i_count) == 1
+	    /* && a->mvd_h_src_inode->i_nlink == 1 */
+	    && (!plinked || !au_plink_test(a->inode))
+	    && a->inode->i_nlink == 1)
+		goto out;
+
+	err = -EBUSY;
+	AU_MVD_PR(dmsg,
+		  "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n",
+		  a->mvd_bsrc, au_dbtop(a->dentry), au_dcount(a->dentry),
+		  atomic_read(&a->inode->i_count), a->inode->i_nlink,
+		  a->mvd_h_src_inode->i_nlink,
+		  plinked, plinked ? au_plink_test(a->inode) : 0);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* make sure the parent dir is fine */
+static int au_mvd_args_parent(const unsigned char dmsg,
+			      struct au_mvd_args *a)
+{
+	int err;
+	aufs_bindex_t bindex;
+
+	err = 0;
+	if (unlikely(au_alive_dir(a->parent))) {
+		err = -ENOENT;
+		AU_MVD_PR(dmsg, "parent dir is dead\n");
+		goto out;
+	}
+
+	a->bopq = au_dbdiropq(a->parent);
+	bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst);
+	AuDbg("b%d\n", bindex);
+	if (unlikely((bindex >= 0 && bindex < a->mvd_bdst)
+		     || (a->bopq != -1 && a->bopq < a->mvd_bdst))) {
+		err = -EINVAL;
+		a->mvd_errno = EAU_MVDOWN_OPAQUE;
+		AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n",
+			  a->bopq, a->mvd_bdst);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args_intermediate(const unsigned char dmsg,
+				    struct au_mvd_args *a)
+{
+	int err;
+	struct au_dinfo *dinfo, *tmp;
+
+	/* lookup the next lower positive entry */
+	err = -ENOMEM;
+	tmp = au_di_alloc(a->sb, AuLsc_DI_TMP);
+	if (unlikely(!tmp))
+		goto out;
+
+	a->bfound = -1;
+	a->bwh = -1;
+	dinfo = au_di(a->dentry);
+	au_di_cp(tmp, dinfo);
+	au_di_swap(tmp, dinfo);
+
+	/* returns the number of positive dentries */
+	err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1,
+			     /* AuLkup_IGNORE_PERM */ 0);
+	if (!err)
+		a->bwh = au_dbwh(a->dentry);
+	else if (err > 0)
+		a->bfound = au_dbtop(a->dentry);
+
+	au_di_swap(tmp, dinfo);
+	au_rw_write_unlock(&tmp->di_rwsem);
+	au_di_free(tmp);
+	if (unlikely(err < 0))
+		AU_MVD_PR(dmsg, "failed look-up lower\n");
+
+	/*
+	 * here, we have these cases.
+	 * bfound == -1
+	 *	no positive dentry under bsrc. there are more sub-cases.
+	 *	bwh < 0
+	 *		there is no whiteout; we can safely move down.
+	 *	bwh <= bsrc
+	 *		impossible
+	 *	bsrc < bwh && bwh < bdst
+	 *		there is a whiteout on RO branch. cannot proceed.
+	 *	bwh == bdst
+	 *		there is a whiteout on the RW target branch. it should
+	 *		be removed.
+	 *	bdst < bwh
+	 *		there is a whiteout on some unrelated branch.
+	 * -1 < bfound && bfound <= bsrc
+	 *	impossible.
+	 * bfound < bdst
+	 *	found, but it is on an RO branch between bsrc and bdst;
+	 *	cannot proceed.
+	 * bfound == bdst
+	 *	found; replace it if AUFS_MVDOWN_FORCE is set, otherwise return
+	 *	an error.
+	 * bdst < bfound
+	 *	found, after we create the file on bdst, it will be hidden.
+	 */
+
+	AuDebugOn(a->bfound == -1
+		  && a->bwh != -1
+		  && a->bwh <= a->mvd_bsrc);
+	AuDebugOn(-1 < a->bfound
+		  && a->bfound <= a->mvd_bsrc);
+
+	err = -EINVAL;
+	if (a->bfound == -1
+	    && a->mvd_bsrc < a->bwh
+	    && a->bwh != -1
+	    && a->bwh < a->mvd_bdst) {
+		a->mvd_errno = EAU_MVDOWN_WHITEOUT;
+		AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n",
+			  a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh);
+		goto out;
+	} else if (a->bfound != -1 && a->bfound < a->mvd_bdst) {
+		a->mvd_errno = EAU_MVDOWN_UPPER;
+		AU_MVD_PR(dmsg, "bdst %d, bfound %d\n",
+			  a->mvd_bdst, a->bfound);
+		goto out;
+	}
+
+	err = 0; /* success */
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+
+	err = 0;
+	if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+	    && a->bfound == a->mvd_bdst)
+		err = -EEXIST;
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a)
+{
+	int err;
+	struct au_branch *br;
+
+	err = -EISDIR;
+	if (unlikely(S_ISDIR(a->inode->i_mode)))
+		goto out;
+
+	err = -EINVAL;
+	if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER))
+		a->mvd_bsrc = au_ibtop(a->inode);
+	else {
+		a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid);
+		if (unlikely(a->mvd_bsrc < 0
+			     || (a->mvd_bsrc < au_dbtop(a->dentry)
+				 || au_dbbot(a->dentry) < a->mvd_bsrc
+				 || !au_h_dptr(a->dentry, a->mvd_bsrc))
+			     || (a->mvd_bsrc < au_ibtop(a->inode)
+				 || au_ibbot(a->inode) < a->mvd_bsrc
+				 || !au_h_iptr(a->inode, a->mvd_bsrc)))) {
+			a->mvd_errno = EAU_MVDOWN_NOUPPER;
+			AU_MVD_PR(dmsg, "no upper\n");
+			goto out;
+		}
+	}
+	if (unlikely(a->mvd_bsrc == au_sbbot(a->sb))) {
+		a->mvd_errno = EAU_MVDOWN_BOTTOM;
+		AU_MVD_PR(dmsg, "on the bottom\n");
+		goto out;
+	}
+	a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc);
+	br = au_sbr(a->sb, a->mvd_bsrc);
+	err = au_br_rdonly(br);
+	if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) {
+		if (unlikely(err))
+			goto out;
+	} else if (!(vfsub_native_ro(a->mvd_h_src_inode)
+		     || IS_APPEND(a->mvd_h_src_inode))) {
+		if (err)
+			a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R;
+		/* go on */
+	} else
+		goto out;
+
+	err = -EINVAL;
+	if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) {
+		a->mvd_bdst = find_lower_writable(a);
+		if (unlikely(a->mvd_bdst < 0)) {
+			a->mvd_errno = EAU_MVDOWN_BOTTOM;
+			AU_MVD_PR(dmsg, "no writable lower branch\n");
+			goto out;
+		}
+	} else {
+		a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid);
+		if (unlikely(a->mvd_bdst < 0
+			     || au_sbbot(a->sb) < a->mvd_bdst)) {
+			a->mvd_errno = EAU_MVDOWN_NOLOWERBR;
+			AU_MVD_PR(dmsg, "no lower brid\n");
+			goto out;
+		}
+	}
+
+	err = au_mvd_args_busy(dmsg, a);
+	if (!err)
+		err = au_mvd_args_parent(dmsg, a);
+	if (!err)
+		err = au_mvd_args_intermediate(dmsg, a);
+	if (!err)
+		err = au_mvd_args_exist(dmsg, a);
+	if (!err)
+		AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg)
+{
+	int err, e;
+	unsigned char dmsg;
+	struct au_mvd_args *args;
+	struct inode *inode;
+
+	inode = d_inode(dentry);
+	err = -EPERM;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = -ENOMEM;
+	args = kmalloc(sizeof(*args), GFP_NOFS);
+	if (unlikely(!args))
+		goto out;
+
+	err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown));
+	if (!err)
+		/* VERIFY_WRITE */
+		err = !access_ok(uarg, sizeof(*uarg));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out_free;
+	}
+	AuDbg("flags 0x%x\n", args->mvdown.flags);
+	args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R);
+	args->mvdown.au_errno = 0;
+	args->dentry = dentry;
+	args->inode = inode;
+	args->sb = dentry->d_sb;
+
+	err = -ENOENT;
+	dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG);
+	args->parent = dget_parent(dentry);
+	args->dir = d_inode(args->parent);
+	inode_lock_nested(args->dir, I_MUTEX_PARENT);
+	dput(args->parent);
+	if (unlikely(args->parent != dentry->d_parent)) {
+		AU_MVD_PR(dmsg, "parent dir is moved\n");
+		goto out_dir;
+	}
+
+	inode_lock_nested(inode, I_MUTEX_CHILD);
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_NOPLMW);
+	if (unlikely(err))
+		goto out_inode;
+
+	di_write_lock_parent(args->parent);
+	err = au_mvd_args(dmsg, args);
+	if (unlikely(err))
+		goto out_parent;
+
+	err = au_do_mvdown(dmsg, args);
+	if (unlikely(err))
+		goto out_parent;
+
+	au_cpup_attr_timesizes(args->dir);
+	au_cpup_attr_timesizes(inode);
+	if (!(args->mvdown.flags & AUFS_MVDOWN_KUPPER))
+		au_cpup_igen(inode, au_h_iptr(inode, args->mvd_bdst));
+	/* au_digen_dec(dentry); */
+
+out_parent:
+	di_write_unlock(args->parent);
+	aufs_read_unlock(dentry, AuLock_DW);
+out_inode:
+	inode_unlock(inode);
+out_dir:
+	inode_unlock(args->dir);
+out_free:
+	e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown));
+	if (unlikely(e))
+		err = -EFAULT;
+	au_kfree_rcu(args);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c
new file mode 100644
index 00000000000..16e785facf9
--- /dev/null
+++ b/fs/aufs/opts.c
@@ -0,0 +1,1880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount options/flags
+ */
+
+#include <linux/namei.h>
+#include <linux/types.h> /* some distributions require this */
+#include <linux/parser.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+enum {
+	Opt_br,
+	Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend,
+	Opt_idel, Opt_imod,
+	Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash,
+	Opt_rdblk_def, Opt_rdhash_def,
+	Opt_xino, Opt_noxino,
+	Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
+	Opt_trunc_xino_path, Opt_itrunc_xino,
+	Opt_trunc_xib, Opt_notrunc_xib,
+	Opt_shwh, Opt_noshwh,
+	Opt_plink, Opt_noplink, Opt_list_plink,
+	Opt_udba,
+	Opt_dio, Opt_nodio,
+	Opt_diropq_a, Opt_diropq_w,
+	Opt_warn_perm, Opt_nowarn_perm,
+	Opt_wbr_copyup, Opt_wbr_create,
+	Opt_fhsm_sec,
+	Opt_verbose, Opt_noverbose,
+	Opt_sum, Opt_nosum, Opt_wsum,
+	Opt_dirperm1, Opt_nodirperm1,
+	Opt_dirren, Opt_nodirren,
+	Opt_acl, Opt_noacl,
+	Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
+};
+
+static match_table_t options = {
+	{Opt_br, "br=%s"},
+	{Opt_br, "br:%s"},
+
+	{Opt_add, "add=%d:%s"},
+	{Opt_add, "add:%d:%s"},
+	{Opt_add, "ins=%d:%s"},
+	{Opt_add, "ins:%d:%s"},
+	{Opt_append, "append=%s"},
+	{Opt_append, "append:%s"},
+	{Opt_prepend, "prepend=%s"},
+	{Opt_prepend, "prepend:%s"},
+
+	{Opt_del, "del=%s"},
+	{Opt_del, "del:%s"},
+	/* {Opt_idel, "idel:%d"}, */
+	{Opt_mod, "mod=%s"},
+	{Opt_mod, "mod:%s"},
+	/* {Opt_imod, "imod:%d:%s"}, */
+
+	{Opt_dirwh, "dirwh=%d"},
+
+	{Opt_xino, "xino=%s"},
+	{Opt_noxino, "noxino"},
+	{Opt_trunc_xino, "trunc_xino"},
+	{Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
+	{Opt_notrunc_xino, "notrunc_xino"},
+	{Opt_trunc_xino_path, "trunc_xino=%s"},
+	{Opt_itrunc_xino, "itrunc_xino=%d"},
+	/* {Opt_zxino, "zxino=%s"}, */
+	{Opt_trunc_xib, "trunc_xib"},
+	{Opt_notrunc_xib, "notrunc_xib"},
+
+#ifdef CONFIG_PROC_FS
+	{Opt_plink, "plink"},
+#else
+	{Opt_ignore_silent, "plink"},
+#endif
+
+	{Opt_noplink, "noplink"},
+
+#ifdef CONFIG_AUFS_DEBUG
+	{Opt_list_plink, "list_plink"},
+#endif
+
+	{Opt_udba, "udba=%s"},
+
+	{Opt_dio, "dio"},
+	{Opt_nodio, "nodio"},
+
+#ifdef CONFIG_AUFS_DIRREN
+	{Opt_dirren, "dirren"},
+	{Opt_nodirren, "nodirren"},
+#else
+	{Opt_ignore, "dirren"},
+	{Opt_ignore_silent, "nodirren"},
+#endif
+
+#ifdef CONFIG_AUFS_FHSM
+	{Opt_fhsm_sec, "fhsm_sec=%d"},
+#else
+	{Opt_ignore, "fhsm_sec=%d"},
+#endif
+
+	{Opt_diropq_a, "diropq=always"},
+	{Opt_diropq_a, "diropq=a"},
+	{Opt_diropq_w, "diropq=whiteouted"},
+	{Opt_diropq_w, "diropq=w"},
+
+	{Opt_warn_perm, "warn_perm"},
+	{Opt_nowarn_perm, "nowarn_perm"},
+
+	/* keep them temporarily */
+	{Opt_ignore_silent, "nodlgt"},
+	{Opt_ignore, "clean_plink"},
+
+#ifdef CONFIG_AUFS_SHWH
+	{Opt_shwh, "shwh"},
+#endif
+	{Opt_noshwh, "noshwh"},
+
+	{Opt_dirperm1, "dirperm1"},
+	{Opt_nodirperm1, "nodirperm1"},
+
+	{Opt_verbose, "verbose"},
+	{Opt_verbose, "v"},
+	{Opt_noverbose, "noverbose"},
+	{Opt_noverbose, "quiet"},
+	{Opt_noverbose, "q"},
+	{Opt_noverbose, "silent"},
+
+	{Opt_sum, "sum"},
+	{Opt_nosum, "nosum"},
+	{Opt_wsum, "wsum"},
+
+	{Opt_rdcache, "rdcache=%d"},
+	{Opt_rdblk, "rdblk=%d"},
+	{Opt_rdblk_def, "rdblk=def"},
+	{Opt_rdhash, "rdhash=%d"},
+	{Opt_rdhash_def, "rdhash=def"},
+
+	{Opt_wbr_create, "create=%s"},
+	{Opt_wbr_create, "create_policy=%s"},
+	{Opt_wbr_copyup, "cpup=%s"},
+	{Opt_wbr_copyup, "copyup=%s"},
+	{Opt_wbr_copyup, "copyup_policy=%s"},
+
+	/* generic VFS flag */
+#ifdef CONFIG_FS_POSIX_ACL
+	{Opt_acl, "acl"},
+	{Opt_noacl, "noacl"},
+#else
+	{Opt_ignore, "acl"},
+	{Opt_ignore_silent, "noacl"},
+#endif
+
+	/* internal use for the scripts */
+	{Opt_ignore_silent, "si=%s"},
+
+	{Opt_br, "dirs=%s"},
+	{Opt_ignore, "debug=%d"},
+	{Opt_ignore, "delete=whiteout"},
+	{Opt_ignore, "delete=all"},
+	{Opt_ignore, "imap=%s"},
+
+	/* temporary workaround, due to old mount(8)? */
+	{Opt_ignore_silent, "relatime"},
+
+	{Opt_err, NULL}
+};
+
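+/*
+ * for reference (hypothetical example): a mount string such as
+ *	br=/rw=rw:/ro=ro,udba=reval,create=mfs
+ * matches Opt_br, Opt_udba and Opt_wbr_create above; au_opts_parse()
+ * below splits the "br" value again on ':' and hands each element to
+ * opt_add().
+ */
+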
+/* ---------------------------------------------------------------------- */
+
+static const char *au_parser_pattern(int val, match_table_t tbl)
+{
+	struct match_token *p;
+
+	p = tbl;
+	while (p->pattern) {
+		if (p->token == val)
+			return p->pattern;
+		p++;
+	}
+	BUG();
+	return "??";
+}
+
+static const char *au_optstr(int *val, match_table_t tbl)
+{
+	struct match_token *p;
+	int v;
+
+	v = *val;
+	if (!v)
+		goto out;
+	p = tbl;
+	while (p->pattern) {
+		if (p->token
+		    && (v & p->token) == p->token) {
+			*val &= ~p->token;
+			return p->pattern;
+		}
+		p++;
+	}
+
+out:
+	return NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t brperm = {
+	{AuBrPerm_RO, AUFS_BRPERM_RO},
+	{AuBrPerm_RR, AUFS_BRPERM_RR},
+	{AuBrPerm_RW, AUFS_BRPERM_RW},
+	{0, NULL}
+};
+
+static match_table_t brattr = {
+	/* general */
+	{AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG},
+	{AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL},
+	/* the 'unpin' attrib has been meaningless since linux-3.18-rc1 */
+	{AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
+#ifdef CONFIG_AUFS_FHSM
+	{AuBrAttr_FHSM, AUFS_BRATTR_FHSM},
+#endif
+#ifdef CONFIG_AUFS_XATTR
+	{AuBrAttr_ICEX, AUFS_BRATTR_ICEX},
+	{AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC},
+	{AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS},
+	{AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR},
+	{AuBrAttr_ICEX_USR, AUFS_BRATTR_ICEX_USR},
+	{AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH},
+#endif
+
+	/* ro/rr branch */
+	{AuBrRAttr_WH, AUFS_BRRATTR_WH},
+
+	/* rw branch */
+	{AuBrWAttr_MOO, AUFS_BRWATTR_MOO},
+	{AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
+
+	{0, NULL}
+};
+
+static int br_attr_val(char *str, match_table_t table, substring_t args[])
+{
+	int attr, v;
+	char *p;
+
+	attr = 0;
+	do {
+		p = strchr(str, '+');
+		if (p)
+			*p = 0;
+		v = match_token(str, table, args);
+		if (v) {
+			if (v & AuBrAttr_CMOO_Mask)
+				attr &= ~AuBrAttr_CMOO_Mask;
+			attr |= v;
+		} else {
+			if (p)
+				*p = '+';
+			pr_warn("ignored branch attribute %s\n", str);
+			break;
+		}
+		if (p)
+			str = p + 1;
+	} while (p);
+
+	return attr;
+}
+
+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm)
+{
+	int sz;
+	const char *p;
+	char *q;
+
+	q = str->a;
+	*q = 0;
+	p = au_optstr(&perm, brattr);
+	if (p) {
+		sz = strlen(p);
+		memcpy(q, p, sz + 1);
+		q += sz;
+	} else
+		goto out;
+
+	do {
+		p = au_optstr(&perm, brattr);
+		if (p) {
+			*q++ = '+';
+			sz = strlen(p);
+			memcpy(q, p, sz + 1);
+			q += sz;
+		}
+	} while (p);
+
+out:
+	return q - str->a;
+}
+
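+/*
+ * br_perm_val() below parses a branch permission string of the form
+ * "<perm>[+<attr>[+<attr>...]]": <perm> comes from the brperm table and
+ * each <attr> from the brattr table; attributes that do not fit the
+ * chosen permission are warned about and dropped.
+ */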
+static int noinline_for_stack br_perm_val(char *perm)
+{
+	int val, bad, sz;
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	au_br_perm_str_t attr;
+
+	p = strchr(perm, '+');
+	if (p)
+		*p = 0;
+	val = match_token(perm, brperm, args);
+	if (!val) {
+		if (p)
+			*p = '+';
+		pr_warn("ignored branch permission %s\n", perm);
+		val = AuBrPerm_RO;
+		goto out;
+	}
+	if (!p)
+		goto out;
+
+	val |= br_attr_val(p + 1, brattr, args);
+
+	bad = 0;
+	switch (val & AuBrPerm_Mask) {
+	case AuBrPerm_RO:
+	case AuBrPerm_RR:
+		bad = val & AuBrWAttr_Mask;
+		val &= ~AuBrWAttr_Mask;
+		break;
+	case AuBrPerm_RW:
+		bad = val & AuBrRAttr_Mask;
+		val &= ~AuBrRAttr_Mask;
+		break;
+	}
+
+	/*
+	 * the 'unpin' attrib has been meaningless since linux-3.18-rc1, but aufs
+	 * does not treat it as an error, just a warning.
+	 * this is a tiny guard for the user operation.
+	 */
+	if (val & AuBrAttr_UNPIN) {
+		bad |= AuBrAttr_UNPIN;
+		val &= ~AuBrAttr_UNPIN;
+	}
+
+	if (unlikely(bad)) {
+		sz = au_do_optstr_br_attr(&attr, bad);
+		AuDebugOn(!sz);
+		pr_warn("ignored branch attribute %s\n", attr.a);
+	}
+
+out:
+	return val;
+}
+
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm)
+{
+	au_br_perm_str_t attr;
+	const char *p;
+	char *q;
+	int sz;
+
+	q = str->a;
+	p = au_optstr(&perm, brperm);
+	AuDebugOn(!p || !*p);
+	sz = strlen(p);
+	memcpy(q, p, sz + 1);
+	q += sz;
+
+	sz = au_do_optstr_br_attr(&attr, perm);
+	if (sz) {
+		*q++ = '+';
+		memcpy(q, attr.a, sz + 1);
+	}
+
+	AuDebugOn(strlen(str->a) >= sizeof(str->a));
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t udbalevel = {
+	{AuOpt_UDBA_REVAL, "reval"},
+	{AuOpt_UDBA_NONE, "none"},
+#ifdef CONFIG_AUFS_HNOTIFY
+	{AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	{AuOpt_UDBA_HNOTIFY, "fsnotify"},
+#endif
+#endif
+	{-1, NULL}
+};
+
+static int noinline_for_stack udba_val(char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	return match_token(str, udbalevel, args);
+}
+
+const char *au_optstr_udba(int udba)
+{
+	return au_parser_pattern(udba, udbalevel);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t au_wbr_create_policy = {
+	{AuWbrCreate_TDP, "tdp"},
+	{AuWbrCreate_TDP, "top-down-parent"},
+	{AuWbrCreate_RR, "rr"},
+	{AuWbrCreate_RR, "round-robin"},
+	{AuWbrCreate_MFS, "mfs"},
+	{AuWbrCreate_MFS, "most-free-space"},
+	{AuWbrCreate_MFSV, "mfs:%d"},
+	{AuWbrCreate_MFSV, "most-free-space:%d"},
+
+	/* top-down regardless of the parent, and then mfs */
+	{AuWbrCreate_TDMFS, "tdmfs:%d"},
+	{AuWbrCreate_TDMFSV, "tdmfs:%d:%d"},
+
+	{AuWbrCreate_MFSRR, "mfsrr:%d"},
+	{AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
+	{AuWbrCreate_PMFS, "pmfs"},
+	{AuWbrCreate_PMFSV, "pmfs:%d"},
+	{AuWbrCreate_PMFSRR, "pmfsrr:%d"},
+	{AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"},
+
+	{-1, NULL}
+};
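+
+/*
+ * for reference (hypothetical values): "create=mfsrr:10000:30" selects
+ * AuWbrCreate_MFSRRV with mfsrr_watermark=10000 and mfs_second=30, while
+ * plain "create=mfs" falls back to AUFS_MFS_DEF_SEC for the interval
+ * (see au_wbr_create_val() below).
+ */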
+
+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
+			    struct au_opt_wbr_create *create)
+{
+	int err;
+	unsigned long long ull;
+
+	err = 0;
+	if (!match_u64(arg, &ull))
+		create->mfsrr_watermark = ull;
+	else {
+		pr_err("bad integer in %s\n", str);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int au_wbr_mfs_sec(substring_t *arg, char *str,
+			  struct au_opt_wbr_create *create)
+{
+	int n, err;
+
+	err = 0;
+	if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
+		create->mfs_second = n;
+	else {
+		pr_err("bad integer in %s\n", str);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int noinline_for_stack
+au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
+{
+	int err, e;
+	substring_t args[MAX_OPT_ARGS];
+
+	err = match_token(str, au_wbr_create_policy, args);
+	create->wbr_create = err;
+	switch (err) {
+	case AuWbrCreate_MFSRRV:
+	case AuWbrCreate_TDMFSV:
+	case AuWbrCreate_PMFSRRV:
+		e = au_wbr_mfs_wmark(&args[0], str, create);
+		if (!e)
+			e = au_wbr_mfs_sec(&args[1], str, create);
+		if (unlikely(e))
+			err = e;
+		break;
+	case AuWbrCreate_MFSRR:
+	case AuWbrCreate_TDMFS:
+	case AuWbrCreate_PMFSRR:
+		e = au_wbr_mfs_wmark(&args[0], str, create);
+		if (unlikely(e)) {
+			err = e;
+			break;
+		}
+		/*FALLTHROUGH*/
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_PMFS:
+		create->mfs_second = AUFS_MFS_DEF_SEC;
+		break;
+	case AuWbrCreate_MFSV:
+	case AuWbrCreate_PMFSV:
+		e = au_wbr_mfs_sec(&args[0], str, create);
+		if (unlikely(e))
+			err = e;
+		break;
+	}
+
+	return err;
+}
+
+const char *au_optstr_wbr_create(int wbr_create)
+{
+	return au_parser_pattern(wbr_create, au_wbr_create_policy);
+}
+
+static match_table_t au_wbr_copyup_policy = {
+	{AuWbrCopyup_TDP, "tdp"},
+	{AuWbrCopyup_TDP, "top-down-parent"},
+	{AuWbrCopyup_BUP, "bup"},
+	{AuWbrCopyup_BUP, "bottom-up-parent"},
+	{AuWbrCopyup_BU, "bu"},
+	{AuWbrCopyup_BU, "bottom-up"},
+	{-1, NULL}
+};
+
+static int noinline_for_stack au_wbr_copyup_val(char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	return match_token(str, au_wbr_copyup_policy, args);
+}
+
+const char *au_optstr_wbr_copyup(int wbr_copyup)
+{
+	return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+static void dump_opts(struct au_opts *opts)
+{
+#ifdef CONFIG_AUFS_DEBUG
+	/* reduce stack space */
+	union {
+		struct au_opt_add *add;
+		struct au_opt_del *del;
+		struct au_opt_mod *mod;
+		struct au_opt_xino *xino;
+		struct au_opt_xino_itrunc *xino_itrunc;
+		struct au_opt_wbr_create *create;
+	} u;
+	struct au_opt *opt;
+
+	opt = opts->opt;
+	while (opt->type != Opt_tail) {
+		switch (opt->type) {
+		case Opt_add:
+			u.add = &opt->add;
+			AuDbg("add {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_del:
+		case Opt_idel:
+			u.del = &opt->del;
+			AuDbg("del {%s, %p}\n",
+			      u.del->pathname, u.del->h_path.dentry);
+			break;
+		case Opt_mod:
+		case Opt_imod:
+			u.mod = &opt->mod;
+			AuDbg("mod {%s, 0x%x, %p}\n",
+				  u.mod->path, u.mod->perm, u.mod->h_root);
+			break;
+		case Opt_append:
+			u.add = &opt->add;
+			AuDbg("append {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_prepend:
+			u.add = &opt->add;
+			AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_dirwh:
+			AuDbg("dirwh %d\n", opt->dirwh);
+			break;
+		case Opt_rdcache:
+			AuDbg("rdcache %d\n", opt->rdcache);
+			break;
+		case Opt_rdblk:
+			AuDbg("rdblk %u\n", opt->rdblk);
+			break;
+		case Opt_rdblk_def:
+			AuDbg("rdblk_def\n");
+			break;
+		case Opt_rdhash:
+			AuDbg("rdhash %u\n", opt->rdhash);
+			break;
+		case Opt_rdhash_def:
+			AuDbg("rdhash_def\n");
+			break;
+		case Opt_xino:
+			u.xino = &opt->xino;
+			AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file);
+			break;
+		case Opt_trunc_xino:
+			AuLabel(trunc_xino);
+			break;
+		case Opt_notrunc_xino:
+			AuLabel(notrunc_xino);
+			break;
+		case Opt_trunc_xino_path:
+		case Opt_itrunc_xino:
+			u.xino_itrunc = &opt->xino_itrunc;
+			AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
+			break;
+		case Opt_noxino:
+			AuLabel(noxino);
+			break;
+		case Opt_trunc_xib:
+			AuLabel(trunc_xib);
+			break;
+		case Opt_notrunc_xib:
+			AuLabel(notrunc_xib);
+			break;
+		case Opt_shwh:
+			AuLabel(shwh);
+			break;
+		case Opt_noshwh:
+			AuLabel(noshwh);
+			break;
+		case Opt_dirperm1:
+			AuLabel(dirperm1);
+			break;
+		case Opt_nodirperm1:
+			AuLabel(nodirperm1);
+			break;
+		case Opt_plink:
+			AuLabel(plink);
+			break;
+		case Opt_noplink:
+			AuLabel(noplink);
+			break;
+		case Opt_list_plink:
+			AuLabel(list_plink);
+			break;
+		case Opt_udba:
+			AuDbg("udba %d, %s\n",
+				  opt->udba, au_optstr_udba(opt->udba));
+			break;
+		case Opt_dio:
+			AuLabel(dio);
+			break;
+		case Opt_nodio:
+			AuLabel(nodio);
+			break;
+		case Opt_diropq_a:
+			AuLabel(diropq_a);
+			break;
+		case Opt_diropq_w:
+			AuLabel(diropq_w);
+			break;
+		case Opt_warn_perm:
+			AuLabel(warn_perm);
+			break;
+		case Opt_nowarn_perm:
+			AuLabel(nowarn_perm);
+			break;
+		case Opt_verbose:
+			AuLabel(verbose);
+			break;
+		case Opt_noverbose:
+			AuLabel(noverbose);
+			break;
+		case Opt_sum:
+			AuLabel(sum);
+			break;
+		case Opt_nosum:
+			AuLabel(nosum);
+			break;
+		case Opt_wsum:
+			AuLabel(wsum);
+			break;
+		case Opt_wbr_create:
+			u.create = &opt->wbr_create;
+			AuDbg("create %d, %s\n", u.create->wbr_create,
+				  au_optstr_wbr_create(u.create->wbr_create));
+			switch (u.create->wbr_create) {
+			case AuWbrCreate_MFSV:
+			case AuWbrCreate_PMFSV:
+				AuDbg("%d sec\n", u.create->mfs_second);
+				break;
+			case AuWbrCreate_MFSRR:
+			case AuWbrCreate_TDMFS:
+				AuDbg("%llu watermark\n",
+					  u.create->mfsrr_watermark);
+				break;
+			case AuWbrCreate_MFSRRV:
+			case AuWbrCreate_TDMFSV:
+			case AuWbrCreate_PMFSRRV:
+				AuDbg("%llu watermark, %d sec\n",
+					  u.create->mfsrr_watermark,
+					  u.create->mfs_second);
+				break;
+			}
+			break;
+		case Opt_wbr_copyup:
+			AuDbg("copyup %d, %s\n", opt->wbr_copyup,
+				  au_optstr_wbr_copyup(opt->wbr_copyup));
+			break;
+		case Opt_fhsm_sec:
+			AuDbg("fhsm_sec %u\n", opt->fhsm_second);
+			break;
+		case Opt_dirren:
+			AuLabel(dirren);
+			break;
+		case Opt_nodirren:
+			AuLabel(nodirren);
+			break;
+		case Opt_acl:
+			AuLabel(acl);
+			break;
+		case Opt_noacl:
+			AuLabel(noacl);
+			break;
+		default:
+			BUG();
+		}
+		opt++;
+	}
+#endif
+}
+
+void au_opts_free(struct au_opts *opts)
+{
+	struct au_opt *opt;
+
+	opt = opts->opt;
+	while (opt->type != Opt_tail) {
+		switch (opt->type) {
+		case Opt_add:
+		case Opt_append:
+		case Opt_prepend:
+			path_put(&opt->add.path);
+			break;
+		case Opt_del:
+		case Opt_idel:
+			path_put(&opt->del.h_path);
+			break;
+		case Opt_mod:
+		case Opt_imod:
+			dput(opt->mod.h_root);
+			break;
+		case Opt_xino:
+			fput(opt->xino.file);
+			break;
+		}
+		opt++;
+	}
+}
+
+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
+		   aufs_bindex_t bindex)
+{
+	int err;
+	struct au_opt_add *add = &opt->add;
+	char *p;
+
+	add->bindex = bindex;
+	add->perm = AuBrPerm_RO;
+	add->pathname = opt_str;
+	p = strchr(opt_str, '=');
+	if (p) {
+		*p++ = 0;
+		if (*p)
+			add->perm = br_perm_val(p);
+	}
+
+	err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
+	if (!err) {
+		if (!p) {
+			add->perm = AuBrPerm_RO;
+			if (au_test_fs_rr(add->path.dentry->d_sb))
+				add->perm = AuBrPerm_RR;
+			else if (!bindex && !(sb_flags & SB_RDONLY))
+				add->perm = AuBrPerm_RW;
+		}
+		opt->type = Opt_add;
+		goto out;
+	}
+	pr_err("lookup failed %s (%d)\n", add->pathname, err);
+	err = -EINVAL;
+
+out:
+	return err;
+}
+
+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
+{
+	int err;
+
+	del->pathname = args[0].from;
+	AuDbg("del path %s\n", del->pathname);
+
+	err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
+	if (unlikely(err))
+		pr_err("lookup failed %s (%d)\n", del->pathname, err);
+
+	return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
+			      struct au_opt_del *del, substring_t args[])
+{
+	int err;
+	struct dentry *root;
+
+	err = -EINVAL;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	if (bindex < 0 || au_sbbot(sb) < bindex) {
+		pr_err("out of bounds, %d\n", bindex);
+		goto out;
+	}
+
+	err = 0;
+	del->h_path.dentry = dget(au_h_dptr(root, bindex));
+	del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
+
+out:
+	aufs_read_unlock(root, !AuLock_IR);
+	return err;
+}
+#endif
+
+static int noinline_for_stack
+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
+{
+	int err;
+	struct path path;
+	char *p;
+
+	err = -EINVAL;
+	mod->path = args[0].from;
+	p = strchr(mod->path, '=');
+	if (unlikely(!p)) {
+		pr_err("no permission %s\n", args[0].from);
+		goto out;
+	}
+
+	*p++ = 0;
+	err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
+	if (unlikely(err)) {
+		pr_err("lookup failed %s (%d)\n", mod->path, err);
+		goto out;
+	}
+
+	mod->perm = br_perm_val(p);
+	AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
+	mod->h_root = dget(path.dentry);
+	path_put(&path);
+
+out:
+	return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
+			      struct au_opt_mod *mod, substring_t args[])
+{
+	int err;
+	struct dentry *root;
+
+	err = -EINVAL;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	if (bindex < 0 || au_sbbot(sb) < bindex) {
+		pr_err("out of bounds, %d\n", bindex);
+		goto out;
+	}
+
+	err = 0;
+	mod->perm = br_perm_val(args[1].from);
+	AuDbg("mod path %s, perm 0x%x, %s\n",
+	      mod->path, mod->perm, args[1].from);
+	mod->h_root = dget(au_h_dptr(root, bindex));
+
+out:
+	aufs_read_unlock(root, !AuLock_IR);
+	return err;
+}
+#endif
+
+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
+			      substring_t args[])
+{
+	int err;
+	struct file *file;
+
+	file = au_xino_create(sb, args[0].from, /*silent*/0, /*wbrtop*/0);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+
+	err = -EINVAL;
+	if (unlikely(file->f_path.dentry->d_sb == sb)) {
+		fput(file);
+		pr_err("%s must be outside\n", args[0].from);
+		goto out;
+	}
+
+	err = 0;
+	xino->file = file;
+	xino->path = args[0].from;
+
+out:
+	return err;
+}
+
+static int noinline_for_stack
+au_opts_parse_xino_itrunc_path(struct super_block *sb,
+			       struct au_opt_xino_itrunc *xino_itrunc,
+			       substring_t args[])
+{
+	int err;
+	aufs_bindex_t bbot, bindex;
+	struct path path;
+	struct dentry *root;
+
+	err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
+	if (unlikely(err)) {
+		pr_err("lookup failed %s (%d)\n", args[0].from, err);
+		goto out;
+	}
+
+	xino_itrunc->bindex = -1;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		if (au_h_dptr(root, bindex) == path.dentry) {
+			xino_itrunc->bindex = bindex;
+			break;
+		}
+	}
+	aufs_read_unlock(root, !AuLock_IR);
+	path_put(&path);
+
+	if (unlikely(xino_itrunc->bindex < 0)) {
+		pr_err("no such branch %s\n", args[0].from);
+		err = -EINVAL;
+	}
+
+out:
+	return err;
+}
+
+/* called without aufs lock */
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
+{
+	int err, n, token;
+	aufs_bindex_t bindex;
+	unsigned char skipped;
+	struct dentry *root;
+	struct au_opt *opt, *opt_tail;
+	char *opt_str;
+	/* reduce the stack space */
+	union {
+		struct au_opt_xino_itrunc *xino_itrunc;
+		struct au_opt_wbr_create *create;
+	} u;
+	struct {
+		substring_t args[MAX_OPT_ARGS];
+	} *a;
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	root = sb->s_root;
+	err = 0;
+	bindex = 0;
+	opt = opts->opt;
+	opt_tail = opt + opts->max_opt - 1;
+	opt->type = Opt_tail;
+	while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
+		err = -EINVAL;
+		skipped = 0;
+		token = match_token(opt_str, options, a->args);
+		switch (token) {
+		case Opt_br:
+			err = 0;
+			while (!err && (opt_str = strsep(&a->args[0].from, ":"))
+			       && *opt_str) {
+				err = opt_add(opt, opt_str, opts->sb_flags,
+					      bindex++);
+				if (unlikely(!err && ++opt > opt_tail)) {
+					err = -E2BIG;
+					break;
+				}
+				opt->type = Opt_tail;
+				skipped = 1;
+			}
+			break;
+		case Opt_add:
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			bindex = n;
+			err = opt_add(opt, a->args[1].from, opts->sb_flags,
+				      bindex);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_append:
+			err = opt_add(opt, a->args[0].from, opts->sb_flags,
+				      /*dummy bindex*/1);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_prepend:
+			err = opt_add(opt, a->args[0].from, opts->sb_flags,
+				      /*bindex*/0);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_del:
+			err = au_opts_parse_del(&opt->del, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#if 0 /* reserved for future use */
+		case Opt_idel:
+			del->pathname = "(indexed)";
+			if (unlikely(match_int(&args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			err = au_opts_parse_idel(sb, n, &opt->del, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#endif
+		case Opt_mod:
+			err = au_opts_parse_mod(&opt->mod, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#ifdef IMOD /* reserved for future use */
+		case Opt_imod:
+			u.mod->path = "(indexed)";
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#endif
+		case Opt_xino:
+			err = au_opts_parse_xino(sb, &opt->xino, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+
+		case Opt_trunc_xino_path:
+			err = au_opts_parse_xino_itrunc_path
+				(sb, &opt->xino_itrunc, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+
+		case Opt_itrunc_xino:
+			u.xino_itrunc = &opt->xino_itrunc;
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			u.xino_itrunc->bindex = n;
+			aufs_read_lock(root, AuLock_FLUSH);
+			if (n < 0 || au_sbbot(sb) < n) {
+				pr_err("out of bounds, %d\n", n);
+				aufs_read_unlock(root, !AuLock_IR);
+				break;
+			}
+			aufs_read_unlock(root, !AuLock_IR);
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_dirwh:
+			if (unlikely(match_int(&a->args[0], &opt->dirwh)))
+				break;
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_rdcache:
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			if (unlikely(n > AUFS_RDCACHE_MAX)) {
+				pr_err("rdcache must be smaller than %d\n",
+				       AUFS_RDCACHE_MAX);
+				break;
+			}
+			opt->rdcache = n;
+			err = 0;
+			opt->type = token;
+			break;
+		case Opt_rdblk:
+			if (unlikely(match_int(&a->args[0], &n)
+				     || n < 0
+				     || n > KMALLOC_MAX_SIZE)) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			if (unlikely(n && n < NAME_MAX)) {
+				pr_err("rdblk must be larger than %d\n",
+				       NAME_MAX);
+				break;
+			}
+			opt->rdblk = n;
+			err = 0;
+			opt->type = token;
+			break;
+		case Opt_rdhash:
+			if (unlikely(match_int(&a->args[0], &n)
+				     || n < 0
+				     || n * sizeof(struct hlist_head)
+				     > KMALLOC_MAX_SIZE)) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			opt->rdhash = n;
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_trunc_xino:
+		case Opt_notrunc_xino:
+		case Opt_noxino:
+		case Opt_trunc_xib:
+		case Opt_notrunc_xib:
+		case Opt_shwh:
+		case Opt_noshwh:
+		case Opt_dirperm1:
+		case Opt_nodirperm1:
+		case Opt_plink:
+		case Opt_noplink:
+		case Opt_list_plink:
+		case Opt_dio:
+		case Opt_nodio:
+		case Opt_diropq_a:
+		case Opt_diropq_w:
+		case Opt_warn_perm:
+		case Opt_nowarn_perm:
+		case Opt_verbose:
+		case Opt_noverbose:
+		case Opt_sum:
+		case Opt_nosum:
+		case Opt_wsum:
+		case Opt_rdblk_def:
+		case Opt_rdhash_def:
+		case Opt_dirren:
+		case Opt_nodirren:
+		case Opt_acl:
+		case Opt_noacl:
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_udba:
+			opt->udba = udba_val(a->args[0].from);
+			if (opt->udba >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+
+		case Opt_wbr_create:
+			u.create = &opt->wbr_create;
+			u.create->wbr_create
+				= au_wbr_create_val(a->args[0].from, u.create);
+			if (u.create->wbr_create >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+		case Opt_wbr_copyup:
+			opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
+			if (opt->wbr_copyup >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+
+		case Opt_fhsm_sec:
+			if (unlikely(match_int(&a->args[0], &n)
+				     || n < 0)) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			if (sysaufs_brs) {
+				opt->fhsm_second = n;
+				opt->type = token;
+			} else
+				pr_warn("ignored %s\n", opt_str);
+			err = 0;
+			break;
+
+		case Opt_ignore:
+			pr_warn("ignored %s\n", opt_str);
+			/*FALLTHROUGH*/
+		case Opt_ignore_silent:
+			skipped = 1;
+			err = 0;
+			break;
+		case Opt_err:
+			pr_err("unknown option %s\n", opt_str);
+			break;
+		}
+
+		if (!err && !skipped) {
+			if (unlikely(++opt > opt_tail)) {
+				err = -E2BIG;
+				opt--;
+				opt->type = Opt_tail;
+				break;
+			}
+			opt->type = Opt_tail;
+		}
+	}
+
+	au_kfree_rcu(a);
+	dump_opts(opts);
+	if (unlikely(err))
+		au_opts_free(opts);
+
+out:
+	return err;
+}
+
+static int au_opt_wbr_create(struct super_block *sb,
+			     struct au_opt_wbr_create *create)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = 1; /* handled */
+	sbinfo = au_sbi(sb);
+	if (sbinfo->si_wbr_create_ops->fin) {
+		err = sbinfo->si_wbr_create_ops->fin(sb);
+		if (!err)
+			err = 1;
+	}
+
+	sbinfo->si_wbr_create = create->wbr_create;
+	sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
+	switch (create->wbr_create) {
+	case AuWbrCreate_MFSRRV:
+	case AuWbrCreate_MFSRR:
+	case AuWbrCreate_TDMFS:
+	case AuWbrCreate_TDMFSV:
+	case AuWbrCreate_PMFSRR:
+	case AuWbrCreate_PMFSRRV:
+		sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
+		/*FALLTHROUGH*/
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_MFSV:
+	case AuWbrCreate_PMFS:
+	case AuWbrCreate_PMFSV:
+		sbinfo->si_wbr_mfs.mfs_expire
+			= msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
+		break;
+	}
+
+	if (sbinfo->si_wbr_create_ops->init)
+		sbinfo->si_wbr_create_ops->init(sb); /* ignore */
+
+	return err;
+}
+
+/*
+ * returns,
+ * plus: processed without an error
+ * zero: unprocessed
+ */
+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
+			 struct au_opts *opts)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = 1; /* handled */
+	sbinfo = au_sbi(sb);
+	switch (opt->type) {
+	case Opt_udba:
+		sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+		sbinfo->si_mntflags |= opt->udba;
+		opts->given_udba |= opt->udba;
+		break;
+
+	case Opt_plink:
+		au_opt_set(sbinfo->si_mntflags, PLINK);
+		break;
+	case Opt_noplink:
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_put(sb, /*verbose*/1);
+		au_opt_clr(sbinfo->si_mntflags, PLINK);
+		break;
+	case Opt_list_plink:
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_list(sb);
+		break;
+
+	case Opt_dio:
+		au_opt_set(sbinfo->si_mntflags, DIO);
+		au_fset_opts(opts->flags, REFRESH_DYAOP);
+		break;
+	case Opt_nodio:
+		au_opt_clr(sbinfo->si_mntflags, DIO);
+		au_fset_opts(opts->flags, REFRESH_DYAOP);
+		break;
+
+	case Opt_fhsm_sec:
+		au_fhsm_set(sbinfo, opt->fhsm_second);
+		break;
+
+	case Opt_diropq_a:
+		au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+		break;
+	case Opt_diropq_w:
+		au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+		break;
+
+	case Opt_warn_perm:
+		au_opt_set(sbinfo->si_mntflags, WARN_PERM);
+		break;
+	case Opt_nowarn_perm:
+		au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
+		break;
+
+	case Opt_verbose:
+		au_opt_set(sbinfo->si_mntflags, VERBOSE);
+		break;
+	case Opt_noverbose:
+		au_opt_clr(sbinfo->si_mntflags, VERBOSE);
+		break;
+
+	case Opt_sum:
+		au_opt_set(sbinfo->si_mntflags, SUM);
+		break;
+	case Opt_wsum:
+		au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_set(sbinfo->si_mntflags, SUM_W);
+		break;
+	case Opt_nosum:
+		au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_clr(sbinfo->si_mntflags, SUM_W);
+		break;
+
+	case Opt_wbr_create:
+		err = au_opt_wbr_create(sb, &opt->wbr_create);
+		break;
+	case Opt_wbr_copyup:
+		sbinfo->si_wbr_copyup = opt->wbr_copyup;
+		sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
+		break;
+
+	case Opt_dirwh:
+		sbinfo->si_dirwh = opt->dirwh;
+		break;
+
+	case Opt_rdcache:
+		sbinfo->si_rdcache
+			= msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
+		break;
+	case Opt_rdblk:
+		sbinfo->si_rdblk = opt->rdblk;
+		break;
+	case Opt_rdblk_def:
+		sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+		break;
+	case Opt_rdhash:
+		sbinfo->si_rdhash = opt->rdhash;
+		break;
+	case Opt_rdhash_def:
+		sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+		break;
+
+	case Opt_shwh:
+		au_opt_set(sbinfo->si_mntflags, SHWH);
+		break;
+	case Opt_noshwh:
+		au_opt_clr(sbinfo->si_mntflags, SHWH);
+		break;
+
+	case Opt_dirperm1:
+		au_opt_set(sbinfo->si_mntflags, DIRPERM1);
+		break;
+	case Opt_nodirperm1:
+		au_opt_clr(sbinfo->si_mntflags, DIRPERM1);
+		break;
+
+	case Opt_trunc_xino:
+		au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
+		break;
+	case Opt_notrunc_xino:
+		au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
+		break;
+
+	case Opt_trunc_xino_path:
+	case Opt_itrunc_xino:
+		err = au_xino_trunc(sb, opt->xino_itrunc.bindex,
+				    /*idx_begin*/0);
+		if (!err)
+			err = 1;
+		break;
+
+	case Opt_trunc_xib:
+		au_fset_opts(opts->flags, TRUNC_XIB);
+		break;
+	case Opt_notrunc_xib:
+		au_fclr_opts(opts->flags, TRUNC_XIB);
+		break;
+
+	case Opt_dirren:
+		err = 1;
+		if (!au_opt_test(sbinfo->si_mntflags, DIRREN)) {
+			err = au_dr_opt_set(sb);
+			if (!err)
+				err = 1;
+		}
+		if (err == 1)
+			au_opt_set(sbinfo->si_mntflags, DIRREN);
+		break;
+	case Opt_nodirren:
+		err = 1;
+		if (au_opt_test(sbinfo->si_mntflags, DIRREN)) {
+			err = au_dr_opt_clr(sb, au_ftest_opts(opts->flags,
+							      DR_FLUSHED));
+			if (!err)
+				err = 1;
+		}
+		if (err == 1)
+			au_opt_clr(sbinfo->si_mntflags, DIRREN);
+		break;
+
+	case Opt_acl:
+		sb->s_flags |= SB_POSIXACL;
+		break;
+	case Opt_noacl:
+		sb->s_flags &= ~SB_POSIXACL;
+		break;
+
+	default:
+		err = 0;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
+		     struct au_opts *opts)
+{
+	int err, do_refresh;
+
+	err = 0;
+	switch (opt->type) {
+	case Opt_append:
+		opt->add.bindex = au_sbbot(sb) + 1;
+		if (opt->add.bindex < 0)
+			opt->add.bindex = 0;
+		goto add;
+		/* Always goto add, not fallthrough */
+	case Opt_prepend:
+		opt->add.bindex = 0;
+		/* fallthrough */
+	add: /* indented label */
+	case Opt_add:
+		err = au_br_add(sb, &opt->add,
+				au_ftest_opts(opts->flags, REMOUNT));
+		if (!err) {
+			err = 1;
+			au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+
+	case Opt_del:
+	case Opt_idel:
+		err = au_br_del(sb, &opt->del,
+				au_ftest_opts(opts->flags, REMOUNT));
+		if (!err) {
+			err = 1;
+			au_fset_opts(opts->flags, TRUNC_XIB);
+			au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+
+	case Opt_mod:
+	case Opt_imod:
+		err = au_br_mod(sb, &opt->mod,
+				au_ftest_opts(opts->flags, REMOUNT),
+				&do_refresh);
+		if (!err) {
+			err = 1;
+			if (do_refresh)
+				au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+	}
+	return err;
+}
+
+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
+		       struct au_opt_xino **opt_xino,
+		       struct au_opts *opts)
+{
+	int err;
+
+	err = 0;
+	switch (opt->type) {
+	case Opt_xino:
+		err = au_xino_set(sb, &opt->xino,
+				  !!au_ftest_opts(opts->flags, REMOUNT));
+		if (unlikely(err))
+			break;
+
+		*opt_xino = &opt->xino;
+		break;
+
+	case Opt_noxino:
+		au_xino_clr(sb);
+		*opt_xino = (void *)-1;
+		break;
+	}
+
+	return err;
+}
+
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+		   unsigned int pending)
+{
+	int err, fhsm;
+	aufs_bindex_t bindex, bbot;
+	unsigned char do_plink, skip, do_free, can_no_dreval;
+	struct au_branch *br;
+	struct au_wbr *wbr;
+	struct dentry *root, *dentry;
+	struct inode *dir, *h_dir;
+	struct au_sbinfo *sbinfo;
+	struct au_hinode *hdir;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
+
+	if (!(sb_flags & SB_RDONLY)) {
+		if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
+			pr_warn("first branch should be rw\n");
+		if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
+			pr_warn_once("shwh should be used with ro\n");
+	}
+
+	if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
+	    && !au_opt_test(sbinfo->si_mntflags, XINO))
+		pr_warn_once("udba=*notify requires xino\n");
+
+	if (au_opt_test(sbinfo->si_mntflags, DIRPERM1))
+		pr_warn_once("dirperm1 breaks the protection"
+			     " by the permission bits on the lower branch\n");
+
+	err = 0;
+	fhsm = 0;
+	root = sb->s_root;
+	dir = d_inode(root);
+	do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
+	can_no_dreval = !!au_opt_test((sbinfo->si_mntflags | pending),
+				      UDBA_NONE);
+	bbot = au_sbbot(sb);
+	for (bindex = 0; !err && bindex <= bbot; bindex++) {
+		skip = 0;
+		h_dir = au_h_iptr(dir, bindex);
+		br = au_sbr(sb, bindex);
+
+		if ((br->br_perm & AuBrAttr_ICEX)
+		    && !h_dir->i_op->listxattr)
+			br->br_perm &= ~AuBrAttr_ICEX;
+#if 0 /* untested */
+		if ((br->br_perm & AuBrAttr_ICEX_SEC)
+		    && (au_br_sb(br)->s_flags & SB_NOSEC))
+			br->br_perm &= ~AuBrAttr_ICEX_SEC;
+#endif
+
+		do_free = 0;
+		wbr = br->br_wbr;
+		if (wbr)
+			wbr_wh_read_lock(wbr);
+
+		if (!au_br_writable(br->br_perm)) {
+			do_free = !!wbr;
+			skip = (!wbr
+				|| (!wbr->wbr_whbase
+				    && !wbr->wbr_plink
+				    && !wbr->wbr_orph));
+		} else if (!au_br_wh_linkable(br->br_perm)) {
+			/* skip = (!br->br_whbase && !br->br_orph); */
+			skip = (!wbr || !wbr->wbr_whbase);
+			if (skip && wbr) {
+				if (do_plink)
+					skip = !!wbr->wbr_plink;
+				else
+					skip = !wbr->wbr_plink;
+			}
+		} else {
+			/* skip = (br->br_whbase && br->br_orph); */
+			skip = (wbr && wbr->wbr_whbase);
+			if (skip) {
+				if (do_plink)
+					skip = !!wbr->wbr_plink;
+				else
+					skip = !wbr->wbr_plink;
+			}
+		}
+		if (wbr)
+			wbr_wh_read_unlock(wbr);
+
+		if (can_no_dreval) {
+			dentry = br->br_path.dentry;
+			spin_lock(&dentry->d_lock);
+			if (dentry->d_flags &
+			    (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE))
+				can_no_dreval = 0;
+			spin_unlock(&dentry->d_lock);
+		}
+
+		if (au_br_fhsm(br->br_perm)) {
+			fhsm++;
+			AuDebugOn(!br->br_fhsm);
+		}
+
+		if (skip)
+			continue;
+
+		hdir = au_hi(dir, bindex);
+		au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+		if (wbr)
+			wbr_wh_write_lock(wbr);
+		err = au_wh_init(br, sb);
+		if (wbr)
+			wbr_wh_write_unlock(wbr);
+		au_hn_inode_unlock(hdir);
+
+		if (!err && do_free) {
+			au_kfree_rcu(wbr);
+			br->br_wbr = NULL;
+		}
+	}
+
+	if (can_no_dreval)
+		au_fset_si(sbinfo, NO_DREVAL);
+	else
+		au_fclr_si(sbinfo, NO_DREVAL);
+
+	if (fhsm >= 2) {
+		au_fset_si(sbinfo, FHSM);
+		for (bindex = bbot; bindex >= 0; bindex--) {
+			br = au_sbr(sb, bindex);
+			if (au_br_fhsm(br->br_perm)) {
+				au_fhsm_set_bottom(sb, bindex);
+				break;
+			}
+		}
+	} else {
+		au_fclr_si(sbinfo, FHSM);
+		au_fhsm_set_bottom(sb, -1);
+	}
+
+	return err;
+}
+
+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
+{
+	int err;
+	unsigned int tmp;
+	aufs_bindex_t bindex, bbot;
+	struct au_opt *opt;
+	struct au_opt_xino *opt_xino, xino;
+	struct au_sbinfo *sbinfo;
+	struct au_branch *br;
+	struct inode *dir;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	opt_xino = NULL;
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail)
+		err = au_opt_simple(sb, opt++, opts);
+	if (err > 0)
+		err = 0;
+	else if (unlikely(err < 0))
+		goto out;
+
+	/* disable xino and udba temporarily */
+	sbinfo = au_sbi(sb);
+	tmp = sbinfo->si_mntflags;
+	au_opt_clr(sbinfo->si_mntflags, XINO);
+	au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail)
+		err = au_opt_br(sb, opt++, opts);
+	if (err > 0)
+		err = 0;
+	else if (unlikely(err < 0))
+		goto out;
+
+	bbot = au_sbbot(sb);
+	if (unlikely(bbot < 0)) {
+		err = -EINVAL;
+		pr_err("no branches\n");
+		goto out;
+	}
+
+	if (au_opt_test(tmp, XINO))
+		au_opt_set(sbinfo->si_mntflags, XINO);
+	opt = opts->opt;
+	while (!err && opt->type != Opt_tail)
+		err = au_opt_xino(sb, opt++, &opt_xino, opts);
+	if (unlikely(err))
+		goto out;
+
+	err = au_opts_verify(sb, sb->s_flags, tmp);
+	if (unlikely(err))
+		goto out;
+
+	/* restore xino */
+	if (au_opt_test(tmp, XINO) && !opt_xino) {
+		xino.file = au_xino_def(sb);
+		err = PTR_ERR(xino.file);
+		if (IS_ERR(xino.file))
+			goto out;
+
+		err = au_xino_set(sb, &xino, /*remount*/0);
+		fput(xino.file);
+		if (unlikely(err))
+			goto out;
+	}
+
+	/* restore udba */
+	tmp &= AuOptMask_UDBA;
+	sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+	sbinfo->si_mntflags |= tmp;
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_hnotify_reset_br(tmp, br, br->br_perm);
+		if (unlikely(err))
+			AuIOErr("hnotify failed on br %d, %d, ignored\n",
+				bindex, err);
+		/* go on even if err */
+	}
+	if (au_opt_test(tmp, UDBA_HNOTIFY)) {
+		dir = d_inode(sb->s_root);
+		au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
+	}
+
+out:
+	return err;
+}
+
+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
+{
+	int err, rerr;
+	unsigned char no_dreval;
+	struct inode *dir;
+	struct au_opt_xino *opt_xino;
+	struct au_opt *opt;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = au_dr_opt_flush(sb);
+	if (unlikely(err))
+		goto out;
+	au_fset_opts(opts->flags, DR_FLUSHED);
+
+	dir = d_inode(sb->s_root);
+	sbinfo = au_sbi(sb);
+	opt_xino = NULL;
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail) {
+		err = au_opt_simple(sb, opt, opts);
+		if (!err)
+			err = au_opt_br(sb, opt, opts);
+		if (!err)
+			err = au_opt_xino(sb, opt, &opt_xino, opts);
+		opt++;
+	}
+	if (err > 0)
+		err = 0;
+	AuTraceErr(err);
+	/* go on even if err */
+
+	no_dreval = !!au_ftest_si(sbinfo, NO_DREVAL);
+	rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
+	if (unlikely(rerr && !err))
+		err = rerr;
+
+	if (no_dreval != !!au_ftest_si(sbinfo, NO_DREVAL))
+		au_fset_opts(opts->flags, REFRESH_IDOP);
+
+	if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
+		rerr = au_xib_trunc(sb);
+		if (unlikely(rerr && !err))
+			err = rerr;
+	}
+
+	/* will be handled by the caller */
+	if (!au_ftest_opts(opts->flags, REFRESH)
+	    && (opts->given_udba
+		|| au_opt_test(sbinfo->si_mntflags, XINO)
+		|| au_ftest_opts(opts->flags, REFRESH_IDOP)
+		    ))
+		au_fset_opts(opts->flags, REFRESH);
+
+	AuDbg("status 0x%x\n", opts->flags);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_opt_udba(struct super_block *sb)
+{
+	return au_mntflags(sb) & AuOptMask_UDBA;
+}
diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h
new file mode 100644
index 00000000000..79f3ea56f8b
--- /dev/null
+++ b/fs/aufs/opts.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount options/flags
+ */
+
+#ifndef __AUFS_OPTS_H__
+#define __AUFS_OPTS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct file;
+
+/* ---------------------------------------------------------------------- */
+
+/* mount flags */
+#define AuOpt_XINO		1		/* external inode number bitmap
+						   and translation table */
+#define AuOpt_TRUNC_XINO	(1 << 1)	/* truncate xino files */
+#define AuOpt_UDBA_NONE		(1 << 2)	/* users direct branch access */
+#define AuOpt_UDBA_REVAL	(1 << 3)
+#define AuOpt_UDBA_HNOTIFY	(1 << 4)
+#define AuOpt_SHWH		(1 << 5)	/* show whiteout */
+#define AuOpt_PLINK		(1 << 6)	/* pseudo-link */
+#define AuOpt_DIRPERM1		(1 << 7)	/* ignore the lower dir's perm
+						   bits */
+#define AuOpt_ALWAYS_DIROPQ	(1 << 9)	/* policy to creating diropq */
+#define AuOpt_SUM		(1 << 10)	/* summation for statfs(2) */
+#define AuOpt_SUM_W		(1 << 11)	/* unimplemented */
+#define AuOpt_WARN_PERM		(1 << 12)	/* warn when add-branch */
+#define AuOpt_VERBOSE		(1 << 13)	/* print the cause of error */
+#define AuOpt_DIO		(1 << 14)	/* direct io */
+#define AuOpt_DIRREN		(1 << 15)	/* directory rename */
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuOpt_UDBA_HNOTIFY
+#define AuOpt_UDBA_HNOTIFY	0
+#endif
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuOpt_DIRREN
+#define AuOpt_DIRREN		0
+#endif
+#ifndef CONFIG_AUFS_SHWH
+#undef AuOpt_SHWH
+#define AuOpt_SHWH		0
+#endif
+
+#define AuOpt_Def	(AuOpt_XINO \
+			 | AuOpt_UDBA_REVAL \
+			 | AuOpt_PLINK \
+			 /* | AuOpt_DIRPERM1 */ \
+			 | AuOpt_WARN_PERM)
+#define AuOptMask_UDBA	(AuOpt_UDBA_NONE \
+			 | AuOpt_UDBA_REVAL \
+			 | AuOpt_UDBA_HNOTIFY)
+
+#define au_opt_test(flags, name)	(flags & AuOpt_##name)
+#define au_opt_set(flags, name) do { \
+	BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
+	((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_set_udba(flags, name) do { \
+	(flags) &= ~AuOptMask_UDBA; \
+	((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_clr(flags, name) do { \
+	((flags) &= ~AuOpt_##name); \
+} while (0)
+
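A side note on the macros above, illustrated with a small userspace sketch (the bit values are copied from the definitions above purely for illustration): au_opt_set_udba() clears the whole AuOptMask_UDBA range before setting the new bit, so the three udba modes replace one another, whereas au_opt_set()/au_opt_clr() toggle independent bits and the BUILD_BUG_ON refuses udba bits at compile time.

#include <stdio.h>

/* same bit layout as AuOpt_UDBA_*, for illustration only */
#define UDBA_NONE	(1 << 2)
#define UDBA_REVAL	(1 << 3)
#define UDBA_HNOTIFY	(1 << 4)
#define UDBA_MASK	(UDBA_NONE | UDBA_REVAL | UDBA_HNOTIFY)

int main(void)
{
	unsigned int flags = UDBA_REVAL;	/* the default udba mode */

	/* like au_opt_set_udba(): replace the mode, never accumulate */
	flags &= ~UDBA_MASK;
	flags |= UDBA_HNOTIFY;
	printf("udba bits: 0x%x (only HNOTIFY remains set)\n",
	       flags & UDBA_MASK);
	return 0;
}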
+static inline unsigned int au_opts_plink(unsigned int mntflags)
+{
+#ifdef CONFIG_PROC_FS
+	return mntflags;
+#else
+	return mntflags & ~AuOpt_PLINK;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies to select one among multiple writable branches */
+enum {
+	AuWbrCreate_TDP,	/* top down parent */
+	AuWbrCreate_RR,		/* round robin */
+	AuWbrCreate_MFS,	/* most free space */
+	AuWbrCreate_MFSV,	/* mfs with seconds */
+	AuWbrCreate_MFSRR,	/* mfs then rr */
+	AuWbrCreate_MFSRRV,	/* mfs then rr with seconds */
+	AuWbrCreate_TDMFS,	/* top down regardless of parent, and mfs */
+	AuWbrCreate_TDMFSV,	/* tdmfs with seconds */
+	AuWbrCreate_PMFS,	/* parent and mfs */
+	AuWbrCreate_PMFSV,	/* parent and mfs with seconds */
+	AuWbrCreate_PMFSRR,	/* parent, mfs and round-robin */
+	AuWbrCreate_PMFSRRV,	/* plus seconds */
+
+	AuWbrCreate_Def = AuWbrCreate_TDP
+};
+
+enum {
+	AuWbrCopyup_TDP,	/* top down parent */
+	AuWbrCopyup_BUP,	/* bottom up parent */
+	AuWbrCopyup_BU,		/* bottom up */
+
+	AuWbrCopyup_Def = AuWbrCopyup_TDP
+};
+
+/* ---------------------------------------------------------------------- */
+
+struct au_opt_add {
+	aufs_bindex_t	bindex;
+	char		*pathname;
+	int		perm;
+	struct path	path;
+};
+
+struct au_opt_del {
+	char		*pathname;
+	struct path	h_path;
+};
+
+struct au_opt_mod {
+	char		*path;
+	int		perm;
+	struct dentry	*h_root;
+};
+
+struct au_opt_xino {
+	char		*path;
+	struct file	*file;
+};
+
+struct au_opt_xino_itrunc {
+	aufs_bindex_t	bindex;
+};
+
+struct au_opt_wbr_create {
+	int			wbr_create;
+	int			mfs_second;
+	unsigned long long	mfsrr_watermark;
+};
+
+struct au_opt {
+	int type;
+	union {
+		struct au_opt_xino	xino;
+		struct au_opt_xino_itrunc xino_itrunc;
+		struct au_opt_add	add;
+		struct au_opt_del	del;
+		struct au_opt_mod	mod;
+		int			dirwh;
+		int			rdcache;
+		unsigned int		rdblk;
+		unsigned int		rdhash;
+		int			udba;
+		struct au_opt_wbr_create wbr_create;
+		int			wbr_copyup;
+		unsigned int		fhsm_second;
+	};
+};
+
+/* opts flags */
+#define AuOpts_REMOUNT		1
+#define AuOpts_REFRESH		(1 << 1)
+#define AuOpts_TRUNC_XIB	(1 << 2)
+#define AuOpts_REFRESH_DYAOP	(1 << 3)
+#define AuOpts_REFRESH_IDOP	(1 << 4)
+#define AuOpts_DR_FLUSHED	(1 << 5)
+#define au_ftest_opts(flags, name)	((flags) & AuOpts_##name)
+#define au_fset_opts(flags, name) \
+	do { (flags) |= AuOpts_##name; } while (0)
+#define au_fclr_opts(flags, name) \
+	do { (flags) &= ~AuOpts_##name; } while (0)
+
+#ifndef CONFIG_AUFS_DIRREN
+#undef AuOpts_DR_FLUSHED
+#define AuOpts_DR_FLUSHED	0
+#endif
+
+struct au_opts {
+	struct au_opt	*opt;
+	int		max_opt;
+
+	unsigned int	given_udba;
+	unsigned int	flags;
+	unsigned long	sb_flags;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* opts.c */
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm);
+const char *au_optstr_udba(int udba);
+const char *au_optstr_wbr_copyup(int wbr_copyup);
+const char *au_optstr_wbr_create(int wbr_create);
+
+void au_opts_free(struct au_opts *opts);
+struct super_block;
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+		   unsigned int pending);
+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
+
+unsigned int au_opt_udba(struct super_block *sb);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_OPTS_H__ */
diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c
new file mode 100644
index 00000000000..850829b2188
--- /dev/null
+++ b/fs/aufs/plink.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * pseudo-link
+ */
+
+#include "aufs.h"
+
+/*
+ * the pseudo-link maintenance mode.
+ * while a user process is maintaining the pseudo-links,
+ * prohibit adding a new plink and any branch manipulation.
+ *
+ * Flags
+ * NOPLM:
+ *	For entry functions which will handle plink, when i_mutex is already
+ *	held by the VFS.
+ *	They cannot wait and should return an error at once.
+ *	Callers have to check the error.
+ * NOPLMW:
+ *	For entry functions which will handle plink, when i_mutex is not held
+ *	by the VFS.
+ *	They can wait for the plink maintenance mode to finish.
+ *
+ * They behave like F_SETLK and F_SETLKW.
+ * If the caller never handles plink, then both flags are unnecessary.
+ */
+
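The comparison with F_SETLK/F_SETLKW above can be made concrete with a tiny userspace analogy (not aufs code, just the fcntl semantics the comment refers to): one process holds a record lock the way the maintenance process holds the maintenance mode, another either fails fast or waits.

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd = open("/tmp/plm-analogy", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) == -1)
		return 1;		/* "maintenance mode" entered */

	if (fork() == 0) {
		int fd2 = open("/tmp/plm-analogy", O_RDWR);

		if (fd2 < 0)
			_exit(1);
		/* NOPLM-like: do not wait, report the busy state at once */
		if (fcntl(fd2, F_SETLK, &fl) == -1)
			perror("busy, like NOPLM returning -EAGAIN");
		/* NOPLMW-like: sleep until the holder releases the lock */
		fcntl(fd2, F_SETLKW, &fl);
		puts("acquired, like NOPLMW after maintenance finishes");
		_exit(0);
	}
	sleep(1);
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);	/* "maintenance mode" left */
	wait(NULL);
	return 0;
}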
+int au_plink_maint(struct super_block *sb, int flags)
+{
+	int err;
+	pid_t pid, ppid;
+	struct task_struct *parent, *prev;
+	struct au_sbinfo *sbi;
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	if (!au_opt_test(au_mntflags(sb), PLINK))
+		goto out;
+
+	sbi = au_sbi(sb);
+	pid = sbi->si_plink_maint_pid;
+	if (!pid || pid == current->pid)
+		goto out;
+
+	/* todo: it highly depends upon /sbin/mount.aufs */
+	prev = NULL;
+	parent = current;
+	ppid = 0;
+	rcu_read_lock();
+	while (1) {
+		parent = rcu_dereference(parent->real_parent);
+		if (parent == prev)
+			break;
+		ppid = task_pid_vnr(parent);
+		if (pid == ppid) {
+			rcu_read_unlock();
+			goto out;
+		}
+		prev = parent;
+	}
+	rcu_read_unlock();
+
+	if (au_ftest_lock(flags, NOPLMW)) {
+		/* if there is no i_mutex lock in VFS, we don't need to wait */
+		/* AuDebugOn(!lockdep_depth(current)); */
+		while (sbi->si_plink_maint_pid) {
+			si_read_unlock(sb);
+			/* we gave up using wake_up_bit() */
+			wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
+
+			if (au_ftest_lock(flags, FLUSH))
+				au_nwt_flush(&sbi->si_nowait);
+			si_noflush_read_lock(sb);
+		}
+	} else if (au_ftest_lock(flags, NOPLM)) {
+		AuDbg("ppid %d, pid %d\n", ppid, pid);
+		err = -EAGAIN;
+	}
+
+out:
+	return err;
+}
+
+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
+{
+	spin_lock(&sbinfo->si_plink_maint_lock);
+	sbinfo->si_plink_maint_pid = 0;
+	spin_unlock(&sbinfo->si_plink_maint_lock);
+	wake_up_all(&sbinfo->si_plink_wq);
+}
+
+int au_plink_maint_enter(struct super_block *sb)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	/* make sure I am the only one in this fs */
+	si_write_lock(sb, AuLock_FLUSH);
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		spin_lock(&sbinfo->si_plink_maint_lock);
+		if (!sbinfo->si_plink_maint_pid)
+			sbinfo->si_plink_maint_pid = current->pid;
+		else
+			err = -EBUSY;
+		spin_unlock(&sbinfo->si_plink_maint_lock);
+	}
+	si_write_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb)
+{
+	int i;
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_icntnr *icntnr;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		hbl = sbinfo->si_plink + i;
+		hlist_bl_lock(hbl);
+		hlist_bl_for_each_entry(icntnr, pos, hbl, plink)
+			AuDbg("%lu\n", icntnr->vfs_inode.i_ino);
+		hlist_bl_unlock(hbl);
+	}
+}
+#endif
+
+/* is the inode pseudo-linked? */
+int au_plink_test(struct inode *inode)
+{
+	int found, i;
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_icntnr *icntnr;
+
+	sbinfo = au_sbi(inode->i_sb);
+	AuRwMustAnyLock(&sbinfo->si_rwsem);
+	AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
+	AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+	found = 0;
+	i = au_plink_hash(inode->i_ino);
+	hbl = sbinfo->si_plink + i;
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(icntnr, pos, hbl, plink)
+		if (&icntnr->vfs_inode == inode) {
+			found = 1;
+			break;
+		}
+	hlist_bl_unlock(hbl);
+	return found;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generate a name for plink.
+ * the file will be stored under AUFS_WH_PLINKDIR.
+ */
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned long */
+#define PLINK_NAME_LEN	((20 + 1) * 2)
+
+static int plink_name(char *name, int len, struct inode *inode,
+		      aufs_bindex_t bindex)
+{
+	int rlen;
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, bindex);
+	rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
+	return rlen;
+}
+
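Purely as an illustration of the format produced by plink_name() above, the entry name under the plink directory is just the aufs inode number and the branch inode number joined by a dot; a trivial userspace sketch with made-up inode numbers:

#include <stdio.h>

int main(void)
{
	char name[(20 + 1) * 2];	/* same bound as PLINK_NAME_LEN */
	unsigned long aufs_ino = 1234;	/* hypothetical inode numbers */
	unsigned long h_ino = 5678;

	snprintf(name, sizeof(name), "%lu.%lu", aufs_ino, h_ino);
	printf("plink entry name: %s\n", name);	/* prints "1234.5678" */
	return 0;
}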
+struct au_do_plink_lkup_args {
+	struct dentry **errp;
+	struct qstr *tgtname;
+	struct dentry *h_parent;
+	struct au_branch *br;
+};
+
+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
+				       struct dentry *h_parent,
+				       struct au_branch *br)
+{
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	h_inode = d_inode(h_parent);
+	inode_lock_shared_nested(h_inode, AuLsc_I_CHILD2);
+	h_dentry = vfsub_lkup_one(tgtname, h_parent);
+	inode_unlock_shared(h_inode);
+	return h_dentry;
+}
+
+static void au_call_do_plink_lkup(void *args)
+{
+	struct au_do_plink_lkup_args *a = args;
+	*a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
+}
+
+/* lookup the plink-ed @inode under the branch at @bindex */
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
+{
+	struct dentry *h_dentry, *h_parent;
+	struct au_branch *br;
+	int wkq_err;
+	char a[PLINK_NAME_LEN];
+	struct qstr tgtname = QSTR_INIT(a, 0);
+
+	AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+	br = au_sbr(inode->i_sb, bindex);
+	h_parent = br->br_wbr->wbr_plink;
+	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+		struct au_do_plink_lkup_args args = {
+			.errp		= &h_dentry,
+			.tgtname	= &tgtname,
+			.h_parent	= h_parent,
+			.br		= br
+		};
+
+		wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
+		if (unlikely(wkq_err))
+			h_dentry = ERR_PTR(wkq_err);
+	} else
+		h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
+
+	return h_dentry;
+}
+
+/* create a pseudo-link */
+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
+		      struct dentry *h_dentry, struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+	struct inode *h_dir, *delegated;
+
+	h_dir = d_inode(h_parent);
+	inode_lock_nested(h_dir, AuLsc_I_CHILD2);
+again:
+	h_path.dentry = vfsub_lkup_one(tgt, h_parent);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	err = 0;
+	/* wh.plink dir is not monitored */
+	/* todo: is it really safe? */
+	if (d_is_positive(h_path.dentry)
+	    && d_inode(h_path.dentry) != d_inode(h_dentry)) {
+		delegated = NULL;
+		err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal unlink\n");
+			iput(delegated);
+		}
+		dput(h_path.dentry);
+		h_path.dentry = NULL;
+		if (!err)
+			goto again;
+	}
+	if (!err && d_is_negative(h_path.dentry)) {
+		delegated = NULL;
+		err = vfsub_link(h_dentry, h_dir, &h_path, &delegated);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal link\n");
+			iput(delegated);
+		}
+	}
+	dput(h_path.dentry);
+
+out:
+	inode_unlock(h_dir);
+	return err;
+}
+
+struct do_whplink_args {
+	int *errp;
+	struct qstr *tgt;
+	struct dentry *h_parent;
+	struct dentry *h_dentry;
+	struct au_branch *br;
+};
+
+static void call_do_whplink(void *args)
+{
+	struct do_whplink_args *a = args;
+	*a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
+}
+
+static int whplink(struct dentry *h_dentry, struct inode *inode,
+		   aufs_bindex_t bindex, struct au_branch *br)
+{
+	int err, wkq_err;
+	struct au_wbr *wbr;
+	struct dentry *h_parent;
+	char a[PLINK_NAME_LEN];
+	struct qstr tgtname = QSTR_INIT(a, 0);
+
+	wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
+	h_parent = wbr->wbr_plink;
+	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+	/* always superio. */
+	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+		struct do_whplink_args args = {
+			.errp		= &err,
+			.tgt		= &tgtname,
+			.h_parent	= h_parent,
+			.h_dentry	= h_dentry,
+			.br		= br
+		};
+		wkq_err = au_wkq_wait(call_do_whplink, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	} else
+		err = do_whplink(&tgtname, h_parent, h_dentry, br);
+
+	return err;
+}
+
+/*
+ * create a new pseudo-link for @h_dentry on @bindex.
+ * the linked inode is held in aufs @inode.
+ */
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+		     struct dentry *h_dentry)
+{
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_icntnr *icntnr;
+	int found, err, cnt, i;
+
+	sb = inode->i_sb;
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	found = au_plink_test(inode);
+	if (found)
+		return;
+
+	i = au_plink_hash(inode->i_ino);
+	hbl = sbinfo->si_plink + i;
+	au_igrab(inode);
+
+	hlist_bl_lock(hbl);
+	hlist_bl_for_each_entry(icntnr, pos, hbl, plink) {
+		if (&icntnr->vfs_inode == inode) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		icntnr = container_of(inode, struct au_icntnr, vfs_inode);
+		hlist_bl_add_head(&icntnr->plink, hbl);
+	}
+	hlist_bl_unlock(hbl);
+	if (!found) {
+		cnt = au_hbl_count(hbl);
+#define msg "unexpectedly unbalanced or too many pseudo-links"
+		if (cnt > AUFS_PLINK_WARN)
+			AuWarn1(msg ", %d\n", cnt);
+#undef msg
+		err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
+		if (unlikely(err)) {
+			pr_warn("err %d, damaged pseudo link.\n", err);
+			au_hbl_del(&icntnr->plink, hbl);
+			iput(&icntnr->vfs_inode);
+		}
+	} else
+		iput(&icntnr->vfs_inode);
+}
+
+/* free all plinks */
+void au_plink_put(struct super_block *sb, int verbose)
+{
+	int i, warned;
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos, *tmp;
+	struct au_icntnr *icntnr;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	/* no spin_lock since sbinfo is write-locked */
+	warned = 0;
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		hbl = sbinfo->si_plink + i;
+		if (!warned && verbose && !hlist_bl_empty(hbl)) {
+			pr_warn("pseudo-link is not flushed\n");
+			warned = 1;
+		}
+		hlist_bl_for_each_entry_safe(icntnr, pos, tmp, hbl, plink)
+			iput(&icntnr->vfs_inode);
+		INIT_HLIST_BL_HEAD(hbl);
+	}
+}
+
+void au_plink_clean(struct super_block *sb, int verbose)
+{
+	struct dentry *root;
+
+	root = sb->s_root;
+	aufs_write_lock(root);
+	if (au_opt_test(au_mntflags(sb), PLINK))
+		au_plink_put(sb, verbose);
+	aufs_write_unlock(root);
+}
+
+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
+{
+	int do_put;
+	aufs_bindex_t btop, bbot, bindex;
+
+	do_put = 0;
+	btop = au_ibtop(inode);
+	bbot = au_ibbot(inode);
+	if (btop >= 0) {
+		for (bindex = btop; bindex <= bbot; bindex++) {
+			if (!au_h_iptr(inode, bindex)
+			    || au_ii_br_id(inode, bindex) != br_id)
+				continue;
+			au_set_h_iptr(inode, bindex, NULL, 0);
+			do_put = 1;
+			break;
+		}
+		if (do_put)
+			for (bindex = btop; bindex <= bbot; bindex++)
+				if (au_h_iptr(inode, bindex)) {
+					do_put = 0;
+					break;
+				}
+	} else
+		do_put = 1;
+
+	return do_put;
+}
+
+/* free the plinks on a branch specified by @br_id */
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
+{
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos, *tmp;
+	struct au_icntnr *icntnr;
+	struct inode *inode;
+	int i, do_put;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	/* no bit_lock since sbinfo is write-locked */
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		hbl = sbinfo->si_plink + i;
+		hlist_bl_for_each_entry_safe(icntnr, pos, tmp, hbl, plink) {
+			inode = au_igrab(&icntnr->vfs_inode);
+			ii_write_lock_child(inode);
+			do_put = au_plink_do_half_refresh(inode, br_id);
+			if (do_put) {
+				hlist_bl_del(&icntnr->plink);
+				iput(inode);
+			}
+			ii_write_unlock(inode);
+			iput(inode);
+		}
+	}
+}
diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c
new file mode 100644
index 00000000000..6975d3ab2d2
--- /dev/null
+++ b/fs/aufs/poll.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * poll operation
+ * Currently, there is only one filesystem which implements the ->poll operation.
+ */
+
+#include "aufs.h"
+
+__poll_t aufs_poll(struct file *file, struct poll_table_struct *pt)
+{
+	__poll_t mask;
+	struct file *h_file;
+	struct super_block *sb;
+
+	/* We should pretend an error happened. */
+	mask = EPOLLERR /* | EPOLLIN | EPOLLOUT */;
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+	h_file = au_read_pre(file, /*keep_fi*/0, /*lsc*/0);
+	if (IS_ERR(h_file)) {
+		AuDbg("h_file %ld\n", PTR_ERR(h_file));
+		goto out;
+	}
+
+	mask = vfs_poll(h_file, pt);
+	fput(h_file); /* instead of au_read_post() */
+
+out:
+	si_read_unlock(sb);
+	if (mask & EPOLLERR)
+		AuDbg("mask 0x%x\n", mask);
+	return mask;
+}
diff --git a/fs/aufs/posix_acl.c b/fs/aufs/posix_acl.c
new file mode 100644
index 00000000000..0789335650d
--- /dev/null
+++ b/fs/aufs/posix_acl.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * posix acl operations
+ */
+
+#include <linux/fs.h>
+#include "aufs.h"
+
+struct posix_acl *aufs_get_acl(struct inode *inode, int type)
+{
+	struct posix_acl *acl;
+	int err;
+	aufs_bindex_t bindex;
+	struct inode *h_inode;
+	struct super_block *sb;
+
+	acl = NULL;
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	ii_read_lock_child(inode);
+	if (!(sb->s_flags & SB_POSIXACL))
+		goto out;
+
+	bindex = au_ibtop(inode);
+	h_inode = au_h_iptr(inode, bindex);
+	if (unlikely(!h_inode
+		     || ((h_inode->i_mode & S_IFMT)
+			 != (inode->i_mode & S_IFMT)))) {
+		err = au_busy_or_stale();
+		acl = ERR_PTR(err);
+		goto out;
+	}
+
+	/* always topmost only */
+	acl = get_acl(h_inode, type);
+	if (IS_ERR(acl))
+		forget_cached_acl(inode, type);
+	else
+		set_cached_acl(inode, type, acl);
+
+out:
+	ii_read_unlock(inode);
+	si_read_unlock(sb);
+
+	AuTraceErrPtr(acl);
+	return acl;
+}
+
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	int err;
+	ssize_t ssz;
+	struct dentry *dentry;
+	struct au_sxattr arg = {
+		.type = AU_ACL_SET,
+		.u.acl_set = {
+			.acl	= acl,
+			.type	= type
+		},
+	};
+
+	IMustLock(inode);
+
+	if (inode->i_ino == AUFS_ROOT_INO)
+		dentry = dget(inode->i_sb->s_root);
+	else {
+		dentry = d_find_alias(inode);
+		if (!dentry)
+			dentry = d_find_any_alias(inode);
+		if (!dentry) {
+			pr_warn("cannot handle this inode, "
+				"please report to aufs-users ML\n");
+			err = -ENOENT;
+			goto out;
+		}
+	}
+
+	ssz = au_sxattr(dentry, inode, &arg);
+	/* forget it even on success since the branch might set it differently */
+	forget_cached_acl(inode, type);
+	dput(dentry);
+	err = ssz;
+	if (ssz >= 0)
+		err = 0;
+
+out:
+	return err;
+}
diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c
new file mode 100644
index 00000000000..50ee0a9c1bc
--- /dev/null
+++ b/fs/aufs/procfs.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * procfs interfaces
+ */
+
+#include <linux/proc_fs.h>
+#include "aufs.h"
+
+static int au_procfs_plm_release(struct inode *inode, struct file *file)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = file->private_data;
+	if (sbinfo) {
+		au_plink_maint_leave(sbinfo);
+		kobject_put(&sbinfo->si_kobj);
+	}
+
+	return 0;
+}
+
+static void au_procfs_plm_write_clean(struct file *file)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = file->private_data;
+	if (sbinfo)
+		au_plink_clean(sbinfo->si_sb, /*verbose*/0);
+}
+
+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
+{
+	int err;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_node *pos;
+
+	err = -EBUSY;
+	if (unlikely(file->private_data))
+		goto out;
+
+	sb = NULL;
+	/* don't use au_sbilist_lock() here */
+	hlist_bl_lock(&au_sbilist);
+	hlist_bl_for_each_entry(sbinfo, pos, &au_sbilist, si_list)
+		if (id == sysaufs_si_id(sbinfo)) {
+			if (kobject_get_unless_zero(&sbinfo->si_kobj))
+				sb = sbinfo->si_sb;
+			break;
+		}
+	hlist_bl_unlock(&au_sbilist);
+
+	err = -EINVAL;
+	if (unlikely(!sb))
+		goto out;
+
+	err = au_plink_maint_enter(sb);
+	if (!err)
+		/* keep kobject_get() */
+		file->private_data = sbinfo;
+	else
+		kobject_put(&sbinfo->si_kobj);
+out:
+	return err;
+}
+
+/*
+ * Accept a valid "si=xxxx" only.
+ * Once it is accepted successfully, accept "clean" too.
+ */
+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	unsigned long id;
+	/* last newline is allowed */
+	char buf[3 + sizeof(unsigned long) * 2 + 1];
+
+	err = -EACCES;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = -EINVAL;
+	if (unlikely(count > sizeof(buf)))
+		goto out;
+
+	err = copy_from_user(buf, ubuf, count);
+	if (unlikely(err)) {
+		err = -EFAULT;
+		goto out;
+	}
+	buf[count] = 0;
+
+	err = -EINVAL;
+	if (!strcmp("clean", buf)) {
+		au_procfs_plm_write_clean(file);
+		goto out_success;
+	} else if (unlikely(strncmp("si=", buf, 3)))
+		goto out;
+
+	err = kstrtoul(buf + 3, 16, &id);
+	if (unlikely(err))
+		goto out;
+
+	err = au_procfs_plm_write_si(file, id);
+	if (unlikely(err))
+		goto out;
+
+out_success:
+	err = count; /* success */
+out:
+	return err;
+}
+
+static const struct proc_ops au_procfs_plm_op = {
+	.proc_write	= au_procfs_plm_write,
+	.proc_release	= au_procfs_plm_release
+};
+
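Putting the write and release handlers together, the maintenance protocol seen from userspace is roughly: open the proc file (CAP_SYS_ADMIN is required), write "si=<hex id>" to enter the maintenance mode, keep the file open while manipulating the pseudo-links, optionally write "clean" to flush them, and close the file so that release() leaves the mode. A hedged sketch; the proc path is assumed from AUFS_PLINK_MAINT_DIR/AUFS_PLINK_MAINT_NAME and the superblock id is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* path assumed from AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME */
	int fd = open("/proc/fs/aufs/plink_maint", O_WRONLY);
	const char *cmd = "si=123abc";	/* placeholder superblock id */

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0)	/* enter maintenance mode */
		perror("si=");
	/* ... manipulate the pseudo-links here ... */
	if (write(fd, "clean", 5) < 0)		/* flush the pseudo-links */
		perror("clean");
	close(fd);		/* release() leaves the maintenance mode */
	return 0;
}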
+/* ---------------------------------------------------------------------- */
+
+static struct proc_dir_entry *au_procfs_dir;
+
+void au_procfs_fin(void)
+{
+	remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
+	remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+}
+
+int __init au_procfs_init(void)
+{
+	int err;
+	struct proc_dir_entry *entry;
+
+	err = -ENOMEM;
+	au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
+	if (unlikely(!au_procfs_dir))
+		goto out;
+
+	entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | 0200,
+			    au_procfs_dir, &au_procfs_plm_op);
+	if (unlikely(!entry))
+		goto out_dir;
+
+	err = 0;
+	goto out; /* success */
+
+
+	remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+out:
+	return err;
+}
diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c
new file mode 100644
index 00000000000..afd77564f3d
--- /dev/null
+++ b/fs/aufs/rdu.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * readdir in userspace.
+ */
+
+#include <linux/compat.h>
+#include <linux/fs_stack.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/* bits for struct aufs_rdu.flags */
+#define	AuRdu_CALLED	1
+#define	AuRdu_CONT	(1 << 1)
+#define	AuRdu_FULL	(1 << 2)
+#define au_ftest_rdu(flags, name)	((flags) & AuRdu_##name)
+#define au_fset_rdu(flags, name) \
+	do { (flags) |= AuRdu_##name; } while (0)
+#define au_fclr_rdu(flags, name) \
+	do { (flags) &= ~AuRdu_##name; } while (0)
+
+struct au_rdu_arg {
+	struct dir_context		ctx;
+	struct aufs_rdu			*rdu;
+	union au_rdu_ent_ul		ent;
+	unsigned long			end;
+
+	struct super_block		*sb;
+	int				err;
+};
+
+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen,
+		       loff_t offset, u64 h_ino, unsigned int d_type)
+{
+	int err, len;
+	struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx);
+	struct aufs_rdu *rdu = arg->rdu;
+	struct au_rdu_ent ent;
+
+	err = 0;
+	arg->err = 0;
+	au_fset_rdu(rdu->cookie.flags, CALLED);
+	len = au_rdu_len(nlen);
+	if (arg->ent.ul + len < arg->end) {
+		ent.ino = h_ino;
+		ent.bindex = rdu->cookie.bindex;
+		ent.type = d_type;
+		ent.nlen = nlen;
+		if (unlikely(nlen > AUFS_MAX_NAMELEN))
+			ent.type = DT_UNKNOWN;
+
+		/* unnecessary to support mmap_sem since this is a dir */
+		err = -EFAULT;
+		if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
+			goto out;
+		if (copy_to_user(arg->ent.e->name, name, nlen))
+			goto out;
+		/* the terminating NULL */
+		if (__put_user(0, arg->ent.e->name + nlen))
+			goto out;
+		err = 0;
+		/* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
+		arg->ent.ul += len;
+		rdu->rent++;
+	} else {
+		err = -EFAULT;
+		au_fset_rdu(rdu->cookie.flags, FULL);
+		rdu->full = 1;
+		rdu->tail = arg->ent;
+	}
+
+out:
+	/* AuTraceErr(err); */
+	return err;
+}
+
+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
+{
+	int err;
+	loff_t offset;
+	struct au_rdu_cookie *cookie = &arg->rdu->cookie;
+
+	/* we don't have to care about (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */
+	offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
+	err = offset;
+	if (unlikely(offset != cookie->h_pos))
+		goto out;
+
+	err = 0;
+	do {
+		arg->err = 0;
+		au_fclr_rdu(cookie->flags, CALLED);
+		/* smp_mb(); */
+		err = vfsub_iterate_dir(h_file, &arg->ctx);
+		if (err >= 0)
+			err = arg->err;
+	} while (!err
+		 && au_ftest_rdu(cookie->flags, CALLED)
+		 && !au_ftest_rdu(cookie->flags, FULL));
+	cookie->h_pos = h_file->f_pos;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
+{
+	int err;
+	aufs_bindex_t bbot;
+	struct au_rdu_arg arg = {
+		.ctx = {
+			.actor = au_rdu_fill
+		}
+	};
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *h_file;
+	struct au_rdu_cookie *cookie = &rdu->cookie;
+
+	/* VERIFY_WRITE */
+	err = !access_ok(rdu->ent.e, rdu->sz);
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	rdu->rent = 0;
+	rdu->tail = rdu->ent;
+	rdu->full = 0;
+	arg.rdu = rdu;
+	arg.ent = rdu->ent;
+	arg.end = arg.ent.ul;
+	arg.end += rdu->sz;
+
+	err = -ENOTDIR;
+	if (unlikely(!file->f_op->iterate && !file->f_op->iterate_shared))
+		goto out;
+
+	err = security_file_permission(file, MAY_READ);
+	AuTraceErr(err);
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_path.dentry;
+	inode = d_inode(dentry);
+	inode_lock_shared(inode);
+
+	arg.sb = inode->i_sb;
+	err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_mtx;
+	err = au_alive_dir(dentry);
+	if (unlikely(err))
+		goto out_si;
+	/* todo: reval? */
+	fi_read_lock(file);
+
+	err = -EAGAIN;
+	if (unlikely(au_ftest_rdu(cookie->flags, CONT)
+		     && cookie->generation != au_figen(file)))
+		goto out_unlock;
+
+	err = 0;
+	if (!rdu->blk) {
+		rdu->blk = au_sbi(arg.sb)->si_rdblk;
+		if (!rdu->blk)
+			rdu->blk = au_dir_size(file, /*dentry*/NULL);
+	}
+	bbot = au_fbtop(file);
+	if (cookie->bindex < bbot)
+		cookie->bindex = bbot;
+	bbot = au_fbbot_dir(file);
+	/* AuDbg("b%d, b%d\n", cookie->bindex, bbot); */
+	for (; !err && cookie->bindex <= bbot;
+	     cookie->bindex++, cookie->h_pos = 0) {
+		h_file = au_hf_dir(file, cookie->bindex);
+		if (!h_file)
+			continue;
+
+		au_fclr_rdu(cookie->flags, FULL);
+		err = au_rdu_do(h_file, &arg);
+		AuTraceErr(err);
+		if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
+			break;
+	}
+	AuDbg("rent %llu\n", rdu->rent);
+
+	if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
+		rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
+		au_fset_rdu(cookie->flags, CONT);
+		cookie->generation = au_figen(file);
+	}
+
+	ii_read_lock_child(inode);
+	fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibtop(inode)));
+	ii_read_unlock(inode);
+
+out_unlock:
+	fi_read_unlock(file);
+out_si:
+	si_read_unlock(arg.sb);
+out_mtx:
+	inode_unlock_shared(inode);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
+{
+	int err;
+	ino_t ino;
+	unsigned long long nent;
+	union au_rdu_ent_ul *u;
+	struct au_rdu_ent ent;
+	struct super_block *sb;
+
+	err = 0;
+	nent = rdu->nent;
+	u = &rdu->ent;
+	sb = file->f_path.dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	while (nent-- > 0) {
+		/* unnecessary to support mmap_sem since this is a dir */
+		err = copy_from_user(&ent, u->e, sizeof(ent));
+		if (!err)
+			/* VERIFY_WRITE */
+			err = !access_ok(&u->e->ino, sizeof(ino));
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+			break;
+		}
+
+		/* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
+		if (!ent.wh)
+			err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
+		else
+			err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
+					&ino);
+		if (unlikely(err)) {
+			AuTraceErr(err);
+			break;
+		}
+
+		err = __put_user(ino, &u->e->ino);
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+			break;
+		}
+		u->ul += au_rdu_len(ent.nlen);
+	}
+	si_read_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_rdu_verify(struct aufs_rdu *rdu)
+{
+	AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
+	      "%llu, b%d, 0x%x, g%u}\n",
+	      rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
+	      rdu->blk,
+	      rdu->rent, rdu->shwh, rdu->full,
+	      rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
+	      rdu->cookie.generation);
+
+	if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
+		return 0;
+
+	AuDbg("%u:%u\n",
+	      rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
+	return -EINVAL;
+}
+
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err, e;
+	struct aufs_rdu rdu;
+	void __user *p = (void __user *)arg;
+
+	err = copy_from_user(&rdu, p, sizeof(rdu));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	err = au_rdu_verify(&rdu);
+	if (unlikely(err))
+		goto out;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+		err = au_rdu(file, &rdu);
+		if (unlikely(err))
+			break;
+
+		e = copy_to_user(p, &rdu, sizeof(rdu));
+		if (unlikely(e)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+		break;
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ino(file, &rdu);
+		break;
+
+	default:
+		/* err = -ENOTTY; */
+		err = -EINVAL;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
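For orientation only, the ioctl above is driven from userspace roughly as sketched below; the struct members are exactly the ones the kernel code dereferences (sz, ent, verify[AufsCtlRduV_SZ], rent, full), while the header location and everything else are assumptions about the aufs uapi definitions rather than part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/aufs_type.h>	/* assumed location of struct aufs_rdu etc. */

int main(int argc, char *argv[])
{
	char buf[4096];
	struct aufs_rdu rdu;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return 1;
	memset(&rdu, 0, sizeof(rdu));
	rdu.verify[AufsCtlRduV_SZ] = sizeof(rdu); /* checked by au_rdu_verify() */
	rdu.sz = sizeof(buf);
	rdu.ent.e = (struct au_rdu_ent *)buf; /* filled with au_rdu_ent records */

	do {
		if (ioctl(fd, AUFS_CTL_RDU, &rdu) < 0) {
			perror("AUFS_CTL_RDU");
			break;
		}
		printf("got %llu entries%s\n", (unsigned long long)rdu.rent,
		       rdu.full ? " (buffer full, continuing)" : "");
		/* a real reader would walk buf here and later use
		 * AUFS_CTL_RDU_INO to map branch inode numbers to aufs ones */
	} while (rdu.full);

	close(fd);
	return 0;
}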
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err, e;
+	struct aufs_rdu rdu;
+	void __user *p = compat_ptr(arg);
+
+	/* todo: get_user()? */
+	err = copy_from_user(&rdu, p, sizeof(rdu));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	rdu.ent.e = compat_ptr(rdu.ent.ul);
+	err = au_rdu_verify(&rdu);
+	if (unlikely(err))
+		goto out;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+		err = au_rdu(file, &rdu);
+		if (unlikely(err))
+			break;
+
+		rdu.ent.ul = ptr_to_compat(rdu.ent.e);
+		rdu.tail.ul = ptr_to_compat(rdu.tail.e);
+		e = copy_to_user(p, &rdu, sizeof(rdu));
+		if (unlikely(e)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+		break;
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ino(file, &rdu);
+		break;
+
+	default:
+		/* err = -ENOTTY; */
+		err = -EINVAL;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+#endif
diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h
new file mode 100644
index 00000000000..370eae15934
--- /dev/null
+++ b/fs/aufs/rwsem.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * simple read-write semaphore wrappers
+ */
+
+#ifndef __AUFS_RWSEM_H__
+#define __AUFS_RWSEM_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+/* in the future, the name 'au_rwsem' will be totally gone */
+#define au_rwsem	rw_semaphore
+
+/* to make debugging easier, do not make them inline functions */
+#define AuRwMustNoWaiters(rw)	AuDebugOn(rwsem_is_contended(rw))
+/* rwsem_is_locked() is unusable */
+#define AuRwMustReadLock(rw)	AuDebugOn(!lockdep_recursing(current) \
+					  && debug_locks \
+					  && !lockdep_is_held_type(rw, 1))
+#define AuRwMustWriteLock(rw)	AuDebugOn(!lockdep_recursing(current) \
+					  && debug_locks \
+					  && !lockdep_is_held_type(rw, 0))
+#define AuRwMustAnyLock(rw)	AuDebugOn(!lockdep_recursing(current) \
+					  && debug_locks \
+					  && !lockdep_is_held(rw))
+#define AuRwDestroy(rw)		AuDebugOn(!lockdep_recursing(current) \
+					  && debug_locks \
+					  && lockdep_is_held(rw))
+
+#define au_rw_init(rw)	init_rwsem(rw)
+
+#define au_rw_init_wlock(rw) do {		\
+		au_rw_init(rw);			\
+		down_write(rw);			\
+	} while (0)
+
+#define au_rw_init_wlock_nested(rw, lsc) do {	\
+		au_rw_init(rw);			\
+		down_write_nested(rw, lsc);	\
+	} while (0)
+
+#define au_rw_read_lock(rw)		down_read(rw)
+#define au_rw_read_lock_nested(rw, lsc)	down_read_nested(rw, lsc)
+#define au_rw_read_unlock(rw)		up_read(rw)
+#define au_rw_dgrade_lock(rw)		downgrade_write(rw)
+#define au_rw_write_lock(rw)		down_write(rw)
+#define au_rw_write_lock_nested(rw, lsc) down_write_nested(rw, lsc)
+#define au_rw_write_unlock(rw)		up_write(rw)
+/* why is the _nested version not defined? */
+#define au_rw_read_trylock(rw)		down_read_trylock(rw)
+#define au_rw_write_trylock(rw)		down_write_trylock(rw)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_RWSEM_H__ */
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
new file mode 100644
index 00000000000..3835be61d97
--- /dev/null
+++ b/fs/aufs/sbinfo.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * superblock private data
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
+/*
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+void au_si_free(struct kobject *kobj)
+{
+	int i;
+	struct au_sbinfo *sbinfo;
+	char *locked __maybe_unused; /* debug only */
+
+	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+	for (i = 0; i < AuPlink_NHASH; i++)
+		AuDebugOn(!hlist_bl_empty(sbinfo->si_plink + i));
+	AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
+
+	AuLCntZero(au_lcnt_read(&sbinfo->si_ninodes, /*do_rev*/0));
+	au_lcnt_fin(&sbinfo->si_ninodes, /*do_sync*/0);
+	AuLCntZero(au_lcnt_read(&sbinfo->si_nfiles, /*do_rev*/0));
+	au_lcnt_fin(&sbinfo->si_nfiles, /*do_sync*/0);
+
+	dbgaufs_si_fin(sbinfo);
+	au_rw_write_lock(&sbinfo->si_rwsem);
+	au_br_free(sbinfo);
+	au_rw_write_unlock(&sbinfo->si_rwsem);
+
+	au_kfree_try_rcu(sbinfo->si_branch);
+	mutex_destroy(&sbinfo->si_xib_mtx);
+	AuRwDestroy(&sbinfo->si_rwsem);
+
+	au_lcnt_wait_for_fin(&sbinfo->si_ninodes);
+	/* si_nfiles is waited too */
+	au_kfree_rcu(sbinfo);
+}
+
+int au_si_alloc(struct super_block *sb)
+{
+	int err, i;
+	struct au_sbinfo *sbinfo;
+
+	err = -ENOMEM;
+	sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
+	if (unlikely(!sbinfo))
+		goto out;
+
+	/* will be reallocated separately */
+	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
+	if (unlikely(!sbinfo->si_branch))
+		goto out_sbinfo;
+
+	err = sysaufs_si_init(sbinfo);
+	if (!err) {
+		dbgaufs_si_null(sbinfo);
+		err = dbgaufs_si_init(sbinfo);
+		if (unlikely(err))
+			kobject_put(&sbinfo->si_kobj);
+	}
+	if (unlikely(err))
+		goto out_br;
+
+	au_nwt_init(&sbinfo->si_nowait);
+	au_rw_init_wlock(&sbinfo->si_rwsem);
+
+	au_lcnt_init(&sbinfo->si_ninodes, /*release*/NULL);
+	au_lcnt_init(&sbinfo->si_nfiles, /*release*/NULL);
+
+	sbinfo->si_bbot = -1;
+	sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2;
+
+	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
+	sbinfo->si_wbr_create = AuWbrCreate_Def;
+	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
+	sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
+
+	au_fhsm_init(sbinfo);
+
+	sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
+
+	sbinfo->si_xino_jiffy = jiffies;
+	sbinfo->si_xino_expire
+		= msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC);
+	mutex_init(&sbinfo->si_xib_mtx);
+	/* leave si_xib_last_pindex and si_xib_next_bit */
+
+	INIT_HLIST_BL_HEAD(&sbinfo->si_aopen);
+
+	sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
+	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+	sbinfo->si_dirwh = AUFS_DIRWH_DEF;
+
+	for (i = 0; i < AuPlink_NHASH; i++)
+		INIT_HLIST_BL_HEAD(sbinfo->si_plink + i);
+	init_waitqueue_head(&sbinfo->si_plink_wq);
+	spin_lock_init(&sbinfo->si_plink_maint_lock);
+
+	INIT_HLIST_BL_HEAD(&sbinfo->si_files);
+
+	/* with getattr by default */
+	sbinfo->si_iop_array = aufs_iop;
+
+	/* leave other members for sysaufs and si_mnt. */
+	sbinfo->si_sb = sb;
+	sb->s_fs_info = sbinfo;
+	si_pid_set(sb);
+	return 0; /* success */
+
+out_br:
+	au_kfree_try_rcu(sbinfo->si_branch);
+out_sbinfo:
+	au_kfree_rcu(sbinfo);
+out:
+	return err;
+}
+
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink)
+{
+	int err, sz;
+	struct au_branch **brp;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	err = -ENOMEM;
+	sz = sizeof(*brp) * (sbinfo->si_bbot + 1);
+	if (unlikely(!sz))
+		sz = sizeof(*brp);
+	brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS,
+			   may_shrink);
+	if (brp) {
+		sbinfo->si_branch = brp;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_sigen_inc(struct super_block *sb)
+{
+	unsigned int gen;
+	struct inode *inode;
+
+	SiMustWriteLock(sb);
+
+	gen = ++au_sbi(sb)->si_generation;
+	au_update_digen(sb->s_root);
+	inode = d_inode(sb->s_root);
+	au_update_iigen(inode, /*half*/0);
+	inode_inc_iversion(inode);
+	return gen;
+}
+
+aufs_bindex_t au_new_br_id(struct super_block *sb)
+{
+	aufs_bindex_t br_id;
+	int i;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
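+	/* find a non-zero id which is not assigned to any existing branch */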
+	sbinfo = au_sbi(sb);
+	for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
+		br_id = ++sbinfo->si_last_br_id;
+		AuDebugOn(br_id < 0);
+		if (br_id && au_br_index(sb, br_id) < 0)
+			return br_id;
+	}
+
+	return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* it is ok for new 'nwt' tasks to be appended while we are sleeping */
+int si_read_lock(struct super_block *sb, int flags)
+{
+	int err;
+
+	err = 0;
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+	si_noflush_read_lock(sb);
+	err = au_plink_maint(sb, flags);
+	if (unlikely(err))
+		si_read_unlock(sb);
+
+	return err;
+}
+
+int si_write_lock(struct super_block *sb, int flags)
+{
+	int err;
+
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+	si_noflush_write_lock(sb);
+	err = au_plink_maint(sb, flags);
+	if (unlikely(err))
+		si_write_unlock(sb);
+
+	return err;
+}
+
+/* dentry and super_block lock. call at entry point */
+int aufs_read_lock(struct dentry *dentry, int flags)
+{
+	int err;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, flags);
+	if (unlikely(err))
+		goto out;
+
+	if (au_ftest_lock(flags, DW))
+		di_write_lock_child(dentry);
+	else
+		di_read_lock_child(dentry, flags);
+
+	if (au_ftest_lock(flags, GEN)) {
+		err = au_digen_test(dentry, au_sigen(sb));
+		if (!au_opt_test(au_mntflags(sb), UDBA_NONE))
+			AuDebugOn(!err && au_dbrange_test(dentry));
+		else if (!err)
+			err = au_dbrange_test(dentry);
+		if (unlikely(err))
+			aufs_read_unlock(dentry, flags);
+	}
+
+out:
+	return err;
+}
+
+void aufs_read_unlock(struct dentry *dentry, int flags)
+{
+	if (au_ftest_lock(flags, DW))
+		di_write_unlock(dentry);
+	else
+		di_read_unlock(dentry, flags);
+	si_read_unlock(dentry->d_sb);
+}
+
+void aufs_write_lock(struct dentry *dentry)
+{
+	si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
+	di_write_lock_child(dentry);
+}
+
+void aufs_write_unlock(struct dentry *dentry)
+{
+	di_write_unlock(dentry);
+	si_write_unlock(dentry->d_sb);
+}
+
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
+{
+	int err;
+	unsigned int sigen;
+	struct super_block *sb;
+
+	sb = d1->d_sb;
+	err = si_read_lock(sb, flags);
+	if (unlikely(err))
+		goto out;
+
+	di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIRS));
+
+	if (au_ftest_lock(flags, GEN)) {
+		sigen = au_sigen(sb);
+		err = au_digen_test(d1, sigen);
+		AuDebugOn(!err && au_dbrange_test(d1));
+		if (!err) {
+			err = au_digen_test(d2, sigen);
+			AuDebugOn(!err && au_dbrange_test(d2));
+		}
+		if (unlikely(err))
+			aufs_read_and_write_unlock2(d1, d2);
+	}
+
+out:
+	return err;
+}
+
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+	di_write_unlock2(d1, d2);
+	si_read_unlock(d1->d_sb);
+}
diff --git a/fs/aufs/super.c b/fs/aufs/super.c
new file mode 100644
index 00000000000..aa7f210e549
--- /dev/null
+++ b/fs/aufs/super.c
@@ -0,0 +1,1047 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * mount and super_block operations
+ */
+
+#include <linux/iversion.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include "aufs.h"
+
+/*
+ * super_operations
+ */
+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
+{
+	struct au_icntnr *c;
+
+	c = au_cache_alloc_icntnr();
+	if (c) {
+		au_icntnr_init(c);
+		inode_set_iversion(&c->vfs_inode, 1); /* sigen(sb); */
+		c->iinfo.ii_hinode = NULL;
+		return &c->vfs_inode;
+	}
+	return NULL;
+}
+
+static void aufs_destroy_inode(struct inode *inode)
+{
+	if (!au_is_bad_inode(inode))
+		au_iinfo_fin(inode);
+}
+
+static void aufs_free_inode(struct inode *inode)
+{
+	au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
+}
+
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
+{
+	struct inode *inode;
+	int err;
+
+	inode = iget_locked(sb, ino);
+	if (unlikely(!inode)) {
+		inode = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	if (!(inode->i_state & I_NEW))
+		goto out;
+
+	err = au_xigen_new(inode);
+	if (!err)
+		err = au_iinfo_init(inode);
+	if (!err)
+		inode_inc_iversion(inode);
+	else {
+		iget_failed(inode);
+		inode = ERR_PTR(err);
+	}
+
+out:
+	/* never return NULL */
+	AuDebugOn(!inode);
+	AuTraceErrPtr(inode);
+	return inode;
+}
+
+/* lock free root dinfo */
+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct path path;
+	struct au_hdentry *hdp;
+	struct au_branch *br;
+	au_br_perm_str_t perm;
+
+	err = 0;
+	bbot = au_sbbot(sb);
+	bindex = 0;
+	hdp = au_hdentry(au_di(sb->s_root), bindex);
+	for (; !err && bindex <= bbot; bindex++, hdp++) {
+		br = au_sbr(sb, bindex);
+		path.mnt = au_br_mnt(br);
+		path.dentry = hdp->hd_dentry;
+		err = au_seq_path(seq, &path);
+		if (!err) {
+			au_optstr_br_perm(&perm, br->br_perm);
+			seq_printf(seq, "=%s", perm.a);
+			if (bindex != bbot)
+				seq_putc(seq, ':');
+		}
+	}
+	if (unlikely(err || seq_has_overflowed(seq)))
+		err = -E2BIG;
+
+	return err;
+}
+
+static void au_gen_fmt(char *fmt, int len __maybe_unused, const char *pat,
+		       const char *append)
+{
+	char *p;
+
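+	/* copy the pattern up to and including ':', then append the value's format */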
+	p = fmt;
+	while (*pat != ':')
+		*p++ = *pat++;
+	*p++ = *pat++;
+	strcpy(p, append);
+	AuDebugOn(strlen(fmt) >= len);
+}
+
+static void au_show_wbr_create(struct seq_file *m, int v,
+			       struct au_sbinfo *sbinfo)
+{
+	const char *pat;
+	char fmt[32];
+	struct au_wbr_mfs *mfs;
+
+	AuRwMustAnyLock(&sbinfo->si_rwsem);
+
+	seq_puts(m, ",create=");
+	pat = au_optstr_wbr_create(v);
+	mfs = &sbinfo->si_wbr_mfs;
+	switch (v) {
+	case AuWbrCreate_TDP:
+	case AuWbrCreate_RR:
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_PMFS:
+		seq_puts(m, pat);
+		break;
+	case AuWbrCreate_MFSRR:
+	case AuWbrCreate_TDMFS:
+	case AuWbrCreate_PMFSRR:
+		au_gen_fmt(fmt, sizeof(fmt), pat, "%llu");
+		seq_printf(m, fmt, mfs->mfsrr_watermark);
+		break;
+	case AuWbrCreate_MFSV:
+	case AuWbrCreate_PMFSV:
+		au_gen_fmt(fmt, sizeof(fmt), pat, "%lu");
+		seq_printf(m, fmt,
+			   jiffies_to_msecs(mfs->mfs_expire)
+			   / MSEC_PER_SEC);
+		break;
+	case AuWbrCreate_MFSRRV:
+	case AuWbrCreate_TDMFSV:
+	case AuWbrCreate_PMFSRRV:
+		au_gen_fmt(fmt, sizeof(fmt), pat, "%llu:%lu");
+		seq_printf(m, fmt, mfs->mfsrr_watermark,
+			   jiffies_to_msecs(mfs->mfs_expire) / MSEC_PER_SEC);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static int au_show_xino(struct seq_file *seq, struct super_block *sb)
+{
+#ifdef CONFIG_SYSFS
+	return 0;
+#else
+	int err;
+	const int len = sizeof(AUFS_XINO_FNAME) - 1;
+	aufs_bindex_t bindex, brid;
+	struct qstr *name;
+	struct file *f;
+	struct dentry *d, *h_root;
+	struct au_branch *br;
+
+	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem);
+
+	err = 0;
+	f = au_sbi(sb)->si_xib;
+	if (!f)
+		goto out;
+
+	/* stop printing the default xino path on the first writable branch */
+	h_root = NULL;
+	bindex = au_xi_root(sb, f->f_path.dentry);
+	if (bindex >= 0) {
+		br = au_sbr(sb, bindex);
+		h_root = au_br_dentry(br);
+	}
+
+	d = f->f_path.dentry;
+	name = &d->d_name;
+	/* safe ->d_parent because the file is unlinked */
+	if (d->d_parent == h_root
+	    && name->len == len
+	    && !memcmp(name->name, AUFS_XINO_FNAME, len))
+		goto out;
+
+	seq_puts(seq, ",xino=");
+	err = au_xino_path(seq, f);
+
+out:
+	return err;
+#endif
+}
+
+/* seq_file will re-call me in case of too long string */
+static int aufs_show_options(struct seq_file *m, struct dentry *dentry)
+{
+	int err;
+	unsigned int mnt_flags, v;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+#define AuBool(name, str) do { \
+	v = au_opt_test(mnt_flags, name); \
+	if (v != au_opt_test(AuOpt_Def, name)) \
+		seq_printf(m, ",%s" #str, v ? "" : "no"); \
+} while (0)
+
+#define AuStr(name, str) do { \
+	v = mnt_flags & AuOptMask_##name; \
+	if (v != (AuOpt_Def & AuOptMask_##name)) \
+		seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
+} while (0)
+
+#define AuUInt(name, str, val) do { \
+	if (val != AUFS_##name##_DEF) \
+		seq_printf(m, "," #str "=%u", val); \
+} while (0)
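+
+/*
+ * these helpers print an option only when its current value differs from the
+ * compiled-in default, e.g. AuBool(PLINK, plink) emits ",plink" or ",noplink"
+ * only when it does not match AuOpt_Def
+ */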
+
+	sb = dentry->d_sb;
+	if (sb->s_flags & SB_POSIXACL)
+		seq_puts(m, ",acl");
+#if 0 /* reserved for future use */
+	if (sb->s_flags & SB_I_VERSION)
+		seq_puts(m, ",i_version");
+#endif
+
+	/* lock free root dinfo */
+	si_noflush_read_lock(sb);
+	sbinfo = au_sbi(sb);
+	seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
+
+	mnt_flags = au_mntflags(sb);
+	if (au_opt_test(mnt_flags, XINO)) {
+		err = au_show_xino(m, sb);
+		if (unlikely(err))
+			goto out;
+	} else
+		seq_puts(m, ",noxino");
+
+	AuBool(TRUNC_XINO, trunc_xino);
+	AuStr(UDBA, udba);
+	AuBool(SHWH, shwh);
+	AuBool(PLINK, plink);
+	AuBool(DIO, dio);
+	AuBool(DIRPERM1, dirperm1);
+
+	v = sbinfo->si_wbr_create;
+	if (v != AuWbrCreate_Def)
+		au_show_wbr_create(m, v, sbinfo);
+
+	v = sbinfo->si_wbr_copyup;
+	if (v != AuWbrCopyup_Def)
+		seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
+
+	v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
+	if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
+		seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
+
+	AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
+
+	v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
+	AuUInt(RDCACHE, rdcache, v);
+
+	AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
+	AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
+
+	au_fhsm_show(m, sbinfo);
+
+	AuBool(DIRREN, dirren);
+	AuBool(SUM, sum);
+	/* AuBool(SUM_W, wsum); */
+	AuBool(WARN_PERM, warn_perm);
+	AuBool(VERBOSE, verbose);
+
+out:
+	/* be sure to print "br:" last */
+	if (!sysaufs_brs) {
+		seq_puts(m, ",br:");
+		au_show_brs(m, sb);
+	}
+	si_read_unlock(sb);
+	return 0;
+
+#undef AuBool
+#undef AuStr
+#undef AuUInt
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sum mode which returns the summation for statfs(2) */
+
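+/* saturating helpers: clamp the result to ULLONG_MAX instead of wrapping */
+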
+static u64 au_add_till_max(u64 a, u64 b)
+{
+	u64 old;
+
+	old = a;
+	a += b;
+	if (old <= a)
+		return a;
+	return ULLONG_MAX;
+}
+
+static u64 au_mul_till_max(u64 a, long mul)
+{
+	u64 old;
+
+	old = a;
+	a *= mul;
+	if (old <= a)
+		return a;
+	return ULLONG_MAX;
+}
+
+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
+{
+	int err;
+	long bsize, factor;
+	u64 blocks, bfree, bavail, files, ffree;
+	aufs_bindex_t bbot, bindex, i;
+	unsigned char shared;
+	struct path h_path;
+	struct super_block *h_sb;
+
+	err = 0;
+	bsize = LONG_MAX;
+	files = 0;
+	ffree = 0;
+	blocks = 0;
+	bfree = 0;
+	bavail = 0;
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		h_path.mnt = au_sbr_mnt(sb, bindex);
+		h_sb = h_path.mnt->mnt_sb;
+		shared = 0;
+		for (i = 0; !shared && i < bindex; i++)
+			shared = (au_sbr_sb(sb, i) == h_sb);
+		if (shared)
+			continue;
+
+		/* sb->s_root for NFS is unreliable */
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, buf);
+		if (unlikely(err))
+			goto out;
+
+		if (bsize > buf->f_bsize) {
+			/*
+			 * we are about to reduce bsize, so the counts
+			 * accumulated so far must be scaled up to match the
+			 * new, smaller block size
+			 */
+			factor = (bsize / buf->f_bsize);
+			blocks = au_mul_till_max(blocks, factor);
+			bfree = au_mul_till_max(bfree, factor);
+			bavail = au_mul_till_max(bavail, factor);
+			bsize = buf->f_bsize;
+		}
+
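+		/* convert this branch's counts to the common (smaller) bsize */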
+		factor = (buf->f_bsize / bsize);
+		blocks = au_add_till_max(blocks,
+				au_mul_till_max(buf->f_blocks, factor));
+		bfree = au_add_till_max(bfree,
+				au_mul_till_max(buf->f_bfree, factor));
+		bavail = au_add_till_max(bavail,
+				au_mul_till_max(buf->f_bavail, factor));
+		files = au_add_till_max(files, buf->f_files);
+		ffree = au_add_till_max(ffree, buf->f_ffree);
+	}
+
+	buf->f_bsize = bsize;
+	buf->f_blocks = blocks;
+	buf->f_bfree = bfree;
+	buf->f_bavail = bavail;
+	buf->f_files = files;
+	buf->f_ffree = ffree;
+	buf->f_frsize = 0;
+
+out:
+	return err;
+}
+
+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	int err;
+	struct path h_path;
+	struct super_block *sb;
+
+	/* lock free root dinfo */
+	sb = dentry->d_sb;
+	si_noflush_read_lock(sb);
+	if (!au_opt_test(au_mntflags(sb), SUM)) {
+		/* sb->s_root for NFS is unreliable */
+		h_path.mnt = au_sbr_mnt(sb, 0);
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, buf);
+	} else
+		err = au_statfs_sum(sb, buf);
+	si_read_unlock(sb);
+
+	if (!err) {
+		buf->f_type = AUFS_SUPER_MAGIC;
+		buf->f_namelen = AUFS_MAX_NAMELEN;
+		memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
+	}
+	/* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_sync_fs(struct super_block *sb, int wait)
+{
+	int err, e;
+	aufs_bindex_t bbot, bindex;
+	struct au_branch *br;
+	struct super_block *h_sb;
+
+	err = 0;
+	si_noflush_read_lock(sb);
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (!au_br_writable(br->br_perm))
+			continue;
+
+		h_sb = au_sbr_sb(sb, bindex);
+		e = vfsub_sync_filesystem(h_sb, wait);
+		if (unlikely(e && !err))
+			err = e;
+		/* go on even if an error happens */
+	}
+	si_read_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* final actions when unmounting a file system */
+static void aufs_put_super(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = au_sbi(sb);
+	if (sbinfo)
+		kobject_put(&sbinfo->si_kobj);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+		     struct super_block *sb, void *arg)
+{
+	void *array;
+	unsigned long long n, sz;
+
+	array = NULL;
+	n = 0;
+	if (!*hint)
+		goto out;
+
+	if (*hint > ULLONG_MAX / sizeof(array)) {
+		array = ERR_PTR(-EMFILE);
+		pr_err("hint %llu\n", *hint);
+		goto out;
+	}
+
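+	/* try kzalloc first; fall back to vzalloc for a large array */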
+	sz = sizeof(array) * *hint;
+	array = kzalloc(sz, GFP_NOFS);
+	if (unlikely(!array))
+		array = vzalloc(sz);
+	if (unlikely(!array)) {
+		array = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	n = cb(sb, array, *hint, arg);
+	AuDebugOn(n > *hint);
+
+out:
+	*hint = n;
+	return array;
+}
+
+static unsigned long long au_iarray_cb(struct super_block *sb, void *a,
+				       unsigned long long max __maybe_unused,
+				       void *arg)
+{
+	unsigned long long n;
+	struct inode **p, *inode;
+	struct list_head *head;
+
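+	/* collect every live aufs inode on this sb, grabbing a ref on each */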
+	n = 0;
+	p = a;
+	head = arg;
+	spin_lock(&sb->s_inode_list_lock);
+	list_for_each_entry(inode, head, i_sb_list) {
+		if (!au_is_bad_inode(inode)
+		    && au_ii(inode)->ii_btop >= 0) {
+			spin_lock(&inode->i_lock);
+			if (atomic_read(&inode->i_count)) {
+				au_igrab(inode);
+				*p++ = inode;
+				n++;
+				AuDebugOn(n > max);
+			}
+			spin_unlock(&inode->i_lock);
+		}
+	}
+	spin_unlock(&sb->s_inode_list_lock);
+
+	return n;
+}
+
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
+{
+	struct au_sbinfo *sbi;
+
+	sbi = au_sbi(sb);
+	*max = au_lcnt_read(&sbi->si_ninodes, /*do_rev*/1);
+	return au_array_alloc(max, au_iarray_cb, sb, &sb->s_inodes);
+}
+
+void au_iarray_free(struct inode **a, unsigned long long max)
+{
+	unsigned long long ull;
+
+	for (ull = 0; ull < max; ull++)
+		iput(a[ull]);
+	kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * refresh dentry and inode at remount time.
+ */
+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
+		      struct dentry *parent)
+{
+	int err;
+
+	di_write_lock_child(dentry);
+	di_read_lock_parent(parent, AuLock_IR);
+	err = au_refresh_dentry(dentry, parent);
+	if (!err && dir_flags)
+		au_hn_reset(d_inode(dentry), dir_flags);
+	di_read_unlock(parent, AuLock_IR);
+	di_write_unlock(dentry);
+
+	return err;
+}
+
+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
+			   struct au_sbinfo *sbinfo,
+			   const unsigned int dir_flags, unsigned int do_idop)
+{
+	int err;
+	struct dentry *parent;
+
+	err = 0;
+	parent = dget_parent(dentry);
+	if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
+		if (d_really_is_positive(dentry)) {
+			if (!d_is_dir(dentry))
+				err = au_do_refresh(dentry, /*dir_flags*/0,
+						 parent);
+			else {
+				err = au_do_refresh(dentry, dir_flags, parent);
+				if (unlikely(err))
+					au_fset_si(sbinfo, FAILED_REFRESH_DIR);
+			}
+		} else
+			err = au_do_refresh(dentry, /*dir_flags*/0, parent);
+		AuDbgDentry(dentry);
+	}
+	dput(parent);
+
+	if (!err) {
+		if (do_idop)
+			au_refresh_dop(dentry, /*force_reval*/0);
+	} else
+		au_refresh_dop(dentry, /*force_reval*/1);
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_refresh_d(struct super_block *sb, unsigned int do_idop)
+{
+	int err, i, j, ndentry, e;
+	unsigned int sigen;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries, *d;
+	struct au_sbinfo *sbinfo;
+	struct dentry *root = sb->s_root;
+	const unsigned int dir_flags = au_hi_flags(d_inode(root), /*isdir*/1);
+
+	if (do_idop)
+		au_refresh_dop(root, /*force_reval*/0);
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, root, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	sigen = au_sigen(sb);
+	sbinfo = au_sbi(sb);
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			d = dentries[j];
+			e = au_do_refresh_d(d, sigen, sbinfo, dir_flags,
+					    do_idop);
+			if (unlikely(e && !err))
+				err = e;
+			/* go on even if an error happened */
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int au_refresh_i(struct super_block *sb, unsigned int do_idop)
+{
+	int err, e;
+	unsigned int sigen;
+	unsigned long long max, ull;
+	struct inode *inode, **array;
+
+	array = au_iarray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	err = 0;
+	sigen = au_sigen(sb);
+	for (ull = 0; ull < max; ull++) {
+		inode = array[ull];
+		if (unlikely(!inode))
+			break;
+
+		e = 0;
+		ii_write_lock_child(inode);
+		if (au_iigen(inode, NULL) != sigen) {
+			e = au_refresh_hinode_self(inode);
+			if (unlikely(e)) {
+				au_refresh_iop(inode, /*force_getattr*/1);
+				pr_err("error %d, i%lu\n", e, inode->i_ino);
+				if (!err)
+					err = e;
+				/* go on even if err */
+			}
+		}
+		if (!e && do_idop)
+			au_refresh_iop(inode, /*force_getattr*/0);
+		ii_write_unlock(inode);
+	}
+
+	au_iarray_free(array, max);
+
+out:
+	return err;
+}
+
+static void au_remount_refresh(struct super_block *sb, unsigned int do_idop)
+{
+	int err, e;
+	unsigned int udba;
+	aufs_bindex_t bindex, bbot;
+	struct dentry *root;
+	struct inode *inode;
+	struct au_branch *br;
+	struct au_sbinfo *sbi;
+
+	au_sigen_inc(sb);
+	sbi = au_sbi(sb);
+	au_fclr_si(sbi, FAILED_REFRESH_DIR);
+
+	root = sb->s_root;
+	DiMustNoWaiters(root);
+	inode = d_inode(root);
+	IiMustNoWaiters(inode);
+
+	udba = au_opt_udba(sb);
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_hnotify_reset_br(udba, br, br->br_perm);
+		if (unlikely(err))
+			AuIOErr("hnotify failed on br %d, %d, ignored\n",
+				bindex, err);
+		/* go on even if err */
+	}
+	au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
+
+	if (do_idop) {
+		if (au_ftest_si(sbi, NO_DREVAL)) {
+			AuDebugOn(sb->s_d_op == &aufs_dop_noreval);
+			sb->s_d_op = &aufs_dop_noreval;
+			AuDebugOn(sbi->si_iop_array == aufs_iop_nogetattr);
+			sbi->si_iop_array = aufs_iop_nogetattr;
+		} else {
+			AuDebugOn(sb->s_d_op == &aufs_dop);
+			sb->s_d_op = &aufs_dop;
+			AuDebugOn(sbi->si_iop_array == aufs_iop);
+			sbi->si_iop_array = aufs_iop;
+		}
+		pr_info("reset to %ps and %ps\n",
+			sb->s_d_op, sbi->si_iop_array);
+	}
+
+	di_write_unlock(root);
+	err = au_refresh_d(sb, do_idop);
+	e = au_refresh_i(sb, do_idop);
+	if (unlikely(e && !err))
+		err = e;
+	/* aufs_write_lock() calls ..._child() */
+	di_write_lock_child(root);
+
+	au_cpup_attr_all(inode, /*force*/1);
+
+	if (unlikely(err))
+		AuIOErr("refresh failed, ignored, %d\n", err);
+}
+
+/* stop extra interpretation of errno in mount(8), and strange error messages */
+static int cvt_err(int err)
+{
+	AuTraceErr(err);
+
+	switch (err) {
+	case -ENOENT:
+	case -ENOTDIR:
+	case -EEXIST:
+	case -EIO:
+		err = -EINVAL;
+	}
+	return err;
+}
+
+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	int err, do_dx;
+	unsigned int mntflags;
+	struct au_opts opts = {
+		.opt = NULL
+	};
+	struct dentry *root;
+	struct inode *inode;
+	struct au_sbinfo *sbinfo;
+
+	err = 0;
+	root = sb->s_root;
+	if (!data || !*data) {
+		err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+		if (!err) {
+			di_write_lock_child(root);
+			err = au_opts_verify(sb, *flags, /*pending*/0);
+			aufs_write_unlock(root);
+		}
+		goto out;
+	}
+
+	err = -ENOMEM;
+	opts.opt = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!opts.opt))
+		goto out;
+	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+	opts.flags = AuOpts_REMOUNT;
+	opts.sb_flags = *flags;
+
+	/* parse it before aufs lock */
+	err = au_opts_parse(sb, data, &opts);
+	if (unlikely(err))
+		goto out_opts;
+
+	sbinfo = au_sbi(sb);
+	inode = d_inode(root);
+	inode_lock(inode);
+	err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_mtx;
+	di_write_lock_child(root);
+
+	/* au_opts_remount() may return an error */
+	err = au_opts_remount(sb, &opts);
+	au_opts_free(&opts);
+
+	if (au_ftest_opts(opts.flags, REFRESH))
+		au_remount_refresh(sb, au_ftest_opts(opts.flags, REFRESH_IDOP));
+
+	if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
+		mntflags = au_mntflags(sb);
+		do_dx = !!au_opt_test(mntflags, DIO);
+		au_dy_arefresh(do_dx);
+	}
+
+	au_fhsm_wrote_all(sb, /*force*/1); /* ?? */
+	aufs_write_unlock(root);
+
+out_mtx:
+	inode_unlock(inode);
+out_opts:
+	free_page((unsigned long)opts.opt);
+out:
+	err = cvt_err(err);
+	AuTraceErr(err);
+	return err;
+}
+
+static const struct super_operations aufs_sop = {
+	.alloc_inode	= aufs_alloc_inode,
+	.destroy_inode	= aufs_destroy_inode,
+	.free_inode	= aufs_free_inode,
+	/* always deleting, no clearing */
+	.drop_inode	= generic_delete_inode,
+	.show_options	= aufs_show_options,
+	.statfs		= aufs_statfs,
+	.put_super	= aufs_put_super,
+	.sync_fs	= aufs_sync_fs,
+	.remount_fs	= aufs_remount_fs
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int alloc_root(struct super_block *sb)
+{
+	int err;
+	struct inode *inode;
+	struct dentry *root;
+
+	err = -ENOMEM;
+	inode = au_iget_locked(sb, AUFS_ROOT_INO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	inode->i_op = aufs_iop + AuIop_DIR; /* with getattr by default */
+	inode->i_fop = &aufs_dir_fop;
+	inode->i_mode = S_IFDIR;
+	set_nlink(inode, 2);
+	unlock_new_inode(inode);
+
+	root = d_make_root(inode);
+	if (unlikely(!root))
+		goto out;
+	err = PTR_ERR(root);
+	if (IS_ERR(root))
+		goto out;
+
+	err = au_di_init(root);
+	if (!err) {
+		sb->s_root = root;
+		return 0; /* success */
+	}
+	dput(root);
+
+out:
+	return err;
+}
+
+static int aufs_fill_super(struct super_block *sb, void *raw_data,
+			   int silent __maybe_unused)
+{
+	int err;
+	struct au_opts opts = {
+		.opt = NULL
+	};
+	struct au_sbinfo *sbinfo;
+	struct dentry *root;
+	struct inode *inode;
+	char *arg = raw_data;
+
+	if (unlikely(!arg || !*arg)) {
+		err = -EINVAL;
+		pr_err("no arg\n");
+		goto out;
+	}
+
+	err = -ENOMEM;
+	opts.opt = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!opts.opt))
+		goto out;
+	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+	opts.sb_flags = sb->s_flags;
+
+	err = au_si_alloc(sb);
+	if (unlikely(err))
+		goto out_opts;
+	sbinfo = au_sbi(sb);
+
+	/* all timestamps always follow the ones on the branch */
+	sb->s_flags |= SB_NOATIME | SB_NODIRATIME;
+	sb->s_flags |= SB_I_VERSION; /* do we really need this? */
+	sb->s_op = &aufs_sop;
+	sb->s_d_op = &aufs_dop;
+	sb->s_magic = AUFS_SUPER_MAGIC;
+	sb->s_maxbytes = 0;
+	sb->s_stack_depth = 1;
+	au_export_init(sb);
+	au_xattr_init(sb);
+
+	err = alloc_root(sb);
+	if (unlikely(err)) {
+		si_write_unlock(sb);
+		goto out_info;
+	}
+	root = sb->s_root;
+	inode = d_inode(root);
+
+	/*
+	 * actually we could parse the options here regardless of the aufs lock,
+	 * but at remount time parsing must be done before taking the aufs lock,
+	 * so we follow the same rule here.
+	 */
+	ii_write_lock_parent(inode);
+	aufs_write_unlock(root);
+	err = au_opts_parse(sb, arg, &opts);
+	if (unlikely(err))
+		goto out_root;
+
+	/* lock vfs_inode first, then aufs. */
+	inode_lock(inode);
+	aufs_write_lock(root);
+	err = au_opts_mount(sb, &opts);
+	au_opts_free(&opts);
+	if (!err && au_ftest_si(sbinfo, NO_DREVAL)) {
+		sb->s_d_op = &aufs_dop_noreval;
+		pr_info("%ps\n", sb->s_d_op);
+		au_refresh_dop(root, /*force_reval*/0);
+		sbinfo->si_iop_array = aufs_iop_nogetattr;
+		au_refresh_iop(inode, /*force_getattr*/0);
+	}
+	aufs_write_unlock(root);
+	inode_unlock(inode);
+	if (!err)
+		goto out_opts; /* success */
+
+out_root:
+	dput(root);
+	sb->s_root = NULL;
+out_info:
+	kobject_put(&sbinfo->si_kobj);
+	sb->s_fs_info = NULL;
+out_opts:
+	free_page((unsigned long)opts.opt);
+out:
+	AuTraceErr(err);
+	err = cvt_err(err);
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
+				 const char *dev_name __maybe_unused,
+				 void *raw_data)
+{
+	struct dentry *root;
+
+	/* all timestamps always follow the ones on the branch */
+	/* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
+	root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
+	if (IS_ERR(root))
+		goto out;
+
+	au_sbilist_add(root->d_sb);
+
+out:
+	return root;
+}
+
+static void aufs_kill_sb(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = au_sbi(sb);
+	if (sbinfo) {
+		au_sbilist_del(sb);
+		aufs_write_lock(sb->s_root);
+		au_fhsm_fin(sb);
+		if (sbinfo->si_wbr_create_ops->fin)
+			sbinfo->si_wbr_create_ops->fin(sb);
+		if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
+			au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
+			au_remount_refresh(sb, /*do_idop*/0);
+		}
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_put(sb, /*verbose*/1);
+		au_xino_clr(sb);
+		au_dr_opt_flush(sb);
+		sbinfo->si_sb = NULL;
+		aufs_write_unlock(sb->s_root);
+		au_nwt_flush(&sbinfo->si_nowait);
+	}
+	kill_anon_super(sb);
+}
+
+struct file_system_type aufs_fs_type = {
+	.name		= AUFS_FSTYPE,
+	/* a race between rename and others */
+	.fs_flags	= FS_RENAME_DOES_D_MOVE,
+	.mount		= aufs_mount,
+	.kill_sb	= aufs_kill_sb,
+	/* no need to __module_get() and module_put(). */
+	.owner		= THIS_MODULE,
+};
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
new file mode 100644
index 00000000000..c0cb0051242
--- /dev/null
+++ b/fs/aufs/super.h
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * super_block operations
+ */
+
+#ifndef __AUFS_SUPER_H__
+#define __AUFS_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include "hbl.h"
+#include "lcnt.h"
+#include "rwsem.h"
+#include "wkq.h"
+
+/* policies to select one among multiple writable branches */
+struct au_wbr_copyup_operations {
+	int (*copyup)(struct dentry *dentry);
+};
+
+#define AuWbr_DIR	1		/* target is a dir */
+#define AuWbr_PARENT	(1 << 1)	/* always require a parent */
+
+#define au_ftest_wbr(flags, name)	((flags) & AuWbr_##name)
+#define au_fset_wbr(flags, name)	{ (flags) |= AuWbr_##name; }
+#define au_fclr_wbr(flags, name)	{ (flags) &= ~AuWbr_##name; }
+
+struct au_wbr_create_operations {
+	int (*create)(struct dentry *dentry, unsigned int flags);
+	int (*init)(struct super_block *sb);
+	int (*fin)(struct super_block *sb);
+};
+
+struct au_wbr_mfs {
+	struct mutex	mfs_lock; /* protect this structure */
+	unsigned long	mfs_jiffy;
+	unsigned long	mfs_expire;
+	aufs_bindex_t	mfs_bindex;
+
+	unsigned long long	mfsrr_bytes;
+	unsigned long long	mfsrr_watermark;
+};
+
+#define AuPlink_NHASH 100
+static inline int au_plink_hash(ino_t ino)
+{
+	return ino % AuPlink_NHASH;
+}
+
+/* File-based Hierarchical Storage Management */
+struct au_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+	/* allow only one process to receive the notification */
+	spinlock_t		fhsm_spin;
+	pid_t			fhsm_pid;
+	wait_queue_head_t	fhsm_wqh;
+	atomic_t		fhsm_readable;
+
+	/* these are protected by si_rwsem */
+	unsigned long		fhsm_expire;
+	aufs_bindex_t		fhsm_bottom;
+#endif
+};
+
+struct au_branch;
+struct au_sbinfo {
+	/* nowait tasks in the system-wide workqueue */
+	struct au_nowait_tasks	si_nowait;
+
+	/*
+	 * we tried using sb->s_umount, but it failed due to a dependency with
+	 * i_mutex, so a dedicated rwsem for au_sbinfo is necessary.
+	 */
+	struct au_rwsem		si_rwsem;
+
+	/*
+	 * dirty approach to protect sb->s_inodes and ->s_files (gone) from
+	 * remount.
+	 */
+	au_lcnt_t		si_ninodes, si_nfiles;
+
+	/* branch management */
+	unsigned int		si_generation;
+
+	/* see AuSi_ flags */
+	unsigned char		au_si_status;
+
+	aufs_bindex_t		si_bbot;
+
+	/* dirty trick to keep br_id positive */
+	unsigned int		si_last_br_id :
+				sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
+	struct au_branch	**si_branch;
+
+	/* policy to select a writable branch */
+	unsigned char		si_wbr_copyup;
+	unsigned char		si_wbr_create;
+	struct au_wbr_copyup_operations *si_wbr_copyup_ops;
+	struct au_wbr_create_operations *si_wbr_create_ops;
+
+	/* round robin */
+	atomic_t		si_wbr_rr_next;
+
+	/* most free space */
+	struct au_wbr_mfs	si_wbr_mfs;
+
+	/* File-based Hierarchical Storage Management */
+	struct au_fhsm		si_fhsm;
+
+	/* mount flags */
+	/* include/asm-ia64/siginfo.h defines a macro named si_flags */
+	unsigned int		si_mntflags;
+
+	/* external inode number (bitmap and translation table) */
+	vfs_readf_t		si_xread;
+	vfs_writef_t		si_xwrite;
+	loff_t			si_ximaxent;	/* max entries in a xino */
+
+	struct file		*si_xib;
+	struct mutex		si_xib_mtx; /* protect xib members */
+	unsigned long		*si_xib_buf;
+	unsigned long		si_xib_last_pindex;
+	int			si_xib_next_bit;
+
+	unsigned long		si_xino_jiffy;
+	unsigned long		si_xino_expire;
+	/* reserved for future use */
+	/* unsigned long long	si_xib_limit; */	/* Max xib file size */
+
+#ifdef CONFIG_AUFS_EXPORT
+	/* i_generation */
+	/* todo: make xigen file an array to support many inode numbers */
+	struct file		*si_xigen;
+	atomic_t		si_xigen_next;
+#endif
+
+	/* dirty trick to support atomic_open */
+	struct hlist_bl_head	si_aopen;
+
+	/* vdir parameters */
+	unsigned long		si_rdcache;	/* max cache time in jiffies */
+	unsigned int		si_rdblk;	/* deblk size */
+	unsigned int		si_rdhash;	/* hash size */
+
+	/*
+	 * If the number of whiteouts is larger than si_dirwh, leave all of
+	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
+	 * A future fsck.aufs or a kernel thread will remove them later.
+	 * Otherwise, remove all whiteouts and the dir itself in rmdir(2).
+	 */
+	unsigned int		si_dirwh;
+
+	/* pseudo_link list */
+	struct hlist_bl_head	si_plink[AuPlink_NHASH];
+	wait_queue_head_t	si_plink_wq;
+	spinlock_t		si_plink_maint_lock;
+	pid_t			si_plink_maint_pid;
+
+	/* file list */
+	struct hlist_bl_head	si_files;
+
+	/* with/without getattr, brother of sb->s_d_op */
+	const struct inode_operations *si_iop_array;
+
+	/*
+	 * sysfs and lifetime management.
+	 * this is not a small structure and it may waste memory when sysfs is
+	 * disabled, particularly when many aufs filesystems are mounted.
+	 * but most configurations enable sysfs.
+	 */
+	struct kobject		si_kobj;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		 *si_dbgaufs;
+	struct dentry		 *si_dbgaufs_plink;
+	struct dentry		 *si_dbgaufs_xib;
+#ifdef CONFIG_AUFS_EXPORT
+	struct dentry		 *si_dbgaufs_xigen;
+#endif
+#endif
+
+#ifdef CONFIG_AUFS_SBILIST
+	struct hlist_bl_node	si_list;
+#endif
+
+	/* dirty, necessary for unmounting, sysfs and sysrq */
+	struct super_block	*si_sb;
+};
+
+/* sbinfo status flags */
+/*
+ * set true when refresh_dirs() failed at remount time.
+ * then try refreshing dirs at access time again.
+ * if it is false, refreshing dirs at access time is unnecessary
+ */
+#define AuSi_FAILED_REFRESH_DIR	1
+#define AuSi_FHSM		(1 << 1)	/* fhsm is active now */
+#define AuSi_NO_DREVAL		(1 << 2)	/* disable all d_revalidate */
+
+#ifndef CONFIG_AUFS_FHSM
+#undef AuSi_FHSM
+#define AuSi_FHSM		0
+#endif
+
+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
+					   unsigned int flag)
+{
+	AuRwMustAnyLock(&sbi->si_rwsem);
+	return sbi->au_si_status & flag;
+}
+#define au_ftest_si(sbinfo, name)	au_do_ftest_si(sbinfo, AuSi_##name)
+#define au_fset_si(sbinfo, name) do { \
+	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+	(sbinfo)->au_si_status |= AuSi_##name; \
+} while (0)
+#define au_fclr_si(sbinfo, name) do { \
+	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+	(sbinfo)->au_si_status &= ~AuSi_##name; \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* policy to select one among writable branches */
+#define AuWbrCopyup(sbinfo, ...) \
+	((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
+#define AuWbrCreate(sbinfo, ...) \
+	((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
+
+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
+#define AuLock_DW		1		/* write-lock dentry */
+#define AuLock_IR		(1 << 1)	/* read-lock inode */
+#define AuLock_IW		(1 << 2)	/* write-lock inode */
+#define AuLock_FLUSH		(1 << 3)	/* wait for 'nowait' tasks */
+#define AuLock_DIRS		(1 << 4)	/* target is a pair of dirs */
+						/* except RENAME_EXCHANGE */
+#define AuLock_NOPLM		(1 << 5)	/* return err in plm mode */
+#define AuLock_NOPLMW		(1 << 6)	/* wait for plm mode ends */
+#define AuLock_GEN		(1 << 7)	/* test digen/iigen */
+#define au_ftest_lock(flags, name)	((flags) & AuLock_##name)
+#define au_fset_lock(flags, name) \
+	do { (flags) |= AuLock_##name; } while (0)
+#define au_fclr_lock(flags, name) \
+	do { (flags) &= ~AuLock_##name; } while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* super.c */
+extern struct file_system_type aufs_fs_type;
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
+typedef unsigned long long (*au_arraycb_t)(struct super_block *sb, void *array,
+					   unsigned long long max, void *arg);
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+		     struct super_block *sb, void *arg);
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
+void au_iarray_free(struct inode **a, unsigned long long max);
+
+/* sbinfo.c */
+void au_si_free(struct kobject *kobj);
+int au_si_alloc(struct super_block *sb);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink);
+
+unsigned int au_sigen_inc(struct super_block *sb);
+aufs_bindex_t au_new_br_id(struct super_block *sb);
+
+int si_read_lock(struct super_block *sb, int flags);
+int si_write_lock(struct super_block *sb, int flags);
+int aufs_read_lock(struct dentry *dentry, int flags);
+void aufs_read_unlock(struct dentry *dentry, int flags);
+void aufs_write_lock(struct dentry *dentry);
+void aufs_write_unlock(struct dentry *dentry);
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+/* wbr_policy.c */
+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
+extern struct au_wbr_create_operations au_wbr_create_ops[];
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex);
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t btop);
+
+/* mvdown.c */
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg);
+
+#ifdef CONFIG_AUFS_FHSM
+/* fhsm.c */
+
+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm)
+{
+	pid_t pid;
+
+	spin_lock(&fhsm->fhsm_spin);
+	pid = fhsm->fhsm_pid;
+	spin_unlock(&fhsm->fhsm_spin);
+
+	return pid;
+}
+
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force);
+void au_fhsm_wrote_all(struct super_block *sb, int force);
+int au_fhsm_fd(struct super_block *sb, int oflags);
+int au_fhsm_br_alloc(struct au_branch *br);
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex);
+void au_fhsm_fin(struct super_block *sb);
+void au_fhsm_init(struct au_sbinfo *sbinfo);
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec);
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo);
+#else
+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex,
+	   int force)
+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force)
+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags)
+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm)
+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br)
+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(au_fhsm_fin, struct super_block *sb)
+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo)
+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec)
+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo *sbinfo)
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+int au_test_nfsd(void);
+void au_export_init(struct super_block *sb);
+void au_xigen_inc(struct inode *inode);
+int au_xigen_new(struct inode *inode);
+int au_xigen_set(struct super_block *sb, struct path *path);
+void au_xigen_clr(struct super_block *sb);
+
+static inline int au_busy_or_stale(void)
+{
+	if (!au_test_nfsd())
+		return -EBUSY;
+	return -ESTALE;
+}
+#else
+AuStubInt0(au_test_nfsd, void)
+AuStubVoid(au_export_init, struct super_block *sb)
+AuStubVoid(au_xigen_inc, struct inode *inode)
+AuStubInt0(au_xigen_new, struct inode *inode)
+AuStubInt0(au_xigen_set, struct super_block *sb, struct path *path)
+AuStubVoid(au_xigen_clr, struct super_block *sb)
+AuStub(int, au_busy_or_stale, return -EBUSY, void)
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_SBILIST
+/* module.c */
+extern struct hlist_bl_head au_sbilist;
+
+static inline void au_sbilist_init(void)
+{
+	INIT_HLIST_BL_HEAD(&au_sbilist);
+}
+
+static inline void au_sbilist_add(struct super_block *sb)
+{
+	au_hbl_add(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+static inline void au_sbilist_del(struct super_block *sb)
+{
+	au_hbl_del(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+static inline void au_sbilist_lock(void)
+{
+	hlist_bl_lock(&au_sbilist);
+}
+
+static inline void au_sbilist_unlock(void)
+{
+	hlist_bl_unlock(&au_sbilist);
+}
+#define AuGFP_SBILIST	GFP_ATOMIC
+#else
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST	GFP_NOFS
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+#else
+AuStubVoid(au_sbilist_init, void)
+AuStubVoid(au_sbilist_add, struct super_block *sb)
+AuStubVoid(au_sbilist_del, struct super_block *sb)
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST	GFP_NOFS
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
+{
+	/*
+	 * This function is effectively a dynamic '__init' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+#ifdef CONFIG_DEBUG_FS
+	sbinfo->si_dbgaufs = NULL;
+	sbinfo->si_dbgaufs_plink = NULL;
+	sbinfo->si_dbgaufs_xib = NULL;
+#ifdef CONFIG_AUFS_EXPORT
+	sbinfo->si_dbgaufs_xigen = NULL;
+#endif
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* current->atomic_flags */
+/* this value should never corrupt the ones defined in linux/sched.h */
+#define PFA_AUFS	0x10
+
+TASK_PFA_TEST(AUFS, test_aufs)	/* task_test_aufs */
+TASK_PFA_SET(AUFS, aufs)	/* task_set_aufs */
+TASK_PFA_CLEAR(AUFS, aufs)	/* task_clear_aufs */
+
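+/*
+ * si_pid_{test,set,clr} record in the per-task flag above whether the current
+ * task holds this aufs superblock's si_rwsem; the si_*lock/unlock helpers
+ * below maintain the flag
+ */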
+static inline int si_pid_test(struct super_block *sb)
+{
+	return !!task_test_aufs(current);
+}
+
+static inline void si_pid_clr(struct super_block *sb)
+{
+	AuDebugOn(!task_test_aufs(current));
+	task_clear_aufs(current);
+}
+
+static inline void si_pid_set(struct super_block *sb)
+{
+	AuDebugOn(task_test_aufs(current));
+	task_set_aufs(current);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock superblock. mainly for entry point functions */
+#define __si_read_lock(sb)	au_rw_read_lock(&au_sbi(sb)->si_rwsem)
+#define __si_write_lock(sb)	au_rw_write_lock(&au_sbi(sb)->si_rwsem)
+#define __si_read_trylock(sb)	au_rw_read_trylock(&au_sbi(sb)->si_rwsem)
+#define __si_write_trylock(sb)	au_rw_write_trylock(&au_sbi(sb)->si_rwsem)
+/*
+#define __si_read_trylock_nested(sb) \
+	au_rw_read_trylock_nested(&au_sbi(sb)->si_rwsem)
+#define __si_write_trylock_nested(sb) \
+	au_rw_write_trylock_nested(&au_sbi(sb)->si_rwsem)
+*/
+
+#define __si_read_unlock(sb)	au_rw_read_unlock(&au_sbi(sb)->si_rwsem)
+#define __si_write_unlock(sb)	au_rw_write_unlock(&au_sbi(sb)->si_rwsem)
+#define __si_downgrade_lock(sb)	au_rw_dgrade_lock(&au_sbi(sb)->si_rwsem)
+
+#define SiMustNoWaiters(sb)	AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
+#define SiMustAnyLock(sb)	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
+#define SiMustWriteLock(sb)	AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
+
+static inline void si_noflush_read_lock(struct super_block *sb)
+{
+	__si_read_lock(sb);
+	si_pid_set(sb);
+}
+
+static inline int si_noflush_read_trylock(struct super_block *sb)
+{
+	int locked;
+
+	locked = __si_read_trylock(sb);
+	if (locked)
+		si_pid_set(sb);
+	return locked;
+}
+
+static inline void si_noflush_write_lock(struct super_block *sb)
+{
+	__si_write_lock(sb);
+	si_pid_set(sb);
+}
+
+static inline int si_noflush_write_trylock(struct super_block *sb)
+{
+	int locked;
+
+	locked = __si_write_trylock(sb);
+	if (locked)
+		si_pid_set(sb);
+	return locked;
+}
+
+#if 0 /* reserved */
+static inline int si_read_trylock(struct super_block *sb, int flags)
+{
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+	return si_noflush_read_trylock(sb);
+}
+#endif
+
+static inline void si_read_unlock(struct super_block *sb)
+{
+	si_pid_clr(sb);
+	__si_read_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline int si_write_trylock(struct super_block *sb, int flags)
+{
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+	return si_noflush_write_trylock(sb);
+}
+#endif
+
+static inline void si_write_unlock(struct super_block *sb)
+{
+	si_pid_clr(sb);
+	__si_write_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline void si_downgrade_lock(struct super_block *sb)
+{
+	__si_downgrade_lock(sb);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_sbbot(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_bbot;
+}
+
+static inline unsigned int au_mntflags(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_mntflags;
+}
+
+static inline unsigned int au_sigen(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_generation;
+}
+
+static inline struct au_branch *au_sbr(struct super_block *sb,
+				       aufs_bindex_t bindex)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_branch[0 + bindex];
+}
+
+static inline loff_t au_xi_maxent(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_ximaxent;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SUPER_H__ */
diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c
new file mode 100644
index 00000000000..f615471231b
--- /dev/null
+++ b/fs/aufs/sysaufs.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface and lifetime management
+ * they are necessary regardless of whether sysfs is enabled.
+ */
+
+#include <linux/random.h>
+#include "aufs.h"
+
+unsigned long sysaufs_si_mask;
+struct kset *sysaufs_kset;
+
+#define AuSiAttr(_name) { \
+	.attr   = { .name = __stringify(_name), .mode = 0444 },	\
+	.show   = sysaufs_si_##_name,				\
+}
+
+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
+struct attribute *sysaufs_si_attrs[] = {
+	&sysaufs_si_attr_xi_path.attr,
+	NULL,
+};
+
+static const struct sysfs_ops au_sbi_ops = {
+	.show   = sysaufs_si_show
+};
+
+static struct kobj_type au_sbi_ktype = {
+	.release	= au_si_free,
+	.sysfs_ops	= &au_sbi_ops,
+	.default_attrs	= sysaufs_si_attrs
+};
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+
+	sbinfo->si_kobj.kset = sysaufs_kset;
+	/* cf. sysaufs_name() */
+	err = kobject_init_and_add
+		(&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
+		 SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
+
+	return err;
+}
+
+void sysaufs_fin(void)
+{
+	sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+	kset_unregister(sysaufs_kset);
+}
+
+int __init sysaufs_init(void)
+{
+	int err;
+
+	do {
+		get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
+	} while (!sysaufs_si_mask);
+
+	err = -EINVAL;
+	sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
+	if (unlikely(!sysaufs_kset))
+		goto out;
+	err = PTR_ERR(sysaufs_kset);
+	if (IS_ERR(sysaufs_kset))
+		goto out;
+	err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+	if (unlikely(err))
+		kset_unregister(sysaufs_kset);
+
+out:
+	return err;
+}
diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h
new file mode 100644
index 00000000000..55c5c9868f3
--- /dev/null
+++ b/fs/aufs/sysaufs.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface and mount lifetime management
+ */
+
+#ifndef __SYSAUFS_H__
+#define __SYSAUFS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/sysfs.h>
+#include "module.h"
+
+struct super_block;
+struct au_sbinfo;
+
+struct sysaufs_si_attr {
+	struct attribute attr;
+	int (*show)(struct seq_file *seq, struct super_block *sb);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* sysaufs.c */
+extern unsigned long sysaufs_si_mask;
+extern struct kset *sysaufs_kset;
+extern struct attribute *sysaufs_si_attrs[];
+int sysaufs_si_init(struct au_sbinfo *sbinfo);
+int __init sysaufs_init(void);
+void sysaufs_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* some people do not like exposing a raw kernel pointer */
+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
+{
+	return sysaufs_si_mask ^ (unsigned long)sbinfo;
+}
+
+#define SysaufsSiNamePrefix	"si_"
+#define SysaufsSiNameLen	(sizeof(SysaufsSiNamePrefix) + 16)
+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
+{
+	snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
+		 sysaufs_si_id(sbinfo));
+}
+
+struct au_branch;
+#ifdef CONFIG_SYSFS
+/* sysfs.c */
+extern struct attribute_group *sysaufs_attr_group;
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+			 char *buf);
+long au_brinfo_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+
+void sysaufs_br_init(struct au_branch *br);
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+
+#define sysaufs_brs_init()	do {} while (0)
+
+#else
+#define sysaufs_attr_group	NULL
+
+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj,
+       struct attribute *attr, char *buf)
+AuStubVoid(sysaufs_br_init, struct au_branch *br)
+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+
+static inline void sysaufs_brs_init(void)
+{
+	sysaufs_brs = 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* __KERNEL__ */
+#endif /* __SYSAUFS_H__ */
diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c
new file mode 100644
index 00000000000..46638d88ef2
--- /dev/null
+++ b/fs/aufs/sysfs.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sysfs interface
+ */
+
+#include <linux/compat.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_FS_MODULE
+/* this entry violates the "one line per file" policy of sysfs */
+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr,
+			   char *buf)
+{
+	ssize_t err;
+	static char *conf =
+/* this file is generated at build time */
+#include "conf.str"
+		;
+
+	err = snprintf(buf, PAGE_SIZE, conf);
+	if (unlikely(err >= PAGE_SIZE))
+		err = -EFBIG;
+	return err;
+}
+
+static struct kobj_attribute au_config_attr = __ATTR_RO(config);
+#endif
+
+static struct attribute *au_attr[] = {
+#ifdef CONFIG_AUFS_FS_MODULE
+	&au_config_attr.attr,
+#endif
+	NULL,	/* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group sysaufs_attr_group_body = {
+	.attrs = au_attr
+};
+
+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
+{
+	int err;
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	if (au_opt_test(au_mntflags(sb), XINO)) {
+		err = au_xino_path(seq, au_sbi(sb)->si_xib);
+		seq_putc(seq, '\n');
+	}
+	return err;
+}
+
+/*
+ * the lifetime of a branch is independent from its entry under sysfs.
+ * sysfs handles the lifetime of the entry, and never calls ->show() after it
+ * is unlinked.
+ */
+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
+			 aufs_bindex_t bindex, int idx)
+{
+	int err;
+	struct path path;
+	struct dentry *root;
+	struct au_branch *br;
+	au_br_perm_str_t perm;
+
+	AuDbg("b%d\n", bindex);
+
+	err = 0;
+	root = sb->s_root;
+	di_read_lock_parent(root, !AuLock_IR);
+	br = au_sbr(sb, bindex);
+
+	switch (idx) {
+	case AuBrSysfs_BR:
+		path.mnt = au_br_mnt(br);
+		path.dentry = au_h_dptr(root, bindex);
+		err = au_seq_path(seq, &path);
+		if (!err) {
+			au_optstr_br_perm(&perm, br->br_perm);
+			seq_printf(seq, "=%s\n", perm.a);
+		}
+		break;
+	case AuBrSysfs_BRID:
+		seq_printf(seq, "%d\n", br->br_id);
+		break;
+	}
+	di_read_unlock(root, !AuLock_IR);
+	if (unlikely(err || seq_has_overflowed(seq)))
+		err = -E2BIG;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct seq_file *au_seq(char *p, ssize_t len)
+{
+	struct seq_file *seq;
+
+	seq = kzalloc(sizeof(*seq), GFP_NOFS);
+	if (seq) {
+		/* mutex_init(&seq.lock); */
+		seq->buf = p;
+		seq->size = len;
+		return seq; /* success */
+	}
+
+	seq = ERR_PTR(-ENOMEM);
+	return seq;
+}
+
+#define SysaufsBr_PREFIX	"br"
+#define SysaufsBrid_PREFIX	"brid"
+
+/* todo: file size may exceed PAGE_SIZE */
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+			char *buf)
+{
+	ssize_t err;
+	int idx;
+	long l;
+	aufs_bindex_t bbot;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct seq_file *seq;
+	char *name;
+	struct attribute **cattr;
+
+	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+	sb = sbinfo->si_sb;
+
+	/*
+	 * prevent a race condition between sysfs and aufs.
+	 * for instance, sysfs_file_read() calls sysfs_get_active_two(), which
+	 * prohibits maintaining the sysfs entries.
+	 * here we acquire the read lock after sysfs_get_active_two().
+	 * on the other hand, the remount process may maintain the sysfs/aufs
+	 * entries after acquiring the write lock, which can cause a deadlock.
+	 * so we simply give up processing the read here.
+	 */
+	err = -EBUSY;
+	if (unlikely(!si_noflush_read_trylock(sb)))
+		goto out;
+
+	seq = au_seq(buf, PAGE_SIZE);
+	err = PTR_ERR(seq);
+	if (IS_ERR(seq))
+		goto out_unlock;
+
+	name = (void *)attr->name;
+	cattr = sysaufs_si_attrs;
+	while (*cattr) {
+		if (!strcmp(name, (*cattr)->name)) {
+			err = container_of(*cattr, struct sysaufs_si_attr, attr)
+				->show(seq, sb);
+			goto out_seq;
+		}
+		cattr++;
+	}
+
+	if (!strncmp(name, SysaufsBrid_PREFIX,
+		     sizeof(SysaufsBrid_PREFIX) - 1)) {
+		idx = AuBrSysfs_BRID;
+		name += sizeof(SysaufsBrid_PREFIX) - 1;
+	} else if (!strncmp(name, SysaufsBr_PREFIX,
+			    sizeof(SysaufsBr_PREFIX) - 1)) {
+		idx = AuBrSysfs_BR;
+		name += sizeof(SysaufsBr_PREFIX) - 1;
+	} else
+		BUG();
+
+	err = kstrtol(name, 10, &l);
+	if (!err) {
+		bbot = au_sbbot(sb);
+		if (l <= bbot)
+			err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx);
+		else
+			err = -ENOENT;
+	}
+
+out_seq:
+	if (!err) {
+		err = seq->count;
+		/* sysfs limit */
+		if (unlikely(err == PAGE_SIZE))
+			err = -EFBIG;
+	}
+	au_kfree_rcu(seq);
+out_unlock:
+	si_read_unlock(sb);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
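+/*
+ * copy the id, the permission bits and the path of every branch into the
+ * userspace array @arg.  when @arg is NULL, just return the number of
+ * branches.
+ */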
+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg)
+{
+	int err;
+	int16_t brid;
+	aufs_bindex_t bindex, bbot;
+	size_t sz;
+	char *buf;
+	struct seq_file *seq;
+	struct au_branch *br;
+
+	si_read_lock(sb, AuLock_FLUSH);
+	bbot = au_sbbot(sb);
+	err = bbot + 1;
+	if (!arg)
+		goto out;
+
+	err = -ENOMEM;
+	buf = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!buf))
+		goto out;
+
+	seq = au_seq(buf, PAGE_SIZE);
+	err = PTR_ERR(seq);
+	if (IS_ERR(seq))
+		goto out_buf;
+
+	sz = sizeof(*arg) - offsetof(union aufs_brinfo, path);
+	for (bindex = 0; bindex <= bbot; bindex++, arg++) {
+		/* VERIFY_WRITE */
+		err = !access_ok(arg, sizeof(*arg));
+		if (unlikely(err))
+			break;
+
+		br = au_sbr(sb, bindex);
+		brid = br->br_id;
+		BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id));
+		err = __put_user(brid, &arg->id);
+		if (unlikely(err))
+			break;
+
+		BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm));
+		err = __put_user(br->br_perm, &arg->perm);
+		if (unlikely(err))
+			break;
+
+		err = au_seq_path(seq, &br->br_path);
+		if (unlikely(err))
+			break;
+		seq_putc(seq, '\0');
+		if (!seq_has_overflowed(seq)) {
+			err = copy_to_user(arg->path, seq->buf, seq->count);
+			seq->count = 0;
+			if (unlikely(err))
+				break;
+		} else {
+			err = -E2BIG;
+			goto out_seq;
+		}
+	}
+	if (unlikely(err))
+		err = -EFAULT;
+
+out_seq:
+	au_kfree_rcu(seq);
+out_buf:
+	free_page((unsigned long)buf);
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+long au_brinfo_ioctl(struct file *file, unsigned long arg)
+{
+	return au_brinfo(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg)
+{
+	return au_brinfo(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
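+/* initialize the sysfs attributes embedded in a branch */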
+void sysaufs_br_init(struct au_branch *br)
+{
+	int i;
+	struct au_brsysfs *br_sysfs;
+	struct attribute *attr;
+
+	br_sysfs = br->br_sysfs;
+	for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+		attr = &br_sysfs->attr;
+		sysfs_attr_init(attr);
+		attr->name = br_sysfs->name;
+		attr->mode = 0444;
+		br_sysfs++;
+	}
+}
+
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+	struct au_branch *br;
+	struct kobject *kobj;
+	struct au_brsysfs *br_sysfs;
+	int i;
+	aufs_bindex_t bbot;
+
+	if (!sysaufs_brs)
+		return;
+
+	kobj = &au_sbi(sb)->si_kobj;
+	bbot = au_sbbot(sb);
+	for (; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		br_sysfs = br->br_sysfs;
+		for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+			sysfs_remove_file(kobj, &br_sysfs->attr);
+			br_sysfs++;
+		}
+	}
+}
+
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+	int err, i;
+	aufs_bindex_t bbot;
+	struct kobject *kobj;
+	struct au_branch *br;
+	struct au_brsysfs *br_sysfs;
+
+	if (!sysaufs_brs)
+		return;
+
+	kobj = &au_sbi(sb)->si_kobj;
+	bbot = au_sbbot(sb);
+	for (; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		br_sysfs = br->br_sysfs;
+		snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name),
+			 SysaufsBr_PREFIX "%d", bindex);
+		snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name),
+			 SysaufsBrid_PREFIX "%d", bindex);
+		for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+			err = sysfs_create_file(kobj, &br_sysfs->attr);
+			if (unlikely(err))
+				pr_warn("failed %s under sysfs(%d)\n",
+					br_sysfs->name, err);
+			br_sysfs++;
+		}
+	}
+}
diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c
new file mode 100644
index 00000000000..fc8aa4a2839
--- /dev/null
+++ b/fs/aufs/sysrq.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * magic sysrq handler
+ */
+
+/* #include <linux/sysrq.h> */
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
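+/*
+ * dump the internal state of a single aufs superblock: the superblock
+ * itself, the inodes which have no dentry, and the opened files.
+ */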
+static void sysrq_sb(struct super_block *sb)
+{
+	char *plevel;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+	struct hlist_bl_head *files;
+	struct hlist_bl_node *pos;
+	struct au_finfo *finfo;
+	struct inode *i;
+
+	plevel = au_plevel;
+	au_plevel = KERN_WARNING;
+
+	/* since we define pr_fmt, call printk directly */
+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
+
+	sbinfo = au_sbi(sb);
+	printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
+	pr("superblock\n");
+	au_dpri_sb(sb);
+
+#if 0 /* reserved */
+	do {
+		int err, i, j, ndentry;
+		struct au_dcsub_pages dpages;
+		struct au_dpage *dpage;
+
+		err = au_dpages_init(&dpages, GFP_ATOMIC);
+		if (unlikely(err))
+			break;
+		err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+		if (!err)
+			for (i = 0; i < dpages.ndpage; i++) {
+				dpage = dpages.dpages + i;
+				ndentry = dpage->ndentry;
+				for (j = 0; j < ndentry; j++)
+					au_dpri_dentry(dpage->dentries[j]);
+			}
+		au_dpages_free(&dpages);
+	} while (0);
+#endif
+
+	pr("isolated inode\n");
+	spin_lock(&sb->s_inode_list_lock);
+	list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
+		spin_lock(&i->i_lock);
+		if (hlist_empty(&i->i_dentry))
+			au_dpri_inode(i);
+		spin_unlock(&i->i_lock);
+	}
+	spin_unlock(&sb->s_inode_list_lock);
+
+	pr("files\n");
+	files = &au_sbi(sb)->si_files;
+	hlist_bl_lock(files);
+	hlist_bl_for_each_entry(finfo, pos, files, fi_hlist) {
+		umode_t mode;
+
+		file = finfo->fi_file;
+		mode = file_inode(file)->i_mode;
+		if (!special_file(mode))
+			au_dpri_file(file);
+	}
+	hlist_bl_unlock(files);
+	pr("done\n");
+
+#undef pr
+	au_plevel = plevel;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module parameter */
+static char *aufs_sysrq_key = "a";
+module_param_named(sysrq, aufs_sysrq_key, charp, 0444);
+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
+
+static void au_sysrq(int key __maybe_unused)
+{
+	struct au_sbinfo *sbinfo;
+	struct hlist_bl_node *pos;
+
+	lockdep_off();
+	au_sbilist_lock();
+	hlist_bl_for_each_entry(sbinfo, pos, &au_sbilist, si_list)
+		sysrq_sb(sbinfo->si_sb);
+	au_sbilist_unlock();
+	lockdep_on();
+}
+
+static struct sysrq_key_op au_sysrq_op = {
+	.handler	= au_sysrq,
+	.help_msg	= "Aufs",
+	.action_msg	= "Aufs",
+	.enable_mask	= SYSRQ_ENABLE_DUMP
+};
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_sysrq_init(void)
+{
+	int err;
+	char key;
+
+	err = -1;
+	key = *aufs_sysrq_key;
+	if ('a' <= key && key <= 'z')
+		err = register_sysrq_key(key, &au_sysrq_op);
+	if (unlikely(err))
+		pr_err("err %d, sysrq=%c\n", err, key);
+	return err;
+}
+
+void au_sysrq_fin(void)
+{
+	int err;
+
+	err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
+	if (unlikely(err))
+		pr_err("err %d (ignored)\n", err);
+}
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
new file mode 100644
index 00000000000..5ba006b8072
--- /dev/null
+++ b/fs/aufs/vdir.c
@@ -0,0 +1,896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * virtual or vertical directory
+ */
+
+#include <linux/iversion.h>
+#include "aufs.h"
+
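+/* size of one directory entry (struct au_vdir_de plus its name), aligned */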
+static unsigned int calc_size(int nlen)
+{
+	return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
+}
+
+static int set_deblk_end(union au_vdir_deblk_p *p,
+			 union au_vdir_deblk_p *deblk_end)
+{
+	if (calc_size(0) <= deblk_end->deblk - p->deblk) {
+		p->de->de_str.len = 0;
+		/* smp_mb(); */
+		return 0;
+	}
+	return -1; /* error */
+}
+
+/* returns true or false */
+static int is_deblk_end(union au_vdir_deblk_p *p,
+			union au_vdir_deblk_p *deblk_end)
+{
+	if (calc_size(0) <= deblk_end->deblk - p->deblk)
+		return !p->de->de_str.len;
+	return 1;
+}
+
+static unsigned char *last_deblk(struct au_vdir *vdir)
+{
+	return vdir->vd_deblk[vdir->vd_nblk - 1];
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* estimate the appropriate size for name hash table */
+unsigned int au_rdhash_est(loff_t sz)
+{
+	unsigned int n;
+
+	n = UINT_MAX;
+	sz >>= 10;
+	if (sz < n)
+		n = sz;
+	if (sz < AUFS_RDHASH_DEF)
+		n = AUFS_RDHASH_DEF;
+	/* pr_info("n %u\n", n); */
+	return n;
+}
+
+/*
+ * the allocated memory has to be freed by
+ * au_nhash_wh_free() or au_nhash_de_free().
+ */
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
+{
+	struct hlist_head *head;
+	unsigned int u;
+	size_t sz;
+
+	sz = sizeof(*nhash->nh_head) * num_hash;
+	head = kmalloc(sz, gfp);
+	if (head) {
+		nhash->nh_num = num_hash;
+		nhash->nh_head = head;
+		for (u = 0; u < num_hash; u++)
+			INIT_HLIST_HEAD(head++);
+		return 0; /* success */
+	}
+
+	return -ENOMEM;
+}
+
+static void nhash_count(struct hlist_head *head)
+{
+#if 0 /* debugging */
+	unsigned long n;
+	struct hlist_node *pos;
+
+	n = 0;
+	hlist_for_each(pos, head)
+		n++;
+	pr_info("%lu\n", n);
+#endif
+}
+
+static void au_nhash_wh_do_free(struct hlist_head *head)
+{
+	struct au_vdir_wh *pos;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(pos, node, head, wh_hash)
+		au_kfree_rcu(pos);
+}
+
+static void au_nhash_de_do_free(struct hlist_head *head)
+{
+	struct au_vdir_dehstr *pos;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(pos, node, head, hash)
+		au_cache_free_vdir_dehstr(pos);
+}
+
+static void au_nhash_do_free(struct au_nhash *nhash,
+			     void (*free)(struct hlist_head *head))
+{
+	unsigned int n;
+	struct hlist_head *head;
+
+	n = nhash->nh_num;
+	if (!n)
+		return;
+
+	head = nhash->nh_head;
+	while (n-- > 0) {
+		nhash_count(head);
+		free(head++);
+	}
+	au_kfree_try_rcu(nhash->nh_head);
+}
+
+void au_nhash_wh_free(struct au_nhash *whlist)
+{
+	au_nhash_do_free(whlist, au_nhash_wh_do_free);
+}
+
+static void au_nhash_de_free(struct au_nhash *delist)
+{
+	au_nhash_do_free(delist, au_nhash_de_do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+			    int limit)
+{
+	int num;
+	unsigned int u, n;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+
+	num = 0;
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (u = 0; u < n; u++, head++)
+		hlist_for_each_entry(pos, head, wh_hash)
+			if (pos->wh_bindex == btgt && ++num > limit)
+				return 1;
+	return 0;
+}
+
+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
+				       unsigned char *name,
+				       unsigned int len)
+{
+	unsigned int v;
+	/* const unsigned int magic_bit = 12; */
+
+	AuDebugOn(!nhash->nh_num || !nhash->nh_head);
+
+	v = 0;
+	if (len > 8)
+		len = 8;
+	while (len--)
+		v += *name++;
+	/* v = hash_long(v, magic_bit); */
+	v %= nhash->nh_num;
+	return nhash->nh_head + v;
+}
+
+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
+			      int nlen)
+{
+	return str->len == nlen && !memcmp(str->name, name, nlen);
+}
+
+/* returns found or not */
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
+{
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct au_vdir_destr *str;
+
+	head = au_name_hash(whlist, name, nlen);
+	hlist_for_each_entry(pos, head, wh_hash) {
+		str = &pos->wh_str;
+		AuDbg("%.*s\n", str->len, str->name);
+		if (au_nhash_test_name(str, name, nlen))
+			return 1;
+	}
+	return 0;
+}
+
+/* returns found(true) or not */
+static int test_known(struct au_nhash *delist, char *name, int nlen)
+{
+	struct hlist_head *head;
+	struct au_vdir_dehstr *pos;
+	struct au_vdir_destr *str;
+
+	head = au_name_hash(delist, name, nlen);
+	hlist_for_each_entry(pos, head, hash) {
+		str = pos->str;
+		AuDbg("%.*s\n", str->len, str->name);
+		if (au_nhash_test_name(str, name, nlen))
+			return 1;
+	}
+	return 0;
+}
+
+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
+			    unsigned char d_type)
+{
+#ifdef CONFIG_AUFS_SHWH
+	wh->wh_ino = ino;
+	wh->wh_type = d_type;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+		       unsigned int d_type, aufs_bindex_t bindex,
+		       unsigned char shwh)
+{
+	int err;
+	struct au_vdir_destr *str;
+	struct au_vdir_wh *wh;
+
+	AuDbg("%.*s\n", nlen, name);
+	AuDebugOn(!whlist->nh_num || !whlist->nh_head);
+
+	err = -ENOMEM;
+	wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
+	if (unlikely(!wh))
+		goto out;
+
+	err = 0;
+	wh->wh_bindex = bindex;
+	if (shwh)
+		au_shwh_init_wh(wh, ino, d_type);
+	str = &wh->wh_str;
+	str->len = nlen;
+	memcpy(str->name, name, nlen);
+	hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
+	/* smp_mb(); */
+
+out:
+	return err;
+}
+
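+/* extend the virtual directory by one more deblk (directory entry block) */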
+static int append_deblk(struct au_vdir *vdir)
+{
+	int err;
+	unsigned long ul;
+	const unsigned int deblk_sz = vdir->vd_deblk_sz;
+	union au_vdir_deblk_p p, deblk_end;
+	unsigned char **o;
+
+	err = -ENOMEM;
+	o = au_krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+			GFP_NOFS, /*may_shrink*/0);
+	if (unlikely(!o))
+		goto out;
+
+	vdir->vd_deblk = o;
+	p.deblk = kmalloc(deblk_sz, GFP_NOFS);
+	if (p.deblk) {
+		ul = vdir->vd_nblk++;
+		vdir->vd_deblk[ul] = p.deblk;
+		vdir->vd_last.ul = ul;
+		vdir->vd_last.p.deblk = p.deblk;
+		deblk_end.deblk = p.deblk + deblk_sz;
+		err = set_deblk_end(&p, &deblk_end);
+	}
+
+out:
+	return err;
+}
+
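+/*
+ * append one entry to the last deblk, extending the vdir when the current
+ * deblk has no room left, and register the name in @delist so that
+ * duplicates from other branches can be detected.
+ */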
+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
+		     unsigned int d_type, struct au_nhash *delist)
+{
+	int err;
+	unsigned int sz;
+	const unsigned int deblk_sz = vdir->vd_deblk_sz;
+	union au_vdir_deblk_p p, *room, deblk_end;
+	struct au_vdir_dehstr *dehstr;
+
+	p.deblk = last_deblk(vdir);
+	deblk_end.deblk = p.deblk + deblk_sz;
+	room = &vdir->vd_last.p;
+	AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
+		  || !is_deblk_end(room, &deblk_end));
+
+	sz = calc_size(nlen);
+	if (unlikely(sz > deblk_end.deblk - room->deblk)) {
+		err = append_deblk(vdir);
+		if (unlikely(err))
+			goto out;
+
+		p.deblk = last_deblk(vdir);
+		deblk_end.deblk = p.deblk + deblk_sz;
+		/* smp_mb(); */
+		AuDebugOn(room->deblk != p.deblk);
+	}
+
+	err = -ENOMEM;
+	dehstr = au_cache_alloc_vdir_dehstr();
+	if (unlikely(!dehstr))
+		goto out;
+
+	dehstr->str = &room->de->de_str;
+	hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
+	room->de->de_ino = ino;
+	room->de->de_type = d_type;
+	room->de->de_str.len = nlen;
+	memcpy(room->de->de_str.name, name, nlen);
+
+	err = 0;
+	room->deblk += sz;
+	if (unlikely(set_deblk_end(room, &deblk_end)))
+		err = append_deblk(vdir);
+	/* smp_mb(); */
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_vdir_free(struct au_vdir *vdir)
+{
+	unsigned char **deblk;
+
+	deblk = vdir->vd_deblk;
+	while (vdir->vd_nblk--)
+		au_kfree_try_rcu(*deblk++);
+	au_kfree_try_rcu(vdir->vd_deblk);
+	au_cache_free_vdir(vdir);
+}
+
+static struct au_vdir *alloc_vdir(struct file *file)
+{
+	struct au_vdir *vdir;
+	struct super_block *sb;
+	int err;
+
+	sb = file->f_path.dentry->d_sb;
+	SiMustAnyLock(sb);
+
+	err = -ENOMEM;
+	vdir = au_cache_alloc_vdir();
+	if (unlikely(!vdir))
+		goto out;
+
+	vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
+	if (unlikely(!vdir->vd_deblk))
+		goto out_free;
+
+	vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
+	if (!vdir->vd_deblk_sz) {
+		/* estimate the appropriate size for deblk */
+		vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
+		/* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
+	}
+	vdir->vd_nblk = 0;
+	vdir->vd_version = 0;
+	vdir->vd_jiffy = 0;
+	err = append_deblk(vdir);
+	if (!err)
+		return vdir; /* success */
+
+	au_kfree_try_rcu(vdir->vd_deblk);
+
+out_free:
+	au_cache_free_vdir(vdir);
+out:
+	vdir = ERR_PTR(err);
+	return vdir;
+}
+
+static int reinit_vdir(struct au_vdir *vdir)
+{
+	int err;
+	union au_vdir_deblk_p p, deblk_end;
+
+	while (vdir->vd_nblk > 1) {
+		au_kfree_try_rcu(vdir->vd_deblk[vdir->vd_nblk - 1]);
+		/* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
+		vdir->vd_nblk--;
+	}
+	p.deblk = vdir->vd_deblk[0];
+	deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
+	err = set_deblk_end(&p, &deblk_end);
+	/* keep vd_dblk_sz */
+	vdir->vd_last.ul = 0;
+	vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+	vdir->vd_version = 0;
+	vdir->vd_jiffy = 0;
+	/* smp_mb(); */
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuFillVdir_CALLED	1
+#define AuFillVdir_WHABLE	(1 << 1)
+#define AuFillVdir_SHWH		(1 << 2)
+#define au_ftest_fillvdir(flags, name)	((flags) & AuFillVdir_##name)
+#define au_fset_fillvdir(flags, name) \
+	do { (flags) |= AuFillVdir_##name; } while (0)
+#define au_fclr_fillvdir(flags, name) \
+	do { (flags) &= ~AuFillVdir_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuFillVdir_SHWH
+#define AuFillVdir_SHWH		0
+#endif
+
+struct fillvdir_arg {
+	struct dir_context	ctx;
+	struct file		*file;
+	struct au_vdir		*vdir;
+	struct au_nhash		delist;
+	struct au_nhash		whlist;
+	aufs_bindex_t		bindex;
+	unsigned int		flags;
+	int			err;
+};
+
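+/*
+ * the dir_context actor called back from vfsub_iterate_dir().  ordinary
+ * names are appended to the entry list, while whiteout names (the
+ * AUFS_WH_PFX prefix) are recorded in the whiteout hash instead.
+ */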
+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen,
+		    loff_t offset __maybe_unused, u64 h_ino,
+		    unsigned int d_type)
+{
+	struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx);
+	char *name = (void *)__name;
+	struct super_block *sb;
+	ino_t ino;
+	const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
+
+	arg->err = 0;
+	sb = arg->file->f_path.dentry->d_sb;
+	au_fset_fillvdir(arg->flags, CALLED);
+	/* smp_mb(); */
+	if (nlen <= AUFS_WH_PFX_LEN
+	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+		if (test_known(&arg->delist, name, nlen)
+		    || au_nhash_test_known_wh(&arg->whlist, name, nlen))
+			goto out; /* already exists or whiteouted */
+
+		arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
+		if (!arg->err) {
+			if (unlikely(nlen > AUFS_MAX_NAMELEN))
+				d_type = DT_UNKNOWN;
+			arg->err = append_de(arg->vdir, name, nlen, ino,
+					     d_type, &arg->delist);
+		}
+	} else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
+		name += AUFS_WH_PFX_LEN;
+		nlen -= AUFS_WH_PFX_LEN;
+		if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
+			goto out; /* already whiteouted */
+
+		ino = 0; /* just to suppress a warning */
+		if (shwh)
+			arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
+					     &ino);
+		if (!arg->err) {
+			if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN)
+				d_type = DT_UNKNOWN;
+			arg->err = au_nhash_append_wh
+				(&arg->whlist, name, nlen, ino, d_type,
+				 arg->bindex, shwh);
+		}
+	}
+
+out:
+	if (!arg->err)
+		arg->vdir->vd_jiffy = jiffies;
+	/* smp_mb(); */
+	AuTraceErr(arg->err);
+	return arg->err;
+}
+
+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
+			  struct au_nhash *whlist, struct au_nhash *delist)
+{
+#ifdef CONFIG_AUFS_SHWH
+	int err;
+	unsigned int nh, u;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct hlist_node *n;
+	char *p, *o;
+	struct au_vdir_destr *destr;
+
+	AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
+
+	err = -ENOMEM;
+	o = p = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = 0;
+	nh = whlist->nh_num;
+	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+	p += AUFS_WH_PFX_LEN;
+	for (u = 0; u < nh; u++) {
+		head = whlist->nh_head + u;
+		hlist_for_each_entry_safe(pos, n, head, wh_hash) {
+			destr = &pos->wh_str;
+			memcpy(p, destr->name, destr->len);
+			err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
+					pos->wh_ino, pos->wh_type, delist);
+			if (unlikely(err))
+				break;
+		}
+	}
+
+	free_page((unsigned long)o);
+
+out:
+	AuTraceErr(err);
+	return err;
+#else
+	return 0;
+#endif
+}
+
+static int au_do_read_vdir(struct fillvdir_arg *arg)
+{
+	int err;
+	unsigned int rdhash;
+	loff_t offset;
+	aufs_bindex_t bbot, bindex, btop;
+	unsigned char shwh;
+	struct file *hf, *file;
+	struct super_block *sb;
+
+	file = arg->file;
+	sb = file->f_path.dentry->d_sb;
+	SiMustAnyLock(sb);
+
+	rdhash = au_sbi(sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
+	err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out_delist;
+
+	err = 0;
+	arg->flags = 0;
+	shwh = 0;
+	if (au_opt_test(au_mntflags(sb), SHWH)) {
+		shwh = 1;
+		au_fset_fillvdir(arg->flags, SHWH);
+	}
+	btop = au_fbtop(file);
+	bbot = au_fbbot_dir(file);
+	for (bindex = btop; !err && bindex <= bbot; bindex++) {
+		hf = au_hf_dir(file, bindex);
+		if (!hf)
+			continue;
+
+		offset = vfsub_llseek(hf, 0, SEEK_SET);
+		err = offset;
+		if (unlikely(offset))
+			break;
+
+		arg->bindex = bindex;
+		au_fclr_fillvdir(arg->flags, WHABLE);
+		if (shwh
+		    || (bindex != bbot
+			&& au_br_whable(au_sbr_perm(sb, bindex))))
+			au_fset_fillvdir(arg->flags, WHABLE);
+		do {
+			arg->err = 0;
+			au_fclr_fillvdir(arg->flags, CALLED);
+			/* smp_mb(); */
+			err = vfsub_iterate_dir(hf, &arg->ctx);
+			if (err >= 0)
+				err = arg->err;
+		} while (!err && au_ftest_fillvdir(arg->flags, CALLED));
+
+		/*
+		 * dir_relax() may be good for concurrency, but aufs should not
+		 * use it since it will cause a lockdep problem.
+		 */
+	}
+
+	if (!err && shwh)
+		err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
+
+	au_nhash_wh_free(&arg->whlist);
+
+out_delist:
+	au_nhash_de_free(&arg->delist);
+out:
+	return err;
+}
+
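+/*
+ * build or refresh the vdir attached to the inode.  it is re-read when the
+ * inode version has changed or the cached copy is older than the rdcache
+ * timeout.
+ */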
+static int read_vdir(struct file *file, int may_read)
+{
+	int err;
+	unsigned long expire;
+	unsigned char do_read;
+	struct fillvdir_arg arg = {
+		.ctx = {
+			.actor = fillvdir
+		}
+	};
+	struct inode *inode;
+	struct au_vdir *vdir, *allocated;
+
+	err = 0;
+	inode = file_inode(file);
+	IMustLock(inode);
+	IiMustWriteLock(inode);
+	SiMustAnyLock(inode->i_sb);
+
+	allocated = NULL;
+	do_read = 0;
+	expire = au_sbi(inode->i_sb)->si_rdcache;
+	vdir = au_ivdir(inode);
+	if (!vdir) {
+		do_read = 1;
+		vdir = alloc_vdir(file);
+		err = PTR_ERR(vdir);
+		if (IS_ERR(vdir))
+			goto out;
+		err = 0;
+		allocated = vdir;
+	} else if (may_read
+		   && (!inode_eq_iversion(inode, vdir->vd_version)
+		       || time_after(jiffies, vdir->vd_jiffy + expire))) {
+		do_read = 1;
+		err = reinit_vdir(vdir);
+		if (unlikely(err))
+			goto out;
+	}
+
+	if (!do_read)
+		return 0; /* success */
+
+	arg.file = file;
+	arg.vdir = vdir;
+	err = au_do_read_vdir(&arg);
+	if (!err) {
+		/* file->f_pos = 0; */ /* todo: ctx->pos? */
+		vdir->vd_version = inode_query_iversion(inode);
+		vdir->vd_last.ul = 0;
+		vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+		if (allocated)
+			au_set_ivdir(inode, allocated);
+	} else if (allocated)
+		au_vdir_free(allocated);
+
+out:
+	return err;
+}
+
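+/* copy the vdir attached to the inode (@src) into the per-file cache (@tgt) */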
+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
+{
+	int err, rerr;
+	unsigned long ul, n;
+	const unsigned int deblk_sz = src->vd_deblk_sz;
+
+	AuDebugOn(tgt->vd_nblk != 1);
+
+	err = -ENOMEM;
+	if (tgt->vd_nblk < src->vd_nblk) {
+		unsigned char **p;
+
+		p = au_krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+				GFP_NOFS, /*may_shrink*/0);
+		if (unlikely(!p))
+			goto out;
+		tgt->vd_deblk = p;
+	}
+
+	if (tgt->vd_deblk_sz != deblk_sz) {
+		unsigned char *p;
+
+		tgt->vd_deblk_sz = deblk_sz;
+		p = au_krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS,
+				/*may_shrink*/1);
+		if (unlikely(!p))
+			goto out;
+		tgt->vd_deblk[0] = p;
+	}
+	memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
+	tgt->vd_version = src->vd_version;
+	tgt->vd_jiffy = src->vd_jiffy;
+
+	n = src->vd_nblk;
+	for (ul = 1; ul < n; ul++) {
+		tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
+					    GFP_NOFS);
+		if (unlikely(!tgt->vd_deblk[ul]))
+			goto out;
+		tgt->vd_nblk++;
+	}
+	tgt->vd_nblk = n;
+	tgt->vd_last.ul = src->vd_last.ul;
+	tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
+	tgt->vd_last.p.deblk += src->vd_last.p.deblk
+		- src->vd_deblk[src->vd_last.ul];
+	/* smp_mb(); */
+	return 0; /* success */
+
+out:
+	rerr = reinit_vdir(tgt);
+	BUG_ON(rerr);
+	return err;
+}
+
+int au_vdir_init(struct file *file)
+{
+	int err;
+	struct inode *inode;
+	struct au_vdir *vdir_cache, *allocated;
+
+	/* test file->f_pos here instead of ctx->pos */
+	err = read_vdir(file, !file->f_pos);
+	if (unlikely(err))
+		goto out;
+
+	allocated = NULL;
+	vdir_cache = au_fvdir_cache(file);
+	if (!vdir_cache) {
+		vdir_cache = alloc_vdir(file);
+		err = PTR_ERR(vdir_cache);
+		if (IS_ERR(vdir_cache))
+			goto out;
+		allocated = vdir_cache;
+	} else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
+		/* test file->f_pos here instead of ctx->pos */
+		err = reinit_vdir(vdir_cache);
+		if (unlikely(err))
+			goto out;
+	} else
+		return 0; /* success */
+
+	inode = file_inode(file);
+	err = copy_vdir(vdir_cache, au_ivdir(inode));
+	if (!err) {
+		file->f_version = inode_query_iversion(inode);
+		if (allocated)
+			au_set_fvdir_cache(file, allocated);
+	} else if (allocated)
+		au_vdir_free(allocated);
+
+out:
+	return err;
+}
+
+static loff_t calc_offset(struct au_vdir *vdir)
+{
+	loff_t offset;
+	union au_vdir_deblk_p p;
+
+	p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
+	offset = vdir->vd_last.p.deblk - p.deblk;
+	offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
+	return offset;
+}
+
+/* returns true or false */
+static int seek_vdir(struct file *file, struct dir_context *ctx)
+{
+	int valid;
+	unsigned int deblk_sz;
+	unsigned long ul, n;
+	loff_t offset;
+	union au_vdir_deblk_p p, deblk_end;
+	struct au_vdir *vdir_cache;
+
+	valid = 1;
+	vdir_cache = au_fvdir_cache(file);
+	offset = calc_offset(vdir_cache);
+	AuDbg("offset %lld\n", offset);
+	if (ctx->pos == offset)
+		goto out;
+
+	vdir_cache->vd_last.ul = 0;
+	vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
+	if (!ctx->pos)
+		goto out;
+
+	valid = 0;
+	deblk_sz = vdir_cache->vd_deblk_sz;
+	ul = div64_u64(ctx->pos, deblk_sz);
+	AuDbg("ul %lu\n", ul);
+	if (ul >= vdir_cache->vd_nblk)
+		goto out;
+
+	n = vdir_cache->vd_nblk;
+	for (; ul < n; ul++) {
+		p.deblk = vdir_cache->vd_deblk[ul];
+		deblk_end.deblk = p.deblk + deblk_sz;
+		offset = ul;
+		offset *= deblk_sz;
+		while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) {
+			unsigned int l;
+
+			l = calc_size(p.de->de_str.len);
+			offset += l;
+			p.deblk += l;
+		}
+		if (!is_deblk_end(&p, &deblk_end)) {
+			valid = 1;
+			vdir_cache->vd_last.ul = ul;
+			vdir_cache->vd_last.p = p;
+			break;
+		}
+	}
+
+out:
+	/* smp_mb(); */
+	if (!valid)
+		AuDbg("valid %d\n", !valid);
+	return valid;
+}
+
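+/* feed the cached entries to the VFS, starting from the position in @ctx */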
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx)
+{
+	unsigned int l, deblk_sz;
+	union au_vdir_deblk_p deblk_end;
+	struct au_vdir *vdir_cache;
+	struct au_vdir_de *de;
+
+	if (!seek_vdir(file, ctx))
+		return 0;
+
+	vdir_cache = au_fvdir_cache(file);
+	deblk_sz = vdir_cache->vd_deblk_sz;
+	while (1) {
+		deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+		deblk_end.deblk += deblk_sz;
+		while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
+			de = vdir_cache->vd_last.p.de;
+			AuDbg("%.*s, off%lld, i%lu, dt%d\n",
+			      de->de_str.len, de->de_str.name, ctx->pos,
+			      (unsigned long)de->de_ino, de->de_type);
+			if (unlikely(!dir_emit(ctx, de->de_str.name,
+					       de->de_str.len, de->de_ino,
+					       de->de_type))) {
+				/* todo: ignore the error caused by udba? */
+				/* return err; */
+				return 0;
+			}
+
+			l = calc_size(de->de_str.len);
+			vdir_cache->vd_last.p.deblk += l;
+			ctx->pos += l;
+		}
+		if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
+			vdir_cache->vd_last.ul++;
+			vdir_cache->vd_last.p.deblk
+				= vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+			ctx->pos = deblk_sz * vdir_cache->vd_last.ul;
+			continue;
+		}
+		break;
+	}
+
+	/* smp_mb(); */
+	return 0;
+}
diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c
new file mode 100644
index 00000000000..a5e10c5c004
--- /dev/null
+++ b/fs/aufs/vfsub.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/security.h>
+#include <linux/splice.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb)
+{
+	if (!au_test_fuse(h_sb) || !au_userns)
+		return 0;
+
+	return is_current_mnt_ns(mnt) ? 0 : -EACCES;
+}
+#endif
+
+int vfsub_sync_filesystem(struct super_block *h_sb, int wait)
+{
+	int err;
+
+	lockdep_off();
+	down_read(&h_sb->s_umount);
+	err = __sync_filesystem(h_sb, wait);
+	up_read(&h_sb->s_umount);
+	lockdep_on();
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did)
+{
+	int err;
+	struct kstat st;
+	struct super_block *h_sb;
+
+	/* for remote fs, leave work for its getattr or d_revalidate */
+	/* for bad i_attr fs, handle them in aufs_getattr() */
+	/* still some fs may acquire i_mutex. we need to skip them */
+	err = 0;
+	if (!did)
+		did = &err;
+	h_sb = h_path->dentry->d_sb;
+	*did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
+	if (*did)
+		err = vfsub_getattr(h_path, &st);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct file *vfsub_dentry_open(struct path *path, int flags)
+{
+	return dentry_open(path, flags /* | __FMODE_NONOTIFY */,
+			   current_cred());
+}
+
+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
+{
+	struct file *file;
+
+	lockdep_off();
+	file = filp_open(path,
+			 oflags /* | __FMODE_NONOTIFY */,
+			 mode);
+	lockdep_on();
+	if (IS_ERR(file))
+		goto out;
+	vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+out:
+	return file;
+}
+
+/*
+ * Ideally this function should call VFS:do_last() in order to keep all its
+ * checks. But it is very hard for aufs to regenerate several VFS internal
+ * structures such as nameidata. This is the second (or third) best approach.
+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open().
+ */
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+		      struct vfsub_aopen_args *args)
+{
+	int err;
+	struct au_branch *br = args->br;
+	struct file *file = args->file;
+	/* copied from linux/fs/namei.c:atomic_open() */
+	struct dentry *const DENTRY_NOT_SET = (void *)-1UL;
+
+	IMustLock(dir);
+	AuDebugOn(!dir->i_op->atomic_open);
+
+	err = au_br_test_oflag(args->open_flag, br);
+	if (unlikely(err))
+		goto out;
+
+	au_lcnt_inc(&br->br_nfiles);
+	file->f_path.dentry = DENTRY_NOT_SET;
+	file->f_path.mnt = au_br_mnt(br);
+	AuDbg("%ps\n", dir->i_op->atomic_open);
+	err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag,
+				     args->create_mode);
+	if (unlikely(err < 0)) {
+		au_lcnt_dec(&br->br_nfiles);
+		goto out;
+	}
+
+	/* temporary workaround for nfsv4 branch */
+	if (au_test_nfs(dir->i_sb))
+		nfs_mark_for_revalidate(dir);
+
+	if (file->f_mode & FMODE_CREATED)
+		fsnotify_create(dir, dentry);
+	if (!(file->f_mode & FMODE_OPENED)) {
+		au_lcnt_dec(&br->br_nfiles);
+		goto out;
+	}
+
+	/* todo: call VFS:may_open() here */
+	/* todo: ima_file_check() too? */
+	if (!err && (args->open_flag & __FMODE_EXEC))
+		err = deny_write_access(file);
+	if (!err)
+		fsnotify_open(file);
+	else
+		au_lcnt_dec(&br->br_nfiles);
+	/* note that the file is created and still opened */
+
+out:
+	return err;
+}
+
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
+{
+	int err;
+
+	err = kern_path(name, flags, path);
+	if (!err && d_is_positive(path->dentry))
+		vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+					     struct dentry *parent, int len)
+{
+	struct path path = {
+		.mnt = NULL
+	};
+
+	path.dentry = lookup_one_len_unlocked(name, parent, len);
+	if (IS_ERR(path.dentry))
+		goto out;
+	if (d_is_positive(path.dentry))
+		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+	AuTraceErrPtr(path.dentry);
+	return path.dentry;
+}
+
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+				    int len)
+{
+	struct path path = {
+		.mnt = NULL
+	};
+
+	/* VFS checks it too, but by WARN_ON_ONCE() */
+	IMustLock(d_inode(parent));
+
+	path.dentry = lookup_one_len(name, parent, len);
+	if (IS_ERR(path.dentry))
+		goto out;
+	if (d_is_positive(path.dentry))
+		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+	AuTraceErrPtr(path.dentry);
+	return path.dentry;
+}
+
+void vfsub_call_lkup_one(void *args)
+{
+	struct vfsub_lkup_one_args *a = args;
+	*a->errp = vfsub_lkup_one(a->name, a->parent);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+				 struct dentry *d2, struct au_hinode *hdir2)
+{
+	struct dentry *d;
+
+	lockdep_off();
+	d = lock_rename(d1, d2);
+	lockdep_on();
+	au_hn_suspend(hdir1);
+	if (hdir1 != hdir2)
+		au_hn_suspend(hdir2);
+
+	return d;
+}
+
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+			 struct dentry *d2, struct au_hinode *hdir2)
+{
+	au_hn_resume(hdir1);
+	if (hdir1 != hdir2)
+		au_hn_resume(hdir2);
+	lockdep_off();
+	unlock_rename(d1, d2);
+	lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
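+/*
+ * wrappers around the vfs_*() helpers for the lower branches.  each one runs
+ * the corresponding security_path_*() hook against the parent first, then
+ * refreshes the cached attributes of the affected dentries.
+ */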
+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mknod(path, d, mode, 0);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_create(dir, path->dentry, mode, want_excl);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_symlink(path, d, symname);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_symlink(dir, path->dentry, symname);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mknod(path, d, mode, new_encode_dev(dev));
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_mknod(dir, path->dentry, mode, dev);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+static int au_test_nlink(struct inode *inode)
+{
+	const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
+
+	if (!au_test_fs_no_limit_nlink(inode->i_sb)
+	    || inode->i_nlink < link_max)
+		return 0;
+	return -EMLINK;
+}
+
+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path,
+	       struct inode **delegated_inode)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	err = au_test_nlink(d_inode(src_dentry));
+	if (unlikely(err))
+		return err;
+
+	/* we don't call may_linkat() */
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_link(src_dentry, path, d);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_link(src_dentry, dir, path->dentry, delegated_inode);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		/* fuse has a different in-memory inode for the same inumber */
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+			tmp.dentry = src_dentry;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
+		 struct inode *dir, struct path *path,
+		 struct inode **delegated_inode, unsigned int flags)
+{
+	int err;
+	struct path tmp = {
+		.mnt	= path->mnt
+	};
+	struct dentry *d;
+
+	IMustLock(dir);
+	IMustLock(src_dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	tmp.dentry = src_dentry->d_parent;
+	err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
+			 delegated_inode, flags);
+	lockdep_on();
+	if (!err) {
+		int did;
+
+		tmp.dentry = d->d_parent;
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = src_dentry;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+			tmp.dentry = src_dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mkdir(path, d, mode);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_mkdir(dir, path->dentry, mode);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_rmdir(struct inode *dir, struct path *path)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_rmdir(path, d);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_rmdir(dir, path->dentry);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = {
+			.dentry	= path->dentry->d_parent,
+			.mnt	= path->mnt
+		};
+
+		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: support mmap_sem? */
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+		     loff_t *ppos)
+{
+	ssize_t err;
+
+	lockdep_off();
+	err = vfs_read(file, ubuf, count, ppos);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+/* todo: kernel_read()? */
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+		     loff_t *ppos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = vfsub_read_u(file, buf.u, count, ppos);
+	set_fs(oldfs);
+	return err;
+}
+
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+		      loff_t *ppos)
+{
+	ssize_t err;
+
+	lockdep_off();
+	err = vfs_write(file, ubuf, count, ppos);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		const char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = vfsub_write_u(file, buf.u, count, ppos);
+	set_fs(oldfs);
+	return err;
+}
+
+int vfsub_flush(struct file *file, fl_owner_t id)
+{
+	int err;
+
+	err = 0;
+	if (file->f_op->flush) {
+		if (!au_test_nfs(file->f_path.dentry->d_sb))
+			err = file->f_op->flush(file, id);
+		else {
+			lockdep_off();
+			err = file->f_op->flush(file, id);
+			lockdep_on();
+		}
+		if (!err)
+			vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
+		/*ignore*/
+	}
+	return err;
+}
+
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx)
+{
+	int err;
+
+	AuDbg("%pD, ctx{%ps, %llu}\n", file, ctx->actor, ctx->pos);
+
+	lockdep_off();
+	err = iterate_dir(file, ctx);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+	return err;
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+		     struct pipe_inode_info *pipe, size_t len,
+		     unsigned int flags)
+{
+	long err;
+
+	lockdep_off();
+	err = do_splice_to(in, ppos, pipe, len, flags);
+	lockdep_on();
+	file_accessed(in);
+	if (err >= 0)
+		vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		       loff_t *ppos, size_t len, unsigned int flags)
+{
+	long err;
+
+	lockdep_off();
+	err = do_splice_from(pipe, out, ppos, len, flags);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+int vfsub_fsync(struct file *file, struct path *path, int datasync)
+{
+	int err;
+
+	/* file can be NULL */
+	lockdep_off();
+	err = vfs_fsync(file, datasync);
+	lockdep_on();
+	if (!err) {
+		if (!path) {
+			AuDebugOn(!file);
+			path = &file->f_path;
+		}
+		vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+	}
+	return err;
+}
+
+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+		struct file *h_file)
+{
+	int err;
+	struct inode *h_inode;
+	struct super_block *h_sb;
+
+	if (!h_file) {
+		err = vfsub_truncate(h_path, length);
+		goto out;
+	}
+
+	h_inode = d_inode(h_path->dentry);
+	h_sb = h_inode->i_sb;
+	lockdep_off();
+	sb_start_write(h_sb);
+	lockdep_on();
+	err = locks_verify_truncate(h_inode, h_file, length);
+	if (!err)
+		err = security_path_truncate(h_path);
+	if (!err) {
+		lockdep_off();
+		err = do_truncate(h_path->dentry, length, attr, h_file);
+		lockdep_on();
+	}
+	lockdep_off();
+	sb_end_write(h_sb);
+	lockdep_on();
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_vfsub_mkdir_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+	int mode;
+};
+
+static void au_call_vfsub_mkdir(void *args)
+{
+	struct au_vfsub_mkdir_args *a = args;
+	*a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
+}
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
+{
+	int err, do_sio, wkq_err;
+
+	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+	if (!do_sio) {
+		lockdep_off();
+		err = vfsub_mkdir(dir, path, mode);
+		lockdep_on();
+	} else {
+		struct au_vfsub_mkdir_args args = {
+			.errp	= &err,
+			.dir	= dir,
+			.path	= path,
+			.mode	= mode
+		};
+		wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+
+struct au_vfsub_rmdir_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+};
+
+static void au_call_vfsub_rmdir(void *args)
+{
+	struct au_vfsub_rmdir_args *a = args;
+	*a->errp = vfsub_rmdir(a->dir, a->path);
+}
+
+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
+{
+	int err, do_sio, wkq_err;
+
+	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+	if (!do_sio) {
+		lockdep_off();
+		err = vfsub_rmdir(dir, path);
+		lockdep_on();
+	} else {
+		struct au_vfsub_rmdir_args args = {
+			.errp	= &err,
+			.dir	= dir,
+			.path	= path
+		};
+		wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
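+/*
+ * change the attributes of a lower inode, either directly or via the
+ * workqueue (the *_sio variant).
+ */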
+struct notify_change_args {
+	int *errp;
+	struct path *path;
+	struct iattr *ia;
+	struct inode **delegated_inode;
+};
+
+static void call_notify_change(void *args)
+{
+	struct notify_change_args *a = args;
+	struct inode *h_inode;
+
+	h_inode = d_inode(a->path->dentry);
+	IMustLock(h_inode);
+
+	*a->errp = -EPERM;
+	if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
+		lockdep_off();
+		*a->errp = notify_change(a->path->dentry, a->ia,
+					 a->delegated_inode);
+		lockdep_on();
+		if (!*a->errp)
+			vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
+	}
+	AuTraceErr(*a->errp);
+}
+
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+			struct inode **delegated_inode)
+{
+	int err;
+	struct notify_change_args args = {
+		.errp			= &err,
+		.path			= path,
+		.ia			= ia,
+		.delegated_inode	= delegated_inode
+	};
+
+	call_notify_change(&args);
+
+	return err;
+}
+
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+			    struct inode **delegated_inode)
+{
+	int err, wkq_err;
+	struct notify_change_args args = {
+		.errp			= &err,
+		.path			= path,
+		.ia			= ia,
+		.delegated_inode	= delegated_inode
+	};
+
+	wkq_err = au_wkq_wait(call_notify_change, &args);
+	if (unlikely(wkq_err))
+		err = wkq_err;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct unlink_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+	struct inode **delegated_inode;
+};
+
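+/*
+ * hold extra references so that the lower dentry and inode survive the
+ * unlink, except on NFS where keeping the dentry count at one avoids a
+ * silly-rename.
+ */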
+static void call_unlink(void *args)
+{
+	struct unlink_args *a = args;
+	struct dentry *d = a->path->dentry;
+	struct inode *h_inode;
+	const int stop_sillyrename = (au_test_nfs(d->d_sb)
+				      && au_dcount(d) == 1);
+
+	IMustLock(a->dir);
+
+	a->path->dentry = d->d_parent;
+	*a->errp = security_path_unlink(a->path, d);
+	a->path->dentry = d;
+	if (unlikely(*a->errp))
+		return;
+
+	if (!stop_sillyrename)
+		dget(d);
+	h_inode = NULL;
+	if (d_is_positive(d)) {
+		h_inode = d_inode(d);
+		ihold(h_inode);
+	}
+
+	lockdep_off();
+	*a->errp = vfs_unlink(a->dir, d, a->delegated_inode);
+	lockdep_on();
+	if (!*a->errp) {
+		struct path tmp = {
+			.dentry = d->d_parent,
+			.mnt	= a->path->mnt
+		};
+		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+	}
+
+	if (!stop_sillyrename)
+		dput(d);
+	if (h_inode)
+		iput(h_inode);
+
+	AuTraceErr(*a->errp);
+}
+
+/*
+ * @dir: must be locked.
+ * @dentry: target dentry.
+ */
+int vfsub_unlink(struct inode *dir, struct path *path,
+		 struct inode **delegated_inode, int force)
+{
+	int err;
+	struct unlink_args args = {
+		.errp			= &err,
+		.dir			= dir,
+		.path			= path,
+		.delegated_inode	= delegated_inode
+	};
+
+	if (!force)
+		call_unlink(&args);
+	else {
+		int wkq_err;
+
+		wkq_err = au_wkq_wait(call_unlink, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h
new file mode 100644
index 00000000000..705f033ba2c
--- /dev/null
+++ b/fs/aufs/vfsub.h
@@ -0,0 +1,354 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#ifndef __AUFS_VFSUB_H__
+#define __AUFS_VFSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/posix_acl.h>
+#include <linux/xattr.h>
+#include "debug.h"
+
+/* copied from linux/fs/internal.h */
+/* todo: BAD approach!! */
+extern void __mnt_drop_write(struct vfsmount *);
+extern struct file *alloc_empty_file(int, const struct cred *);
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for lower inode */
+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
+/* reduce? gave up. */
+enum {
+	AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */
+	AuLsc_I_PARENT,		/* lower inode, parent first */
+	AuLsc_I_PARENT2,	/* copyup dirs */
+	AuLsc_I_PARENT3,	/* copyup wh */
+	AuLsc_I_CHILD,
+	AuLsc_I_CHILD2,
+	AuLsc_I_End
+};
+
+/* to make debugging easier, do not turn these into inline functions */
+#define MtxMustLock(mtx)	AuDebugOn(!mutex_is_locked(mtx))
+#define IMustLock(i)		AuDebugOn(!inode_is_locked(i))
+
+/* ---------------------------------------------------------------------- */
+
+static inline void vfsub_drop_nlink(struct inode *inode)
+{
+	AuDebugOn(!inode->i_nlink);
+	drop_nlink(inode);
+}
+
+static inline void vfsub_dead_dir(struct inode *inode)
+{
+	AuDebugOn(!S_ISDIR(inode->i_mode));
+	inode->i_flags |= S_DEAD;
+	clear_nlink(inode);
+}
+
+static inline int vfsub_native_ro(struct inode *inode)
+{
+	return sb_rdonly(inode->i_sb)
+		|| IS_RDONLY(inode)
+		/* || IS_APPEND(inode) */
+		|| IS_IMMUTABLE(inode);
+}
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb);
+#else
+AuStubInt0(vfsub_test_mntns, struct vfsmount *mnt, struct super_block *h_sb);
+#endif
+
+int vfsub_sync_filesystem(struct super_block *h_sb, int wait);
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did);
+struct file *vfsub_dentry_open(struct path *path, int flags);
+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
+struct au_branch;
+struct vfsub_aopen_args {
+	struct file		*file;
+	unsigned int		open_flag;
+	umode_t			create_mode;
+	struct au_branch	*br;
+};
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+		      struct vfsub_aopen_args *args);
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+					     struct dentry *parent, int len);
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+				    int len);
+
+struct vfsub_lkup_one_args {
+	struct dentry **errp;
+	struct qstr *name;
+	struct dentry *parent;
+};
+
+static inline struct dentry *vfsub_lkup_one(struct qstr *name,
+					    struct dentry *parent)
+{
+	return vfsub_lookup_one_len(name->name, parent, name->len);
+}
+
+void vfsub_call_lkup_one(void *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_mnt_want_write(struct vfsmount *mnt)
+{
+	int err;
+
+	lockdep_off();
+	err = mnt_want_write(mnt);
+	lockdep_on();
+	return err;
+}
+
+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt)
+{
+	lockdep_off();
+	mnt_drop_write(mnt);
+	lockdep_on();
+}
+
+#if 0 /* reserved */
+static inline void vfsub_mnt_drop_write_file(struct file *file)
+{
+	lockdep_off();
+	mnt_drop_write_file(file);
+	lockdep_on();
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_hinode;
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+				 struct dentry *d2, struct au_hinode *hdir2);
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+			 struct dentry *d2, struct au_hinode *hdir2);
+
+int vfsub_create(struct inode *dir, struct path *path, int mode,
+		 bool want_excl);
+int vfsub_symlink(struct inode *dir, struct path *path,
+		  const char *symname);
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
+	       struct path *path, struct inode **delegated_inode);
+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
+		 struct inode *hdir, struct path *path,
+		 struct inode **delegated_inode, unsigned int flags);
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_rmdir(struct inode *dir, struct path *path);
+
+/* ---------------------------------------------------------------------- */
+
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+		     loff_t *ppos);
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+			loff_t *ppos);
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+		      loff_t *ppos);
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
+		      loff_t *ppos);
+int vfsub_flush(struct file *file, fl_owner_t id);
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx);
+
+static inline loff_t vfsub_f_size_read(struct file *file)
+{
+	return i_size_read(file_inode(file));
+}
+
+static inline unsigned int vfsub_file_flags(struct file *file)
+{
+	unsigned int flags;
+
+	spin_lock(&file->f_lock);
+	flags = file->f_flags;
+	spin_unlock(&file->f_lock);
+
+	return flags;
+}
+
+static inline int vfsub_file_execed(struct file *file)
+{
+	/* todo: direct access f_flags */
+	return !!(vfsub_file_flags(file) & __FMODE_EXEC);
+}
+
+#if 0 /* reserved */
+static inline void vfsub_file_accessed(struct file *h_file)
+{
+	file_accessed(h_file);
+	vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+#if 0 /* reserved */
+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
+				     struct dentry *h_dentry)
+{
+	struct path h_path = {
+		.dentry	= h_dentry,
+		.mnt	= h_mnt
+	};
+	touch_atime(&h_path);
+	vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+static inline int vfsub_update_time(struct inode *h_inode,
+				    struct timespec64 *ts, int flags)
+{
+	return update_time(h_inode, ts, flags);
+	/* no vfsub_update_h_iattr() since we don't have struct path */
+}
+
+#ifdef CONFIG_FS_POSIX_ACL
+static inline int vfsub_acl_chmod(struct inode *h_inode, umode_t h_mode)
+{
+	int err;
+
+	err = posix_acl_chmod(h_inode, h_mode);
+	if (err == -EOPNOTSUPP)
+		err = 0;
+	return err;
+}
+#else
+AuStubInt0(vfsub_acl_chmod, struct inode *h_inode, umode_t h_mode);
+#endif
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+		     struct pipe_inode_info *pipe, size_t len,
+		     unsigned int flags);
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		       loff_t *ppos, size_t len, unsigned int flags);
+
+static inline long vfsub_truncate(struct path *path, loff_t length)
+{
+	long err;
+
+	lockdep_off();
+	err = vfs_truncate(path, length);
+	lockdep_on();
+	return err;
+}
+
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+		struct file *h_file);
+int vfsub_fsync(struct file *file, struct path *path, int datasync);
+
+/*
+ * reuse the branch fs's ioctl(FICLONE), even though aufs itself does not
+ * support that ioctl.
+ */
+static inline loff_t vfsub_clone_file_range(struct file *src, struct file *dst,
+					    loff_t len)
+{
+	loff_t err;
+
+	lockdep_off();
+	err = vfs_clone_file_range(src, 0, dst, 0, len, /*remap_flags*/0);
+	lockdep_on();
+
+	return err;
+}
+
+/* copy_file_range(2) is a system call */
+static inline ssize_t vfsub_copy_file_range(struct file *src, loff_t src_pos,
+					    struct file *dst, loff_t dst_pos,
+					    size_t len, unsigned int flags)
+{
+	ssize_t ssz;
+
+	lockdep_off();
+	ssz = vfs_copy_file_range(src, src_pos, dst, dst_pos, len, flags);
+	lockdep_on();
+
+	return ssz;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
+{
+	loff_t err;
+
+	lockdep_off();
+	err = vfs_llseek(file, offset, origin);
+	lockdep_on();
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+			    struct inode **delegated_inode);
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+			struct inode **delegated_inode);
+int vfsub_unlink(struct inode *dir, struct path *path,
+		 struct inode **delegated_inode, int force);
+
+static inline int vfsub_getattr(const struct path *path, struct kstat *st)
+{
+	return vfs_getattr(path, st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_setxattr(struct dentry *dentry, const char *name,
+				 const void *value, size_t size, int flags)
+{
+	int err;
+
+	lockdep_off();
+	err = vfs_setxattr(dentry, name, value, size, flags);
+	lockdep_on();
+
+	return err;
+}
+
+static inline int vfsub_removexattr(struct dentry *dentry, const char *name)
+{
+	int err;
+
+	lockdep_off();
+	err = vfs_removexattr(dentry, name);
+	lockdep_on();
+
+	return err;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_VFSUB_H__ */
diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c
new file mode 100644
index 00000000000..a7cc1109f69
--- /dev/null
+++ b/fs/aufs/wbr_policy.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * policies for selecting one among multiple writable branches
+ */
+
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* subset of cpup_attr() */
+static noinline_for_stack
+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
+{
+	int err, sbits;
+	struct iattr ia;
+	struct inode *h_isrc;
+
+	h_isrc = d_inode(h_src);
+	ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
+	ia.ia_mode = h_isrc->i_mode;
+	ia.ia_uid = h_isrc->i_uid;
+	ia.ia_gid = h_isrc->i_gid;
+	sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
+	au_cpup_attr_flags(d_inode(h_path->dentry), h_isrc->i_flags);
+	/* no delegation since it is just created */
+	err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+
+	/* is this nfs only? */
+	if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
+		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+		ia.ia_mode = h_isrc->i_mode;
+		err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+	}
+
+	return err;
+}
+
+#define AuCpdown_PARENT_OPQ	1
+#define AuCpdown_WHED		(1 << 1)
+#define AuCpdown_MADE_DIR	(1 << 2)
+#define AuCpdown_DIROPQ		(1 << 3)
+#define au_ftest_cpdown(flags, name)	((flags) & AuCpdown_##name)
+#define au_fset_cpdown(flags, name) \
+	do { (flags) |= AuCpdown_##name; } while (0)
+#define au_fclr_cpdown(flags, name) \
+	do { (flags) &= ~AuCpdown_##name; } while (0)
+
+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
+			     unsigned int *flags)
+{
+	int err;
+	struct dentry *opq_dentry;
+
+	opq_dentry = au_diropq_create(dentry, bdst);
+	err = PTR_ERR(opq_dentry);
+	if (IS_ERR(opq_dentry))
+		goto out;
+	dput(opq_dentry);
+	au_fset_cpdown(*flags, DIROPQ);
+
+out:
+	return err;
+}
+
+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
+			    struct inode *dir, aufs_bindex_t bdst)
+{
+	int err;
+	struct path h_path;
+	struct au_branch *br;
+
+	br = au_sbr(dentry->d_sb, bdst);
+	h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	err = 0;
+	if (d_is_positive(h_path.dentry)) {
+		h_path.mnt = au_br_mnt(br);
+		err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
+					  dentry);
+	}
+	dput(h_path.dentry);
+
+out:
+	return err;
+}
+
+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg)
+{
+	int err, rerr;
+	aufs_bindex_t bopq, btop;
+	struct path h_path;
+	struct dentry *parent;
+	struct inode *h_dir, *h_inode, *inode, *dir;
+	unsigned int *flags = arg;
+
+	btop = au_dbtop(dentry);
+	/* dentry is di-locked */
+	parent = dget_parent(dentry);
+	dir = d_inode(parent);
+	h_dir = d_inode(h_parent);
+	AuDebugOn(h_dir != au_h_iptr(dir, bdst));
+	IMustLock(h_dir);
+
+	err = au_lkup_neg(dentry, bdst, /*wh*/0);
+	if (unlikely(err < 0))
+		goto out;
+	h_path.dentry = au_h_dptr(dentry, bdst);
+	h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
+	err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path, 0755);
+	if (unlikely(err))
+		goto out_put;
+	au_fset_cpdown(*flags, MADE_DIR);
+
+	bopq = au_dbdiropq(dentry);
+	au_fclr_cpdown(*flags, WHED);
+	au_fclr_cpdown(*flags, DIROPQ);
+	if (au_dbwh(dentry) == bdst)
+		au_fset_cpdown(*flags, WHED);
+	if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
+		au_fset_cpdown(*flags, PARENT_OPQ);
+	h_inode = d_inode(h_path.dentry);
+	inode_lock_nested(h_inode, AuLsc_I_CHILD);
+	if (au_ftest_cpdown(*flags, WHED)) {
+		err = au_cpdown_dir_opq(dentry, bdst, flags);
+		if (unlikely(err)) {
+			inode_unlock(h_inode);
+			goto out_dir;
+		}
+	}
+
+	err = au_cpdown_attr(&h_path, au_h_dptr(dentry, btop));
+	inode_unlock(h_inode);
+	if (unlikely(err))
+		goto out_opq;
+
+	if (au_ftest_cpdown(*flags, WHED)) {
+		err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
+		if (unlikely(err))
+			goto out_opq;
+	}
+
+	inode = d_inode(dentry);
+	if (au_ibbot(inode) < bdst)
+		au_set_ibbot(inode, bdst);
+	au_set_h_iptr(inode, bdst, au_igrab(h_inode),
+		      au_hi_flags(inode, /*isdir*/1));
+	au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0);
+	goto out; /* success */
+
+	/* revert */
+out_opq:
+	if (au_ftest_cpdown(*flags, DIROPQ)) {
+		inode_lock_nested(h_inode, AuLsc_I_CHILD);
+		rerr = au_diropq_remove(dentry, bdst);
+		inode_unlock(h_inode);
+		if (unlikely(rerr)) {
+			AuIOErr("failed removing diropq for %pd b%d (%d)\n",
+				dentry, bdst, rerr);
+			err = -EIO;
+			goto out;
+		}
+	}
+out_dir:
+	if (au_ftest_cpdown(*flags, MADE_DIR)) {
+		rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
+		if (unlikely(rerr)) {
+			AuIOErr("failed removing %pd b%d (%d)\n",
+				dentry, bdst, rerr);
+			err = -EIO;
+		}
+	}
+out_put:
+	au_set_h_dptr(dentry, bdst, NULL);
+	if (au_dbbot(dentry) == bdst)
+		au_update_dbbot(dentry);
+out:
+	dput(parent);
+	return err;
+}
+
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	int err;
+	unsigned int flags;
+
+	flags = 0;
+	err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for create */
+
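+/*
+ * au_wbr_nonopq() clamps a candidate branch index so that the new entry is
+ * not created below an ancestor's diropq (opaque) marker: it returns the
+ * smallest of @bindex and every aufs ancestor's diropq index.
+ */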
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	int err, i, j, ndentry;
+	aufs_bindex_t bopq;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries, *parent, *d;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	parent = dget_parent(dentry);
+	err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
+	if (unlikely(err))
+		goto out_free;
+
+	err = bindex;
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			d = dentries[j];
+			di_read_lock_parent2(d, !AuLock_IR);
+			bopq = au_dbdiropq(d);
+			di_read_unlock(d, !AuLock_IR);
+			if (bopq >= 0 && bopq < err)
+				err = bopq;
+		}
+	}
+
+out_free:
+	dput(parent);
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
+{
+	for (; bindex >= 0; bindex--)
+		if (!au_br_rdonly(au_sbr(sb, bindex)))
+			return bindex;
+	return -EROFS;
+}
+
+/* top down parent */
+static int au_wbr_create_tdp(struct dentry *dentry,
+			     unsigned int flags __maybe_unused)
+{
+	int err;
+	aufs_bindex_t btop, bindex;
+	struct super_block *sb;
+	struct dentry *parent, *h_parent;
+
+	sb = dentry->d_sb;
+	btop = au_dbtop(dentry);
+	err = btop;
+	if (!au_br_rdonly(au_sbr(sb, btop)))
+		goto out;
+
+	err = -EROFS;
+	parent = dget_parent(dentry);
+	for (bindex = au_dbtop(parent); bindex < btop; bindex++) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || d_is_negative(h_parent))
+			continue;
+
+		if (!au_br_rdonly(au_sbr(sb, bindex))) {
+			err = bindex;
+			break;
+		}
+	}
+	dput(parent);
+
+	/* bottom up here */
+	if (unlikely(err < 0)) {
+		err = au_wbr_bu(sb, btop - 1);
+		if (err >= 0)
+			err = au_wbr_nonopq(dentry, err);
+	}
+
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* an exception for policies other than tdp */
+static int au_wbr_create_exp(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bwh, bdiropq;
+	struct dentry *parent;
+
+	err = -1;
+	bwh = au_dbwh(dentry);
+	parent = dget_parent(dentry);
+	bdiropq = au_dbdiropq(parent);
+	if (bwh >= 0) {
+		if (bdiropq >= 0)
+			err = min(bdiropq, bwh);
+		else
+			err = bwh;
+		AuDbg("%d\n", err);
+	} else if (bdiropq >= 0) {
+		err = bdiropq;
+		AuDbg("%d\n", err);
+	}
+	dput(parent);
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+	if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
+		err = -1;
+
+	AuDbg("%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* round robin */
+static int au_wbr_create_init_rr(struct super_block *sb)
+{
+	int err;
+
+	err = au_wbr_bu(sb, au_sbbot(sb));
+	atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
+	/* smp_mb(); */
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags)
+{
+	int err, nbr;
+	unsigned int u;
+	aufs_bindex_t bindex, bbot;
+	struct super_block *sb;
+	atomic_t *next;
+
+	err = au_wbr_create_exp(dentry);
+	if (err >= 0)
+		goto out;
+
+	sb = dentry->d_sb;
+	next = &au_sbi(sb)->si_wbr_rr_next;
+	bbot = au_sbbot(sb);
+	nbr = bbot + 1;
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		if (!au_ftest_wbr(flags, DIR)) {
+			err = atomic_dec_return(next) + 1;
+			/* modulo for 0 is meaningless */
+			if (unlikely(!err))
+				err = atomic_dec_return(next) + 1;
+		} else
+			err = atomic_read(next);
+		AuDbg("%d\n", err);
+		u = err;
+		err = u % nbr;
+		AuDbg("%d\n", err);
+		if (!au_br_rdonly(au_sbr(sb, err)))
+			break;
+		err = -EROFS;
+	}
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out:
+	AuDbg("%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space */
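+/*
+ * au_mfs() scans the writable branches with vfs_statfs() and records the one
+ * with the most available bytes in si_wbr_mfs; au_wbr_create_mfs() refreshes
+ * that cache only after mfs_expire jiffies, or when the cached branch has
+ * become invalid or read-only.
+ */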
+static void au_mfs(struct dentry *dentry, struct dentry *parent)
+{
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_wbr_mfs *mfs;
+	struct dentry *h_parent;
+	aufs_bindex_t bindex, bbot;
+	int err;
+	unsigned long long b, bavail;
+	struct path h_path;
+	/* reduce the stack usage */
+	struct kstatfs *st;
+
+	st = kmalloc(sizeof(*st), GFP_NOFS);
+	if (unlikely(!st)) {
+		AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
+		return;
+	}
+
+	bavail = 0;
+	sb = dentry->d_sb;
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	MtxMustLock(&mfs->mfs_lock);
+	mfs->mfs_bindex = -EROFS;
+	mfs->mfsrr_bytes = 0;
+	if (!parent) {
+		bindex = 0;
+		bbot = au_sbbot(sb);
+	} else {
+		bindex = au_dbtop(parent);
+		bbot = au_dbtaildir(parent);
+	}
+
+	for (; bindex <= bbot; bindex++) {
+		if (parent) {
+			h_parent = au_h_dptr(parent, bindex);
+			if (!h_parent || d_is_negative(h_parent))
+				continue;
+		}
+		br = au_sbr(sb, bindex);
+		if (au_br_rdonly(br))
+			continue;
+
+		/* sb->s_root for NFS is unreliable */
+		h_path.mnt = au_br_mnt(br);
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, st);
+		if (unlikely(err)) {
+			AuWarn1("failed statfs, b%d, %d\n", bindex, err);
+			continue;
+		}
+
+		/* when the available size is equal, select the lower one */
+		BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
+			     || sizeof(b) < sizeof(st->f_bsize));
+		b = st->f_bavail * st->f_bsize;
+		br->br_wbr->wbr_bytes = b;
+		if (b >= bavail) {
+			bavail = b;
+			mfs->mfs_bindex = bindex;
+			mfs->mfs_jiffy = jiffies;
+		}
+	}
+
+	mfs->mfsrr_bytes = bavail;
+	AuDbg("b%d\n", mfs->mfs_bindex);
+	au_kfree_rcu(st);
+}
+
+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags)
+{
+	int err;
+	struct dentry *parent;
+	struct super_block *sb;
+	struct au_wbr_mfs *mfs;
+
+	err = au_wbr_create_exp(dentry);
+	if (err >= 0)
+		goto out;
+
+	sb = dentry->d_sb;
+	parent = NULL;
+	if (au_ftest_wbr(flags, PARENT))
+		parent = dget_parent(dentry);
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_lock(&mfs->mfs_lock);
+	if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+	    || mfs->mfs_bindex < 0
+	    || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
+		au_mfs(dentry, parent);
+	mutex_unlock(&mfs->mfs_lock);
+	err = mfs->mfs_bindex;
+	dput(parent);
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_init_mfs(struct super_block *sb)
+{
+	struct au_wbr_mfs *mfs;
+
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_init(&mfs->mfs_lock);
+	mfs->mfs_jiffy = 0;
+	mfs->mfs_bindex = -EROFS;
+
+	return 0;
+}
+
+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
+{
+	mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down regardless of the parent, and then mfs */
+static int au_wbr_create_tdmfs(struct dentry *dentry,
+			       unsigned int flags __maybe_unused)
+{
+	int err;
+	aufs_bindex_t bwh, btail, bindex, bfound, bmfs;
+	unsigned long long watermark;
+	struct super_block *sb;
+	struct au_wbr_mfs *mfs;
+	struct au_branch *br;
+	struct dentry *parent;
+
+	sb = dentry->d_sb;
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_lock(&mfs->mfs_lock);
+	if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+	    || mfs->mfs_bindex < 0)
+		au_mfs(dentry, /*parent*/NULL);
+	watermark = mfs->mfsrr_watermark;
+	bmfs = mfs->mfs_bindex;
+	mutex_unlock(&mfs->mfs_lock);
+
+	/* another style of au_wbr_create_exp() */
+	bwh = au_dbwh(dentry);
+	parent = dget_parent(dentry);
+	btail = au_dbtaildir(parent);
+	if (bwh >= 0 && bwh < btail)
+		btail = bwh;
+
+	err = au_wbr_nonopq(dentry, btail);
+	if (unlikely(err < 0))
+		goto out;
+	btail = err;
+	bfound = -1;
+	for (bindex = 0; bindex <= btail; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_rdonly(br))
+			continue;
+		if (br->br_wbr->wbr_bytes > watermark) {
+			bfound = bindex;
+			break;
+		}
+	}
+	err = bfound;
+	if (err < 0)
+		err = bmfs;
+
+out:
+	dput(parent);
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space and then round robin */
+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags)
+{
+	int err;
+	struct au_wbr_mfs *mfs;
+
+	err = au_wbr_create_mfs(dentry, flags);
+	if (err >= 0) {
+		mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
+		mutex_lock(&mfs->mfs_lock);
+		if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
+			err = au_wbr_create_rr(dentry, flags);
+		mutex_unlock(&mfs->mfs_lock);
+	}
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_init_mfsrr(struct super_block *sb)
+{
+	int err;
+
+	au_wbr_create_init_mfs(sb); /* ignore */
+	err = au_wbr_create_init_rr(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down parent and most free space */
+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags)
+{
+	int err, e2;
+	unsigned long long b;
+	aufs_bindex_t bindex, btop, bbot;
+	struct super_block *sb;
+	struct dentry *parent, *h_parent;
+	struct au_branch *br;
+
+	err = au_wbr_create_tdp(dentry, flags);
+	if (unlikely(err < 0))
+		goto out;
+	parent = dget_parent(dentry);
+	btop = au_dbtop(parent);
+	bbot = au_dbtaildir(parent);
+	if (btop == bbot)
+		goto out_parent; /* success */
+
+	e2 = au_wbr_create_mfs(dentry, flags);
+	if (e2 < 0)
+		goto out_parent; /* success */
+
+	/* when the available size is equal, select the upper one */
+	sb = dentry->d_sb;
+	br = au_sbr(sb, err);
+	b = br->br_wbr->wbr_bytes;
+	AuDbg("b%d, %llu\n", err, b);
+
+	for (bindex = btop; bindex <= bbot; bindex++) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || d_is_negative(h_parent))
+			continue;
+
+		br = au_sbr(sb, bindex);
+		if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
+			b = br->br_wbr->wbr_bytes;
+			err = bindex;
+			AuDbg("b%d, %llu\n", err, b);
+		}
+	}
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out_parent:
+	dput(parent);
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * - top down parent
+ * - most free space under the parent
+ * - most free space round-robin regardless of the parent
+ */
+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags)
+{
+	int err;
+	unsigned long long watermark;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_wbr_mfs *mfs;
+
+	err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT);
+	if (unlikely(err < 0))
+		goto out;
+
+	sb = dentry->d_sb;
+	br = au_sbr(sb, err);
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_lock(&mfs->mfs_lock);
+	watermark = mfs->mfsrr_watermark;
+	mutex_unlock(&mfs->mfs_lock);
+	if (br->br_wbr->wbr_bytes < watermark)
+		/* regardless of the parent dir */
+		err = au_wbr_create_mfsrr(dentry, flags);
+
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for copyup */
+
+/* top down parent */
+static int au_wbr_copyup_tdp(struct dentry *dentry)
+{
+	return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0);
+}
+
+/* bottom up parent */
+static int au_wbr_copyup_bup(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bindex, btop;
+	struct dentry *parent, *h_parent;
+	struct super_block *sb;
+
+	err = -EROFS;
+	sb = dentry->d_sb;
+	parent = dget_parent(dentry);
+	btop = au_dbtop(parent);
+	for (bindex = au_dbtop(dentry); bindex >= btop; bindex--) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || d_is_negative(h_parent))
+			continue;
+
+		if (!au_br_rdonly(au_sbr(sb, bindex))) {
+			err = bindex;
+			break;
+		}
+	}
+	dput(parent);
+
+	/* bottom up here */
+	if (unlikely(err < 0))
+		err = au_wbr_bu(sb, btop - 1);
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* bottom up */
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t btop)
+{
+	int err;
+
+	err = au_wbr_bu(dentry->d_sb, btop);
+	AuDbg("b%d\n", err);
+	if (err > btop)
+		err = au_wbr_nonopq(dentry, err);
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_copyup_bu(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t btop;
+
+	btop = au_dbtop(dentry);
+	err = au_wbr_do_copyup_bu(dentry, btop);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
+	[AuWbrCopyup_TDP] = {
+		.copyup	= au_wbr_copyup_tdp
+	},
+	[AuWbrCopyup_BUP] = {
+		.copyup	= au_wbr_copyup_bup
+	},
+	[AuWbrCopyup_BU] = {
+		.copyup	= au_wbr_copyup_bu
+	}
+};
+
+struct au_wbr_create_operations au_wbr_create_ops[] = {
+	[AuWbrCreate_TDP] = {
+		.create	= au_wbr_create_tdp
+	},
+	[AuWbrCreate_RR] = {
+		.create	= au_wbr_create_rr,
+		.init	= au_wbr_create_init_rr
+	},
+	[AuWbrCreate_MFS] = {
+		.create	= au_wbr_create_mfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSV] = {
+		.create	= au_wbr_create_mfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSRR] = {
+		.create	= au_wbr_create_mfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSRRV] = {
+		.create	= au_wbr_create_mfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_TDMFS] = {
+		.create	= au_wbr_create_tdmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_TDMFSV] = {
+		.create	= au_wbr_create_tdmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFS] = {
+		.create	= au_wbr_create_pmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFSV] = {
+		.create	= au_wbr_create_pmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFSRR] = {
+		.create	= au_wbr_create_pmfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFSRRV] = {
+		.create	= au_wbr_create_pmfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	}
+};
diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c
new file mode 100644
index 00000000000..efe0c16508d
--- /dev/null
+++ b/fs/aufs/whout.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#include "aufs.h"
+
+#define WH_MASK			0444
+
+/*
+ * If a directory contains this file, then it is opaque.  The name starts
+ * with the .wh. prefix so that aufs lookup hides it.
+ */
+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ,
+					   sizeof(AUFS_WH_DIROPQ) - 1);
+
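+/*
+ * An ordinary whiteout hides a single entry and is named by prepending
+ * AUFS_WH_PFX to the original name (see au_wh_name_alloc() below), while the
+ * dedicated AUFS_WH_DIROPQ entry above marks the whole directory opaque.
+ */
+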
+/*
+ * generate a whiteout name, which is NOT NUL-terminated.
+ * @wh: whiteout qstr to be filled
+ * @name: original d_name
+ * returns zero on success, otherwise an error.
+ * on success, the caller must free wh->name with kfree().
+ */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
+{
+	char *p;
+
+	if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
+		return -ENAMETOOLONG;
+
+	wh->len = name->len + AUFS_WH_PFX_LEN;
+	p = kmalloc(wh->len, GFP_NOFS);
+	wh->name = p;
+	if (p) {
+		memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+		memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
+		/* smp_mb(); */
+		return 0;
+	}
+	return -ENOMEM;
+}
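+
+/*
+ * A minimal usage sketch (au_wh_lkup() below is the real caller):
+ *
+ *	struct qstr wh;
+ *	err = au_wh_name_alloc(&wh, base_name);
+ *	if (!err) {
+ *		wh_dentry = vfsub_lkup_one(&wh, h_parent);
+ *		au_kfree_try_rcu(wh.name);
+ *	}
+ */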
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test whether @wh_name exists under @h_parent.
+ * @try_sio specifies whether super-io is necessary.
+ */
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio)
+{
+	int err;
+	struct dentry *wh_dentry;
+
+	if (!try_sio)
+		wh_dentry = vfsub_lkup_one(wh_name, h_parent);
+	else
+		wh_dentry = au_sio_lkup_one(wh_name, h_parent);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry)) {
+		if (err == -ENAMETOOLONG)
+			err = 0;
+		goto out;
+	}
+
+	err = 0;
+	if (d_is_negative(wh_dentry))
+		goto out_wh; /* success */
+
+	err = 1;
+	if (d_is_reg(wh_dentry))
+		goto out_wh; /* success */
+
+	err = -EIO;
+	AuIOErr("%pd Invalid whiteout entry type 0%o.\n",
+		wh_dentry, d_inode(wh_dentry)->i_mode);
+
+out_wh:
+	dput(wh_dentry);
+out:
+	return err;
+}
+
+/*
+ * test whether @h_dentry is marked opaque.
+ */
+int au_diropq_test(struct dentry *h_dentry)
+{
+	int err;
+	struct inode *h_dir;
+
+	h_dir = d_inode(h_dentry);
+	err = au_wh_test(h_dentry, &diropq_name,
+			 au_test_h_perm_sio(h_dir, MAY_EXEC));
+	return err;
+}
+
+/*
+ * returns a negative dentry whose name is unique and temporary.
+ */
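+/*
+ * The generated name is the doubled AUFS_WH_PFX, followed by @prefix, a '.',
+ * and a hexadecimal counter of AUFS_WH_TMP_LEN digits, i.e. roughly
+ * ".wh..wh.<name>.<xxxx>" (assuming the usual ".wh." prefix).
+ */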
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+			     struct qstr *prefix)
+{
+	struct dentry *dentry;
+	int i;
+	char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
+		*name, *p;
+	/* strict atomic_t is unnecessary here */
+	static unsigned short cnt;
+	struct qstr qs;
+
+	BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
+
+	name = defname;
+	qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
+	if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
+		dentry = ERR_PTR(-ENAMETOOLONG);
+		if (unlikely(qs.len > NAME_MAX))
+			goto out;
+		dentry = ERR_PTR(-ENOMEM);
+		name = kmalloc(qs.len + 1, GFP_NOFS);
+		if (unlikely(!name))
+			goto out;
+	}
+
+	/* doubly whiteouted */
+	memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
+	p = name + AUFS_WH_PFX_LEN * 2;
+	memcpy(p, prefix->name, prefix->len);
+	p += prefix->len;
+	*p++ = '.';
+	AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
+
+	qs.name = name;
+	for (i = 0; i < 3; i++) {
+		sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
+		dentry = au_sio_lkup_one(&qs, h_parent);
+		if (IS_ERR(dentry) || d_is_negative(dentry))
+			goto out_name;
+		dput(dentry);
+	}
+	/* pr_warn("could not get random name\n"); */
+	dentry = ERR_PTR(-EEXIST);
+	AuDbg("%.*s\n", AuLNPair(&qs));
+	BUG();
+
+out_name:
+	if (name != defname)
+		au_kfree_try_rcu(name);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/*
+ * rename the @h_dentry on @br to the whiteouted temporary name.
+ */
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+	struct inode *h_dir, *delegated;
+	struct dentry *h_parent;
+
+	h_parent = h_dentry->d_parent; /* dir inode is locked */
+	h_dir = d_inode(h_parent);
+	IMustLock(h_dir);
+
+	h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	/* under the same dir, no need to lock_rename() */
+	delegated = NULL;
+	err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated,
+			   /*flags*/0);
+	AuTraceErr(err);
+	if (unlikely(err == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal rename\n");
+		iput(delegated);
+	}
+	dput(h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * functions for removing a whiteout
+ */
+
+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
+{
+	int err, force;
+	struct inode *delegated;
+
+	/*
+	 * forces superio when the dir has a sticky bit.
+	 * this may be a violation of unix fs semantics.
+	 */
+	force = (h_dir->i_mode & S_ISVTX)
+		&& !uid_eq(current_fsuid(), d_inode(h_path->dentry)->i_uid);
+	delegated = NULL;
+	err = vfsub_unlink(h_dir, h_path, &delegated, force);
+	if (unlikely(err == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal unlink\n");
+		iput(delegated);
+	}
+	return err;
+}
+
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+			struct dentry *dentry)
+{
+	int err;
+
+	err = do_unlink_wh(h_dir, h_path);
+	if (!err && dentry)
+		au_set_dbwh(dentry, -1);
+
+	return err;
+}
+
+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
+			  struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+
+	err = 0;
+	h_path.dentry = vfsub_lkup_one(wh, h_parent);
+	if (IS_ERR(h_path.dentry))
+		err = PTR_ERR(h_path.dentry);
+	else {
+		if (d_is_reg(h_path.dentry))
+			err = do_unlink_wh(d_inode(h_parent), &h_path);
+		dput(h_path.dentry);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * initialize/clean whiteout for a branch
+ */
+
+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
+			const int isdir)
+{
+	int err;
+	struct inode *delegated;
+
+	if (d_is_negative(whpath->dentry))
+		return;
+
+	if (isdir)
+		err = vfsub_rmdir(h_dir, whpath);
+	else {
+		delegated = NULL;
+		err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal unlink\n");
+			iput(delegated);
+		}
+	}
+	if (unlikely(err))
+		pr_warn("failed removing %pd (%d), ignored.\n",
+			whpath->dentry, err);
+}
+
+static int test_linkable(struct dentry *h_root)
+{
+	struct inode *h_dir = d_inode(h_root);
+
+	if (h_dir->i_op->link)
+		return 0;
+
+	pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n",
+	       h_root, au_sbtype(h_root->d_sb));
+	return -ENOSYS;	/* the branch doesn't have its ->link() */
+}
+
+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
+static int au_whdir(struct inode *h_dir, struct path *path)
+{
+	int err;
+
+	err = -EEXIST;
+	if (d_is_negative(path->dentry)) {
+		int mode = 0700;
+
+		if (au_test_nfs(path->dentry->d_sb))
+			mode |= 0111;
+		err = vfsub_mkdir(h_dir, path, mode);
+	} else if (d_is_dir(path->dentry))
+		err = 0;
+	else
+		pr_err("unknown %pd exists\n", path->dentry);
+
+	return err;
+}
+
+struct au_wh_base {
+	const struct qstr *name;
+	struct dentry *dentry;
+};
+
+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
+			  struct path *h_path)
+{
+	h_path->dentry = base[AuBrWh_BASE].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/0);
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/1);
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/1);
+}
+
+/*
+ * returns tri-state,
+ * minus: error, caller should print the message
+ * zero: success
+ * plus: error, caller should NOT print the message
+ */
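+/* e.g. au_wh_init() below prints a message only for the negative case. */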
+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
+				int do_plink, struct au_wh_base base[],
+				struct path *h_path)
+{
+	int err;
+	struct inode *h_dir;
+
+	h_dir = d_inode(h_root);
+	h_path->dentry = base[AuBrWh_BASE].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/0);
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	if (do_plink) {
+		err = test_linkable(h_root);
+		if (unlikely(err)) {
+			err = 1;
+			goto out;
+		}
+
+		err = au_whdir(h_dir, h_path);
+		if (unlikely(err))
+			goto out;
+		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+	} else
+		au_wh_clean(h_dir, h_path, /*isdir*/1);
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	err = au_whdir(h_dir, h_path);
+	if (unlikely(err))
+		goto out;
+	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+	return err;
+}
+
+/*
+ * for the moment, aufs supports a branch filesystem which does not support
+ * link(2), but in testing on FAT, which does not fully support
+ * i_op->setattr() either, copyup failed.  In the end, such a filesystem will
+ * not be used as the writable branch.
+ *
+ * returns tri-state, see above.
+ */
+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
+			 int do_plink, struct au_wh_base base[],
+			 struct path *h_path)
+{
+	int err;
+	struct inode *h_dir;
+
+	WbrWhMustWriteLock(wbr);
+
+	err = test_linkable(h_root);
+	if (unlikely(err)) {
+		err = 1;
+		goto out;
+	}
+
+	/*
+	 * todo: should this create be done in /sbin/mount.aufs helper?
+	 */
+	err = -EEXIST;
+	h_dir = d_inode(h_root);
+	if (d_is_negative(base[AuBrWh_BASE].dentry)) {
+		h_path->dentry = base[AuBrWh_BASE].dentry;
+		err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true);
+	} else if (d_is_reg(base[AuBrWh_BASE].dentry))
+		err = 0;
+	else
+		pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry);
+	if (unlikely(err))
+		goto out;
+
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	if (do_plink) {
+		err = au_whdir(h_dir, h_path);
+		if (unlikely(err))
+			goto out;
+		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+	} else
+		au_wh_clean(h_dir, h_path, /*isdir*/1);
+	wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
+
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	err = au_whdir(h_dir, h_path);
+	if (unlikely(err))
+		goto out;
+	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+	return err;
+}
+
+/*
+ * initialize the whiteout base file/dir for @br.
+ */
+int au_wh_init(struct au_branch *br, struct super_block *sb)
+{
+	int err, i;
+	const unsigned char do_plink
+		= !!au_opt_test(au_mntflags(sb), PLINK);
+	struct inode *h_dir;
+	struct path path = br->br_path;
+	struct dentry *h_root = path.dentry;
+	struct au_wbr *wbr = br->br_wbr;
+	static const struct qstr base_name[] = {
+		[AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME,
+					  sizeof(AUFS_BASE_NAME) - 1),
+		[AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME,
+					   sizeof(AUFS_PLINKDIR_NAME) - 1),
+		[AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME,
+					  sizeof(AUFS_ORPHDIR_NAME) - 1)
+	};
+	struct au_wh_base base[] = {
+		[AuBrWh_BASE] = {
+			.name	= base_name + AuBrWh_BASE,
+			.dentry	= NULL
+		},
+		[AuBrWh_PLINK] = {
+			.name	= base_name + AuBrWh_PLINK,
+			.dentry	= NULL
+		},
+		[AuBrWh_ORPH] = {
+			.name	= base_name + AuBrWh_ORPH,
+			.dentry	= NULL
+		}
+	};
+
+	if (wbr)
+		WbrWhMustWriteLock(wbr);
+
+	for (i = 0; i < AuBrWh_Last; i++) {
+		/* doubly whiteouted */
+		struct dentry *d;
+
+		d = au_wh_lkup(h_root, (void *)base[i].name, br);
+		err = PTR_ERR(d);
+		if (IS_ERR(d))
+			goto out;
+
+		base[i].dentry = d;
+		AuDebugOn(wbr
+			  && wbr->wbr_wh[i]
+			  && wbr->wbr_wh[i] != base[i].dentry);
+	}
+
+	if (wbr)
+		for (i = 0; i < AuBrWh_Last; i++) {
+			dput(wbr->wbr_wh[i]);
+			wbr->wbr_wh[i] = NULL;
+		}
+
+	err = 0;
+	if (!au_br_writable(br->br_perm)) {
+		h_dir = d_inode(h_root);
+		au_wh_init_ro(h_dir, base, &path);
+	} else if (!au_br_wh_linkable(br->br_perm)) {
+		err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
+		if (err > 0)
+			goto out;
+		else if (err)
+			goto out_err;
+	} else {
+		err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
+		if (err > 0)
+			goto out;
+		else if (err)
+			goto out_err;
+	}
+	goto out; /* success */
+
+out_err:
+	pr_err("an error(%d) on the writable branch %pd(%s)\n",
+	       err, h_root, au_sbtype(h_root->d_sb));
+out:
+	for (i = 0; i < AuBrWh_Last; i++)
+		dput(base[i].dentry);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * whiteouts are usually all hard links to a single base file.
+ * when the link count reaches its ceiling, we create a new whiteout base
+ * asynchronously.
+ */
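+/*
+ * In practice, link_or_create_wh() links new whiteouts to wbr_whbase; when
+ * the branch fs returns -EMLINK, it kicks kick_reinit_br_wh() to rebuild the
+ * base in the workqueue and falls back to creating a stand-alone whiteout
+ * for the current request.
+ */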
+
+struct reinit_br_wh {
+	struct super_block *sb;
+	struct au_branch *br;
+};
+
+static void reinit_br_wh(void *arg)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct path h_path;
+	struct reinit_br_wh *a = arg;
+	struct au_wbr *wbr;
+	struct inode *dir, *delegated;
+	struct dentry *h_root;
+	struct au_hinode *hdir;
+
+	err = 0;
+	wbr = a->br->br_wbr;
+	/* big aufs lock */
+	si_noflush_write_lock(a->sb);
+	if (!au_br_writable(a->br->br_perm))
+		goto out;
+	bindex = au_br_index(a->sb, a->br->br_id);
+	if (unlikely(bindex < 0))
+		goto out;
+
+	di_read_lock_parent(a->sb->s_root, AuLock_IR);
+	dir = d_inode(a->sb->s_root);
+	hdir = au_hi(dir, bindex);
+	h_root = au_h_dptr(a->sb->s_root, bindex);
+	AuDebugOn(h_root != au_br_dentry(a->br));
+
+	au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+	wbr_wh_write_lock(wbr);
+	err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
+			  h_root, a->br);
+	if (!err) {
+		h_path.dentry = wbr->wbr_whbase;
+		h_path.mnt = au_br_mnt(a->br);
+		delegated = NULL;
+		err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated,
+				   /*force*/0);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal unlink\n");
+			iput(delegated);
+		}
+	} else {
+		pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase);
+		err = 0;
+	}
+	dput(wbr->wbr_whbase);
+	wbr->wbr_whbase = NULL;
+	if (!err)
+		err = au_wh_init(a->br, a->sb);
+	wbr_wh_write_unlock(wbr);
+	au_hn_inode_unlock(hdir);
+	di_read_unlock(a->sb->s_root, AuLock_IR);
+	if (!err)
+		au_fhsm_wrote(a->sb, bindex, /*force*/0);
+
+out:
+	if (wbr)
+		atomic_dec(&wbr->wbr_wh_running);
+	au_lcnt_dec(&a->br->br_count);
+	si_write_unlock(a->sb);
+	au_nwt_done(&au_sbi(a->sb)->si_nowait);
+	au_kfree_rcu(a);
+	if (unlikely(err))
+		AuIOErr("err %d\n", err);
+}
+
+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
+{
+	int do_dec, wkq_err;
+	struct reinit_br_wh *arg;
+
+	do_dec = 1;
+	if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
+		goto out;
+
+	/* ignore ENOMEM */
+	arg = kmalloc(sizeof(*arg), GFP_NOFS);
+	if (arg) {
+		/*
+		 * dec(wh_running), kfree(arg) and dec(br_count)
+		 * in reinit function
+		 */
+		arg->sb = sb;
+		arg->br = br;
+		au_lcnt_inc(&br->br_count);
+		wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
+		if (unlikely(wkq_err)) {
+			atomic_dec(&br->br_wbr->wbr_wh_running);
+			au_lcnt_dec(&br->br_count);
+			au_kfree_rcu(arg);
+		}
+		do_dec = 0;
+	}
+
+out:
+	if (do_dec)
+		atomic_dec(&br->br_wbr->wbr_wh_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the whiteout @wh.
+ */
+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
+			     struct dentry *wh)
+{
+	int err;
+	struct path h_path = {
+		.dentry = wh
+	};
+	struct au_branch *br;
+	struct au_wbr *wbr;
+	struct dentry *h_parent;
+	struct inode *h_dir, *delegated;
+
+	h_parent = wh->d_parent; /* dir inode is locked */
+	h_dir = d_inode(h_parent);
+	IMustLock(h_dir);
+
+	br = au_sbr(sb, bindex);
+	h_path.mnt = au_br_mnt(br);
+	wbr = br->br_wbr;
+	wbr_wh_read_lock(wbr);
+	if (wbr->wbr_whbase) {
+		delegated = NULL;
+		err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated);
+		if (unlikely(err == -EWOULDBLOCK)) {
+			pr_warn("cannot retry for NFSv4 delegation"
+				" for an internal link\n");
+			iput(delegated);
+		}
+		if (!err || err != -EMLINK)
+			goto out;
+
+		/* link count full. re-initialize br_whbase. */
+		kick_reinit_br_wh(sb, br);
+	}
+
+	/* return this error in this context */
+	err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true);
+	if (!err)
+		au_fhsm_wrote(sb, bindex, /*force*/0);
+
+out:
+	wbr_wh_read_unlock(wbr);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create or remove the diropq.
+ */
+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
+				unsigned int flags)
+{
+	struct dentry *opq_dentry, *h_dentry;
+	struct super_block *sb;
+	struct au_branch *br;
+	int err;
+
+	sb = dentry->d_sb;
+	br = au_sbr(sb, bindex);
+	h_dentry = au_h_dptr(dentry, bindex);
+	opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry);
+	if (IS_ERR(opq_dentry))
+		goto out;
+
+	if (au_ftest_diropq(flags, CREATE)) {
+		err = link_or_create_wh(sb, bindex, opq_dentry);
+		if (!err) {
+			au_set_dbdiropq(dentry, bindex);
+			goto out; /* success */
+		}
+	} else {
+		struct path tmp = {
+			.dentry = opq_dentry,
+			.mnt	= au_br_mnt(br)
+		};
+		err = do_unlink_wh(au_h_iptr(d_inode(dentry), bindex), &tmp);
+		if (!err)
+			au_set_dbdiropq(dentry, -1);
+	}
+	dput(opq_dentry);
+	opq_dentry = ERR_PTR(err);
+
+out:
+	return opq_dentry;
+}
+
+struct do_diropq_args {
+	struct dentry **errp;
+	struct dentry *dentry;
+	aufs_bindex_t bindex;
+	unsigned int flags;
+};
+
+static void call_do_diropq(void *args)
+{
+	struct do_diropq_args *a = args;
+	*a->errp = do_diropq(a->dentry, a->bindex, a->flags);
+}
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+			     unsigned int flags)
+{
+	struct dentry *diropq, *h_dentry;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (!au_test_h_perm_sio(d_inode(h_dentry), MAY_EXEC | MAY_WRITE))
+		diropq = do_diropq(dentry, bindex, flags);
+	else {
+		int wkq_err;
+		struct do_diropq_args args = {
+			.errp		= &diropq,
+			.dentry		= dentry,
+			.bindex		= bindex,
+			.flags		= flags
+		};
+
+		wkq_err = au_wkq_wait(call_do_diropq, &args);
+		if (unlikely(wkq_err))
+			diropq = ERR_PTR(wkq_err);
+	}
+
+	return diropq;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lookup whiteout dentry.
+ * @h_parent: lower parent dentry which must exist and be locked
+ * @base_name: name of dentry which will be whiteouted
+ * returns dentry for whiteout.
+ */
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+			  struct au_branch *br)
+{
+	int err;
+	struct qstr wh_name;
+	struct dentry *wh_dentry;
+
+	err = au_wh_name_alloc(&wh_name, base_name);
+	wh_dentry = ERR_PTR(err);
+	if (!err) {
+		wh_dentry = vfsub_lkup_one(&wh_name, h_parent);
+		au_kfree_try_rcu(wh_name.name);
+	}
+	return wh_dentry;
+}
+
+/*
+ * link/create a whiteout for @dentry on @bindex.
+ */
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+			    struct dentry *h_parent)
+{
+	struct dentry *wh_dentry;
+	struct super_block *sb;
+	int err;
+
+	sb = dentry->d_sb;
+	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
+	if (!IS_ERR(wh_dentry) && d_is_negative(wh_dentry)) {
+		err = link_or_create_wh(sb, bindex, wh_dentry);
+		if (!err) {
+			au_set_dbwh(dentry, bindex);
+			au_fhsm_wrote(sb, bindex, /*force*/0);
+		} else {
+			dput(wh_dentry);
+			wh_dentry = ERR_PTR(err);
+		}
+	}
+
+	return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Delete all whiteouts in this directory on branch bindex. */
+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
+			   aufs_bindex_t bindex, struct au_branch *br)
+{
+	int err;
+	unsigned long ul, n;
+	struct qstr wh_name;
+	char *p;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct au_vdir_destr *str;
+
+	err = -ENOMEM;
+	p = (void *)__get_free_page(GFP_NOFS);
+	wh_name.name = p;
+	if (unlikely(!wh_name.name))
+		goto out;
+
+	err = 0;
+	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+	p += AUFS_WH_PFX_LEN;
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (ul = 0; !err && ul < n; ul++, head++) {
+		hlist_for_each_entry(pos, head, wh_hash) {
+			if (pos->wh_bindex != bindex)
+				continue;
+
+			str = &pos->wh_str;
+			if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
+				memcpy(p, str->name, str->len);
+				wh_name.len = AUFS_WH_PFX_LEN + str->len;
+				err = unlink_wh_name(h_dentry, &wh_name, br);
+				if (!err)
+					continue;
+				break;
+			}
+			AuIOErr("whiteout name too long %.*s\n",
+				str->len, str->name);
+			err = -EIO;
+			break;
+		}
+	}
+	free_page((unsigned long)wh_name.name);
+
+out:
+	return err;
+}
+
+struct del_wh_children_args {
+	int *errp;
+	struct dentry *h_dentry;
+	struct au_nhash *whlist;
+	aufs_bindex_t bindex;
+	struct au_branch *br;
+};
+
+static void call_del_wh_children(void *args)
+{
+	struct del_wh_children_args *a = args;
+	*a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
+{
+	struct au_whtmp_rmdir *whtmp;
+	int err;
+	unsigned int rdhash;
+
+	SiMustAnyLock(sb);
+
+	whtmp = kzalloc(sizeof(*whtmp), gfp);
+	if (unlikely(!whtmp)) {
+		whtmp = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	/* no estimation for dir size */
+	rdhash = au_sbi(sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = AUFS_RDHASH_DEF;
+	err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
+	if (unlikely(err)) {
+		au_kfree_rcu(whtmp);
+		whtmp = ERR_PTR(err);
+	}
+
+out:
+	return whtmp;
+}
+
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
+{
+	if (whtmp->br)
+		au_lcnt_dec(&whtmp->br->br_count);
+	dput(whtmp->wh_dentry);
+	iput(whtmp->dir);
+	au_nhash_wh_free(&whtmp->whlist);
+	au_kfree_rcu(whtmp);
+}
+
+/*
+ * rmdir the whiteouted, temporarily-named dir @wh_dentry.
+ * @whlist: whiteouted children.
+ */
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+		   struct dentry *wh_dentry, struct au_nhash *whlist)
+{
+	int err;
+	unsigned int h_nlink;
+	struct path h_tmp;
+	struct inode *wh_inode, *h_dir;
+	struct au_branch *br;
+
+	h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+	IMustLock(h_dir);
+
+	br = au_sbr(dir->i_sb, bindex);
+	wh_inode = d_inode(wh_dentry);
+	inode_lock_nested(wh_inode, AuLsc_I_CHILD);
+
+	/*
+	 * someone else might change some whiteouts while we were sleeping.
+	 * it means this whlist may contain an obsolete entry.
+	 */
+	if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
+		err = del_wh_children(wh_dentry, whlist, bindex, br);
+	else {
+		int wkq_err;
+		struct del_wh_children_args args = {
+			.errp		= &err,
+			.h_dentry	= wh_dentry,
+			.whlist		= whlist,
+			.bindex		= bindex,
+			.br		= br
+		};
+
+		wkq_err = au_wkq_wait(call_del_wh_children, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+	inode_unlock(wh_inode);
+
+	if (!err) {
+		h_tmp.dentry = wh_dentry;
+		h_tmp.mnt = au_br_mnt(br);
+		h_nlink = h_dir->i_nlink;
+		err = vfsub_rmdir(h_dir, &h_tmp);
+		/* some fs doesn't change the parent nlink in some cases */
+		h_nlink -= h_dir->i_nlink;
+	}
+
+	if (!err) {
+		if (au_ibtop(dir) == bindex) {
+			/* todo: dir->i_mutex is necessary */
+			au_cpup_attr_timesizes(dir);
+			if (h_nlink)
+				vfsub_drop_nlink(dir);
+		}
+		return 0; /* success */
+	}
+
+	pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err);
+	return err;
+}
+
+static void call_rmdir_whtmp(void *args)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct au_whtmp_rmdir *a = args;
+	struct super_block *sb;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+	struct au_hinode *hdir;
+
+	/* rmdir by nfsd may cause deadlock with this i_mutex */
+	/* inode_lock(a->dir); */
+	err = -EROFS;
+	sb = a->dir->i_sb;
+	si_read_lock(sb, !AuLock_FLUSH);
+	if (!au_br_writable(a->br->br_perm))
+		goto out;
+	bindex = au_br_index(sb, a->br->br_id);
+	if (unlikely(bindex < 0))
+		goto out;
+
+	err = -EIO;
+	ii_write_lock_parent(a->dir);
+	h_parent = dget_parent(a->wh_dentry);
+	h_dir = d_inode(h_parent);
+	hdir = au_hi(a->dir, bindex);
+	err = vfsub_mnt_want_write(au_br_mnt(a->br));
+	if (unlikely(err))
+		goto out_mnt;
+	au_hn_inode_lock_nested(hdir, AuLsc_I_PARENT);
+	err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
+			  a->br);
+	if (!err)
+		err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist);
+	au_hn_inode_unlock(hdir);
+	vfsub_mnt_drop_write(au_br_mnt(a->br));
+
+out_mnt:
+	dput(h_parent);
+	ii_write_unlock(a->dir);
+out:
+	/* inode_unlock(a->dir); */
+	au_whtmp_rmdir_free(a);
+	si_read_unlock(sb);
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	if (unlikely(err))
+		AuIOErr("err %d\n", err);
+}
+
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
+{
+	int wkq_err;
+	struct super_block *sb;
+
+	IMustLock(dir);
+
+	/* all post-processing will be done in call_rmdir_whtmp(). */
+	sb = dir->i_sb;
+	args->dir = au_igrab(dir);
+	args->br = au_sbr(sb, bindex);
+	au_lcnt_inc(&args->br->br_count);
+	args->wh_dentry = dget(wh_dentry);
+	wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
+	if (unlikely(wkq_err)) {
+		pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err);
+		au_whtmp_rmdir_free(args);
+	}
+}
diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h
new file mode 100644
index 00000000000..65459ba0e79
--- /dev/null
+++ b/fs/aufs/whout.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#ifndef __AUFS_WHOUT_H__
+#define __AUFS_WHOUT_H__
+
+#ifdef __KERNEL__
+
+#include "dir.h"
+
+/* whout.c */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio);
+int au_diropq_test(struct dentry *h_dentry);
+struct au_branch;
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+			     struct qstr *prefix);
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+			struct dentry *dentry);
+int au_wh_init(struct au_branch *br, struct super_block *sb);
+
+/* diropq flags */
+#define AuDiropq_CREATE	1
+#define au_ftest_diropq(flags, name)	((flags) & AuDiropq_##name)
+#define au_fset_diropq(flags, name) \
+	do { (flags) |= AuDiropq_##name; } while (0)
+#define au_fclr_diropq(flags, name) \
+	do { (flags) &= ~AuDiropq_##name; } while (0)
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+			     unsigned int flags);
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+			  struct au_branch *br);
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+			    struct dentry *h_parent);
+
+/* real rmdir for the whiteout-ed dir */
+struct au_whtmp_rmdir {
+	struct inode *dir;
+	struct au_branch *br;
+	struct dentry *wh_dentry;
+	struct au_nhash whlist;
+};
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+		   struct dentry *wh_dentry, struct au_nhash *whlist);
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_diropq_create(struct dentry *dentry,
+					      aufs_bindex_t bindex)
+{
+	return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
+}
+
+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c
new file mode 100644
index 00000000000..4d66bb2dc65
--- /dev/null
+++ b/fs/aufs/wkq.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credential scheme
+ */
+
+#include <linux/module.h>
+#include "aufs.h"
+
+/* internal workqueue named AUFS_WKQ_NAME */
+
+static struct workqueue_struct *au_wkq;
+
+struct au_wkinfo {
+	struct work_struct wk;
+	struct kobject *kobj;
+
+	unsigned int flags; /* see wkq.h */
+
+	au_wkq_func_t func;
+	void *args;
+
+#ifdef CONFIG_LOCKDEP
+	int dont_check;
+	struct held_lock **hlock;
+#endif
+
+	struct completion *comp;
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs passes some operations to the workqueue such as the internal copyup.
+ * This scheme looks rather unnatural for LOCKDEP debugging feature, since the
+ * job run by workqueue depends upon the locks acquired in the other task.
+ * Delegating a small operation to the workqueue, aufs passes its lockdep
+ * information too. And the job in the workqueue restores the info in order to
+ * pretend as if it acquired those locks. This is just to make LOCKDEP work
+ * correctly and expectedly.
+ */
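+/*
+ * Concretely: au_wkq_do_wait() snapshots the matching aufs rwsems via
+ * au_wkq_lockdep_alloc(), and wkq_func() brackets the delegated function
+ * with au_wkq_lockdep_pre()/au_wkq_lockdep_post(), which acquire and release
+ * those locks for lockdep's bookkeeping only.
+ */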
+
+#ifndef CONFIG_LOCKDEP
+AuStubInt0(au_wkq_lockdep_alloc, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_free, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_pre, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_post, struct au_wkinfo *wkinfo);
+AuStubVoid(au_wkq_lockdep_init, struct au_wkinfo *wkinfo);
+#else
+static void au_wkq_lockdep_init(struct au_wkinfo *wkinfo)
+{
+	wkinfo->hlock = NULL;
+	wkinfo->dont_check = 0;
+}
+
+/*
+ * 1: matched
+ * 0: unmatched
+ */
+static int au_wkq_lockdep_test(struct lock_class_key *key, const char *name)
+{
+	static DEFINE_SPINLOCK(spin);
+	static struct {
+		char *name;
+		struct lock_class_key *key;
+	} a[] = {
+		{ .name = "&sbinfo->si_rwsem" },
+		{ .name = "&finfo->fi_rwsem" },
+		{ .name = "&dinfo->di_rwsem" },
+		{ .name = "&iinfo->ii_rwsem" }
+	};
+	static int set;
+	int i;
+
+	/* lockless read from 'set.' see below */
+	if (set == ARRAY_SIZE(a)) {
+		for (i = 0; i < ARRAY_SIZE(a); i++)
+			if (a[i].key == key)
+				goto match;
+		goto unmatch;
+	}
+
+	spin_lock(&spin);
+	if (set)
+		for (i = 0; i < ARRAY_SIZE(a); i++)
+			if (a[i].key == key) {
+				spin_unlock(&spin);
+				goto match;
+			}
+	for (i = 0; i < ARRAY_SIZE(a); i++) {
+		if (a[i].key) {
+			if (unlikely(a[i].key == key)) { /* rare but possible */
+				spin_unlock(&spin);
+				goto match;
+			} else
+				continue;
+		}
+		if (strstr(a[i].name, name)) {
+			/*
+			 * the order of these three lines is important for the
+			 * lockless read above.
+			 */
+			a[i].key = key;
+			spin_unlock(&spin);
+			set++;
+			/* AuDbg("%d, %s\n", set, name); */
+			goto match;
+		}
+	}
+	spin_unlock(&spin);
+	goto unmatch;
+
+match:
+	return 1;
+unmatch:
+	return 0;
+}
+
+static int au_wkq_lockdep_alloc(struct au_wkinfo *wkinfo)
+{
+	int err, n;
+	struct task_struct *curr;
+	struct held_lock **hl, *held_locks, *p;
+
+	err = 0;
+	curr = current;
+	wkinfo->dont_check = lockdep_recursing(curr);
+	if (wkinfo->dont_check)
+		goto out;
+	n = curr->lockdep_depth;
+	if (!n)
+		goto out;
+
+	err = -ENOMEM;
+	wkinfo->hlock = kmalloc_array(n + 1, sizeof(*wkinfo->hlock), GFP_NOFS);
+	if (unlikely(!wkinfo->hlock))
+		goto out;
+
+	err = 0;
+#if 0 /* left for debugging */
+	if (0 && au_debug_test())
+		lockdep_print_held_locks(curr);
+#endif
+	held_locks = curr->held_locks;
+	hl = wkinfo->hlock;
+	while (n--) {
+		p = held_locks++;
+		if (au_wkq_lockdep_test(p->instance->key, p->instance->name))
+			*hl++ = p;
+	}
+	*hl = NULL;
+
+out:
+	return err;
+}
+
+static void au_wkq_lockdep_free(struct au_wkinfo *wkinfo)
+{
+	au_kfree_try_rcu(wkinfo->hlock);
+}
+
+static void au_wkq_lockdep_pre(struct au_wkinfo *wkinfo)
+{
+	struct held_lock *p, **hl = wkinfo->hlock;
+	int subclass;
+
+	if (wkinfo->dont_check)
+		lockdep_off();
+	if (!hl)
+		return;
+	while ((p = *hl++)) { /* assignment */
+		subclass = lockdep_hlock_class(p)->subclass;
+		/* AuDbg("%s, %d\n", p->instance->name, subclass); */
+		if (p->read)
+			rwsem_acquire_read(p->instance, subclass, 0,
+					   /*p->acquire_ip*/_RET_IP_);
+		else
+			rwsem_acquire(p->instance, subclass, 0,
+				      /*p->acquire_ip*/_RET_IP_);
+	}
+}
+
+static void au_wkq_lockdep_post(struct au_wkinfo *wkinfo)
+{
+	struct held_lock *p, **hl = wkinfo->hlock;
+
+	if (wkinfo->dont_check)
+		lockdep_on();
+	if (!hl)
+		return;
+	while ((p = *hl++)) /* assignment */
+		rwsem_release(p->instance, /*p->acquire_ip*/_RET_IP_);
+}
+#endif
+
+static void wkq_func(struct work_struct *wk)
+{
+	struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
+
+	AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
+	AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
+
+	au_wkq_lockdep_pre(wkinfo);
+	wkinfo->func(wkinfo->args);
+	au_wkq_lockdep_post(wkinfo);
+	if (au_ftest_wkq(wkinfo->flags, WAIT))
+		complete(wkinfo->comp);
+	else {
+		kobject_put(wkinfo->kobj);
+		module_put(THIS_MODULE); /* todo: ?? */
+		au_kfree_rcu(wkinfo);
+	}
+}
+
+/*
+ * Since struct completion is large, try allocating it dynamically.
+ */
+#define AuWkqCompDeclare(name)	struct completion *comp = NULL
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+	*comp = kmalloc(sizeof(**comp), GFP_NOFS);
+	if (*comp) {
+		init_completion(*comp);
+		wkinfo->comp = *comp;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+static void au_wkq_comp_free(struct completion *comp)
+{
+	au_kfree_rcu(comp);
+}
+
+static void au_wkq_run(struct au_wkinfo *wkinfo)
+{
+	if (au_ftest_wkq(wkinfo->flags, NEST)) {
+		if (au_wkq_test()) {
+			AuWarn1("wkq from wkq, unless silly-rename on NFS,"
+				" due to a dead dir by UDBA,"
+				" or async xino write?\n");
+			AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+		}
+	} else
+		au_dbg_verify_kthread();
+
+	if (au_ftest_wkq(wkinfo->flags, WAIT)) {
+		INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
+		queue_work(au_wkq, &wkinfo->wk);
+	} else {
+		INIT_WORK(&wkinfo->wk, wkq_func);
+		schedule_work(&wkinfo->wk);
+	}
+}
+
+/*
+ * Be careful. It is easy to cause a deadlock.
+ * processA: lock, wkq and wait
+ * processB: wkq and wait, lock in wkq
+ * --> deadlock
+ */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
+{
+	int err;
+	AuWkqCompDeclare(comp);
+	struct au_wkinfo wkinfo = {
+		.flags	= flags,
+		.func	= func,
+		.args	= args
+	};
+
+	err = au_wkq_comp_alloc(&wkinfo, &comp);
+	if (unlikely(err))
+		goto out;
+	err = au_wkq_lockdep_alloc(&wkinfo);
+	if (unlikely(err))
+		goto out_comp;
+	if (!err) {
+		au_wkq_run(&wkinfo);
+		/* no timeout, no interrupt */
+		wait_for_completion(wkinfo.comp);
+	}
+	au_wkq_lockdep_free(&wkinfo);
+
+out_comp:
+	au_wkq_comp_free(comp);
+out:
+	destroy_work_on_stack(&wkinfo.wk);
+	return err;
+}
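+
+#if 0 /* illustrative sketch, not part of aufs */
+/*
+ * A minimal sketch of how a caller typically uses the synchronous side of
+ * this workqueue: pack the arguments and an error pointer into a local
+ * struct, hand a plain callback to au_wkq_wait(), and read the result after
+ * it returns. The names ex_args, ex_worker and ex_delegate are hypothetical;
+ * the same pattern appears in xino_fread_wkq() in fs/aufs/xino.c.
+ */
+struct ex_args {
+	int *errp;	/* result written by the worker */
+	int input;
+};
+
+static void ex_worker(void *args)
+{
+	struct ex_args *a = args;
+
+	/* runs in the aufs workqueue as root, see wkq_func() above */
+	*a->errp = (a->input >= 0) ? 0 : -EINVAL;
+}
+
+static int ex_delegate(int input)
+{
+	int err, wkq_err;
+	struct ex_args args = {
+		.errp	= &err,
+		.input	= input
+	};
+
+	wkq_err = au_wkq_wait(ex_worker, &args);
+	if (unlikely(wkq_err))
+		err = wkq_err;
+	return err;
+}
+#endif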
+
+/*
+ * Note: dget/dput() for aufs dentries in func are not supported. It would be
+ * a problem during a concurrent umount.
+ */
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+		  unsigned int flags)
+{
+	int err;
+	struct au_wkinfo *wkinfo;
+
+	atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
+
+	/*
+	 * wkq_func() must free this wkinfo.
+	 * it highly depends upon the workqueue implementation.
+	 */
+	err = 0;
+	wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
+	if (wkinfo) {
+		wkinfo->kobj = &au_sbi(sb)->si_kobj;
+		wkinfo->flags = flags & ~AuWkq_WAIT;
+		wkinfo->func = func;
+		wkinfo->args = args;
+		wkinfo->comp = NULL;
+		au_wkq_lockdep_init(wkinfo);
+		kobject_get(wkinfo->kobj);
+		__module_get(THIS_MODULE); /* todo: ?? */
+
+		au_wkq_run(wkinfo);
+	} else {
+		err = -ENOMEM;
+		au_nwt_done(&au_sbi(sb)->si_nowait);
+	}
+
+	return err;
+}
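+
+#if 0 /* illustrative sketch, not part of aufs */
+/*
+ * A minimal sketch of the fire-and-forget side: the caller allocates the
+ * argument block, au_wkq_nowait() accounts the job in si_nowait, and the
+ * callback is responsible for au_nwt_done() and for freeing the block.
+ * The names ex_nowait_args, ex_nowait_cb and ex_fire_and_forget are
+ * hypothetical; xino_do_trunc()/xino_try_trunc() in fs/aufs/xino.c follow
+ * the same pattern.
+ */
+struct ex_nowait_args {
+	struct super_block *sb;
+	int val;
+};
+
+static void ex_nowait_cb(void *_args)
+{
+	struct ex_nowait_args *args = _args;
+	struct super_block *sb = args->sb;
+
+	/* ... the real work goes here ... */
+
+	/* tell the superblock that one 'nowait' job has finished */
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	au_kfree_rcu(args);
+}
+
+static int ex_fire_and_forget(struct super_block *sb, int val)
+{
+	int err;
+	struct ex_nowait_args *args;
+
+	err = -ENOMEM;
+	args = kmalloc(sizeof(*args), GFP_NOFS);
+	if (unlikely(!args))
+		goto out;
+	args->sb = sb;
+	args->val = val;
+	/* on failure au_wkq_nowait() calls au_nwt_done() itself */
+	err = au_wkq_nowait(ex_nowait_cb, args, sb, /*flags*/0);
+	if (unlikely(err))
+		au_kfree_rcu(args);
+
+out:
+	return err;
+}
+#endif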
+
+/* ---------------------------------------------------------------------- */
+
+void au_nwt_init(struct au_nowait_tasks *nwt)
+{
+	atomic_set(&nwt->nw_len, 0);
+	/* smp_mb(); */ /* atomic_set */
+	init_waitqueue_head(&nwt->nw_wq);
+}
+
+void au_wkq_fin(void)
+{
+	destroy_workqueue(au_wkq);
+}
+
+int __init au_wkq_init(void)
+{
+	int err;
+
+	err = 0;
+	au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
+	if (IS_ERR(au_wkq))
+		err = PTR_ERR(au_wkq);
+	else if (!au_wkq)
+		err = -ENOMEM;
+
+	return err;
+}
diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h
new file mode 100644
index 00000000000..5be76b69d8f
--- /dev/null
+++ b/fs/aufs/wkq.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credentials management scheme
+ */
+
+#ifndef __AUFS_WKQ_H__
+#define __AUFS_WKQ_H__
+
+#ifdef __KERNEL__
+
+#include <linux/wait.h>
+
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * in the next operation, wait for the 'nowait' tasks in the system-wide
+ * workqueue
+ */
+struct au_nowait_tasks {
+	atomic_t		nw_len;
+	wait_queue_head_t	nw_wq;
+};
+
+/* ---------------------------------------------------------------------- */
+
+typedef void (*au_wkq_func_t)(void *args);
+
+/* wkq flags */
+#define AuWkq_WAIT	1
+#define AuWkq_NEST	(1 << 1)
+#define au_ftest_wkq(flags, name)	((flags) & AuWkq_##name)
+#define au_fset_wkq(flags, name) \
+	do { (flags) |= AuWkq_##name; } while (0)
+#define au_fclr_wkq(flags, name) \
+	do { (flags) &= ~AuWkq_##name; } while (0)
+
+/* wkq.c */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+		  unsigned int flags);
+void au_nwt_init(struct au_nowait_tasks *nwt);
+int __init au_wkq_init(void);
+void au_wkq_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int au_wkq_test(void)
+{
+	return current->flags & PF_WQ_WORKER;
+}
+
+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
+{
+	return au_wkq_do_wait(AuWkq_WAIT, func, args);
+}
+
+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
+{
+	if (atomic_dec_and_test(&nwt->nw_len))
+		wake_up_all(&nwt->nw_wq);
+}
+
+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
+{
+	wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
+	return 0;
+}
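+
+#if 0 /* illustrative sketch, not part of aufs */
+/*
+ * A minimal sketch of the other half of the nw_len accounting above: a
+ * teardown or remount path can block until every pending job queued via
+ * au_wkq_nowait() has called au_nwt_done(). The function name is
+ * hypothetical.
+ */
+static inline void ex_wait_for_nowait_jobs(struct au_nowait_tasks *nwt)
+{
+	au_nwt_flush(nwt);	/* sleeps until nw_len drops to zero */
+}
+#endif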
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WKQ_H__ */
diff --git a/fs/aufs/xattr.c b/fs/aufs/xattr.c
new file mode 100644
index 00000000000..103c4275e5f
--- /dev/null
+++ b/fs/aufs/xattr.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * xattr handling
+ */
+
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+#include "aufs.h"
+
+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags)
+{
+	if (!ignore_flags)
+		goto out;
+	switch (err) {
+	case -ENOMEM:
+	case -EDQUOT:
+		goto out;
+	}
+
+	if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) {
+		err = 0;
+		goto out;
+	}
+
+#define cmp(brattr, prefix) do {					\
+		if (!strncmp(name, XATTR_##prefix##_PREFIX,		\
+			     XATTR_##prefix##_PREFIX_LEN)) {		\
+			if (ignore_flags & AuBrAttr_ICEX_##brattr)	\
+				err = 0;				\
+			goto out;					\
+		}							\
+	} while (0)
+
+	cmp(SEC, SECURITY);
+	cmp(SYS, SYSTEM);
+	cmp(TR, TRUSTED);
+	cmp(USR, USER);
+#undef cmp
+
+	if (ignore_flags & AuBrAttr_ICEX_OTH)
+		err = 0;
+
+out:
+	return err;
+}
+
+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1;
+
+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src,
+			    char *name, char **buf, unsigned int ignore_flags,
+			    unsigned int verbose)
+{
+	int err;
+	ssize_t ssz;
+	struct inode *h_idst;
+
+	ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS);
+	err = ssz;
+	if (unlikely(err <= 0)) {
+		if (err == -ENODATA
+		    || (err == -EOPNOTSUPP
+			&& ((ignore_flags & au_xattr_out_of_list)
+			    || (au_test_nfs_noacl(d_inode(h_src))
+				&& (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)
+				    || !strcmp(name,
+					       XATTR_NAME_POSIX_ACL_DEFAULT))))
+			    ))
+			err = 0;
+		if (err && (verbose || au_debug_test()))
+			pr_err("%s, err %d\n", name, err);
+		goto out;
+	}
+
+	/* unlock it temporarily */
+	h_idst = d_inode(h_dst);
+	inode_unlock(h_idst);
+	err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0);
+	inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+	if (unlikely(err)) {
+		if (verbose || au_debug_test())
+			pr_err("%s, err %d\n", name, err);
+		err = au_xattr_ignore(err, name, ignore_flags);
+	}
+
+out:
+	return err;
+}
+
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+		  unsigned int verbose)
+{
+	int err, unlocked, acl_access, acl_default;
+	ssize_t ssz;
+	struct inode *h_isrc, *h_idst;
+	char *value, *p, *o, *e;
+
+	/* try to keep the source inode unchanged while we refer to it */
+	/* there should be no parent-child relationship between them */
+	h_isrc = d_inode(h_src);
+	h_idst = d_inode(h_dst);
+	inode_unlock(h_idst);
+	inode_lock_shared_nested(h_isrc, AuLsc_I_CHILD);
+	inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+	unlocked = 0;
+
+	/* some filesystems don't list POSIX ACL, for example tmpfs */
+	ssz = vfs_listxattr(h_src, NULL, 0);
+	err = ssz;
+	if (unlikely(err < 0)) {
+		AuTraceErr(err);
+		if (err == -ENODATA
+		    || err == -EOPNOTSUPP)
+			err = 0;	/* ignore */
+		goto out;
+	}
+
+	err = 0;
+	p = NULL;
+	o = NULL;
+	if (ssz) {
+		err = -ENOMEM;
+		p = kmalloc(ssz, GFP_NOFS);
+		o = p;
+		if (unlikely(!p))
+			goto out;
+		err = vfs_listxattr(h_src, p, ssz);
+	}
+	inode_unlock_shared(h_isrc);
+	unlocked = 1;
+	AuDbg("err %d, ssz %zd\n", err, ssz);
+	if (unlikely(err < 0))
+		goto out_free;
+
+	err = 0;
+	e = p + ssz;
+	value = NULL;
+	acl_access = 0;
+	acl_default = 0;
+	while (!err && p < e) {
+		acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS,
+				       sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1);
+		acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT,
+					sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)
+					- 1);
+		err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags,
+				       verbose);
+		p += strlen(p) + 1;
+	}
+	AuTraceErr(err);
+	ignore_flags |= au_xattr_out_of_list;
+	if (!err && !acl_access) {
+		err = au_do_cpup_xattr(h_dst, h_src,
+				       XATTR_NAME_POSIX_ACL_ACCESS, &value,
+				       ignore_flags, verbose);
+		AuTraceErr(err);
+	}
+	if (!err && !acl_default) {
+		err = au_do_cpup_xattr(h_dst, h_src,
+				       XATTR_NAME_POSIX_ACL_DEFAULT, &value,
+				       ignore_flags, verbose);
+		AuTraceErr(err);
+	}
+
+	au_kfree_try_rcu(value);
+
+out_free:
+	au_kfree_try_rcu(o);
+out:
+	if (!unlocked)
+		inode_unlock_shared(h_isrc);
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_smack_reentering(struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_SECURITY_SMACK) || IS_ENABLED(CONFIG_SECURITY_SELINUX)
+	/*
+	 * as a part of lookup, smack_d_instantiate() is called, and it calls
+	 * i_op->getxattr(). ouch.
+	 */
+	return si_pid_test(sb);
+#else
+	return 0;
+#endif
+}
+
+enum {
+	AU_XATTR_LIST,
+	AU_XATTR_GET
+};
+
+struct au_lgxattr {
+	int type;
+	union {
+		struct {
+			char	*list;
+			size_t	size;
+		} list;
+		struct {
+			const char	*name;
+			void		*value;
+			size_t		size;
+		} get;
+	} u;
+};
+
+static ssize_t au_lgxattr(struct dentry *dentry, struct inode *inode,
+			  struct au_lgxattr *arg)
+{
+	ssize_t err;
+	int reenter;
+	struct path h_path;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	reenter = au_smack_reentering(sb);
+	if (!reenter) {
+		err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+		if (unlikely(err))
+			goto out;
+	}
+	err = au_h_path_getattr(dentry, inode, /*force*/1, &h_path, reenter);
+	if (unlikely(err))
+		goto out_si;
+	if (unlikely(!h_path.dentry))
+		/* illegally overlapped or something */
+		goto out_di; /* pretending success */
+
+	/* always topmost entry only */
+	switch (arg->type) {
+	case AU_XATTR_LIST:
+		err = vfs_listxattr(h_path.dentry,
+				    arg->u.list.list, arg->u.list.size);
+		break;
+	case AU_XATTR_GET:
+		AuDebugOn(d_is_negative(h_path.dentry));
+		err = vfs_getxattr(h_path.dentry,
+				   arg->u.get.name, arg->u.get.value,
+				   arg->u.get.size);
+		break;
+	}
+
+out_di:
+	if (!reenter)
+		di_read_unlock(dentry, AuLock_IR);
+out_si:
+	if (!reenter)
+		si_read_unlock(sb);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+	struct au_lgxattr arg = {
+		.type = AU_XATTR_LIST,
+		.u.list = {
+			.list	= list,
+			.size	= size
+		},
+	};
+
+	return au_lgxattr(dentry, /*inode*/NULL, &arg);
+}
+
+static ssize_t au_getxattr(struct dentry *dentry, struct inode *inode,
+			   const char *name, void *value, size_t size)
+{
+	struct au_lgxattr arg = {
+		.type = AU_XATTR_GET,
+		.u.get = {
+			.name	= name,
+			.value	= value,
+			.size	= size
+		},
+	};
+
+	return au_lgxattr(dentry, inode, &arg);
+}
+
+static int au_setxattr(struct dentry *dentry, struct inode *inode,
+		       const char *name, const void *value, size_t size,
+		       int flags)
+{
+	struct au_sxattr arg = {
+		.type = AU_XATTR_SET,
+		.u.set = {
+			.name	= name,
+			.value	= value,
+			.size	= size,
+			.flags	= flags
+		},
+	};
+
+	return au_sxattr(dentry, inode, &arg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xattr_get(const struct xattr_handler *handler,
+			struct dentry *dentry, struct inode *inode,
+			const char *name, void *buffer, size_t size)
+{
+	return au_getxattr(dentry, inode, name, buffer, size);
+}
+
+static int au_xattr_set(const struct xattr_handler *handler,
+			struct dentry *dentry, struct inode *inode,
+			const char *name, const void *value, size_t size,
+			int flags)
+{
+	return au_setxattr(dentry, inode, name, value, size, flags);
+}
+
+static const struct xattr_handler au_xattr_handler = {
+	.name	= "",
+	.prefix	= "",
+	.get	= au_xattr_get,
+	.set	= au_xattr_set
+};
+
+static const struct xattr_handler *au_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
+	&posix_acl_access_xattr_handler,
+	&posix_acl_default_xattr_handler,
+#endif
+	&au_xattr_handler, /* must be last */
+	NULL
+};
+
+void au_xattr_init(struct super_block *sb)
+{
+	sb->s_xattr = au_xattr_handlers;
+}
diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c
new file mode 100644
index 00000000000..b3152c0ce0b
--- /dev/null
+++ b/fs/aufs/xino.c
@@ -0,0 +1,1966 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * external inode number translation table and bitmap
+ *
+ * things to consider
+ * - the lifetime
+ *   + au_xino object
+ *   + XINO files (xino, xib, xigen)
+ *   + dynamic debugfs entries (xiN)
+ *   + static debugfs entries (xib, xigen)
+ *   + static sysfs entry (xi_path)
+ * - several entry points to handle them.
+ *   + mount(2) without xino option (default)
+ *   + mount(2) with xino option
+ *   + mount(2) with noxino option
+ *   + umount(2)
+ *   + remount with add/del branches
+ *   + remount with xino/noxino options
+ */
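+
+/*
+ * illustrative usage (hypothetical paths; see
+ * Documentation/filesystems/aufs/README for the full option syntax),
+ * exercising a few of the entry points listed above:
+ *	mount -t aufs -o br=/wr=rw:/base=ro,xino=/wr/.aufs.xino none /mnt/aufs
+ *	mount -o remount,noxino /mnt/aufs
+ *	mount -o remount,trunc_xino /mnt/aufs
+ */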
+
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+static aufs_bindex_t sbr_find_shared(struct super_block *sb, aufs_bindex_t btop,
+				     aufs_bindex_t bbot,
+				     struct super_block *h_sb)
+{
+	/* todo: try binary-search if the branches are many */
+	for (; btop <= bbot; btop++)
+		if (h_sb == au_sbr_sb(sb, btop))
+			return btop;
+	return -1;
+}
+
+/*
+ * find another branch which is on the same filesystem as the specified
+ * branch{@btgt}. search until @bbot.
+ */
+static aufs_bindex_t is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
+				  aufs_bindex_t bbot)
+{
+	aufs_bindex_t bindex;
+	struct super_block *tgt_sb;
+
+	tgt_sb = au_sbr_sb(sb, btgt);
+	bindex = sbr_find_shared(sb, /*btop*/0, btgt - 1, tgt_sb);
+	if (bindex < 0)
+		bindex = sbr_find_shared(sb, btgt + 1, bbot, tgt_sb);
+
+	return bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * suppress unnecessary notify events while creating xino files
+ */
+
+aufs_bindex_t au_xi_root(struct super_block *sb, struct dentry *dentry)
+{
+	aufs_bindex_t bfound, bindex, bbot;
+	struct dentry *parent;
+	struct au_branch *br;
+
+	bfound = -1;
+	parent = dentry->d_parent; /* safe d_parent access */
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_dentry(br) == parent) {
+			bfound = bindex;
+			break;
+		}
+	}
+
+	AuDbg("bfound b%d\n", bfound);
+	return bfound;
+}
+
+struct au_xino_lock_dir {
+	struct au_hinode *hdir;
+	struct dentry *parent;
+	struct inode *dir;
+};
+
+static struct dentry *au_dget_parent_lock(struct dentry *dentry,
+					  unsigned int lsc)
+{
+	struct dentry *parent;
+	struct inode *dir;
+
+	parent = dget_parent(dentry);
+	dir = d_inode(parent);
+	inode_lock_nested(dir, lsc);
+#if 0 /* it should not happen */
+	spin_lock(&dentry->d_lock);
+	if (unlikely(dentry->d_parent != parent)) {
+		spin_unlock(&dentry->d_lock);
+		inode_unlock(dir);
+		dput(parent);
+		parent = NULL;
+		goto out;
+	}
+	spin_unlock(&dentry->d_lock);
+
+out:
+#endif
+	return parent;
+}
+
+static void au_xino_lock_dir(struct super_block *sb, struct path *xipath,
+			     struct au_xino_lock_dir *ldir)
+{
+	aufs_bindex_t bindex;
+
+	ldir->hdir = NULL;
+	bindex = au_xi_root(sb, xipath->dentry);
+	if (bindex >= 0) {
+		/* rw branch root */
+		ldir->hdir = au_hi(d_inode(sb->s_root), bindex);
+		au_hn_inode_lock_nested(ldir->hdir, AuLsc_I_PARENT);
+	} else {
+		/* other */
+		ldir->parent = au_dget_parent_lock(xipath->dentry,
+						   AuLsc_I_PARENT);
+		ldir->dir = d_inode(ldir->parent);
+	}
+}
+
+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
+{
+	if (ldir->hdir)
+		au_hn_inode_unlock(ldir->hdir);
+	else {
+		inode_unlock(ldir->dir);
+		dput(ldir->parent);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create and set a new xino file
+ */
+struct file *au_xino_create(struct super_block *sb, char *fpath, int silent,
+			    int wbrtop)
+{
+	struct file *file;
+	struct dentry *h_parent, *d;
+	struct inode *h_dir, *inode;
+	int err;
+	static DEFINE_MUTEX(mtx);
+
+	/*
+	 * at mount time, when the xino file is at the default path, hnotify
+	 * is disabled, so we have no notify events to ignore.
+	 * when a user specifies the xino path, we cannot get the au_hdir to
+	 * be ignored.
+	 */
+	if (!wbrtop)
+		mutex_lock(&mtx);
+	file = vfsub_filp_open(fpath, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+			       /* | __FMODE_NONOTIFY */,
+			       0666);
+	if (IS_ERR(file)) {
+		if (!wbrtop)
+			mutex_unlock(&mtx);
+		if (!silent)
+			pr_err("open %s(%ld)\n", fpath, PTR_ERR(file));
+		return file;
+	}
+
+	/* keep file count */
+	err = 0;
+	d = file->f_path.dentry;
+	h_parent = au_dget_parent_lock(d, AuLsc_I_PARENT);
+	if (!wbrtop)
+		mutex_unlock(&mtx);
+	/* mnt_want_write() is unnecessary here */
+	h_dir = d_inode(h_parent);
+	inode = file_inode(file);
+	/* no delegation since it is just created */
+	if (inode->i_nlink)
+		err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL,
+				   /*force*/0);
+	inode_unlock(h_dir);
+	dput(h_parent);
+	if (unlikely(err)) {
+		if (!silent)
+			pr_err("unlink %s(%d)\n", fpath, err);
+		goto out;
+	}
+
+	err = -EINVAL;
+	if (unlikely(sb == d->d_sb)) {
+		if (!silent)
+			pr_err("%s must be outside\n", fpath);
+		goto out;
+	}
+	if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
+		if (!silent)
+			pr_err("xino doesn't support %s(%s)\n",
+			       fpath, au_sbtype(d->d_sb));
+		goto out;
+	}
+	return file; /* success */
+
+out:
+	fput(file);
+	file = ERR_PTR(err);
+	return file;
+}
+
+/*
+ * create a new xinofile at the same place/path as @base.
+ */
+struct file *au_xino_create2(struct super_block *sb, struct path *base,
+			     struct file *copy_src)
+{
+	struct file *file;
+	struct dentry *dentry, *parent;
+	struct inode *dir, *delegated;
+	struct qstr *name;
+	struct path path;
+	int err, do_unlock;
+	struct au_xino_lock_dir ldir;
+
+	do_unlock = 1;
+	au_xino_lock_dir(sb, base, &ldir);
+	dentry = base->dentry;
+	parent = dentry->d_parent; /* dir inode is locked */
+	dir = d_inode(parent);
+	IMustLock(dir);
+
+	name = &dentry->d_name;
+	path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
+	if (IS_ERR(path.dentry)) {
+		file = (void *)path.dentry;
+		pr_err("%pd lookup err %ld\n", dentry, PTR_ERR(path.dentry));
+		goto out;
+	}
+
+	/* no need to mnt_want_write() since we call dentry_open() later */
+	err = vfs_create(dir, path.dentry, 0666, NULL);
+	if (unlikely(err)) {
+		file = ERR_PTR(err);
+		pr_err("%pd create err %d\n", dentry, err);
+		goto out_dput;
+	}
+
+	path.mnt = base->mnt;
+	file = vfsub_dentry_open(&path,
+				 O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+				 /* | __FMODE_NONOTIFY */);
+	if (IS_ERR(file)) {
+		pr_err("%pd open err %ld\n", dentry, PTR_ERR(file));
+		goto out_dput;
+	}
+
+	delegated = NULL;
+	err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0);
+	au_xino_unlock_dir(&ldir);
+	do_unlock = 0;
+	if (unlikely(err == -EWOULDBLOCK)) {
+		pr_warn("cannot retry for NFSv4 delegation"
+			" for an internal unlink\n");
+		iput(delegated);
+	}
+	if (unlikely(err)) {
+		pr_err("%pd unlink err %d\n", dentry, err);
+		goto out_fput;
+	}
+
+	if (copy_src) {
+		/* no one can touch copy_src xino */
+		err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src));
+		if (unlikely(err)) {
+			pr_err("%pd copy err %d\n", dentry, err);
+			goto out_fput;
+		}
+	}
+	goto out_dput; /* success */
+
+out_fput:
+	fput(file);
+	file = ERR_PTR(err);
+out_dput:
+	dput(path.dentry);
+out:
+	if (do_unlock)
+		au_xino_unlock_dir(&ldir);
+	return file;
+}
+
+struct file *au_xino_file1(struct au_xino *xi)
+{
+	struct file *file;
+	unsigned int u, nfile;
+
+	file = NULL;
+	nfile = xi->xi_nfile;
+	for (u = 0; u < nfile; u++) {
+		file = xi->xi_file[u];
+		if (file)
+			break;
+	}
+
+	return file;
+}
+
+static int au_xino_file_set(struct au_xino *xi, int idx, struct file *file)
+{
+	int err;
+	struct file *f;
+	void *p;
+
+	if (file)
+		get_file(file);
+
+	err = 0;
+	f = NULL;
+	if (idx < xi->xi_nfile) {
+		f = xi->xi_file[idx];
+		if (f)
+			fput(f);
+	} else {
+		p = au_kzrealloc(xi->xi_file,
+				 sizeof(*xi->xi_file) * xi->xi_nfile,
+				 sizeof(*xi->xi_file) * (idx + 1),
+				 GFP_NOFS, /*may_shrink*/0);
+		if (p) {
+			MtxMustLock(&xi->xi_mtx);
+			xi->xi_file = p;
+			xi->xi_nfile = idx + 1;
+		} else {
+			err = -ENOMEM;
+			if (file)
+				fput(file);
+			goto out;
+		}
+	}
+	xi->xi_file[idx] = file;
+
+out:
+	return err;
+}
+
+/*
+ * if @xinew->xi is not set, then create a new xigen file.
+ */
+struct file *au_xi_new(struct super_block *sb, struct au_xi_new *xinew)
+{
+	struct file *file;
+	int err;
+
+	SiMustAnyLock(sb);
+
+	file = au_xino_create2(sb, xinew->base, xinew->copy_src);
+	if (IS_ERR(file)) {
+		err = PTR_ERR(file);
+		pr_err("%s[%d], err %d\n",
+		       xinew->xi ? "xino" : "xigen",
+		       xinew->idx, err);
+		goto out;
+	}
+
+	if (xinew->xi)
+		err = au_xino_file_set(xinew->xi, xinew->idx, file);
+	else {
+		BUG();
+		/* todo: make xigen file an array */
+		/* err = au_xigen_file_set(sb, xinew->idx, file); */
+	}
+	fput(file);
+	if (unlikely(err))
+		file = ERR_PTR(err);
+
+out:
+	return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * truncate xino files
+ */
+static int au_xino_do_trunc(struct super_block *sb, aufs_bindex_t bindex,
+			    int idx, struct kstatfs *st)
+{
+	int err;
+	blkcnt_t blocks;
+	struct file *file, *new_xino;
+	struct au_xi_new xinew = {
+		.idx = idx
+	};
+
+	err = 0;
+	xinew.xi = au_sbr(sb, bindex)->br_xino;
+	file = au_xino_file(xinew.xi, idx);
+	if (!file)
+		goto out;
+
+	xinew.base = &file->f_path;
+	err = vfs_statfs(xinew.base, st);
+	if (unlikely(err)) {
+		AuErr1("statfs err %d, ignored\n", err);
+		err = 0;
+		goto out;
+	}
+
+	blocks = file_inode(file)->i_blocks;
+	pr_info("begin truncating xino(b%d-%d), ib%llu, %llu/%llu free blks\n",
+		bindex, idx, (u64)blocks, st->f_bfree, st->f_blocks);
+
+	xinew.copy_src = file;
+	new_xino = au_xi_new(sb, &xinew);
+	if (IS_ERR(new_xino)) {
+		err = PTR_ERR(new_xino);
+		pr_err("xino(b%d-%d), err %d, ignored\n", bindex, idx, err);
+		goto out;
+	}
+
+	err = vfs_statfs(&new_xino->f_path, st);
+	if (!err)
+		pr_info("end truncating xino(b%d-%d), ib%llu, %llu/%llu free blks\n",
+			bindex, idx, (u64)file_inode(new_xino)->i_blocks,
+			st->f_bfree, st->f_blocks);
+	else {
+		AuErr1("statfs err %d, ignored\n", err);
+		err = 0;
+	}
+
+out:
+	return err;
+}
+
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex, int idx_begin)
+{
+	int err, i;
+	unsigned long jiffy;
+	aufs_bindex_t bbot;
+	struct kstatfs *st;
+	struct au_branch *br;
+	struct au_xino *xi;
+
+	err = -ENOMEM;
+	st = kmalloc(sizeof(*st), GFP_NOFS);
+	if (unlikely(!st))
+		goto out;
+
+	err = -EINVAL;
+	bbot = au_sbbot(sb);
+	if (unlikely(bindex < 0 || bbot < bindex))
+		goto out_st;
+
+	err = 0;
+	jiffy = jiffies;
+	br = au_sbr(sb, bindex);
+	xi = br->br_xino;
+	for (i = idx_begin; !err && i < xi->xi_nfile; i++)
+		err = au_xino_do_trunc(sb, bindex, i, st);
+	if (!err)
+		au_sbi(sb)->si_xino_jiffy = jiffy;
+
+out_st:
+	au_kfree_rcu(st);
+out:
+	return err;
+}
+
+struct xino_do_trunc_args {
+	struct super_block *sb;
+	struct au_branch *br;
+	int idx;
+};
+
+static void xino_do_trunc(void *_args)
+{
+	struct xino_do_trunc_args *args = _args;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct inode *dir;
+	int err, idx;
+	aufs_bindex_t bindex;
+
+	err = 0;
+	sb = args->sb;
+	dir = d_inode(sb->s_root);
+	br = args->br;
+	idx = args->idx;
+
+	si_noflush_write_lock(sb);
+	ii_read_lock_parent(dir);
+	bindex = au_br_index(sb, br->br_id);
+	err = au_xino_trunc(sb, bindex, idx);
+	ii_read_unlock(dir);
+	if (unlikely(err))
+		pr_warn("err b%d, (%d)\n", bindex, err);
+	atomic_dec(&br->br_xino->xi_truncating);
+	au_lcnt_dec(&br->br_count);
+	si_write_unlock(sb);
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	au_kfree_rcu(args);
+}
+
+/*
+ * returns the index in the xi_file array whose corresponding file needs
+ * truncating, or -1 which means no truncation is needed.
+ */
+static int xino_trunc_test(struct super_block *sb, struct au_branch *br)
+{
+	int err;
+	unsigned int u;
+	struct kstatfs st;
+	struct au_sbinfo *sbinfo;
+	struct au_xino *xi;
+	struct file *file;
+
+	/* todo: si_xino_expire and the ratio should be customizable */
+	sbinfo = au_sbi(sb);
+	if (time_before(jiffies,
+			sbinfo->si_xino_jiffy + sbinfo->si_xino_expire))
+		return -1;
+
+	/* truncation border */
+	xi = br->br_xino;
+	for (u = 0; u < xi->xi_nfile; u++) {
+		file = au_xino_file(xi, u);
+		if (!file)
+			continue;
+
+		err = vfs_statfs(&file->f_path, &st);
+		if (unlikely(err)) {
+			AuErr1("statfs err %d, ignored\n", err);
+			return -1;
+		}
+		if (div64_u64(st.f_bfree * 100, st.f_blocks)
+		    >= AUFS_XINO_DEF_TRUNC)
+			return u;
+	}
+
+	return -1;
+}
+
+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
+{
+	int idx;
+	struct xino_do_trunc_args *args;
+	int wkq_err;
+
+	idx = xino_trunc_test(sb, br);
+	if (idx < 0)
+		return;
+
+	if (atomic_inc_return(&br->br_xino->xi_truncating) > 1)
+		goto out;
+
+	/* the si lock and au_kfree_rcu() will be done in xino_do_trunc() */
+	args = kmalloc(sizeof(*args), GFP_NOFS);
+	if (unlikely(!args)) {
+		AuErr1("no memory\n");
+		goto out;
+	}
+
+	au_lcnt_inc(&br->br_count);
+	args->sb = sb;
+	args->br = br;
+	args->idx = idx;
+	wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
+	if (!wkq_err)
+		return; /* success */
+
+	pr_err("wkq %d\n", wkq_err);
+	au_lcnt_dec(&br->br_count);
+	au_kfree_rcu(args);
+
+out:
+	atomic_dec(&br->br_xino->xi_truncating);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_xi_calc {
+	int idx;
+	loff_t pos;
+};
+
+static void au_xi_calc(struct super_block *sb, ino_t h_ino,
+		       struct au_xi_calc *calc)
+{
+	loff_t maxent;
+
+	maxent = au_xi_maxent(sb);
+	calc->idx = div64_u64_rem(h_ino, maxent, &calc->pos);
+	calc->pos *= sizeof(ino_t);
+}
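+
+/*
+ * a worked example with hypothetical numbers: if the xino filesystem allows
+ * 1024 entries per file (au_xi_maxent()) and sizeof(ino_t) == 8, then
+ * h_ino 3000 is served by xi_file[2] (3000 / 1024) at byte offset
+ * (3000 % 1024) * 8 == 7616.
+ */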
+
+static int au_xino_do_new_async(struct super_block *sb, struct au_branch *br,
+				struct au_xi_calc *calc)
+{
+	int err;
+	struct file *file;
+	struct au_xino *xi = br->br_xino;
+	struct au_xi_new xinew = {
+		.xi = xi
+	};
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	if (!xi)
+		goto out;
+
+	mutex_lock(&xi->xi_mtx);
+	file = au_xino_file(xi, calc->idx);
+	if (file)
+		goto out_mtx;
+
+	file = au_xino_file(xi, /*idx*/-1);
+	AuDebugOn(!file);
+	xinew.idx = calc->idx;
+	xinew.base = &file->f_path;
+	/* xinew.copy_src = NULL; */
+	file = au_xi_new(sb, &xinew);
+	if (IS_ERR(file))
+		err = PTR_ERR(file);
+
+out_mtx:
+	mutex_unlock(&xi->xi_mtx);
+out:
+	return err;
+}
+
+struct au_xino_do_new_async_args {
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_xi_calc calc;
+	ino_t ino;
+};
+
+struct au_xi_writing {
+	struct hlist_bl_node node;
+	ino_t h_ino, ino;
+};
+
+static int au_xino_do_write(vfs_writef_t write, struct file *file,
+			    struct au_xi_calc *calc, ino_t ino);
+
+static void au_xino_call_do_new_async(void *args)
+{
+	struct au_xino_do_new_async_args *a = args;
+	struct au_branch *br;
+	struct super_block *sb;
+	struct au_sbinfo *sbi;
+	struct inode *root;
+	struct file *file;
+	struct au_xi_writing *del, *p;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	int err;
+
+	br = a->br;
+	sb = a->sb;
+	sbi = au_sbi(sb);
+	si_noflush_read_lock(sb);
+	root = d_inode(sb->s_root);
+	ii_read_lock_child(root);
+	err = au_xino_do_new_async(sb, br, &a->calc);
+	if (unlikely(err)) {
+		AuIOErr("err %d\n", err);
+		goto out;
+	}
+
+	file = au_xino_file(br->br_xino, a->calc.idx);
+	AuDebugOn(!file);
+	err = au_xino_do_write(sbi->si_xwrite, file, &a->calc, a->ino);
+	if (unlikely(err)) {
+		AuIOErr("err %d\n", err);
+		goto out;
+	}
+
+	del = NULL;
+	hbl = &br->br_xino->xi_writing;
+	hlist_bl_lock(hbl);
+	au_hbl_for_each(pos, hbl) {
+		p = container_of(pos, struct au_xi_writing, node);
+		if (p->ino == a->ino) {
+			del = p;
+			hlist_bl_del(&p->node);
+			break;
+		}
+	}
+	hlist_bl_unlock(hbl);
+	au_kfree_rcu(del);
+
+out:
+	au_lcnt_dec(&br->br_count);
+	ii_read_unlock(root);
+	si_read_unlock(sb);
+	au_nwt_done(&sbi->si_nowait);
+	au_kfree_rcu(a);
+}
+
+/*
+ * create a new xino file asynchronously
+ */
+static int au_xino_new_async(struct super_block *sb, struct au_branch *br,
+			     struct au_xi_calc *calc, ino_t ino)
+{
+	int err;
+	struct au_xino_do_new_async_args *arg;
+
+	err = -ENOMEM;
+	arg = kmalloc(sizeof(*arg), GFP_NOFS);
+	if (unlikely(!arg))
+		goto out;
+
+	arg->sb = sb;
+	arg->br = br;
+	arg->calc = *calc;
+	arg->ino = ino;
+	au_lcnt_inc(&br->br_count);
+	err = au_wkq_nowait(au_xino_call_do_new_async, arg, sb, AuWkq_NEST);
+	if (unlikely(err)) {
+		pr_err("wkq %d\n", err);
+		au_lcnt_dec(&br->br_count);
+		au_kfree_rcu(arg);
+	}
+
+out:
+	return err;
+}
+
+/*
+ * read @ino from xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ */
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		 ino_t *ino)
+{
+	int err;
+	ssize_t sz;
+	struct au_xi_calc calc;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+	struct au_xino *xi;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos;
+	struct au_xi_writing *p;
+
+	*ino = 0;
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		return 0; /* no xino */
+
+	err = 0;
+	au_xi_calc(sb, h_ino, &calc);
+	xi = au_sbr(sb, bindex)->br_xino;
+	file = au_xino_file(xi, calc.idx);
+	if (!file) {
+		hbl = &xi->xi_writing;
+		hlist_bl_lock(hbl);
+		au_hbl_for_each(pos, hbl) {
+			p = container_of(pos, struct au_xi_writing, node);
+			if (p->h_ino == h_ino) {
+				AuDbg("hi%llu, i%llu, found\n",
+				      (u64)p->h_ino, (u64)p->ino);
+				*ino = p->ino;
+				break;
+			}
+		}
+		hlist_bl_unlock(hbl);
+		return 0;
+	} else if (vfsub_f_size_read(file) < calc.pos + sizeof(*ino))
+		return 0; /* no xino */
+
+	sbinfo = au_sbi(sb);
+	sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &calc.pos);
+	if (sz == sizeof(*ino))
+		return 0; /* success */
+
+	err = sz;
+	if (unlikely(sz >= 0)) {
+		err = -EIO;
+		AuIOErr("xino read error (%zd)\n", sz);
+	}
+	return err;
+}
+
+static int au_xino_do_write(vfs_writef_t write, struct file *file,
+			    struct au_xi_calc *calc, ino_t ino)
+{
+	ssize_t sz;
+
+	sz = xino_fwrite(write, file, &ino, sizeof(ino), &calc->pos);
+	if (sz == sizeof(ino))
+		return 0; /* success */
+
+	AuIOErr("write failed (%zd)\n", sz);
+	return -EIO;
+}
+
+/*
+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * even if @ino is zero, it is written to the xinofile and means no entry.
+ * if the size of the xino file on a specific filesystem exceeds the watermark,
+ * try truncating it.
+ */
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		  ino_t ino)
+{
+	int err;
+	unsigned int mnt_flags;
+	struct au_xi_calc calc;
+	struct file *file;
+	struct au_branch *br;
+	struct au_xino *xi;
+	struct au_xi_writing *p;
+
+	SiMustAnyLock(sb);
+
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, XINO))
+		return 0;
+
+	au_xi_calc(sb, h_ino, &calc);
+	br = au_sbr(sb, bindex);
+	xi = br->br_xino;
+	file = au_xino_file(xi, calc.idx);
+	if (!file) {
+		/* store the inum pair into the list */
+		p = kmalloc(sizeof(*p), GFP_NOFS | __GFP_NOFAIL);
+		p->h_ino = h_ino;
+		p->ino = ino;
+		au_hbl_add(&p->node, &xi->xi_writing);
+
+		/* create and write a new xino file asynchronously */
+		err = au_xino_new_async(sb, br, &calc, ino);
+		if (!err)
+			return 0; /* success */
+		goto out;
+	}
+
+	err = au_xino_do_write(au_sbi(sb)->si_xwrite, file, &calc, ino);
+	if (!err) {
+		br = au_sbr(sb, bindex);
+		if (au_opt_test(mnt_flags, TRUNC_XINO)
+		    && au_test_fs_trunc_xino(au_br_sb(br)))
+			xino_try_trunc(sb, br);
+		return 0; /* success */
+	}
+
+out:
+	AuIOErr("write failed (%d)\n", err);
+	return -EIO;
+}
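+
+#if 0 /* illustrative sketch, not part of aufs */
+/*
+ * as noted above, writing zero drops the translation for @h_ino; a caller
+ * forgetting a host inode would do effectively this (the function name is
+ * hypothetical; au_xino_delete_inode() below does the same thing via
+ * au_xino_do_write()).
+ */
+static void ex_forget_hino(struct super_block *sb, aufs_bindex_t bindex,
+			   ino_t h_ino)
+{
+	int err;
+
+	err = au_xino_write(sb, bindex, h_ino, /*ino*/0);
+	if (unlikely(err))
+		AuIOErr("forgetting hi%lu failed (%d)\n",
+			(unsigned long)h_ino, err);
+}
+#endif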
+
+static ssize_t xino_fread_wkq(vfs_readf_t func, struct file *file, void *buf,
+			      size_t size, loff_t *pos);
+
+/* todo: unnecessary to support mmap_sem since kernel-space? */
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *kbuf, size_t size,
+		   loff_t *pos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		char __user *u;
+	} buf;
+	int i;
+	const int prevent_endless = 10;
+
+	i = 0;
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	do {
+		err = func(file, buf.u, size, pos);
+		if (err == -EINTR
+		    && !au_wkq_test()
+		    && fatal_signal_pending(current)) {
+			set_fs(oldfs);
+			err = xino_fread_wkq(func, file, kbuf, size, pos);
+			BUG_ON(err == -EINTR);
+			oldfs = get_fs();
+			set_fs(KERNEL_DS);
+		}
+	} while (i++ < prevent_endless
+		 && (err == -EAGAIN || err == -EINTR));
+	set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+	if (err > 0)
+		fsnotify_access(file->f_path.dentry);
+#endif
+
+	return err;
+}
+
+struct xino_fread_args {
+	ssize_t *errp;
+	vfs_readf_t func;
+	struct file *file;
+	void *buf;
+	size_t size;
+	loff_t *pos;
+};
+
+static void call_xino_fread(void *args)
+{
+	struct xino_fread_args *a = args;
+	*a->errp = xino_fread(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+static ssize_t xino_fread_wkq(vfs_readf_t func, struct file *file, void *buf,
+			      size_t size, loff_t *pos)
+{
+	ssize_t err;
+	int wkq_err;
+	struct xino_fread_args args = {
+		.errp	= &err,
+		.func	= func,
+		.file	= file,
+		.buf	= buf,
+		.size	= size,
+		.pos	= pos
+	};
+
+	wkq_err = au_wkq_wait(call_xino_fread, &args);
+	if (unlikely(wkq_err))
+		err = wkq_err;
+
+	return err;
+}
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+			       size_t size, loff_t *pos);
+
+static ssize_t do_xino_fwrite(vfs_writef_t func, struct file *file, void *kbuf,
+			      size_t size, loff_t *pos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		const char __user *u;
+	} buf;
+	int i;
+	const int prevent_endless = 10;
+
+	i = 0;
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	do {
+		err = func(file, buf.u, size, pos);
+		if (err == -EINTR
+		    && !au_wkq_test()
+		    && fatal_signal_pending(current)) {
+			set_fs(oldfs);
+			err = xino_fwrite_wkq(func, file, kbuf, size, pos);
+			BUG_ON(err == -EINTR);
+			oldfs = get_fs();
+			set_fs(KERNEL_DS);
+		}
+	} while (i++ < prevent_endless
+		 && (err == -EAGAIN || err == -EINTR));
+	set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+	if (err > 0)
+		fsnotify_modify(file->f_path.dentry);
+#endif
+
+	return err;
+}
+
+struct do_xino_fwrite_args {
+	ssize_t *errp;
+	vfs_writef_t func;
+	struct file *file;
+	void *buf;
+	size_t size;
+	loff_t *pos;
+};
+
+static void call_do_xino_fwrite(void *args)
+{
+	struct do_xino_fwrite_args *a = args;
+	*a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+			       size_t size, loff_t *pos)
+{
+	ssize_t err;
+	int wkq_err;
+	struct do_xino_fwrite_args args = {
+		.errp	= &err,
+		.func	= func,
+		.file	= file,
+		.buf	= buf,
+		.size	= size,
+		.pos	= pos
+	};
+
+	/*
+	 * this bypasses RLIMIT_FSIZE and the normal user's limit,
+	 * so users should take care of quota and a real 'filesystem full.'
+	 */
+	wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
+	if (unlikely(wkq_err))
+		err = wkq_err;
+
+	return err;
+}
+
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+		    size_t size, loff_t *pos)
+{
+	ssize_t err;
+
+	if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
+		lockdep_off();
+		err = do_xino_fwrite(func, file, buf, size, pos);
+		lockdep_on();
+	} else {
+		lockdep_off();
+		err = xino_fwrite_wkq(func, file, buf, size, pos);
+		lockdep_on();
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * inode number bitmap
+ */
+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
+static ino_t xib_calc_ino(unsigned long pindex, int bit)
+{
+	ino_t ino;
+
+	AuDebugOn(bit < 0 || page_bits <= bit);
+	ino = AUFS_FIRST_INO + pindex * page_bits + bit;
+	return ino;
+}
+
+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
+{
+	AuDebugOn(ino < AUFS_FIRST_INO);
+	ino -= AUFS_FIRST_INO;
+	*pindex = ino / page_bits;
+	*bit = ino % page_bits;
+}
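+
+/*
+ * a worked example assuming 4KiB pages: page_bits == 32768, so the aufs
+ * inode number AUFS_FIRST_INO + 40000 lives in bitmap page 1 (40000 / 32768)
+ * at bit 7232 (40000 % 32768).
+ */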
+
+static int xib_pindex(struct super_block *sb, unsigned long pindex)
+{
+	int err;
+	loff_t pos;
+	ssize_t sz;
+	struct au_sbinfo *sbinfo;
+	struct file *xib;
+	unsigned long *p;
+
+	sbinfo = au_sbi(sb);
+	MtxMustLock(&sbinfo->si_xib_mtx);
+	AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
+		  || !au_opt_test(sbinfo->si_mntflags, XINO));
+
+	if (pindex == sbinfo->si_xib_last_pindex)
+		return 0;
+
+	xib = sbinfo->si_xib;
+	p = sbinfo->si_xib_buf;
+	pos = sbinfo->si_xib_last_pindex;
+	pos *= PAGE_SIZE;
+	sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+	if (unlikely(sz != PAGE_SIZE))
+		goto out;
+
+	pos = pindex;
+	pos *= PAGE_SIZE;
+	if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE)
+		sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
+	else {
+		memset(p, 0, PAGE_SIZE);
+		sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+	}
+	if (sz == PAGE_SIZE) {
+		sbinfo->si_xib_last_pindex = pindex;
+		return 0; /* success */
+	}
+
+out:
+	AuIOErr1("write failed (%zd)\n", sz);
+	err = sz;
+	if (sz >= 0)
+		err = -EIO;
+	return err;
+}
+
+static void au_xib_clear_bit(struct inode *inode)
+{
+	int err, bit;
+	unsigned long pindex;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(inode->i_nlink);
+
+	sb = inode->i_sb;
+	xib_calc_bit(inode->i_ino, &pindex, &bit);
+	AuDebugOn(page_bits <= bit);
+	sbinfo = au_sbi(sb);
+	mutex_lock(&sbinfo->si_xib_mtx);
+	err = xib_pindex(sb, pindex);
+	if (!err) {
+		clear_bit(bit, sbinfo->si_xib_buf);
+		sbinfo->si_xib_next_bit = bit;
+	}
+	mutex_unlock(&sbinfo->si_xib_mtx);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * truncate a xino bitmap file
+ */
+
+/* todo: slow */
+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
+{
+	int err, bit;
+	ssize_t sz;
+	unsigned long pindex;
+	loff_t pos, pend;
+	struct au_sbinfo *sbinfo;
+	vfs_readf_t func;
+	ino_t *ino;
+	unsigned long *p;
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	MtxMustLock(&sbinfo->si_xib_mtx);
+	p = sbinfo->si_xib_buf;
+	func = sbinfo->si_xread;
+	pend = vfsub_f_size_read(file);
+	pos = 0;
+	while (pos < pend) {
+		sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
+		err = sz;
+		if (unlikely(sz <= 0))
+			goto out;
+
+		err = 0;
+		for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
+			if (unlikely(*ino < AUFS_FIRST_INO))
+				continue;
+
+			xib_calc_bit(*ino, &pindex, &bit);
+			AuDebugOn(page_bits <= bit);
+			err = xib_pindex(sb, pindex);
+			if (!err)
+				set_bit(bit, p);
+			else
+				goto out;
+		}
+	}
+
+out:
+	return err;
+}
+
+static int xib_restore(struct super_block *sb)
+{
+	int err, i;
+	unsigned int nfile;
+	aufs_bindex_t bindex, bbot;
+	void *page;
+	struct au_branch *br;
+	struct au_xino *xi;
+	struct file *file;
+
+	err = -ENOMEM;
+	page = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!page))
+		goto out;
+
+	err = 0;
+	bbot = au_sbbot(sb);
+	for (bindex = 0; !err && bindex <= bbot; bindex++)
+		if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0) {
+			br = au_sbr(sb, bindex);
+			xi = br->br_xino;
+			nfile = xi->xi_nfile;
+			for (i = 0; i < nfile; i++) {
+				file = au_xino_file(xi, i);
+				if (file)
+					err = do_xib_restore(sb, file, page);
+			}
+		} else
+			AuDbg("skip shared b%d\n", bindex);
+	free_page((unsigned long)page);
+
+out:
+	return err;
+}
+
+int au_xib_trunc(struct super_block *sb)
+{
+	int err;
+	ssize_t sz;
+	loff_t pos;
+	struct au_sbinfo *sbinfo;
+	unsigned long *p;
+	struct file *file;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	if (!au_opt_test(sbinfo->si_mntflags, XINO))
+		goto out;
+
+	file = sbinfo->si_xib;
+	if (vfsub_f_size_read(file) <= PAGE_SIZE)
+		goto out;
+
+	file = au_xino_create2(sb, &sbinfo->si_xib->f_path, NULL);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	fput(sbinfo->si_xib);
+	sbinfo->si_xib = file;
+
+	p = sbinfo->si_xib_buf;
+	memset(p, 0, PAGE_SIZE);
+	pos = 0;
+	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
+	if (unlikely(sz != PAGE_SIZE)) {
+		err = sz;
+		AuIOErr("err %d\n", err);
+		if (sz >= 0)
+			err = -EIO;
+		goto out;
+	}
+
+	mutex_lock(&sbinfo->si_xib_mtx);
+	/* mnt_want_write() is unnecessary here */
+	err = xib_restore(sb);
+	mutex_unlock(&sbinfo->si_xib_mtx);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_xino *au_xino_alloc(unsigned int nfile)
+{
+	struct au_xino *xi;
+
+	xi = kzalloc(sizeof(*xi), GFP_NOFS);
+	if (unlikely(!xi))
+		goto out;
+	xi->xi_nfile = nfile;
+	xi->xi_file = kcalloc(nfile, sizeof(*xi->xi_file), GFP_NOFS);
+	if (unlikely(!xi->xi_file))
+		goto out_free;
+
+	xi->xi_nondir.total = 8; /* initial size */
+	xi->xi_nondir.array = kcalloc(xi->xi_nondir.total, sizeof(ino_t),
+				      GFP_NOFS);
+	if (unlikely(!xi->xi_nondir.array))
+		goto out_file;
+
+	spin_lock_init(&xi->xi_nondir.spin);
+	init_waitqueue_head(&xi->xi_nondir.wqh);
+	mutex_init(&xi->xi_mtx);
+	INIT_HLIST_BL_HEAD(&xi->xi_writing);
+	atomic_set(&xi->xi_truncating, 0);
+	kref_init(&xi->xi_kref);
+	goto out; /* success */
+
+out_file:
+	au_kfree_try_rcu(xi->xi_file);
+out_free:
+	au_kfree_rcu(xi);
+	xi = NULL;
+out:
+	return xi;
+}
+
+static int au_xino_init(struct au_branch *br, int idx, struct file *file)
+{
+	int err;
+	struct au_xino *xi;
+
+	err = 0;
+	xi = au_xino_alloc(idx + 1);
+	if (unlikely(!xi)) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (file)
+		get_file(file);
+	xi->xi_file[idx] = file;
+	AuDebugOn(br->br_xino);
+	br->br_xino = xi;
+
+out:
+	return err;
+}
+
+static void au_xino_release(struct kref *kref)
+{
+	struct au_xino *xi;
+	int i;
+	unsigned long ul;
+	struct hlist_bl_head *hbl;
+	struct hlist_bl_node *pos, *n;
+	struct au_xi_writing *p;
+
+	xi = container_of(kref, struct au_xino, xi_kref);
+	for (i = 0; i < xi->xi_nfile; i++)
+		if (xi->xi_file[i])
+			fput(xi->xi_file[i]);
+	for (i = xi->xi_nondir.total - 1; i >= 0; i--)
+		AuDebugOn(xi->xi_nondir.array[i]);
+	mutex_destroy(&xi->xi_mtx);
+	hbl = &xi->xi_writing;
+	ul = au_hbl_count(hbl);
+	if (unlikely(ul)) {
+		pr_warn("xi_writing %lu\n", ul);
+		hlist_bl_lock(hbl);
+		hlist_bl_for_each_entry_safe(p, pos, n, hbl, node) {
+			hlist_bl_del(&p->node);
+			/* kmemleak reported au_kfree_rcu() doesn't free it */
+			kfree(p);
+		}
+		hlist_bl_unlock(hbl);
+	}
+	au_kfree_try_rcu(xi->xi_file);
+	au_kfree_try_rcu(xi->xi_nondir.array);
+	au_kfree_rcu(xi);
+}
+
+int au_xino_put(struct au_branch *br)
+{
+	int ret;
+	struct au_xino *xi;
+
+	ret = 0;
+	xi = br->br_xino;
+	if (xi) {
+		br->br_xino = NULL;
+		ret = kref_put(&xi->xi_kref, au_xino_release);
+	}
+
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * xino mount option handlers
+ */
+
+/* xino bitmap */
+static void xino_clear_xib(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	/* unnecessary to clear sbinfo->si_xread and ->si_xwrite */
+	if (sbinfo->si_xib)
+		fput(sbinfo->si_xib);
+	sbinfo->si_xib = NULL;
+	if (sbinfo->si_xib_buf)
+		free_page((unsigned long)sbinfo->si_xib_buf);
+	sbinfo->si_xib_buf = NULL;
+}
+
+static int au_xino_set_xib(struct super_block *sb, struct path *path)
+{
+	int err;
+	loff_t pos;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+	struct super_block *xi_sb;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	file = au_xino_create2(sb, path, sbinfo->si_xib);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	if (sbinfo->si_xib)
+		fput(sbinfo->si_xib);
+	sbinfo->si_xib = file;
+	sbinfo->si_xread = vfs_readf(file);
+	sbinfo->si_xwrite = vfs_writef(file);
+	xi_sb = file_inode(file)->i_sb;
+	sbinfo->si_ximaxent = xi_sb->s_maxbytes;
+	if (unlikely(sbinfo->si_ximaxent < PAGE_SIZE)) {
+		err = -EIO;
+		pr_err("s_maxbytes(%llu) on %s is too small\n",
+		       (u64)sbinfo->si_ximaxent, au_sbtype(xi_sb));
+		goto out_unset;
+	}
+	sbinfo->si_ximaxent /= sizeof(ino_t);
+
+	err = -ENOMEM;
+	if (!sbinfo->si_xib_buf)
+		sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
+	if (unlikely(!sbinfo->si_xib_buf))
+		goto out_unset;
+
+	sbinfo->si_xib_last_pindex = 0;
+	sbinfo->si_xib_next_bit = 0;
+	if (vfsub_f_size_read(file) < PAGE_SIZE) {
+		pos = 0;
+		err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
+				  PAGE_SIZE, &pos);
+		if (unlikely(err != PAGE_SIZE))
+			goto out_free;
+	}
+	err = 0;
+	goto out; /* success */
+
+out_free:
+	if (sbinfo->si_xib_buf)
+		free_page((unsigned long)sbinfo->si_xib_buf);
+	sbinfo->si_xib_buf = NULL;
+	if (err >= 0)
+		err = -EIO;
+out_unset:
+	fput(sbinfo->si_xib);
+	sbinfo->si_xib = NULL;
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* xino for each branch */
+static void xino_clear_br(struct super_block *sb)
+{
+	aufs_bindex_t bindex, bbot;
+	struct au_branch *br;
+
+	bbot = au_sbbot(sb);
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		AuDebugOn(!br);
+		au_xino_put(br);
+	}
+}
+
+static void au_xino_set_br_shared(struct super_block *sb, struct au_branch *br,
+				  aufs_bindex_t bshared)
+{
+	struct au_branch *brshared;
+
+	brshared = au_sbr(sb, bshared);
+	AuDebugOn(!brshared->br_xino);
+	AuDebugOn(!brshared->br_xino->xi_file);
+	if (br->br_xino != brshared->br_xino) {
+		au_xino_get(brshared);
+		au_xino_put(br);
+		br->br_xino = brshared->br_xino;
+	}
+}
+
+struct au_xino_do_set_br {
+	vfs_writef_t writef;
+	struct au_branch *br;
+	ino_t h_ino;
+	aufs_bindex_t bshared;
+};
+
+static int au_xino_do_set_br(struct super_block *sb, struct path *path,
+			     struct au_xino_do_set_br *args)
+{
+	int err;
+	struct au_xi_calc calc;
+	struct file *file;
+	struct au_branch *br;
+	struct au_xi_new xinew = {
+		.base = path
+	};
+
+	br = args->br;
+	xinew.xi = br->br_xino;
+	au_xi_calc(sb, args->h_ino, &calc);
+	xinew.copy_src = au_xino_file(xinew.xi, calc.idx);
+	if (args->bshared >= 0)
+		/* shared xino */
+		au_xino_set_br_shared(sb, br, args->bshared);
+	else if (!xinew.xi) {
+		/* new xino */
+		err = au_xino_init(br, calc.idx, xinew.copy_src);
+		if (unlikely(err))
+			goto out;
+	}
+
+	/* force re-creating */
+	xinew.xi = br->br_xino;
+	xinew.idx = calc.idx;
+	mutex_lock(&xinew.xi->xi_mtx);
+	file = au_xi_new(sb, &xinew);
+	mutex_unlock(&xinew.xi->xi_mtx);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	AuDebugOn(!file);
+
+	err = au_xino_do_write(args->writef, file, &calc, AUFS_ROOT_INO);
+	if (unlikely(err))
+		au_xino_put(br);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_xino_set_br(struct super_block *sb, struct path *path)
+{
+	int err;
+	aufs_bindex_t bindex, bbot;
+	struct au_xino_do_set_br args;
+	struct inode *inode;
+
+	SiMustWriteLock(sb);
+
+	bbot = au_sbbot(sb);
+	inode = d_inode(sb->s_root);
+	args.writef = au_sbi(sb)->si_xwrite;
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		args.h_ino = au_h_iptr(inode, bindex)->i_ino;
+		args.br = au_sbr(sb, bindex);
+		args.bshared = is_sb_shared(sb, bindex, bindex - 1);
+		err = au_xino_do_set_br(sb, path, &args);
+		if (unlikely(err))
+			break;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+void au_xino_clr(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	au_xigen_clr(sb);
+	xino_clear_xib(sb);
+	xino_clear_br(sb);
+	dbgaufs_brs_del(sb, 0);
+	sbinfo = au_sbi(sb);
+	/* lvalue, do not call au_mntflags() */
+	au_opt_clr(sbinfo->si_mntflags, XINO);
+}
+
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xiopt, int remount)
+{
+	int err, skip;
+	struct dentry *dentry, *parent, *cur_dentry, *cur_parent;
+	struct qstr *dname, *cur_name;
+	struct file *cur_xino;
+	struct au_sbinfo *sbinfo;
+	struct path *path, *cur_path;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	path = &xiopt->file->f_path;
+	dentry = path->dentry;
+	parent = dget_parent(dentry);
+	if (remount) {
+		skip = 0;
+		cur_xino = sbinfo->si_xib;
+		if (cur_xino) {
+			cur_path = &cur_xino->f_path;
+			cur_dentry = cur_path->dentry;
+			cur_parent = dget_parent(cur_dentry);
+			cur_name = &cur_dentry->d_name;
+			dname = &dentry->d_name;
+			skip = (cur_parent == parent
+				&& au_qstreq(dname, cur_name));
+			dput(cur_parent);
+		}
+		if (skip)
+			goto out;
+	}
+
+	au_opt_set(sbinfo->si_mntflags, XINO);
+	err = au_xino_set_xib(sb, path);
+	/* si_x{read,write} are set */
+	if (!err)
+		err = au_xigen_set(sb, path);
+	if (!err)
+		err = au_xino_set_br(sb, path);
+	if (!err) {
+		dbgaufs_brs_add(sb, 0, /*topdown*/1);
+		goto out; /* success */
+	}
+
+	/* reset all */
+	AuIOErr("failed setting xino(%d).\n", err);
+	au_xino_clr(sb);
+
+out:
+	dput(parent);
+	return err;
+}
+
+/*
+ * create a xinofile at the default place/path.
+ */
+struct file *au_xino_def(struct super_block *sb)
+{
+	struct file *file;
+	char *page, *p;
+	struct au_branch *br;
+	struct super_block *h_sb;
+	struct path path;
+	aufs_bindex_t bbot, bindex, bwr;
+
+	br = NULL;
+	bbot = au_sbbot(sb);
+	bwr = -1;
+	for (bindex = 0; bindex <= bbot; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_writable(br->br_perm)
+		    && !au_test_fs_bad_xino(au_br_sb(br))) {
+			bwr = bindex;
+			break;
+		}
+	}
+
+	if (bwr >= 0) {
+		file = ERR_PTR(-ENOMEM);
+		page = (void *)__get_free_page(GFP_NOFS);
+		if (unlikely(!page))
+			goto out;
+		path.mnt = au_br_mnt(br);
+		path.dentry = au_h_dptr(sb->s_root, bwr);
+		p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
+		file = (void *)p;
+		if (!IS_ERR(p)) {
+			strcat(p, "/" AUFS_XINO_FNAME);
+			AuDbg("%s\n", p);
+			file = au_xino_create(sb, p, /*silent*/0, /*wbrtop*/1);
+		}
+		free_page((unsigned long)page);
+	} else {
+		file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0,
+				      /*wbrtop*/0);
+		if (IS_ERR(file))
+			goto out;
+		h_sb = file->f_path.dentry->d_sb;
+		if (unlikely(au_test_fs_bad_xino(h_sb))) {
+			pr_err("xino doesn't support %s(%s)\n",
+			       AUFS_XINO_DEFPATH, au_sbtype(h_sb));
+			fput(file);
+			file = ERR_PTR(-EINVAL);
+		}
+	}
+
+out:
+	return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * initialize the xinofile for the specified branch @br
+ * at the place/path where @base indicates.
+ * test whether another branch is on the same filesystem or not;
+ * if one is found, share the xinofile with that branch.
+ */
+int au_xino_init_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
+		    struct path *base)
+{
+	int err;
+	struct au_xino_do_set_br args = {
+		.h_ino	= h_ino,
+		.br	= br
+	};
+
+	args.writef = au_sbi(sb)->si_xwrite;
+	args.bshared = sbr_find_shared(sb, /*btop*/0, au_sbbot(sb),
+				       au_br_sb(br));
+	err = au_xino_do_set_br(sb, base, &args);
+	if (unlikely(err))
+		au_xino_put(br);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * get an unused inode number from the bitmap
+ */
+ino_t au_xino_new_ino(struct super_block *sb)
+{
+	ino_t ino;
+	unsigned long *p, pindex, ul, pend;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+	int free_bit, err;
+
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		return iunique(sb, AUFS_FIRST_INO);
+
+	sbinfo = au_sbi(sb);
+	mutex_lock(&sbinfo->si_xib_mtx);
+	p = sbinfo->si_xib_buf;
+	free_bit = sbinfo->si_xib_next_bit;
+	if (free_bit < page_bits && !test_bit(free_bit, p))
+		goto out; /* success */
+	free_bit = find_first_zero_bit(p, page_bits);
+	if (free_bit < page_bits)
+		goto out; /* success */
+
+	pindex = sbinfo->si_xib_last_pindex;
+	for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
+		err = xib_pindex(sb, ul);
+		if (unlikely(err))
+			goto out_err;
+		free_bit = find_first_zero_bit(p, page_bits);
+		if (free_bit < page_bits)
+			goto out; /* success */
+	}
+
+	file = sbinfo->si_xib;
+	pend = vfsub_f_size_read(file) / PAGE_SIZE;
+	for (ul = pindex + 1; ul <= pend; ul++) {
+		err = xib_pindex(sb, ul);
+		if (unlikely(err))
+			goto out_err;
+		free_bit = find_first_zero_bit(p, page_bits);
+		if (free_bit < page_bits)
+			goto out; /* success */
+	}
+	BUG();
+
+out:
+	set_bit(free_bit, p);
+	sbinfo->si_xib_next_bit = free_bit + 1;
+	pindex = sbinfo->si_xib_last_pindex;
+	mutex_unlock(&sbinfo->si_xib_mtx);
+	ino = xib_calc_ino(pindex, free_bit);
+	AuDbg("i%lu\n", (unsigned long)ino);
+	return ino;
+out_err:
+	mutex_unlock(&sbinfo->si_xib_mtx);
+	AuDbg("i0\n");
+	return 0;
+}
+
+/* for s_op->delete_inode() */
+void au_xino_delete_inode(struct inode *inode, const int unlinked)
+{
+	int err;
+	unsigned int mnt_flags;
+	aufs_bindex_t bindex, bbot, bi;
+	unsigned char try_trunc;
+	struct au_iinfo *iinfo;
+	struct super_block *sb;
+	struct au_hinode *hi;
+	struct inode *h_inode;
+	struct au_branch *br;
+	vfs_writef_t xwrite;
+	struct au_xi_calc calc;
+	struct file *file;
+
+	AuDebugOn(au_is_bad_inode(inode));
+
+	sb = inode->i_sb;
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, XINO)
+	    || inode->i_ino == AUFS_ROOT_INO)
+		return;
+
+	if (unlinked) {
+		au_xigen_inc(inode);
+		au_xib_clear_bit(inode);
+	}
+
+	iinfo = au_ii(inode);
+	bindex = iinfo->ii_btop;
+	if (bindex < 0)
+		return;
+
+	xwrite = au_sbi(sb)->si_xwrite;
+	try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
+	hi = au_hinode(iinfo, bindex);
+	bbot = iinfo->ii_bbot;
+	for (; bindex <= bbot; bindex++, hi++) {
+		h_inode = hi->hi_inode;
+		if (!h_inode
+		    || (!unlinked && h_inode->i_nlink))
+			continue;
+
+		/* inode may not be revalidated */
+		bi = au_br_index(sb, hi->hi_id);
+		if (bi < 0)
+			continue;
+
+		br = au_sbr(sb, bi);
+		au_xi_calc(sb, h_inode->i_ino, &calc);
+		file = au_xino_file(br->br_xino, calc.idx);
+		if (IS_ERR_OR_NULL(file))
+			continue;
+
+		err = au_xino_do_write(xwrite, file, &calc, /*ino*/0);
+		if (!err && try_trunc
+		    && au_test_fs_trunc_xino(au_br_sb(br)))
+			xino_try_trunc(sb, br);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xinondir_find(struct au_xino *xi, ino_t h_ino)
+{
+	int found, total, i;
+
+	found = -1;
+	total = xi->xi_nondir.total;
+	for (i = 0; i < total; i++) {
+		if (xi->xi_nondir.array[i] != h_ino)
+			continue;
+		found = i;
+		break;
+	}
+
+	return found;
+}
+
+static int au_xinondir_expand(struct au_xino *xi)
+{
+	int err, sz;
+	ino_t *p;
+
+	BUILD_BUG_ON(KMALLOC_MAX_SIZE > INT_MAX);
+
+	err = -ENOMEM;
+	sz = xi->xi_nondir.total * sizeof(ino_t);
+	if (unlikely(sz > KMALLOC_MAX_SIZE / 2))
+		goto out;
+	p = au_kzrealloc(xi->xi_nondir.array, sz, sz << 1, GFP_ATOMIC,
+			 /*may_shrink*/0);
+	if (p) {
+		xi->xi_nondir.array = p;
+		xi->xi_nondir.total <<= 1;
+		AuDbg("xi_nondir.total %d\n", xi->xi_nondir.total);
+		err = 0;
+	}
+
+out:
+	return err;
+}
+
+void au_xinondir_leave(struct super_block *sb, aufs_bindex_t bindex,
+		       ino_t h_ino, int idx)
+{
+	struct au_xino *xi;
+
+	AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+	xi = au_sbr(sb, bindex)->br_xino;
+	AuDebugOn(idx < 0 || xi->xi_nondir.total <= idx);
+
+	spin_lock(&xi->xi_nondir.spin);
+	AuDebugOn(xi->xi_nondir.array[idx] != h_ino);
+	xi->xi_nondir.array[idx] = 0;
+	spin_unlock(&xi->xi_nondir.spin);
+	wake_up_all(&xi->xi_nondir.wqh);
+}
+
+int au_xinondir_enter(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		      int *idx)
+{
+	int err, found, empty;
+	struct au_xino *xi;
+
+	err = 0;
+	*idx = -1;
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		goto out; /* no xino */
+
+	xi = au_sbr(sb, bindex)->br_xino;
+
+again:
+	spin_lock(&xi->xi_nondir.spin);
+	found = au_xinondir_find(xi, h_ino);
+	if (found == -1) {
+		empty = au_xinondir_find(xi, /*h_ino*/0);
+		if (empty == -1) {
+			empty = xi->xi_nondir.total;
+			err = au_xinondir_expand(xi);
+			if (unlikely(err))
+				goto out_unlock;
+		}
+		xi->xi_nondir.array[empty] = h_ino;
+		*idx = empty;
+	} else {
+		spin_unlock(&xi->xi_nondir.spin);
+		wait_event(xi->xi_nondir.wqh,
+			   xi->xi_nondir.array[found] != h_ino);
+		goto again;
+	}
+
+out_unlock:
+	spin_unlock(&xi->xi_nondir.spin);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_xino_path(struct seq_file *seq, struct file *file)
+{
+	int err;
+
+	err = au_seq_path(seq, &file->f_path);
+	if (unlikely(err))
+		goto out;
+
+#define Deleted "\\040(deleted)"
+	seq->count -= sizeof(Deleted) - 1;
+	AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
+			 sizeof(Deleted) - 1));
+#undef Deleted
+
+out:
+	return err;
+}
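
au_xino_new_ino() above is essentially a paged-bitmap allocator: scan the xib pages for the first zero bit, claim it, and turn the page index plus bit offset back into an inode number. A standalone sketch of that idea (plain userspace C; the names and the FIRST_INO base are made up for the example, this is not aufs code):

/* Illustrative only: a paged bitmap of used inode numbers, searched
 * page by page for the first zero bit, which is then claimed and
 * mapped back to an inode number. */
#include <stdio.h>
#include <string.h>

#define PAGE_BYTES 4096
#define PAGE_BITS  (PAGE_BYTES * 8)
#define FIRST_INO  11	/* assumed base, standing in for AUFS_FIRST_INO */

static int bit_is_set(const unsigned char *p, unsigned long bit)
{
	return (p[bit / 8] >> (bit % 8)) & 1;
}

static void bit_set(unsigned char *p, unsigned long bit)
{
	p[bit / 8] |= 1u << (bit % 8);
}

/* claim the lowest free number, or 0 when every page is full */
static unsigned long new_ino(unsigned char **pages, unsigned long npages)
{
	unsigned long pindex, bit;

	for (pindex = 0; pindex < npages; pindex++)
		for (bit = 0; bit < PAGE_BITS; bit++)
			if (!bit_is_set(pages[pindex], bit)) {
				bit_set(pages[pindex], bit);
				return FIRST_INO + pindex * PAGE_BITS + bit;
			}
	return 0;
}

int main(void)
{
	unsigned char p0[PAGE_BYTES], p1[PAGE_BYTES] = { 0 };
	unsigned char *pages[] = { p0, p1 };

	memset(p0, 0xff, sizeof(p0));		/* first page exhausted */
	printf("ino=%lu\n", new_ino(pages, 2));	/* first free bit of page 1 */
	return 0;
}
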
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 313aae95818..b057d887f5d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -79,7 +79,7 @@ void kill_bdev(struct block_device *bdev)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
+	if (mapping_empty(mapping))
 		return;
 
 	invalidate_bh_lrus();
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 6009e0e939b..43868e6a85d 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -91,7 +91,7 @@ static int caching_kthread(void *data)
 				btrfs_release_path(path);
 				root->ino_cache_progress = last;
 				up_read(&fs_info->commit_root_sem);
-				schedule_timeout(1);
+				schedule_min_hrtimeout();
 				goto again;
 			} else
 				continue;
diff --git a/fs/dax.c b/fs/dax.c
index 11b16729b86..80f96daabf2 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -478,9 +478,11 @@ static void *grab_mapping_entry(struct xa_state *xas,
 {
 	unsigned long index = xas->xa_index;
 	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
+	int populated;
 	void *entry;
 
 retry:
+	populated = 0;
 	xas_lock_irq(xas);
 	entry = get_unlocked_entry(xas, order);
 
@@ -526,6 +528,8 @@ static void *grab_mapping_entry(struct xa_state *xas,
 		xas_store(xas, NULL);	/* undo the PMD join */
 		dax_wake_entry(xas, entry, true);
 		mapping->nrexceptional--;
+		if (mapping_empty(mapping))
+			populated = -1;
 		entry = NULL;
 		xas_set(xas, index);
 	}
@@ -541,11 +545,17 @@ static void *grab_mapping_entry(struct xa_state *xas,
 		dax_lock_entry(xas, entry);
 		if (xas_error(xas))
 			goto out_unlock;
+		if (mapping_empty(mapping))
+			populated++;
 		mapping->nrexceptional++;
 	}
 
 out_unlock:
 	xas_unlock_irq(xas);
+	if (populated == -1)
+		inode_pages_clear(mapping->host);
+	else if (populated == 1)
+		inode_pages_set(mapping->host);
 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
 		goto retry;
 	if (xas->xa_node == XA_ERROR(-ENOMEM))
@@ -631,6 +641,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 					  pgoff_t index, bool trunc)
 {
 	XA_STATE(xas, &mapping->i_pages, index);
+	bool empty = false;
 	int ret = 0;
 	void *entry;
 
@@ -645,10 +656,13 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	dax_disassociate_entry(entry, mapping, trunc);
 	xas_store(&xas, NULL);
 	mapping->nrexceptional--;
+	empty = mapping_empty(mapping);
 	ret = 1;
 out:
 	put_unlocked_entry(&xas, entry);
 	xas_unlock_irq(&xas);
+	if (empty)
+		inode_pages_clear(mapping->host);
 	return ret;
 }
 
diff --git a/fs/dcache.c b/fs/dcache.c
index b280e07e162..15aa871d1b4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1266,7 +1266,7 @@ enum d_walk_ret {
  *
  * The @enter() callbacks are called with d_lock held.
  */
-static void d_walk(struct dentry *parent, void *data,
+void d_walk(struct dentry *parent, void *data,
 		   enum d_walk_ret (*enter)(void *, struct dentry *))
 {
 	struct dentry *this_parent;
@@ -1371,6 +1371,7 @@ static void d_walk(struct dentry *parent, void *data,
 	seq = 1;
 	goto again;
 }
+EXPORT_SYMBOL_GPL(d_walk);
 
 struct check_mount {
 	struct vfsmount *mnt;
@@ -2916,6 +2917,7 @@ void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
 
 	write_sequnlock(&rename_lock);
 }
+EXPORT_SYMBOL_GPL(d_exchange);
 
 /**
  * d_ancestor - search for an ancestor
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index dc1a1d5d825..a5e9e905347 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -27,7 +27,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 		 * we need to reschedule to avoid softlockups.
 		 */
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
-		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
+		    (mapping_empty(inode->i_mapping) && !need_resched())) {
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
diff --git a/fs/exec.c b/fs/exec.c
index 2c465119aff..9c0eee37943 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
 #include <linux/oom.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/ksm.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -109,6 +110,7 @@ bool path_noexec(const struct path *path)
 	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
 	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
 }
+EXPORT_SYMBOL_GPL(path_noexec);
 
 #ifdef CONFIG_USELIB
 /*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2e4c0fa2074..f2c90a416b7 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -32,7 +32,7 @@
 
 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
 
-static int setfl(int fd, struct file * filp, unsigned long arg)
+int setfl(int fd, struct file *filp, unsigned long arg)
 {
 	struct inode * inode = file_inode(filp);
 	int error = 0;
@@ -63,6 +63,8 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 
 	if (filp->f_op->check_flags)
 		error = filp->f_op->check_flags(arg);
+	if (!error && filp->f_op->setfl)
+		error = filp->f_op->setfl(filp, arg);
 	if (error)
 		return error;
 
@@ -83,6 +85,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
  out:
 	return error;
 }
+EXPORT_SYMBOL_GPL(setfl);
 
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                      int force)
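
For orientation, the kernel-side setfl() above is reached from fcntl(F_SETFL); with this hunk, a filesystem that fills in the new file_operations->setfl hook is called on the same path. A minimal, runnable userspace view of that entry point:

/* Runnable userspace illustration: fcntl(F_SETFL) is the syscall whose
 * kernel side is setfl(); the hunk above lets a filesystem observe the
 * same flag change via file_operations->setfl. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	int flags;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	flags = fcntl(fd, F_GETFL);
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)	/* -> setfl() */
		perror("F_SETFL");
	printf("O_NONBLOCK is %s\n",
	       (fcntl(fd, F_GETFL) & O_NONBLOCK) ? "set" : "clear");
	close(fd);
	return 0;
}
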
diff --git a/fs/file_table.c b/fs/file_table.c
index 30d55c9a174..34b9bbf4c55 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -162,6 +162,7 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
 	}
 	return ERR_PTR(-ENFILE);
 }
+EXPORT_SYMBOL_GPL(alloc_empty_file);
 
 /*
  * Variant of alloc_empty_file() that doesn't check and modify nr_files.
@@ -375,6 +376,7 @@ void __fput_sync(struct file *file)
 }
 
 EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL_GPL(__fput_sync);
 
 void __init files_init(void)
 {
diff --git a/fs/inode.c b/fs/inode.c
index 93d9252a00a..578a37d6d1a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -431,26 +431,101 @@ static void inode_lru_list_add(struct inode *inode)
 		inode->i_state |= I_REFERENCED;
 }
 
+static void inode_lru_list_del(struct inode *inode)
+{
+	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
+		this_cpu_dec(nr_unused);
+}
+
 /*
  * Add inode to LRU if needed (inode is unused and clean).
  *
  * Needs inode->i_lock held.
  */
-void inode_add_lru(struct inode *inode)
+bool inode_add_lru(struct inode *inode)
 {
-	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
-				I_FREEING | I_WILL_FREE)) &&
-	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
-		inode_lru_list_add(inode);
+	if (inode->i_state &
+	    (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE | I_PAGES))
+		return false;
+	if (atomic_read(&inode->i_count))
+		return false;
+	if (!(inode->i_sb->s_flags & SB_ACTIVE))
+		return false;
+	inode_lru_list_add(inode);
+	return true;
 }
 
+/*
+ * Usually, inodes become reclaimable when they are no longer
+ * referenced and their page cache has been reclaimed. The following
+ * API allows the VM to communicate cache population state to the VFS.
+ *
+ * However, on CONFIG_HIGHMEM we can't wait for the page cache to go
+ * away: cache pages allocated in a large highmem zone could pin
+ * struct inode memory allocated in relatively small lowmem zones. So
+ * when CONFIG_HIGHMEM is enabled, we tie cache to the inode lifetime.
+ */
 
-static void inode_lru_list_del(struct inode *inode)
+#ifndef CONFIG_HIGHMEM
+/**
+ * inode_pages_set - mark the inode as holding page cache
+ * @inode: the inode whose first cache page was just added
+ *
+ * Tell the VFS that this inode has populated page cache and must not
+ * be reclaimed by the inode shrinker.
+ *
+ * The caller must hold the page lock of the just-added page: by
+ * pinning the page, the page cache cannot become depopulated, and we
+ * can safely set I_PAGES without a race check under the i_pages lock.
+ *
+ * This function acquires the i_lock.
+ */
+void inode_pages_set(struct inode *inode)
 {
+	spin_lock(&inode->i_lock);
+	if (!(inode->i_state & I_PAGES)) {
+		inode->i_state |= I_PAGES;
+		if (!list_empty(&inode->i_lru)) {
+			count_vm_event(PGINODERESCUE);
+			inode_lru_list_del(inode);
+		}
+	}
+	spin_unlock(&inode->i_lock);
+}
 
-	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
-		this_cpu_dec(nr_unused);
+/**
+ * inode_pages_clear - mark the inode as not holding page cache
+ * @inode: the inode whose last cache page was just removed
+ *
+ * Tell the VFS that the inode no longer holds page cache and that its
+ * lifetime is to be handed over to the inode shrinker LRU.
+ *
+ * This function acquires the i_lock and the i_pages lock.
+ */
+void inode_pages_clear(struct inode *inode)
+{
+	struct address_space *mapping = &inode->i_data;
+	bool add_to_lru = false;
+	unsigned long flags;
+
+	spin_lock(&inode->i_lock);
+
+	xa_lock_irqsave(&mapping->i_pages, flags);
+	if ((inode->i_state & I_PAGES) && mapping_empty(mapping)) {
+		inode->i_state &= ~I_PAGES;
+		add_to_lru = true;
+	}
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
+
+	if (add_to_lru) {
+		WARN_ON_ONCE(!list_empty(&inode->i_lru));
+		if (inode_add_lru(inode))
+			__count_vm_event(PGINODEDELAYED);
+	}
+
+	spin_unlock(&inode->i_lock);
 }
+#endif /* !CONFIG_HIGHMEM */
 
 /**
  * inode_sb_list_add - add inode to the superblock list of inodes
@@ -743,6 +818,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 	if (!spin_trylock(&inode->i_lock))
 		return LRU_SKIP;
 
+	WARN_ON_ONCE(inode->i_state & I_PAGES);
+
 	/*
 	 * Referenced or dirty inodes are still in use. Give them another pass
 	 * through the LRU as we canot reclaim them now.
@@ -762,7 +839,18 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		return LRU_ROTATE;
 	}
 
-	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
+	/*
+	 * Usually, populated inodes shouldn't be on the shrinker LRU,
+	 * but they can be briefly visible here when a new page is added
+	 * to an inode that is already on the LRU and inode_pages_set()
+	 * hasn't run yet to take it off.
+	 *
+	 * The other exception is on HIGHMEM systems: highmem cache
+	 * can pin lowmem struct inodes, and we might be in dire
+	 * straits in the lower zones. Purge cache to free the inode.
+	 */
+	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
+#ifdef CONFIG_HIGHMEM
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
 		spin_unlock(lru_lock);
@@ -779,6 +867,12 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		iput(inode);
 		spin_lock(lru_lock);
 		return LRU_RETRY;
+#else
+		list_lru_isolate(lru, &inode->i_lru);
+		spin_unlock(&inode->i_lock);
+		this_cpu_dec(nr_unused);
+		return LRU_REMOVED;
+#endif
 	}
 
 	WARN_ON(inode->i_state & I_NEW);
@@ -1688,12 +1782,13 @@ EXPORT_SYMBOL(generic_update_time);
  * This does the actual work of updating an inodes time or version.  Must have
  * had called mnt_want_write() before calling this.
  */
-static int update_time(struct inode *inode, struct timespec64 *time, int flags)
+int update_time(struct inode *inode, struct timespec64 *time, int flags)
 {
 	if (inode->i_op->update_time)
 		return inode->i_op->update_time(inode, time, flags);
 	return generic_update_time(inode, time, flags);
 }
+EXPORT_SYMBOL_GPL(update_time);
 
 /**
  *	touch_atime	-	update the access time
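
The I_PAGES bookkeeping added to fs/inode.c above reduces to a simple toggle: an inode with populated page cache stays off the shrinker LRU and returns to it once the cache is empty again. A toy model of just that toggle (plain C, field names are stand-ins, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
	unsigned long nrpages;	/* page cache size */
	bool has_pages;		/* stands in for the I_PAGES flag */
	bool on_lru;		/* stands in for s_inode_lru membership */
};

/* like inode_pages_set(): first page added, leave the LRU */
static void pages_set(struct toy_inode *inode)
{
	if (!inode->has_pages) {
		inode->has_pages = true;
		inode->on_lru = false;
	}
}

/* like inode_pages_clear(): last page gone, return to the LRU */
static void pages_clear(struct toy_inode *inode)
{
	if (inode->has_pages && inode->nrpages == 0) {
		inode->has_pages = false;
		inode->on_lru = true;
	}
}

int main(void)
{
	struct toy_inode inode = { .on_lru = true };

	inode.nrpages = 1;
	pages_set(&inode);
	printf("populated: on_lru=%d\n", inode.on_lru);	/* 0 */

	inode.nrpages = 0;
	pages_clear(&inode);
	printf("empty:     on_lru=%d\n", inode.on_lru);	/* 1 */
	return 0;
}
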
diff --git a/fs/internal.h b/fs/internal.h
index aa5d45524e8..2ca77c0afb0 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -137,7 +137,7 @@ extern int vfs_open(const struct path *, struct file *);
  * inode.c
  */
 extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
-extern void inode_add_lru(struct inode *inode);
+extern bool inode_add_lru(struct inode *inode);
 extern int dentry_needs_remove_privs(struct dentry *dentry);
 
 /*
diff --git a/fs/namespace.c b/fs/namespace.c
index a28e4db075e..fa17b9d5926 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -431,6 +431,7 @@ void __mnt_drop_write(struct vfsmount *mnt)
 	mnt_dec_writers(real_mount(mnt));
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(__mnt_drop_write);
 
 /**
  * mnt_drop_write - give up write access to a mount
@@ -776,6 +777,13 @@ static inline int check_mnt(struct mount *mnt)
 	return mnt->mnt_ns == current->nsproxy->mnt_ns;
 }
 
+/* for aufs, CONFIG_AUFS_BR_FUSE */
+int is_current_mnt_ns(struct vfsmount *mnt)
+{
+	return check_mnt(real_mount(mnt));
+}
+EXPORT_SYMBOL_GPL(is_current_mnt_ns);
+
 /*
  * vfsmount lock must be held for write
  */
@@ -1897,6 +1905,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iterate_mounts);
 
 static void lock_mnt_tree(struct mount *mnt)
 {
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 133f723aca0..0b9f7f6d839 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -99,6 +99,7 @@ void fsnotify_get_group(struct fsnotify_group *group)
 {
 	refcount_inc(&group->refcnt);
 }
+EXPORT_SYMBOL_GPL(fsnotify_get_group);
 
 /*
  * Drop a reference to a group.  Free it if it's through.
diff --git a/fs/open.c b/fs/open.c
index 719b320ede5..f88ce55c1c9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -65,6 +65,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
 	inode_unlock(dentry->d_inode);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(do_truncate);
 
 long vfs_truncate(const struct path *path, loff_t length)
 {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index eb2255e95f6..d3ca400f9fd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
 		seq_puts(m, "0 0 0\n");
 	else
 		seq_printf(m, "%llu %llu %lu\n",
-		   (unsigned long long)task->se.sum_exec_runtime,
+		   (unsigned long long)tsk_seruntime(task),
 		   (unsigned long long)task->sched_info.run_delay,
 		   task->sched_info.pcount);
 
@@ -2162,7 +2162,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
 	rc = -ENOENT;
 	vma = find_exact_vma(mm, vm_start, vm_end);
 	if (vma && vma->vm_file) {
-		*path = vma->vm_file->f_path;
+		*path = vma_pr_or_file(vma)->f_path;
 		path_get(path);
 		rc = 0;
 	}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8c1f1bb1a5c..62e28cf10bb 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -105,7 +105,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		   global_zone_page_state(NR_KERNEL_STACK_KB));
 	show_val_kb(m, "PageTables:     ",
 		    global_zone_page_state(NR_PAGETABLE));
-
+#ifdef CONFIG_UKSM
+	show_val_kb(m, "KsmZeroPages:   ",
+		    global_zone_page_state(NR_UKSM_ZERO_PAGES));
+#endif
 	show_val_kb(m, "NFS_Unstable:   ",
 		    global_node_page_state(NR_UNSTABLE_NFS));
 	show_val_kb(m, "Bounce:         ",
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 14c2badb8fd..65afe5287e4 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -41,7 +41,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 	file = region->vm_file;
 
 	if (file) {
-		struct inode *inode = file_inode(region->vm_file);
+		struct inode *inode;
+
+		file = vmr_pr_or_file(region);
+		inode = file_inode(file);
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8d382d4ec06..93a768f754c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -280,7 +280,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 	const char *name = NULL;
 
 	if (file) {
-		struct inode *inode = file_inode(vma->vm_file);
+		struct inode *inode;
+
+		file = vma_pr_or_file(vma);
+		inode = file_inode(file);
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -1787,7 +1790,7 @@ static int show_numa_map(struct seq_file *m, void *v)
 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md = &numa_priv->md;
-	struct file *file = vma->vm_file;
+	struct file *file = vma_pr_or_file(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	struct mempolicy *pol;
 	char buffer[64];
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 7907e6419e5..d17209cf52b 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -155,7 +155,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 	file = vma->vm_file;
 
 	if (file) {
-		struct inode *inode = file_inode(vma->vm_file);
+		struct inode *inode;
+
+		file = vma_pr_or_file(vma);
+		inode = file_inode(file);
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/fs/read_write.c b/fs/read_write.c
index bbfa9b12b15..6f61c0b6aa6 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -469,6 +469,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(vfs_read);
 
 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
@@ -499,6 +500,30 @@ static ssize_t __vfs_write(struct file *file, const char __user *p,
 		return -EINVAL;
 }
 
+vfs_readf_t vfs_readf(struct file *file)
+{
+	const struct file_operations *fop = file->f_op;
+
+	if (fop->read)
+		return fop->read;
+	if (fop->read_iter)
+		return new_sync_read;
+	return ERR_PTR(-ENOSYS); /* doesn't have ->read(|_iter)() op */
+}
+EXPORT_SYMBOL_GPL(vfs_readf);
+
+vfs_writef_t vfs_writef(struct file *file)
+{
+	const struct file_operations *fop = file->f_op;
+
+	if (fop->write)
+		return fop->write;
+	if (fop->write_iter)
+		return new_sync_write;
+	return ERR_PTR(-ENOSYS); /* doesn't have ->write(|_iter)() op */
+}
+EXPORT_SYMBOL_GPL(vfs_writef);
+
 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
 {
 	mm_segment_t old_fs;
@@ -567,6 +592,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(vfs_write);
 
 /* file_ppos returns &file->f_pos or NULL if file is stream */
 static inline loff_t *file_ppos(struct file *file)
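
vfs_readf()/vfs_writef() above only select a callback from the file's file_operations, preferring the direct ->read/->write and falling back to the sync wrapper around ->read_iter/->write_iter. The same selection pattern, reduced to standalone C with made-up types:

#include <stdio.h>

typedef long (*readf_t)(const char *buf);

struct toy_fops {
	readf_t read;		/* direct ->read, may be NULL */
	readf_t read_iter;	/* iterator-based path, may be NULL */
};

static long sync_read_wrapper(const char *buf)
{
	/* stands in for new_sync_read() driving ->read_iter */
	printf("read_iter path: %s\n", buf);
	return 0;
}

static readf_t toy_vfs_readf(const struct toy_fops *fop)
{
	if (fop->read)
		return fop->read;
	if (fop->read_iter)
		return sync_read_wrapper;
	return NULL;		/* the kernel returns ERR_PTR(-ENOSYS) */
}

int main(void)
{
	struct toy_fops fops = { .read_iter = sync_read_wrapper };
	readf_t rf = toy_vfs_readf(&fops);

	if (rf)
		rf("hello");
	return 0;
}
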
diff --git a/fs/splice.c b/fs/splice.c
index 4e53efbd621..a6bd0500443 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -849,8 +849,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
 /*
  * Attempt to initiate a splice from pipe to file.
  */
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-			   loff_t *ppos, size_t len, unsigned int flags)
+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		    loff_t *ppos, size_t len, unsigned int flags)
 {
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
 				loff_t *, size_t, unsigned int);
@@ -862,13 +862,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
 
 	return splice_write(pipe, out, ppos, len, flags);
 }
+EXPORT_SYMBOL_GPL(do_splice_from);
 
 /*
  * Attempt to initiate a splice from a file to a pipe.
  */
-static long do_splice_to(struct file *in, loff_t *ppos,
-			 struct pipe_inode_info *pipe, size_t len,
-			 unsigned int flags)
+long do_splice_to(struct file *in, loff_t *ppos,
+		  struct pipe_inode_info *pipe, size_t len,
+		  unsigned int flags)
 {
 	ssize_t (*splice_read)(struct file *, loff_t *,
 			       struct pipe_inode_info *, size_t, unsigned int);
@@ -891,6 +892,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
 
 	return splice_read(in, ppos, pipe, len, flags);
 }
+EXPORT_SYMBOL_GPL(do_splice_to);
 
 /**
  * splice_direct_to_actor - splices data directly between two non-pipes
diff --git a/fs/sync.c b/fs/sync.c
index 4d1ff010bc5..67c66358f3f 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -28,7 +28,7 @@
  * wait == 1 case since in that case write_inode() functions do
  * sync_dirty_buffer() and thus effectively write one block at a time.
  */
-static int __sync_filesystem(struct super_block *sb, int wait)
+int __sync_filesystem(struct super_block *sb, int wait)
 {
 	if (wait)
 		sync_inodes_sb(sb);
@@ -39,6 +39,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 		sb->s_op->sync_fs(sb, wait);
 	return __sync_blockdev(sb->s_bdev, wait);
 }
+EXPORT_SYMBOL_GPL(__sync_filesystem);
 
 /*
  * Write out and wait upon all dirty data associated with this
diff --git a/fs/xattr.c b/fs/xattr.c
index 91608d9bfc6..02d19ab3ba5 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -296,6 +296,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
 	*xattr_value = value;
 	return error;
 }
+EXPORT_SYMBOL_GPL(vfs_getxattr_alloc);
 
 ssize_t
 __vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name,
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 329b8c8ca70..22fb5a592b9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -867,12 +867,25 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 extern void untrack_pfn_moved(struct vm_area_struct *vma);
 #endif
 
+#ifdef CONFIG_UKSM
+static inline int is_uksm_zero_pfn(unsigned long pfn)
+{
+	extern unsigned long uksm_zero_pfn;
+	return pfn == uksm_zero_pfn;
+}
+#else
+static inline int is_uksm_zero_pfn(unsigned long pfn)
+{
+	return 0;
+}
+#endif
+
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
 {
 	extern unsigned long zero_pfn;
 	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
-	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn);
 }
 
 #define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
@@ -881,7 +894,7 @@ static inline int is_zero_pfn(unsigned long pfn)
 static inline int is_zero_pfn(unsigned long pfn)
 {
 	extern unsigned long zero_pfn;
-	return pfn == zero_pfn;
+	return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn));
 }
 
 static inline unsigned long my_zero_pfn(unsigned long addr)
diff --git a/include/dt-bindings/board/am335x-bbw-bbb-base.h b/include/dt-bindings/board/am335x-bbw-bbb-base.h
new file mode 100644
index 00000000000..35f6d57ef49
--- /dev/null
+++ b/include/dt-bindings/board/am335x-bbw-bbb-base.h
@@ -0,0 +1,103 @@
+/*
+ * This header provides constants for bbw/bbb pinctrl bindings.
+ *
+ * Copyright (C) 2014 Robert Nelson <robertcnelson@gmail.com>
+ *
+ * Numbers Based on: https://github.com/derekmolloy/boneDeviceTree/tree/master/docs
+ */
+
+#ifndef _DT_BINDINGS_BOARD_AM335X_BBW_BBB_BASE_H
+#define _DT_BINDINGS_BOARD_AM335X_BBW_BBB_BASE_H
+
+#define BONE_P8_03 0x018
+#define BONE_P8_04 0x01C
+
+#define BONE_P8_05 0x008
+#define BONE_P8_06 0x00C
+#define BONE_P8_07 0x090
+#define BONE_P8_08 0x094
+
+#define BONE_P8_09 0x09C
+#define BONE_P8_10 0x098
+#define BONE_P8_11 0x034
+#define BONE_P8_12 0x030
+
+#define BONE_P8_13 0x024
+#define BONE_P8_14 0x028
+#define BONE_P8_15 0x03C
+#define BONE_P8_16 0x038
+
+#define BONE_P8_17 0x02C
+#define BONE_P8_18 0x08C
+#define BONE_P8_19 0x020
+#define BONE_P8_20 0x084
+
+#define BONE_P8_21 0x080
+#define BONE_P8_22 0x014
+#define BONE_P8_23 0x010
+#define BONE_P8_24 0x004
+
+#define BONE_P8_25 0x000
+#define BONE_P8_26 0x07C
+#define BONE_P8_27 0x0E0
+#define BONE_P8_28 0x0E8
+
+#define BONE_P8_29 0x0E4
+#define BONE_P8_30 0x0EC
+#define BONE_P8_31 0x0D8
+#define BONE_P8_32 0x0DC
+
+#define BONE_P8_33 0x0D4
+#define BONE_P8_34 0x0CC
+#define BONE_P8_35 0x0D0
+#define BONE_P8_36 0x0C8
+
+#define BONE_P8_37 0x0C0
+#define BONE_P8_38 0x0C4
+#define BONE_P8_39 0x0B8
+#define BONE_P8_40 0x0BC
+
+#define BONE_P8_41 0x0B0
+#define BONE_P8_42 0x0B4
+#define BONE_P8_43 0x0A8
+#define BONE_P8_44 0x0AC
+
+#define BONE_P8_45 0x0A0
+#define BONE_P8_46 0x0A4
+
+#define BONE_P9_11 0x070
+#define BONE_P9_12 0x078
+
+#define BONE_P9_13 0x074
+#define BONE_P9_14 0x048
+#define BONE_P9_15 0x040
+#define BONE_P9_16 0x04C
+
+#define BONE_P9_17 0x15C
+#define BONE_P9_18 0x158
+#define BONE_P9_19 0x17C
+#define BONE_P9_20 0x178
+
+#define BONE_P9_21 0x154
+#define BONE_P9_22 0x150
+#define BONE_P9_23 0x044
+#define BONE_P9_24 0x184
+
+#define BONE_P9_25 0x1AC
+#define BONE_P9_26 0x180
+#define BONE_P9_27 0x1A4
+#define BONE_P9_28 0x19C
+
+#define BONE_P9_29 0x194
+#define BONE_P9_30 0x198
+#define BONE_P9_31 0x190
+
+/* Shared P21 of P11 */
+#define BONE_P9_41A 0x1B4
+#define BONE_P9_41B 0x1A8
+
+/* Shared P22 of P11 */
+#define BONE_P9_42A 0x164
+#define BONE_P9_42B 0x1A0
+
+#endif
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 21f5aa0b217..7f428764b7d 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -297,6 +297,9 @@ static inline void set_freezable(void) {}
 #define wait_event_freezekillable_unsafe(wq, condition)			\
 		wait_event_killable(wq, condition)
 
+#ifdef CONFIG_SCHED_MUQSS
+#define pm_freezing (false)
+#endif
 #endif /* !CONFIG_FREEZER */
 
 #endif	/* FREEZER_H_INCLUDED */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 45cc10cdf6d..72f48d8baa5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -590,6 +590,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 	atomic_inc(&mapping->i_mmap_writable);
 }
 
+static inline bool mapping_empty(struct address_space *mapping)
+{
+	return mapping->nrpages + mapping->nrexceptional == 0;
+}
+
 /*
  * Use sequence counter to get consistent i_size on 32-bit processors.
  */
@@ -1355,6 +1360,7 @@ extern void fasync_free(struct fasync_struct *);
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
 
+extern int setfl(int fd, struct file *filp, unsigned long arg);
 extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
 extern int f_setown(struct file *filp, unsigned long arg, int force);
 extern void f_delown(struct file *filp);
@@ -1847,6 +1853,7 @@ struct file_operations {
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 	int (*check_flags)(int);
+	int (*setfl)(struct file *, unsigned long);
 	int (*flock) (struct file *, int, struct file_lock *);
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
@@ -1917,6 +1924,12 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
 			      struct iovec *fast_pointer,
 			      struct iovec **ret_pointer);
 
+typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t,
+				loff_t *);
+vfs_readf_t vfs_readf(struct file *file);
+vfs_writef_t vfs_writef(struct file *file);
+
 extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -2156,6 +2169,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
  *
  * I_CREATING		New object's inode in the middle of setting up.
  *
+ * I_PAGES		Inode is holding page cache that needs to get reclaimed
+ *			first before the inode can go onto the shrinker LRU.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -2178,6 +2194,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 #define I_WB_SWITCH		(1 << 13)
 #define I_OVL_INUSE		(1 << 14)
 #define I_CREATING		(1 << 15)
+#define I_PAGES			(1 << 16)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2349,6 +2366,7 @@ extern int current_umask(void);
 extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern int generic_update_time(struct inode *, struct timespec64 *, int);
+extern int update_time(struct inode *, struct timespec64 *, int);
 
 /* /sys/fs */
 extern struct kobject *fs_kobj;
@@ -2633,6 +2651,7 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb)
 	return false;
 }
 #endif
+extern int __sync_filesystem(struct super_block *, int);
 extern int sync_filesystem(struct super_block *);
 extern const struct file_operations def_blk_fops;
 extern const struct file_operations def_chr_fops;
@@ -3105,6 +3124,14 @@ static inline void remove_inode_hash(struct inode *inode)
 		__remove_inode_hash(inode);
 }
 
+#ifndef CONFIG_HIGHMEM
+extern void inode_pages_set(struct inode *inode);
+extern void inode_pages_clear(struct inode *inode);
+#else
+static inline void inode_pages_set(struct inode *inode) {}
+static inline void inode_pages_clear(struct inode *inode) {}
+#endif
+
 extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2c620d7ac43..73417df5daa 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -36,7 +36,11 @@ extern struct cred init_cred;
 #define INIT_PREV_CPUTIME(x)
 #endif
 
+#ifdef CONFIG_SCHED_MUQSS
+#define INIT_TASK_COMM "MuQSS"
+#else
 #define INIT_TASK_COMM "swapper"
+#endif
 
 /* Attach to the init_task data structure for proper alignment */
 #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index e9bfe6972ae..16ba1c7e5bd 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -53,6 +53,8 @@ enum {
  */
 static inline int task_nice_ioprio(struct task_struct *task)
 {
+	if (iso_task(task))
+		return 0;
 	return (task_nice(task) + 20) / 5;
 }
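
With the hunk above, the nice-to-ioprio mapping is bypassed for SCHED_ISO tasks (a policy provided by MuQSS), which always get the best I/O priority. The arithmetic, as a runnable sketch:

/* sketch of task_nice_ioprio() after the hunk above: ISO tasks get 0,
 * everyone else maps nice -20..19 onto ioprio 0..7. */
#include <stdbool.h>
#include <stdio.h>

static int nice_to_ioprio(int nice, bool iso)
{
	if (iso)
		return 0;
	return (nice + 20) / 5;
}

int main(void)
{
	printf("nice 0   -> ioprio %d\n", nice_to_ioprio(0, false));	/* 4 */
	printf("nice -20 -> ioprio %d\n", nice_to_ioprio(-20, false));	/* 0 */
	printf("nice 19  -> ioprio %d\n", nice_to_ioprio(19, true));	/* 0: ISO */
	return 0;
}
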
 
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e48b1e453ff..8cc8077cebf 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -21,20 +21,16 @@ struct mem_cgroup;
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
-int __ksm_enter(struct mm_struct *mm);
-void __ksm_exit(struct mm_struct *mm);
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct stable_node *page_stable_node(struct page *page)
 {
-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
-		return __ksm_enter(mm);
-	return 0;
+	return PageKsm(page) ? page_rmapping(page) : NULL;
 }
 
-static inline void ksm_exit(struct mm_struct *mm)
+static inline void set_page_stable_node(struct page *page,
+					struct stable_node *stable_node)
 {
-	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
-		__ksm_exit(mm);
+	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
 }
 
 /*
@@ -56,6 +52,33 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 bool reuse_ksm_page(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
+#ifdef CONFIG_KSM_LEGACY
+int __ksm_enter(struct mm_struct *mm);
+void __ksm_exit(struct mm_struct *mm);
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+		return __ksm_enter(mm);
+	return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+		__ksm_exit(mm);
+}
+
+#elif defined(CONFIG_UKSM)
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+#endif /* !CONFIG_UKSM */
+
 #else  /* !CONFIG_KSM */
 
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -96,4 +119,6 @@ static inline bool reuse_ksm_page(struct page *page,
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
 
+#include <linux/uksm.h>
+
 #endif /* __LINUX_KSM_H */
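
page_stable_node()/set_page_stable_node() above use the page->mapping tagged-pointer trick: the stable_node pointer is stored with a low tag bit so it can be told apart from a normal address_space pointer. A standalone sketch of that trick (the tag value and names here are assumptions for the example, not the kernel's PAGE_MAPPING_* constants):

#include <stdint.h>
#include <stdio.h>

#define MAPPING_TAG_KSM 0x2UL	/* assumed tag bit for the example */

struct stable_node { int dummy; };

/* store a stable_node pointer with the KSM tag set, as page->mapping would */
static void *tag_ksm(struct stable_node *node)
{
	return (void *)((uintptr_t)node | MAPPING_TAG_KSM);
}

/* recover the stable_node if (and only if) the mapping carries the tag */
static struct stable_node *untag_ksm(void *mapping)
{
	uintptr_t v = (uintptr_t)mapping;

	if (!(v & MAPPING_TAG_KSM))
		return NULL;
	return (struct stable_node *)(v & ~MAPPING_TAG_KSM);
}

int main(void)
{
	static struct stable_node node;	/* static => at least 4-byte aligned */
	void *mapping = tag_ksm(&node);

	printf("tagged=%d recovered=%d\n",
	       untag_ksm(mapping) != NULL, untag_ksm(mapping) == &node);
	return 0;
}
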
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 206774ac694..e5eda5226fb 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -376,6 +376,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
 	return lock->key == key;
 }
 
+struct lock_class *lockdep_hlock_class(struct held_lock *hlock);
+
 /*
  * Acquire a lock.
  *
@@ -521,6 +523,7 @@ struct lockdep_map { };
 
 #define lockdep_depth(tsk)	(0)
 
+#define lockdep_is_held(lock)			(1)
 #define lockdep_is_held_type(l, r)		(1)
 
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 465e8ad671f..d11050f1ccd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1689,6 +1689,28 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 	unmap_mapping_range(mapping, holebegin, holelen, 0);
 }
 
+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int);
+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[],
+				      int);
+extern void vma_do_get_file(struct vm_area_struct *, const char[], int);
+extern void vma_do_fput(struct vm_area_struct *, const char[], int);
+
+#define vma_file_update_time(vma)	vma_do_file_update_time(vma, __func__, \
+								__LINE__)
+#define vma_pr_or_file(vma)		vma_do_pr_or_file(vma, __func__, \
+							  __LINE__)
+#define vma_get_file(vma)		vma_do_get_file(vma, __func__, __LINE__)
+#define vma_fput(vma)			vma_do_fput(vma, __func__, __LINE__)
+
+#ifndef CONFIG_MMU
+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int);
+extern void vmr_do_fput(struct vm_region *, const char[], int);
+
+#define vmr_pr_or_file(region)		vmr_do_pr_or_file(region, __func__, \
+							  __LINE__)
+#define vmr_fput(region)		vmr_do_fput(region, __func__, __LINE__)
+#endif /* !CONFIG_MMU */
+
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
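
The vma_pr_or_file() helpers declared above back the /proc hunks earlier in this patch: when a shadow vm_prfile is set, it is what gets reported instead of the real vm_file. Reduced to a runnable sketch with strings standing in for the file objects (the paths are made up):

#include <stdio.h>

struct toy_vma {
	const char *vm_file;	/* real backing file */
	const char *vm_prfile;	/* shadow shown to /proc, or NULL */
};

/* like vma_pr_or_file(): prefer the shadow file when it is set */
static const char *pr_or_file(const struct toy_vma *vma)
{
	return vma->vm_prfile ? vma->vm_prfile : vma->vm_file;
}

int main(void)
{
	struct toy_vma plain = { .vm_file = "/usr/bin/cat" };
	struct toy_vma layered = {
		.vm_file	= "/branch-rw/usr/bin/cat",
		.vm_prfile	= "/union/usr/bin/cat",
	};

	printf("%s\n", pr_or_file(&plain));	/* the real file */
	printf("%s\n", pr_or_file(&layered));	/* the user-visible path */
	return 0;
}
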
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba..29f517ec9a2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -272,6 +272,7 @@ struct vm_region {
 	unsigned long	vm_top;		/* region allocated to here */
 	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
 	struct file	*vm_file;	/* the backing file or NULL */
+	struct file	*vm_prfile;	/* the virtual backing file or NULL */
 
 	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
 	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
@@ -351,6 +352,7 @@ struct vm_area_struct {
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
 					   units */
 	struct file * vm_file;		/* File we map to (can be NULL). */
+	struct file *vm_prfile;		/* shadow of vm_file */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
 #ifdef CONFIG_SWAP
@@ -363,6 +365,9 @@ struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef CONFIG_UKSM
+	struct vma_slot *uksm_vma_slot;
+#endif
 } __randomize_layout;
 
 struct core_thread {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6196fd16b7f..7d9decab1f6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -162,6 +162,9 @@ enum zone_stat_item {
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
 	NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UKSM
+	NR_UKSM_ZERO_PAGES,
+#endif
 	NR_VM_ZONE_STAT_ITEMS };
 
 enum node_stat_item {
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 35942084cd4..24f5fd1a789 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -6,11 +6,14 @@
 struct mnt_namespace;
 struct fs_struct;
 struct user_namespace;
+struct vfsmount;
 
 extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
 		struct user_namespace *, struct fs_struct *);
 extern void put_mnt_ns(struct mnt_namespace *ns);
 
+extern int is_current_mnt_ns(struct vfsmount *mnt);
+
 extern const struct file_operations proc_mounts_operations;
 extern const struct file_operations proc_mountinfo_operations;
 extern const struct file_operations proc_mountstats_operations;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a8f7bd8ea1c..af4f122e3bf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -610,7 +610,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+extern bool __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
diff --git a/include/linux/power/pwrseq.h b/include/linux/power/pwrseq.h
new file mode 100644
index 00000000000..cbc344cdf9d
--- /dev/null
+++ b/include/linux/power/pwrseq.h
@@ -0,0 +1,81 @@
+#ifndef __LINUX_PWRSEQ_H
+#define __LINUX_PWRSEQ_H
+
+#include <linux/of.h>
+
+#define PWRSEQ_MAX_CLKS		3
+
+/**
+ * struct pwrseq - the power sequence structure
+ * @pwrseq_of_match_table: the OF device id table this pwrseq library supports
+ * @node: the list node used to add this instance to the pwrseq list
+ * @get: get a pwrseq instance from the device node
+ * @on: power on this pwrseq instance
+ * @off: power off this pwrseq instance
+ * @put: release the resources held by this pwrseq instance
+ * @suspend: do the suspend operation on this pwrseq instance
+ * @resume: do the resume operation on this pwrseq instance
+ * @used: true if this pwrseq instance is in use by a device
+ */
+struct pwrseq {
+	const struct of_device_id *pwrseq_of_match_table;
+	struct list_head node;
+	int (*get)(struct device_node *np, struct pwrseq *p);
+	int (*on)(struct pwrseq *p);
+	void (*off)(struct pwrseq *p);
+	void (*put)(struct pwrseq *p);
+	int (*suspend)(struct pwrseq *p);
+	int (*resume)(struct pwrseq *p);
+	bool used;
+	bool suspended;
+};
+
+/* used for power sequence instance list in one driver */
+struct pwrseq_list_per_dev {
+	struct pwrseq *pwrseq;
+	struct list_head list;
+};
+
+#if IS_ENABLED(CONFIG_POWER_SEQUENCE)
+void pwrseq_register(struct pwrseq *pwrseq);
+void pwrseq_unregister(struct pwrseq *pwrseq);
+struct pwrseq *of_pwrseq_on(struct device_node *np);
+void of_pwrseq_off(struct pwrseq *pwrseq);
+int of_pwrseq_on_list(struct device_node *np, struct list_head *head);
+void of_pwrseq_off_list(struct list_head *head);
+int pwrseq_suspend(struct pwrseq *p);
+int pwrseq_resume(struct pwrseq *p);
+int pwrseq_suspend_list(struct list_head *head);
+int pwrseq_resume_list(struct list_head *head);
+#else
+static inline void pwrseq_register(struct pwrseq *pwrseq) {}
+static inline void pwrseq_unregister(struct pwrseq *pwrseq) {}
+static inline struct pwrseq *of_pwrseq_on(struct device_node *np)
+{
+	return NULL;
+}
+static inline void of_pwrseq_off(struct pwrseq *pwrseq) {}
+static inline int of_pwrseq_on_list(struct device_node *np, struct list_head *head)
+{
+	return 0;
+}
+static inline void of_pwrseq_off_list(struct list_head *head) {}
+static inline int pwrseq_suspend(struct pwrseq *p)
+{
+	return 0;
+}
+static inline int pwrseq_resume(struct pwrseq *p)
+{
+	return 0;
+}
+static inline int pwrseq_suspend_list(struct list_head *head)
+{
+	return 0;
+}
+static inline int pwrseq_resume_list(struct list_head *head)
+{
+	return 0;
+}
+#endif /* CONFIG_POWER_SEQUENCE */
+
+#endif  /* __LINUX_PWRSEQ_H */
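
The struct pwrseq kerneldoc above describes a provider/consumer pattern: providers register themselves, a consumer claims the first unused instance that matches its device node, then drives it through the on/off ops. A userspace sketch of that pattern (not the kernel API; the compatible string is just an example):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_pwrseq {
	const char *compatible;			/* what this provider handles */
	int (*on)(struct toy_pwrseq *p);
	void (*off)(struct toy_pwrseq *p);
	bool used;
	struct toy_pwrseq *next;
};

static struct toy_pwrseq *providers;

/* like pwrseq_register(): add a provider to the global list */
static void toy_register(struct toy_pwrseq *p)
{
	p->next = providers;
	providers = p;
}

/* like of_pwrseq_on(): find a free matching provider and claim it */
static struct toy_pwrseq *toy_get(const char *compatible)
{
	struct toy_pwrseq *p;

	for (p = providers; p; p = p->next) {
		if (p->used || strcmp(p->compatible, compatible))
			continue;
		p->used = true;
		return p;
	}
	return NULL;
}

static int hub_on(struct toy_pwrseq *p)
{
	puts("hub: power on");
	return 0;
}

static void hub_off(struct toy_pwrseq *p)
{
	puts("hub: power off");
}

int main(void)
{
	struct toy_pwrseq hub = {
		.compatible = "example,usb-hub",
		.on = hub_on,
		.off = hub_off,
	};
	struct toy_pwrseq *p;

	toy_register(&hub);
	p = toy_get("example,usb-hub");		/* like of_pwrseq_on() */
	if (p && p->on(p) == 0)
		p->off(p);			/* like of_pwrseq_off() */
	return 0;
}
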
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4418f5cb832..0f26820700e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,9 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers.h>
 #include <linux/rseq.h>
+#ifdef CONFIG_SCHED_MUQSS
+#include <linux/skip_list.h>
+#endif
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -221,6 +224,34 @@ extern long schedule_timeout_interruptible(long timeout);
 extern long schedule_timeout_killable(long timeout);
 extern long schedule_timeout_uninterruptible(long timeout);
 extern long schedule_timeout_idle(long timeout);
+
+#if defined(CONFIG_HIGH_RES_TIMERS) && defined(CONFIG_SCHED_MUQSS)
+extern long schedule_msec_hrtimeout(long timeout);
+extern long schedule_min_hrtimeout(void);
+extern long schedule_msec_hrtimeout_interruptible(long timeout);
+extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
+#else
+static inline long schedule_msec_hrtimeout(long timeout)
+{
+	return schedule_timeout(msecs_to_jiffies(timeout));
+}
+
+static inline long schedule_min_hrtimeout(void)
+{
+	return schedule_timeout(1);
+}
+
+static inline long schedule_msec_hrtimeout_interruptible(long timeout)
+{
+	return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
+}
+
+static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
+{
+	return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
+}
+#endif
+
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 asmlinkage void preempt_schedule_irq(void);
@@ -652,9 +683,11 @@ struct task_struct {
 	unsigned int			flags;
 	unsigned int			ptrace;
 
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
+	int on_cpu;
+#endif
 #ifdef CONFIG_SMP
 	struct llist_node		wake_entry;
-	int				on_cpu;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/* Current CPU: */
 	unsigned int			cpu;
@@ -679,10 +712,25 @@ struct task_struct {
 	int				static_prio;
 	int				normal_prio;
 	unsigned int			rt_priority;
+#ifdef CONFIG_SCHED_MUQSS
+	int time_slice;
+	u64 deadline;
+	skiplist_node node; /* Skip list node */
+	u64 last_ran;
+	u64 sched_time; /* sched_clock time spent running */
+#ifdef CONFIG_SMT_NICE
+	int smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+	bool zerobound; /* Bound to CPU0 for hotplug */
+#endif
+	unsigned long rt_timeout;
+#else /* CONFIG_SCHED_MUQSS */
 
 	const struct sched_class	*sched_class;
 	struct sched_entity		se;
 	struct sched_rt_entity		rt;
+#endif
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group		*sched_task_group;
 #endif
@@ -850,6 +898,10 @@ struct task_struct {
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 	u64				utimescaled;
 	u64				stimescaled;
+#endif
+#ifdef CONFIG_SCHED_MUQSS
+	/* Unbanked cpu time */
+	unsigned long utime_ns, stime_ns;
 #endif
 	u64				gtime;
 	struct prev_cputime		prev_cputime;
@@ -1306,6 +1358,40 @@ struct task_struct {
 	 */
 };
 
+#ifdef CONFIG_SCHED_MUQSS
+#define tsk_seruntime(t)		((t)->sched_time)
+#define tsk_rttimeout(t)		((t)->rt_timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+void print_scheduler_version(void);
+
+static inline bool iso_task(struct task_struct *p)
+{
+	return (p->policy == SCHED_ISO);
+}
+#else /* CFS */
+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t)	((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+	p->nr_cpus_allowed = current->nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+	printk(KERN_INFO "CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+	return false;
+}
+#endif /* CONFIG_SCHED_MUQSS */
+
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->thread_pid;
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 1aff00b65f3..73d6319a856 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -28,7 +28,16 @@ static inline bool dl_time_before(u64 a, u64 b)
 #ifdef CONFIG_SMP
 
 struct root_domain;
+#ifdef CONFIG_SCHED_MUQSS
+static inline void dl_clear_root_domain(struct root_domain *rd)
+{
+}
+static inline void dl_add_task_root_domain(struct task_struct *p)
+{
+}
+#else /* CONFIG_SCHED_MUQSS */
 extern void dl_add_task_root_domain(struct task_struct *p);
 extern void dl_clear_root_domain(struct root_domain *rd);
+#endif /* CONFIG_SCHED_MUQSS */
 
 #endif /* CONFIG_SMP */
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 6d67e9a5af6..101fe470aa8 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -13,7 +13,7 @@ extern int get_nohz_timer_target(void);
 static inline void nohz_balance_enter_idle(int cpu) { }
 #endif
 
-#ifdef CONFIG_NO_HZ_COMMON
+#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
 void calc_load_nohz_start(void);
 void calc_load_nohz_remote(struct rq *rq);
 void calc_load_nohz_stop(void);
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index 7d64feafc40..43c9d9e50c0 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -20,8 +20,20 @@
  */
 
 #define MAX_USER_RT_PRIO	100
+
+#ifdef CONFIG_SCHED_MUQSS
+/* Note different MAX_RT_PRIO */
+#define MAX_RT_PRIO		(MAX_USER_RT_PRIO + 1)
+
+#define ISO_PRIO		(MAX_RT_PRIO)
+#define NORMAL_PRIO		(MAX_RT_PRIO + 1)
+#define IDLE_PRIO		(MAX_RT_PRIO + 2)
+#define PRIO_LIMIT		((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_MUQSS */
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 
+#endif /* CONFIG_SCHED_MUQSS */
+
 #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
 #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
 
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index e5af028c08b..010b2244e0b 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
 
 	if (policy == SCHED_FIFO || policy == SCHED_RR)
 		return true;
+#ifndef CONFIG_SCHED_MUQSS
 	if (policy == SCHED_DEADLINE)
 		return true;
+#endif
 	return false;
 }
 
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 38359071236..e2ebedb6512 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -106,7 +106,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
 extern void free_task(struct task_struct *tsk);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
 extern void sched_exec(void);
 #else
 #define sched_exec()   {}
diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
new file mode 100644
index 00000000000..d4be84ba273
--- /dev/null
+++ b/include/linux/skip_list.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_SKIP_LISTS_H
+#define _LINUX_SKIP_LISTS_H
+typedef u64 keyType;
+typedef void *valueType;
+
+typedef struct nodeStructure skiplist_node;
+
+struct nodeStructure {
+	int level;	/* Levels in this structure */
+	keyType key;
+	valueType value;
+	skiplist_node *next[8];
+	skiplist_node *prev[8];
+};
+
+typedef struct listStructure {
+	int entries;
+	int level;	/* Maximum level of the list
+			(1 more than the number of levels in the list) */
+	skiplist_node *header; /* pointer to header */
+} skiplist;
+
+void skiplist_init(skiplist_node *slnode);
+skiplist *new_skiplist(skiplist_node *slnode);
+void free_skiplist(skiplist *l);
+void skiplist_node_init(skiplist_node *node);
+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
+void skiplist_delete(skiplist *l, skiplist_node *node);
+
+static inline bool skiplist_node_empty(skiplist_node *node)
+{
+	return !node->next[0];
+}
+#endif /* _LINUX_SKIP_LISTS_H */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index ebbbfea48aa..d68d5742502 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -90,4 +90,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
 extern const struct pipe_buf_operations default_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+			   loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+			 struct pipe_inode_info *pipe, size_t len,
+			 unsigned int flags);
 #endif
diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
new file mode 100644
index 00000000000..d71edba6b63
--- /dev/null
+++ b/include/linux/sradix-tree.h
@@ -0,0 +1,77 @@
+#ifndef _LINUX_SRADIX_TREE_H
+#define _LINUX_SRADIX_TREE_H
+
+
+#define INIT_SRADIX_TREE(root, mask)					\
+do {									\
+	(root)->height = 0;						\
+	(root)->gfp_mask = (mask);					\
+	(root)->rnode = NULL;						\
+} while (0)
+
+#define ULONG_BITS	(sizeof(unsigned long) * 8)
+#define SRADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
+//#define SRADIX_TREE_MAP_SHIFT	6
+//#define SRADIX_TREE_MAP_SIZE	(1UL << SRADIX_TREE_MAP_SHIFT)
+//#define SRADIX_TREE_MAP_MASK	(SRADIX_TREE_MAP_SIZE-1)
+
+struct sradix_tree_node {
+	unsigned int	height;		/* Height from the bottom */
+	unsigned int	count;
+	unsigned int	fulls;		/* Number of full sublevel trees */
+	struct sradix_tree_node *parent;
+	void *stores[];
+};
+
+/* A simple radix tree implementation */
+struct sradix_tree_root {
+	unsigned int            height;
+	struct sradix_tree_node *rnode;
+
+	/* Where found to have available empty stores in its sublevels */
+	struct sradix_tree_node *enter_node;
+	unsigned int shift;
+	unsigned int stores_size;
+	unsigned int mask;
+	unsigned long min;	/* The first hole index */
+	unsigned long num;
+	//unsigned long *height_to_maxindex;
+
+	/* How the node is allocated and freed. */
+	struct sradix_tree_node *(*alloc)(void);
+	void (*free)(struct sradix_tree_node *node);
+
+	/* When a new node is added and removed */
+	void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
+	void (*assign)(struct sradix_tree_node *node, unsigned int index, void *item);
+	void (*rm)(struct sradix_tree_node *node, unsigned int offset);
+};
+
+struct sradix_tree_path {
+	struct sradix_tree_node *node;
+	int offset;
+};
+
+static inline
+void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
+{
+	root->height = 0;
+	root->rnode = NULL;
+	root->shift = shift;
+	root->stores_size = 1UL << shift;
+	root->mask = root->stores_size - 1;
+}
+
+
+extern void *sradix_tree_next(struct sradix_tree_root *root,
+		       struct sradix_tree_node *node, unsigned long index,
+		       int (*iter)(void *, unsigned long));
+
+extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
+
+extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
+			struct sradix_tree_node *node, unsigned long index);
+
+extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
+
+#endif /* _LINUX_SRADIX_TREE_H */
diff --git a/include/linux/uksm.h b/include/linux/uksm.h
new file mode 100644
index 00000000000..bb8651f534f
--- /dev/null
+++ b/include/linux/uksm.h
@@ -0,0 +1,149 @@
+#ifndef __LINUX_UKSM_H
+#define __LINUX_UKSM_H
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork().
+ */
+
+/* when !CONFIG_UKSM, only the no-op stubs at the bottom of this file are used. */
+#ifdef CONFIG_UKSM
+
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/sched.h>
+
+extern unsigned long zero_pfn __read_mostly;
+extern unsigned long uksm_zero_pfn __read_mostly;
+extern struct page *empty_uksm_zero_page;
+
+/* must be done before linked to mm */
+extern void uksm_vma_add_new(struct vm_area_struct *vma);
+extern void uksm_remove_vma(struct vm_area_struct *vma);
+
+#define UKSM_SLOT_NEED_SORT	(1 << 0)
+#define UKSM_SLOT_NEED_RERAND	(1 << 1)
+#define UKSM_SLOT_SCANNED	(1 << 2) /* It's scanned in this round */
+#define UKSM_SLOT_FUL_SCANNED	(1 << 3)
+#define UKSM_SLOT_IN_UKSM	(1 << 4)
+
+struct vma_slot {
+	struct sradix_tree_node *snode;
+	unsigned long sindex;
+
+	struct list_head slot_list;
+	unsigned long fully_scanned_round;
+	unsigned long dedup_num;
+	unsigned long pages_scanned;
+	unsigned long this_sampled;
+	unsigned long last_scanned;
+	unsigned long pages_to_scan;
+	struct scan_rung *rung;
+	struct page **rmap_list_pool;
+	unsigned int *pool_counts;
+	unsigned long pool_size;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	unsigned long ctime_j;
+	unsigned long pages;
+	unsigned long flags;
+	unsigned long pages_cowed; /* pages cowed this round */
+	unsigned long pages_merged; /* pages merged this round */
+	unsigned long pages_bemerged;
+
+	/* list node used when this slot has pages merged in this eval round */
+	struct list_head dedup_list;
+};
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+	if (pte_pfn(pte) == uksm_zero_pfn)
+		__dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+	if (pte_pfn(pte) == uksm_zero_pfn)
+		__inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+	if (vma->uksm_vma_slot && PageKsm(page))
+		vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+	if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
+		vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+#ifdef VM_SAO
+	if (vm_flags & VM_SAO)
+		return 0;
+#endif
+
+	return !(vm_flags & (VM_PFNMAP | VM_IO  | VM_DONTEXPAND |
+			     VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
+			     | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN));
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+	if (uksm_flags_can_scan(*vm_flags_p))
+		*vm_flags_p |= VM_MERGEABLE;
+}
+
+/*
+ * A wrapper around BUG_ON() for places where the uksm zero page must not
+ * appear. TODO: remove this once the uksm zero-page patch is stable enough.
+ */
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+	BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
+}
+#else
+static inline void uksm_vma_add_new(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_remove_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+	return 0;
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+}
+
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+}
+#endif /* !CONFIG_UKSM */
+#endif /* __LINUX_UKSM_H */
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index ff56c443180..db8a7d11809 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -16,6 +16,7 @@ struct vbg_dev;
 __printf(1, 2) void vbg_info(const char *fmt, ...);
 __printf(1, 2) void vbg_warn(const char *fmt, ...);
 __printf(1, 2) void vbg_err(const char *fmt, ...);
+__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...);
 
 /* Only use backdoor logging for non-dynamic debug builds */
 #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ffef0f27974..de362c9cda2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -38,7 +38,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_NUMA
 		PGSCAN_ZONE_RECLAIM_FAILED,
 #endif
-		PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
+		SLABS_SCANNED,
+		PGINODESTEAL, KSWAPD_INODESTEAL, PGINODERESCUE, PGINODEDELAYED,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		PAGEOUTRUN, PGROTATED,
 		DROP_PAGECACHE, DROP_SLAB,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 70e48f66dac..42fdebb9556 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6558,6 +6558,8 @@ struct cfg80211_fils_resp_params {
  *	not known. This value is used only if @status < 0 to indicate that the
  *	failure is due to a timeout and not due to explicit rejection by the AP.
  *	This value is ignored in other cases (@status >= 0).
+ * @authorized: Indicates whether the connection is ready to transport
+ *	data packets.
  */
 struct cfg80211_connect_resp_params {
 	int status;
@@ -6569,6 +6571,7 @@ struct cfg80211_connect_resp_params {
 	size_t resp_ie_len;
 	struct cfg80211_fils_resp_params fils;
 	enum nl80211_timeout_reason timeout_reason;
+	bool authorized;
 };
 
 /**
@@ -6718,6 +6721,9 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
  * @fils: FILS related roaming information.
+ * @authorized: true if the 802.1X authentication was done by the driver or is
+ *	not needed (e.g., when Fast Transition protocol was used), false
+ *	otherwise. Ignored for networks that don't use 802.1X authentication.
  */
 struct cfg80211_roam_info {
 	struct ieee80211_channel *channel;
@@ -6728,6 +6734,7 @@ struct cfg80211_roam_info {
 	const u8 *resp_ie;
 	size_t resp_ie_len;
 	struct cfg80211_fils_resp_params fils;
+	bool authorized;
 };
 
 /**
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
new file mode 100644
index 00000000000..e1b2bd375ea
--- /dev/null
+++ b/include/uapi/linux/Kbuild
@@ -0,0 +1 @@
+header-y = aufs_type.h
diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h
new file mode 100644
index 00000000000..34738b8cf34
--- /dev/null
+++ b/include/uapi/linux/aufs_type.h
@@ -0,0 +1,452 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2005-2020 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __AUFS_TYPE_H__
+#define __AUFS_TYPE_H__
+
+#define AUFS_NAME	"aufs"
+
+#ifdef __KERNEL__
+/*
+ * define it before including all other headers.
+ * sched.h may use pr_* macros before defining "current", so define the
+ * no-current version first, and re-define later.
+ */
+#define pr_fmt(fmt)	AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
+#include <linux/sched.h>
+#undef pr_fmt
+#define pr_fmt(fmt) \
+		AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
+		(int)sizeof(current->comm), current->comm, current->pid
+#include <linux/limits.h>
+#else
+#include <stdint.h>
+#include <sys/types.h>
+#include <limits.h>
+#endif /* __KERNEL__ */
+
+#define AUFS_VERSION	"5.7-20200622"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC	('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_AUFS_BRANCH_MAX_127
+typedef int8_t aufs_bindex_t;
+#define AUFS_BRANCH_MAX 127
+#else
+typedef int16_t aufs_bindex_t;
+#ifdef CONFIG_AUFS_BRANCH_MAX_511
+#define AUFS_BRANCH_MAX 511
+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
+#define AUFS_BRANCH_MAX 1023
+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
+#define AUFS_BRANCH_MAX 32767
+#endif
+#endif
+
+#ifndef AUFS_BRANCH_MAX
+#error unknown CONFIG_AUFS_BRANCH_MAX value
+#endif
+#endif /* __KERNEL__ */
+
+/* ---------------------------------------------------------------------- */
+
+#define AUFS_FSTYPE		AUFS_NAME
+
+#define AUFS_ROOT_INO		2
+#define AUFS_FIRST_INO		11
+
+#define AUFS_WH_PFX		".wh."
+#define AUFS_WH_PFX_LEN		((int)sizeof(AUFS_WH_PFX) - 1)
+#define AUFS_WH_TMP_LEN		4
+/* a limit for rmdir/rename a dir and copyup */
+#define AUFS_MAX_NAMELEN	(NAME_MAX \
+				- AUFS_WH_PFX_LEN * 2	/* doubly whiteouted */\
+				- 1			/* dot */\
+				- AUFS_WH_TMP_LEN)	/* hex */
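+/*
+ * Worked example (assuming NAME_MAX == 255, as on typical Linux systems):
+ * AUFS_WH_PFX_LEN is 4, so AUFS_MAX_NAMELEN = 255 - 4*2 - 1 - 4 = 242.
+ */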
+#define AUFS_XINO_FNAME		"." AUFS_NAME ".xino"
+#define AUFS_XINO_DEFPATH	"/tmp/" AUFS_XINO_FNAME
+#define AUFS_XINO_DEF_SEC	30 /* seconds */
+#define AUFS_XINO_DEF_TRUNC	45 /* percentage */
+#define AUFS_DIRWH_DEF		3
+#define AUFS_RDCACHE_DEF	10 /* seconds */
+#define AUFS_RDCACHE_MAX	3600 /* seconds */
+#define AUFS_RDBLK_DEF		512 /* bytes */
+#define AUFS_RDHASH_DEF		32
+#define AUFS_WKQ_NAME		AUFS_NAME "d"
+#define AUFS_MFS_DEF_SEC	30 /* seconds */
+#define AUFS_MFS_MAX_SEC	3600 /* seconds */
+#define AUFS_FHSM_CACHE_DEF_SEC	30 /* seconds */
+#define AUFS_PLINK_WARN		50 /* number of plinks in a single bucket */
+
+/* pseudo-link maintenance under /proc */
+#define AUFS_PLINK_MAINT_NAME	"plink_maint"
+#define AUFS_PLINK_MAINT_DIR	"fs/" AUFS_NAME
+#define AUFS_PLINK_MAINT_PATH	AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
+
+/* dirren, renamed dir */
+#define AUFS_DR_INFO_PFX	AUFS_WH_PFX ".dr."
+#define AUFS_DR_BRHINO_NAME	AUFS_WH_PFX "hino"
+/* whiteouted doubly */
+#define AUFS_WH_DR_INFO_PFX	AUFS_WH_PFX AUFS_DR_INFO_PFX
+#define AUFS_WH_DR_BRHINO	AUFS_WH_PFX AUFS_DR_BRHINO_NAME
+
+#define AUFS_DIROPQ_NAME	AUFS_WH_PFX ".opq" /* whiteouted doubly */
+#define AUFS_WH_DIROPQ		AUFS_WH_PFX AUFS_DIROPQ_NAME
+
+#define AUFS_BASE_NAME		AUFS_WH_PFX AUFS_NAME
+#define AUFS_PLINKDIR_NAME	AUFS_WH_PFX "plnk"
+#define AUFS_ORPHDIR_NAME	AUFS_WH_PFX "orph"
+
+/* doubly whiteouted */
+#define AUFS_WH_BASE		AUFS_WH_PFX AUFS_BASE_NAME
+#define AUFS_WH_PLINKDIR	AUFS_WH_PFX AUFS_PLINKDIR_NAME
+#define AUFS_WH_ORPHDIR		AUFS_WH_PFX AUFS_ORPHDIR_NAME
+
+/* branch permissions and attributes */
+#define AUFS_BRPERM_RW		"rw"
+#define AUFS_BRPERM_RO		"ro"
+#define AUFS_BRPERM_RR		"rr"
+#define AUFS_BRATTR_COO_REG	"coo_reg"
+#define AUFS_BRATTR_COO_ALL	"coo_all"
+#define AUFS_BRATTR_FHSM	"fhsm"
+#define AUFS_BRATTR_UNPIN	"unpin"
+#define AUFS_BRATTR_ICEX	"icex"
+#define AUFS_BRATTR_ICEX_SEC	"icexsec"
+#define AUFS_BRATTR_ICEX_SYS	"icexsys"
+#define AUFS_BRATTR_ICEX_TR	"icextr"
+#define AUFS_BRATTR_ICEX_USR	"icexusr"
+#define AUFS_BRATTR_ICEX_OTH	"icexoth"
+#define AUFS_BRRATTR_WH		"wh"
+#define AUFS_BRWATTR_NLWH	"nolwh"
+#define AUFS_BRWATTR_MOO	"moo"
+
+#define AuBrPerm_RW		1		/* writable, hardlinkable wh */
+#define AuBrPerm_RO		(1 << 1)	/* readonly */
+#define AuBrPerm_RR		(1 << 2)	/* natively readonly */
+#define AuBrPerm_Mask		(AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
+
+#define AuBrAttr_COO_REG	(1 << 3)	/* copy-up on open */
+#define AuBrAttr_COO_ALL	(1 << 4)
+#define AuBrAttr_COO_Mask	(AuBrAttr_COO_REG | AuBrAttr_COO_ALL)
+
+#define AuBrAttr_FHSM		(1 << 5)	/* file-based hsm */
+#define AuBrAttr_UNPIN		(1 << 6)	/* rename-able top dir of
+						   branch. meaningless since
+						   linux-3.18-rc1 */
+
+/* ignore error in copying XATTR */
+#define AuBrAttr_ICEX_SEC	(1 << 7)
+#define AuBrAttr_ICEX_SYS	(1 << 8)
+#define AuBrAttr_ICEX_TR	(1 << 9)
+#define AuBrAttr_ICEX_USR	(1 << 10)
+#define AuBrAttr_ICEX_OTH	(1 << 11)
+#define AuBrAttr_ICEX		(AuBrAttr_ICEX_SEC	\
+				 | AuBrAttr_ICEX_SYS	\
+				 | AuBrAttr_ICEX_TR	\
+				 | AuBrAttr_ICEX_USR	\
+				 | AuBrAttr_ICEX_OTH)
+
+#define AuBrRAttr_WH		(1 << 12)	/* whiteout-able */
+#define AuBrRAttr_Mask		AuBrRAttr_WH
+
+#define AuBrWAttr_NoLinkWH	(1 << 13)	/* un-hardlinkable whiteouts */
+#define AuBrWAttr_MOO		(1 << 14)	/* move-up on open */
+#define AuBrWAttr_Mask		(AuBrWAttr_NoLinkWH | AuBrWAttr_MOO)
+
+#define AuBrAttr_CMOO_Mask	(AuBrAttr_COO_Mask | AuBrWAttr_MOO)
+
+/* #warning test userspace */
+#ifdef __KERNEL__
+#ifndef CONFIG_AUFS_FHSM
+#undef AuBrAttr_FHSM
+#define AuBrAttr_FHSM		0
+#endif
+#ifndef CONFIG_AUFS_XATTR
+#undef	AuBrAttr_ICEX
+#define AuBrAttr_ICEX		0
+#undef	AuBrAttr_ICEX_SEC
+#define AuBrAttr_ICEX_SEC	0
+#undef	AuBrAttr_ICEX_SYS
+#define AuBrAttr_ICEX_SYS	0
+#undef	AuBrAttr_ICEX_TR
+#define AuBrAttr_ICEX_TR	0
+#undef	AuBrAttr_ICEX_USR
+#define AuBrAttr_ICEX_USR	0
+#undef	AuBrAttr_ICEX_OTH
+#define AuBrAttr_ICEX_OTH	0
+#endif
+#endif
+
+/* the longest combination */
+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR don't affect here */
+#define AuBrPermStrSz	sizeof(AUFS_BRPERM_RW			\
+			       "+" AUFS_BRATTR_COO_REG		\
+			       "+" AUFS_BRATTR_FHSM		\
+			       "+" AUFS_BRATTR_UNPIN		\
+			       "+" AUFS_BRATTR_ICEX_SEC		\
+			       "+" AUFS_BRATTR_ICEX_SYS		\
+			       "+" AUFS_BRATTR_ICEX_USR		\
+			       "+" AUFS_BRATTR_ICEX_OTH		\
+			       "+" AUFS_BRWATTR_NLWH)
+
+typedef struct {
+	char a[AuBrPermStrSz];
+} au_br_perm_str_t;
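+
+/*
+ * For reference, the longest string the buffer above must hold (per the
+ * AuBrPermStrSz expansion) looks like
+ * "rw+coo_reg+fhsm+unpin+icexsec+icexsys+icexusr+icexoth+nolwh",
+ * i.e. 59 characters plus the terminating NUL.
+ */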
+
+static inline int au_br_writable(int brperm)
+{
+	return brperm & AuBrPerm_RW;
+}
+
+static inline int au_br_whable(int brperm)
+{
+	return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
+}
+
+static inline int au_br_wh_linkable(int brperm)
+{
+	return !(brperm & AuBrWAttr_NoLinkWH);
+}
+
+static inline int au_br_cmoo(int brperm)
+{
+	return brperm & AuBrAttr_CMOO_Mask;
+}
+
+static inline int au_br_fhsm(int brperm)
+{
+	return brperm & AuBrAttr_FHSM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* ioctl */
+enum {
+	/* readdir in userspace */
+	AuCtl_RDU,
+	AuCtl_RDU_INO,
+
+	AuCtl_WBR_FD,	/* pathconf wrapper */
+	AuCtl_IBUSY,	/* busy inode */
+	AuCtl_MVDOWN,	/* move-down */
+	AuCtl_BR,	/* info about branches */
+	AuCtl_FHSM_FD	/* connection for fhsm */
+};
+
+/* borrowed from linux/include/linux/kernel.h */
+#ifndef ALIGN
+#ifdef _GNU_SOURCE
+#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
+#else
+#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
+#endif
+#define __ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))
+#endif
+
+/* borrowed from linux/include/linux/compiler-gcc3.h */
+#ifndef __aligned
+#define __aligned(x)			__attribute__((aligned(x)))
+#endif
+
+#ifdef __KERNEL__
+#ifndef __packed
+#define __packed			__attribute__((packed))
+#endif
+#endif
+
+struct au_rdu_cookie {
+	uint64_t	h_pos;
+	int16_t		bindex;
+	uint8_t		flags;
+	uint8_t		pad;
+	uint32_t	generation;
+} __aligned(8);
+
+struct au_rdu_ent {
+	uint64_t	ino;
+	int16_t		bindex;
+	uint8_t		type;
+	uint8_t		nlen;
+	uint8_t		wh;
+	char		name[];
+} __aligned(8);
+
+static inline int au_rdu_len(int nlen)
+{
+	/* include the terminating NULL */
+	return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
+		     sizeof(uint64_t));
+}
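+
+/*
+ * Example: on a 64-bit build where sizeof(struct au_rdu_ent) is 16 bytes,
+ * a 5-character name gives au_rdu_len(5) == ALIGN(16 + 5 + 1, 8) == 24.
+ */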
+
+union au_rdu_ent_ul {
+	struct au_rdu_ent __user	*e;
+	uint64_t			ul;
+};
+
+enum {
+	AufsCtlRduV_SZ,
+	AufsCtlRduV_End
+};
+
+struct aufs_rdu {
+	/* input */
+	union {
+		uint64_t	sz;	/* AuCtl_RDU */
+		uint64_t	nent;	/* AuCtl_RDU_INO */
+	};
+	union au_rdu_ent_ul	ent;
+	uint16_t		verify[AufsCtlRduV_End];
+
+	/* input/output */
+	uint32_t		blk;
+
+	/* output */
+	union au_rdu_ent_ul	tail;
+	/* number of entries which were added in a single call */
+	uint64_t		rent;
+	uint8_t			full;
+	uint8_t			shwh;
+
+	struct au_rdu_cookie	cookie;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* dirren. the branch is identified by the filename that contains this */
+struct au_drinfo {
+	uint64_t ino;
+	union {
+		uint8_t oldnamelen;
+		uint64_t _padding;
+	};
+	uint8_t oldname[];
+} __aligned(8);
+
+struct au_drinfo_fdata {
+	uint32_t magic;
+	struct au_drinfo drinfo;
+} __aligned(8);
+
+#define AUFS_DRINFO_MAGIC_V1	('a' << 24 | 'd' << 16 | 'r' << 8 | 0x01)
+/* future */
+#define AUFS_DRINFO_MAGIC_V2	('a' << 24 | 'd' << 16 | 'r' << 8 | 0x02)
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_wbr_fd {
+	uint32_t	oflags;
+	int16_t		brid;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_ibusy {
+	uint64_t	ino, h_ino;
+	int16_t		bindex;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* error code for move-down */
+/* the actual message strings are implemented in aufs-util.git */
+enum {
+	EAU_MVDOWN_OPAQUE = 1,
+	EAU_MVDOWN_WHITEOUT,
+	EAU_MVDOWN_UPPER,
+	EAU_MVDOWN_BOTTOM,
+	EAU_MVDOWN_NOUPPER,
+	EAU_MVDOWN_NOLOWERBR,
+	EAU_Last
+};
+
+/* flags for move-down */
+#define AUFS_MVDOWN_DMSG	1
+#define AUFS_MVDOWN_OWLOWER	(1 << 1)	/* overwrite lower */
+#define AUFS_MVDOWN_KUPPER	(1 << 2)	/* keep upper */
+#define AUFS_MVDOWN_ROLOWER	(1 << 3)	/* do even if lower is RO */
+#define AUFS_MVDOWN_ROLOWER_R	(1 << 4)	/* did on lower RO */
+#define AUFS_MVDOWN_ROUPPER	(1 << 5)	/* do even if upper is RO */
+#define AUFS_MVDOWN_ROUPPER_R	(1 << 6)	/* did on upper RO */
+#define AUFS_MVDOWN_BRID_UPPER	(1 << 7)	/* upper brid */
+#define AUFS_MVDOWN_BRID_LOWER	(1 << 8)	/* lower brid */
+#define AUFS_MVDOWN_FHSM_LOWER	(1 << 9)	/* find fhsm attr for lower */
+#define AUFS_MVDOWN_STFS	(1 << 10)	/* req. stfs */
+#define AUFS_MVDOWN_STFS_FAILED	(1 << 11)	/* output: stfs is unusable */
+#define AUFS_MVDOWN_BOTTOM	(1 << 12)	/* output: no more lowers */
+
+/* index for move-down */
+enum {
+	AUFS_MVDOWN_UPPER,
+	AUFS_MVDOWN_LOWER,
+	AUFS_MVDOWN_NARRAY
+};
+
+/*
+ * additional info for move-down:
+ * the number of free blocks and inodes.
+ * A subset of struct kstatfs, but smaller and always 64-bit.
+ */
+struct aufs_stfs {
+	uint64_t	f_blocks;
+	uint64_t	f_bavail;
+	uint64_t	f_files;
+	uint64_t	f_ffree;
+};
+
+struct aufs_stbr {
+	int16_t			brid;	/* optional input */
+	int16_t			bindex;	/* output */
+	struct aufs_stfs	stfs;	/* output when AUFS_MVDOWN_STFS set */
+} __aligned(8);
+
+struct aufs_mvdown {
+	uint32_t		flags;			/* input/output */
+	struct aufs_stbr	stbr[AUFS_MVDOWN_NARRAY]; /* input/output */
+	int8_t			au_errno;		/* output */
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+union aufs_brinfo {
+	/* PATH_MAX may differ between kernel-space and user-space */
+	char	_spacer[4096];
+	struct {
+		int16_t	id;
+		int	perm;
+		char	path[];
+	};
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+#define AuCtlType		'A'
+#define AUFS_CTL_RDU		_IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
+#define AUFS_CTL_RDU_INO	_IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
+#define AUFS_CTL_WBR_FD		_IOW(AuCtlType, AuCtl_WBR_FD, \
+				     struct aufs_wbr_fd)
+#define AUFS_CTL_IBUSY		_IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
+#define AUFS_CTL_MVDOWN		_IOWR(AuCtlType, AuCtl_MVDOWN, \
+				      struct aufs_mvdown)
+#define AUFS_CTL_BRINFO		_IOW(AuCtlType, AuCtl_BR, union aufs_brinfo)
+#define AUFS_CTL_FHSM_FD	_IOW(AuCtlType, AuCtl_FHSM_FD, int)
+
+#endif /* __AUFS_TYPE_H__ */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
new file mode 100644
index 00000000000..548e732f33a
--- /dev/null
+++ b/include/uapi/linux/can/isotp.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * socketcan/can/isotp.h
+ *
+ * Definitions for isotp CAN sockets
+ *
+ * Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Copyright (c) 2008 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#ifndef CAN_ISOTP_H
+#define CAN_ISOTP_H
+
+#include <linux/can.h>
+
+#define SOL_CAN_ISOTP (SOL_CAN_BASE + CAN_ISOTP)
+
+/* for socket options affecting the socket (not the global system) */
+
+#define CAN_ISOTP_OPTS		1	/* pass struct can_isotp_options */
+
+#define CAN_ISOTP_RECV_FC	2	/* pass struct can_isotp_fc_options */
+
+/* sockopts to force stmin timer values for protocol regression tests */
+
+#define CAN_ISOTP_TX_STMIN	3	/* pass __u32 value in nano secs    */
+					/* use this time instead of value   */
+					/* provided in FC from the receiver */
+
+#define CAN_ISOTP_RX_STMIN	4	/* pass __u32 value in nano secs   */
+					/* ignore received CF frames which */
+					/* timestamps differ less than val */
+
+#define CAN_ISOTP_LL_OPTS	5	/* pass struct can_isotp_ll_options */
+
+struct can_isotp_options {
+
+	__u32 flags;		/* set flags for isotp behaviour.	*/
+				/* __u32 value : flags see below	*/
+
+	__u32 frame_txtime;	/* frame transmission time (N_As/N_Ar)	*/
+				/* __u32 value : time in nano secs	*/
+
+	__u8  ext_address;	/* set address for extended addressing	*/
+				/* __u8 value : extended address	*/
+
+	__u8  txpad_content;	/* set content of padding byte (tx)	*/
+				/* __u8 value : content	on tx path	*/
+
+	__u8  rxpad_content;	/* set content of padding byte (rx)	*/
+				/* __u8 value : content	on rx path	*/
+
+	__u8  rx_ext_address;	/* set address for extended addressing	*/
+				/* __u8 value : extended address (rx)	*/
+};
+
+struct can_isotp_fc_options {
+
+	__u8  bs;		/* blocksize provided in FC frame	*/
+				/* __u8 value : blocksize. 0 = off	*/
+
+	__u8  stmin;		/* separation time provided in FC frame	*/
+				/* __u8 value :				*/
+				/* 0x00 - 0x7F : 0 - 127 ms		*/
+				/* 0x80 - 0xF0 : reserved		*/
+				/* 0xF1 - 0xF9 : 100 us - 900 us	*/
+				/* 0xFA - 0xFF : reserved		*/
+
+	__u8  wftmax;		/* max. number of wait frame transmiss.	*/
+				/* __u8 value : 0 = omit FC N_PDU WT	*/
+};
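+
+/*
+ * Example reading of the stmin encoding above: stmin = 0x0A requests a
+ * 10 ms separation time between consecutive frames, while stmin = 0xF3
+ * requests 300 us.
+ */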
+
+struct can_isotp_ll_options {
+
+	__u8  mtu;		/* generated & accepted CAN frame type	*/
+				/* __u8 value :				*/
+				/* CAN_MTU   (16) -> standard CAN 2.0	*/
+				/* CANFD_MTU (72) -> CAN FD frame	*/
+
+	__u8  tx_dl;		/* tx link layer data length in bytes	*/
+				/* (configured maximum payload length)	*/
+				/* __u8 value : 8,12,16,20,24,32,48,64	*/
+				/* => rx path supports all LL_DL values */
+
+	__u8  tx_flags;		/* set into struct canfd_frame.flags	*/
+				/* at frame creation: e.g. CANFD_BRS	*/
+				/* Obsolete when the BRS flag is fixed	*/
+				/* by the CAN netdriver configuration	*/
+};
+
+/* flags for isotp behaviour */
+
+#define CAN_ISOTP_LISTEN_MODE	0x001	/* listen only (do not send FC) */
+#define CAN_ISOTP_EXTEND_ADDR	0x002	/* enable extended addressing */
+#define CAN_ISOTP_TX_PADDING	0x004	/* enable CAN frame padding tx path */
+#define CAN_ISOTP_RX_PADDING	0x008	/* enable CAN frame padding rx path */
+#define CAN_ISOTP_CHK_PAD_LEN	0x010	/* check received CAN frame padding */
+#define CAN_ISOTP_CHK_PAD_DATA	0x020	/* check received CAN frame padding */
+#define CAN_ISOTP_HALF_DUPLEX	0x040	/* half duplex error state handling */
+#define CAN_ISOTP_FORCE_TXSTMIN	0x080	/* ignore stmin from received FC */
+#define CAN_ISOTP_FORCE_RXSTMIN	0x100	/* ignore CFs depending on rx stmin */
+#define CAN_ISOTP_RX_EXT_ADDR	0x200	/* different rx extended addressing */
+
+
+/* default values */
+
+#define CAN_ISOTP_DEFAULT_FLAGS		0
+#define CAN_ISOTP_DEFAULT_EXT_ADDRESS	0x00
+#define CAN_ISOTP_DEFAULT_PAD_CONTENT	0xCC /* prevent bit-stuffing */
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME	0
+#define CAN_ISOTP_DEFAULT_RECV_BS	0
+#define CAN_ISOTP_DEFAULT_RECV_STMIN	0x00
+#define CAN_ISOTP_DEFAULT_RECV_WFTMAX	0
+
+#define CAN_ISOTP_DEFAULT_LL_MTU	CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL	CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS	0
+
+/*
+ * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
+ *
+ * The Linux kernel implementation of CAN_ISOTP can safely be assumed
+ * to be capable of running with BS=0, STmin=0 and WFTmax=0.
+ * But since we want to be able to behave like a commonly available ECU,
+ * these default settings can be changed via sockopts.
+ * For that reason the STmin value is intentionally _not_ checked for
+ * consistency and is copied directly into the flow control (FC) frame.
+ *
+ */
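+
+/*
+ * Illustrative userspace sketch (not part of this header): opening an
+ * ISO-TP socket and overriding the receive flow-control parameters via the
+ * CAN_ISOTP_RECV_FC sockopt defined above.  The interface name and CAN IDs
+ * are arbitrary examples; <linux/can.h>, <net/if.h> and <sys/socket.h>
+ * are assumed to be included.
+ *
+ *	struct sockaddr_can addr = { .can_family = AF_CAN };
+ *	struct can_isotp_fc_options fc = { .bs = 8, .stmin = 0x0A };
+ *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
+ *
+ *	setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RECV_FC, &fc, sizeof(fc));
+ *	addr.can_ifindex = if_nametoindex("can0");
+ *	addr.can_addr.tp.tx_id = 0x712;
+ *	addr.can_addr.tp.rx_id = 0x77a;
+ *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ *	write(s, data, len);
+ *	read(s, buf, sizeof(buf));
+ */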
+
+#endif
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index a89eb0accd5..a3e760886b8 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -21,6 +21,7 @@
 #define FUTEX_WAKE_BITSET	10
 #define FUTEX_WAIT_REQUEUE_PI	11
 #define FUTEX_CMP_REQUEUE_PI	12
+#define FUTEX_WAIT_MULTIPLE	31
 
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CLOCK_REALTIME	256
@@ -40,6 +41,8 @@
 					 FUTEX_PRIVATE_FLAG)
 #define FUTEX_CMP_REQUEUE_PI_PRIVATE	(FUTEX_CMP_REQUEUE_PI | \
 					 FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_MULTIPLE_PRIVATE	(FUTEX_WAIT_MULTIPLE | \
+					 FUTEX_PRIVATE_FLAG)
 
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
@@ -150,4 +153,21 @@ struct robust_list_head {
   (((op & 0xf) << 28) | ((cmp & 0xf) << 24)		\
    | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
 
+/*
+ * Maximum number of multiple futexes to wait for
+ */
+#define FUTEX_MULTIPLE_MAX_COUNT	128
+
+/**
+ * struct futex_wait_block - Block of futexes to be waited for
+ * @uaddr:	User address of the futex
+ * @val:	Futex value expected by userspace
+ * @bitset:	Bitset for the optional bitmasked wakeup
+ */
+struct futex_wait_block {
+	__u32 __user *uaddr;
+	__u32 val;
+	__u32 bitset;
+};
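+
+/*
+ * Illustrative userspace sketch (based on the FUTEX_WAIT_MULTIPLE handling
+ * added to kernel/futex.c by this patch; not a stable ABI description):
+ * the array of futex_wait_block is passed as the futex address, the number
+ * of blocks as "val", plus an optional relative timeout.  The return value
+ * is a hint to the index of the woken futex, or -1 with errno set.
+ *
+ *	uint32_t f1 = 0, f2 = 0;
+ *	struct futex_wait_block blocks[2] = {
+ *		{ .uaddr = &f1, .val = 0, .bitset = FUTEX_BITSET_MATCH_ANY },
+ *		{ .uaddr = &f2, .val = 0, .bitset = FUTEX_BITSET_MATCH_ANY },
+ *	};
+ *	struct timespec ts = { .tv_sec = 1 };
+ *
+ *	ret = syscall(SYS_futex, blocks, FUTEX_WAIT_MULTIPLE_PRIVATE,
+ *		      2, &ts, NULL, 0);
+ */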
+
 #endif /* _UAPI_LINUX_FUTEX_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 2b691161830..afbe6038224 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -183,18 +183,27 @@
  *
  * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag drivers
  * can indicate they support offloading EAPOL handshakes for WPA/WPA2
- * preshared key authentication. In %NL80211_CMD_CONNECT the preshared
- * key should be specified using %NL80211_ATTR_PMK. Drivers supporting
- * this offload may reject the %NL80211_CMD_CONNECT when no preshared
- * key material is provided, for example when that driver does not
- * support setting the temporal keys through %CMD_NEW_KEY.
+ * preshared key authentication in station mode. In %NL80211_CMD_CONNECT
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_CONNECT when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
  *
  * Similarly @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be
  * set by drivers indicating offload support of the PTK/GTK EAPOL
- * handshakes during 802.1X authentication. In order to use the offload
- * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS
- * attribute flag. Drivers supporting this offload may reject the
- * %NL80211_CMD_CONNECT when the attribute flag is not present.
+ * handshakes during 802.1X authentication in station mode. In order to
+ * use the offload the %NL80211_CMD_CONNECT should have
+ * %NL80211_ATTR_WANT_1X_4WAY_HS attribute flag. Drivers supporting this
+ * offload may reject the %NL80211_CMD_CONNECT when the attribute flag is
+ * not present.
+ *
+ * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag drivers
+ * can indicate they support offloading EAPOL handshakes for WPA/WPA2
+ * preshared key authentication in AP mode. In %NL80211_CMD_START_AP
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_START_AP when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
  *
  * For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK
  * using %NL80211_CMD_SET_PMK. For offloaded FT support also
@@ -243,9 +252,10 @@
  * DOC: SAE authentication offload
  *
  * By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they
- * support offloading SAE authentication for WPA3-Personal networks. In
- * %NL80211_CMD_CONNECT the password for SAE should be specified using
- * %NL80211_ATTR_SAE_PASSWORD.
+ * support offloading SAE authentication for WPA3-Personal networks. The
+ * password for SAE should be specified using %NL80211_ATTR_SAE_PASSWORD in
+ * %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP for station and AP mode
+ * respectively.
  */
 
 /**
@@ -2338,10 +2348,11 @@ enum nl80211_commands {
  *
  * @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with
  *	%NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID.
- *	For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way
- *	handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is
- *	used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute
- *	specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well.
+ *	For %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP it is used to provide
+ *	PSK for offloading 4-way handshake for WPA/WPA2-PSK networks. For 802.1X
+ *	authentication it is used with %NL80211_CMD_SET_PMK. For offloaded FT
+ *	support this attribute specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME
+ *	is included as well.
  *
  * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
  *	indicate that it supports multiple active scheduled scan requests.
@@ -2352,7 +2363,10 @@ enum nl80211_commands {
  *	in %NL80211_CMD_CONNECT to indicate that for 802.1X authentication it
  *	wants to use the supported offload of the 4-way handshake.
  * @NL80211_ATTR_PMKR0_NAME: PMK-R0 Name for offloaded FT.
- * @NL80211_ATTR_PORT_AUTHORIZED: (reserved)
+ * @NL80211_ATTR_PORT_AUTHORIZED: flag attribute used in %NL80211_CMD_ROAMED
+ *	notification indicating that 802.1X authentication was done by
+ *	the driver or is not needed (because roaming used the Fast Transition
+ *	protocol).
  *
  * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external
  *     authentication operation (u32 attribute with an
@@ -5662,7 +5676,8 @@ enum nl80211_feature_flags {
  *	to a station.
  *
  * @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in
- *	station mode (SAE password is passed as part of the connect command).
+ *	station mode (SAE password is passed as part of the connect command)
+ *	or AP mode (SAE password is passed as part of the start AP command).
  *
  * @NL80211_EXT_FEATURE_VLAN_OFFLOAD: The driver supports a single netdev
  *	with VLAN tagged frames and separate VLAN-specific netdevs added using
@@ -5684,6 +5699,10 @@ enum nl80211_feature_flags {
  * @NL80211_EXT_FEATURE_DEL_IBSS_STA: The driver supports removing stations
  *      in IBSS mode, essentially by dropping their state.
  *
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK: Device wants to do 4-way
+ *	handshake with PSK in AP mode (PSK is passed as part of the start AP
+ *	command).
+ *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
  */
@@ -5731,6 +5750,7 @@ enum nl80211_ext_feature_index {
 	NL80211_EXT_FEATURE_SAE_OFFLOAD,
 	NL80211_EXT_FEATURE_VLAN_OFFLOAD,
 	NL80211_EXT_FEATURE_AQL,
+	NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
 	NL80211_EXT_FEATURE_BEACON_PROTECTION,
 	NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH,
 	NL80211_EXT_FEATURE_PROTECTED_TWT,
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab..f48c5c5da65 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -115,9 +115,16 @@ struct clone_args {
 #define SCHED_FIFO		1
 #define SCHED_RR		2
 #define SCHED_BATCH		3
-/* SCHED_ISO: reserved but not implemented yet */
+/* SCHED_ISO: Implemented on MuQSS only */
 #define SCHED_IDLE		5
+#ifdef CONFIG_SCHED_MUQSS
+#define SCHED_ISO		4
+#define SCHED_IDLEPRIO		SCHED_IDLE
+#define SCHED_MAX		(SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
+#else /* CONFIG_SCHED_MUQSS */
 #define SCHED_DEADLINE		6
+#endif /* CONFIG_SCHED_MUQSS */
 
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index c27289fd619..f8a8d6b3c52 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -63,6 +63,7 @@ enum vmmdev_request_type {
 	VMMDEVREQ_SET_GUEST_CAPABILITIES       = 56,
 	VMMDEVREQ_VIDEMODE_SUPPORTED2          = 57, /* since version 3.2.0 */
 	VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX     = 80, /* since version 4.2.4 */
+	VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81,
 	VMMDEVREQ_HGCM_CONNECT                 = 60,
 	VMMDEVREQ_HGCM_DISCONNECT              = 61,
 	VMMDEVREQ_HGCM_CALL32                  = 62,
@@ -92,6 +93,8 @@ enum vmmdev_request_type {
 	VMMDEVREQ_WRITE_COREDUMP               = 218,
 	VMMDEVREQ_GUEST_HEARTBEAT              = 219,
 	VMMDEVREQ_HEARTBEAT_CONFIGURE          = 220,
+	VMMDEVREQ_NT_BUG_CHECK                 = 221,
+	VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222,
 	/* Ensure the enum is a 32 bit data-type */
 	VMMDEVREQ_SIZEHACK                     = 0x7fffffff
 };
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index f79d7abe27d..15125f6ec60 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
 	_IOWR('V', 12, struct vbg_ioctl_change_filter)
 
 
+/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */
+struct vbg_ioctl_acquire_guest_caps {
+	/** The header. */
+	struct vbg_ioctl_hdr hdr;
+	union {
+		struct {
+			/** Flags (VBGL_IOC_AGC_FLAGS_XXX). */
+			__u32 flags;
+			/** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
+			__u32 or_mask;
+			/** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
+			__u32 not_mask;
+		} in;
+	} u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12);
+
+#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE		0x00000001
+#define VBGL_IOC_AGC_FLAGS_VALID_MASK			0x00000001
+
+#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \
+	_IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps)
+
+
 /** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
 struct vbg_ioctl_set_guest_caps {
 	/** The header. */
diff --git a/init/Kconfig b/init/Kconfig
index 74a5ac65644..80241e04d4a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -61,6 +61,50 @@ config THREAD_INFO_IN_TASK
 
 menu "General setup"
 
+config SCHED_MUQSS
+	bool "MuQSS cpu scheduler"
+	select HIGH_RES_TIMERS
+	default n
+	---help---
+	  The Multiple Queue Skiplist Scheduler (MuQSS) provides excellent
+	  interactivity and responsiveness on the desktop, and highly scalable,
+	  deterministic low latency on any hardware.
+
+config PCK_INTERACTIVE
+	bool "Tune kernel for interactivity"
+	default y
+	help
+	  Tunes the kernel for responsiveness at the cost of throughput and power usage.
+
+	  --- Block Layer ----------------------------------------
+
+	    Default scheduler.........: mq-deadline -> bfq
+
+	  --- Virtual Memory Subsystem ---------------------------
+
+	    Mem dirty before bg writeback..:  10 %  ->  20 %
+	    Mem dirty before sync writeback:  20 %  ->  50 %
+	    Background-reclaim hugepages...:    no  -> yes
+
+	  --- CFS CPU Scheduler ----------------------------------
+
+	    Scheduling latency.............:   6    ->   4    ms
+	    Minimal granularity............:   0.75 ->   0.4  ms
+	    Wakeup granularity.............:   1    ->   0.5  ms
+	    CPU migration cost.............:   0.5  ->   0.25 ms
+	    Bandwidth slice size...........:   5    ->   3    ms
+	    Task rebalancing threshold.....:   32   ->   128
+	    Ondemand sampling down factor..:   1    ->   5
+
+	  --- MuQSS CPU Scheduler --------------------------------
+
+	    Scheduling interval............:   6    ->   2    ms
+	    ISO task max realtime use......:  70 %  ->  25 %
+	    Yield type.....................:   1    ->   0
+	    Ondemand default freq up thresh:  80    ->  40
+	    Ondemand micro freq up thresh..:  95    ->  45
+	    Ondemand sampling down factor..:   1    ->   5
+
 config BROKEN
 	bool
 
@@ -440,7 +484,7 @@ config HAVE_SCHED_AVG_IRQ
 
 config SCHED_THERMAL_PRESSURE
 	bool "Enable periodic averaging of thermal pressure"
-	depends on SMP
+	depends on SMP && !SCHED_MUQSS
 
 config BSD_PROCESS_ACCT
 	bool "BSD Process Accounting"
@@ -777,6 +821,7 @@ config NUMA_BALANCING
 	depends on ARCH_SUPPORTS_NUMA_BALANCING
 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
 	depends on SMP && NUMA && MIGRATION
+	depends on !SCHED_MUQSS
 	help
 	  This option adds support for automatic NUMA aware memory/task placement.
 	  The mechanism is quite primitive and is based on migrating memory when
@@ -876,9 +921,13 @@ menuconfig CGROUP_SCHED
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups. It uses cgroups to group
-	  tasks.
+	  tasks. In combination with MuQSS this is purely a stub that creates
+	  the files associated with the CPU controller cgroup; most of the
+	  controls do nothing. This is useful for environments and
+	  applications that only work if this control group is
+	  present.
 
-if CGROUP_SCHED
+if CGROUP_SCHED && !SCHED_MUQSS
 config FAIR_GROUP_SCHED
 	bool "Group scheduling for SCHED_OTHER"
 	depends on CGROUP_SCHED
@@ -1007,6 +1056,7 @@ config CGROUP_DEVICE
 
 config CGROUP_CPUACCT
 	bool "Simple CPU accounting controller"
+	depends on !SCHED_MUQSS
 	help
 	  Provides a simple controller for monitoring the
 	  total CPU consumed by the tasks in a cgroup.
@@ -1102,6 +1152,22 @@ config USER_NS
 
 	  If unsure, say N.
 
+config USER_NS_UNPRIVILEGED
+	bool "Allow unprivileged users to create namespaces"
+	default y
+	depends on USER_NS
+	help
+	  When disabled, unprivileged users will not be able to create
+	  new namespaces. Allowing users to create their own namespaces
+	  has been part of several recent local privilege escalation
+	  exploits, so if you need user namespaces but are
+	  security-conscious, you may want to disable this.
+
+	  This setting can be overridden at runtime via the
+	  kernel.unprivileged_userns_clone sysctl.
+
+	  If unsure, say Y.
+
 config PID_NS
 	bool "PID Namespaces"
 	default y
@@ -1134,6 +1200,7 @@ config CHECKPOINT_RESTORE
 
 config SCHED_AUTOGROUP
 	bool "Automatic process group scheduling"
+	depends on !SCHED_MUQSS
 	select CGROUPS
 	select CGROUP_SCHED
 	select FAIR_GROUP_SCHED
@@ -1240,7 +1307,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
 
 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
 	bool "Optimize more for performance (-O3)"
-	depends on ARC
 	help
 	  Choosing this option will pass "-O3" to your compiler to optimize
 	  the kernel yet more for performance.
diff --git a/init/init_task.c b/init/init_task.c
index bd403ed3e41..5df65b2578e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -67,9 +67,17 @@ struct task_struct init_task
 	.stack		= init_stack,
 	.usage		= REFCOUNT_INIT(2),
 	.flags		= PF_KTHREAD,
+#ifdef CONFIG_SCHED_MUQSS
+	.prio		= NORMAL_PRIO,
+	.static_prio	= MAX_PRIO - 20,
+	.normal_prio	= NORMAL_PRIO,
+	.deadline	= 0,
+	.time_slice	= 1000000,
+#else
 	.prio		= MAX_PRIO - 20,
 	.static_prio	= MAX_PRIO - 20,
 	.normal_prio	= MAX_PRIO - 20,
+#endif
 	.policy		= SCHED_NORMAL,
 	.cpus_ptr	= &init_task.cpus_mask,
 	.cpus_mask	= CPU_MASK_ALL,
@@ -79,6 +87,7 @@ struct task_struct init_task
 	.restart_block	= {
 		.fn = do_no_restart_syscall,
 	},
+#ifndef CONFIG_SCHED_MUQSS
 	.se		= {
 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
 	},
@@ -86,6 +95,7 @@ struct task_struct init_task
 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
 		.time_slice	= RR_TIMESLICE,
 	},
+#endif
 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
 #ifdef CONFIG_SMP
 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
diff --git a/init/main.c b/init/main.c
index 03371976d38..63243a24de9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1411,6 +1411,8 @@ static int __ref kernel_init(void *unused)
 
 	rcu_end_inkernel_boot();
 
+	print_scheduler_version();
+
 	if (ramdisk_execute_command) {
 		ret = run_init_process(ramdisk_execute_command);
 		if (!ret)
diff --git a/kernel/Kconfig.MuQSS b/kernel/Kconfig.MuQSS
new file mode 100644
index 00000000000..a6a58781ef9
--- /dev/null
+++ b/kernel/Kconfig.MuQSS
@@ -0,0 +1,105 @@
+choice
+	prompt "CPU scheduler runqueue sharing"
+	default RQ_MC if SCHED_MUQSS
+	default RQ_NONE
+
+config RQ_NONE
+	bool "No sharing"
+	help
+	  This is the default behaviour where the CPU scheduler has one runqueue
+	  per CPU, whether it is a physical or logical CPU (hyperthread).
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=none
+
+	  If unsure, say N.
+
+config RQ_SMT
+	bool "SMT (hyperthread) siblings"
+	depends on SCHED_SMT && SCHED_MUQSS
+
+	help
+	  With this option enabled, the CPU scheduler will have one runqueue
+	  shared by SMT (hyperthread) siblings. As these logical cores share
+	  one physical core, sharing the runqueue resource can lead to decreased
+	  overhead, lower latency and higher throughput.
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=smt
+
+	  If unsure, say N.
+
+config RQ_MC
+	bool "Multicore siblings"
+	depends on SCHED_MC && SCHED_MUQSS
+	help
+	  With this option enabled, the CPU scheduler will have one runqueue
+	  shared by multicore siblings in addition to any SMT siblings.
+	  As these physical cores share caches, sharing the runqueue resource
+	  will lead to lower latency, but its effects on overhead and throughput
+	  are less predictable. As a general rule, 6 or fewer cores will likely
+	  benefit from this, while larger CPUs will only derive a latency
+	  benefit. If your workloads are primarily single threaded, this will
+	  possibly worsen throughput. If you are only concerned about latency
+	  then enable this regardless of how many cores you have.
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=mc
+
+	  If unsure, say Y.
+
+config RQ_MC_LLC
+	bool "Multicore siblings (LLC)"
+	depends on SCHED_MC && SCHED_MUQSS
+	help
+	  With this option enabled, the CPU scheduler will behave similarly to
+	  "Multicore siblings", but will also take the LLC cache into
+	  account when scheduling tasks.
+	  This may benefit CPUs with multiple LLC caches, such as Ryzen
+	  and Xeon CPUs.
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=llc
+
+	  If unsure, say N.
+
+config RQ_SMP
+	bool "Symmetric Multi-Processing"
+	depends on SMP && SCHED_MUQSS
+	help
+	  With this option enabled, the CPU scheduler will have one runqueue
+	  shared by all physical CPUs unless they are on separate NUMA nodes.
+	  As physical CPUs usually do not share resources, sharing the runqueue
+	  will normally worsen throughput but improve latency. If you only
+	  care about latency enable this.
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=smp
+
+	  If unsure, say N.
+
+config RQ_ALL
+	bool "NUMA"
+	depends on SMP && SCHED_MUQSS
+	help
+	  With this option enabled, the CPU scheduler will have one runqueue
+	  regardless of the architecture configuration, including across NUMA
+	  nodes. This can substantially decrease throughput in NUMA
+	  configurations, but light NUMA designs will not be dramatically
+	  affected. This option should only be chosen if latency is the prime
+	  concern.
+
+	  This can still be selected at boot time with the boot parameter
+	  rqshare=all
+
+	  If unsure, say N.
+endchoice
+
+config SHARERQ
+	int
+	default 0 if RQ_NONE
+	default 1 if RQ_SMT
+	default 2 if RQ_MC
+	default 3 if RQ_MC_LLC
+	default 4 if RQ_SMP
+	default 5 if RQ_ALL
diff --git a/kernel/Makefile b/kernel/Makefile
index 4cb4130ced3..b11afae9eea 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y     = fork.o exec_domain.o panic.o \
 	    extable.o params.o \
 	    kthread.o sys_ni.o nsproxy.o \
 	    notifier.o ksysfs.o cred.o reboot.o \
-	    async.o range.o smpboot.o ucount.o
+	    async.o range.o smpboot.o ucount.o skip_list.o
 
 obj-$(CONFIG_MODULES) += kmod.o
 obj-$(CONFIG_MULTIUSER) += groups.o
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 244d3054437..90b77028233 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1565,7 +1565,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
 		.startup.single		= bringup_cpu,
+#ifdef CONFIG_SCHED_MUQSS
+		.teardown.single	= NULL,
+#else
 		.teardown.single	= finish_cpu,
+#endif
 		.cant_stop		= true,
 	},
 	/* Final state before CPU kills itself */
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 27725754ac9..769d773c718 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	 */
 	t1 = tsk->sched_info.pcount;
 	t2 = tsk->sched_info.run_delay;
-	t3 = tsk->se.sum_exec_runtime;
+	t3 = tsk_seruntime(tsk);
 
 	d->cpu_count += t1;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index d56fe51bdf0..3aa2c1e822b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
 			sig->curr_target = next_thread(tsk);
 	}
 
-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+	add_device_randomness((const void*) &tsk_seruntime(tsk),
 			      sizeof(unsigned long long));
 
 	/*
@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
 	sig->inblock += task_io_get_inblock(tsk);
 	sig->oublock += task_io_get_oublock(tsk);
 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+	sig->sum_sched_runtime += tsk_seruntime(tsk);
 	sig->nr_threads--;
 	__unhash_process(tsk, group_dead);
 	write_sequnlock(&sig->stats_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index 48ed22774ef..cb8aa8a8561 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -106,6 +106,11 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/task.h>
+#ifdef CONFIG_USER_NS
+extern int unprivileged_userns_clone;
+#else
+#define unprivileged_userns_clone 0
+#endif
 
 /*
  * Minimum number of threads to boot the kernel
@@ -568,7 +573,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 			struct inode *inode = file_inode(file);
 			struct address_space *mapping = file->f_mapping;
 
-			get_file(file);
+			vma_get_file(tmp);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
 			i_mmap_lock_write(mapping);
@@ -601,7 +606,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
 		rb_link = &tmp->vm_rb.rb_right;
 		rb_parent = &tmp->vm_rb;
-
+		uksm_vma_add_new(tmp);
 		mm->map_count++;
 		if (!(tmp->vm_flags & VM_WIPEONFORK))
 			retval = copy_page_range(mm, oldmm, mpnt);
@@ -1848,6 +1853,10 @@ static __latent_entropy struct task_struct *copy_process(
 	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
 		return ERR_PTR(-EINVAL);
 
+	if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
+		if (!capable(CAP_SYS_ADMIN))
+			return ERR_PTR(-EPERM);
+
 	/*
 	 * Thread groups must share signals as well, and detached threads
 	 * can only be started up within the thread group.
@@ -2948,6 +2957,12 @@ int ksys_unshare(unsigned long unshare_flags)
 	if (unshare_flags & CLONE_NEWNS)
 		unshare_flags |= CLONE_FS;
 
+	if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
+		err = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto bad_unshare_out;
+	}
+
 	err = check_unshare_flags(unshare_flags);
 	if (err)
 		goto bad_unshare_out;
diff --git a/kernel/futex.c b/kernel/futex.c
index b59532862bc..2fbfb0b808c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -214,6 +214,8 @@ struct futex_pi_state {
  * @rt_waiter:		rt_waiter storage for use with requeue_pi
  * @requeue_pi_key:	the requeue_pi target futex key
  * @bitset:		bitset for the optional bitmasked wakeup
+ * @uaddr:		userspace address of the futex
+ * @uval:		expected value of the futex
  *
  * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
@@ -236,6 +238,8 @@ struct futex_q {
 	struct rt_mutex_waiter *rt_waiter;
 	union futex_key *requeue_pi_key;
 	u32 bitset;
+	u32 __user *uaddr;
+	u32 uval;
 } __randomize_layout;
 
 static const struct futex_q futex_q_init = {
@@ -2346,6 +2350,29 @@ static int unqueue_me(struct futex_q *q)
 	return ret;
 }
 
+/**
+ * unqueue_multiple() - Remove several futexes from their futex_hash_bucket
+ * @q:	The list of futexes to unqueue
+ * @count: Number of futexes in the list
+ *
+ * Helper to unqueue a list of futexes. This can't fail.
+ *
+ * Return:
+ *  - >=0 - Index of the last futex that was awoken;
+ *  - -1  - If no futex was awoken
+ */
+static int unqueue_multiple(struct futex_q *q, int count)
+{
+	int ret = -1;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (!unqueue_me(&q[i]))
+			ret = i;
+	}
+	return ret;
+}
+
 /*
  * PI futexes can not be requeued and must remove themself from the
  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
@@ -2709,6 +2736,211 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
 	return ret;
 }
 
+/**
+ * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes
+ * @qs:		The corresponding futex list
+ * @count:	The size of the lists
+ * @flags:	Futex flags (FLAGS_SHARED, etc.)
+ * @awaken:	Index of the last awoken futex
+ *
+ * Prepare multiple futexes in a single step and enqueue them. This may fail if
+ * the futex list is invalid or if any futex was already awoken. On success the
+ * task is ready to interruptible sleep.
+ *
+ * Return:
+ *  -  1 - One of the futexes was awoken by another thread
+ *  -  0 - Success
+ *  - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL
+ */
+static int futex_wait_multiple_setup(struct futex_q *qs, int count,
+				     unsigned int flags, int *awaken)
+{
+	struct futex_hash_bucket *hb;
+	int ret, i;
+	u32 uval;
+
+	/*
+	 * Enqueuing multiple futexes is tricky, because we need to
+	 * enqueue each futex in the list before dealing with the next
+	 * one to avoid deadlocking on the hash bucket.  But, before
+	 * enqueuing, we need to make sure that current->state is
+	 * TASK_INTERRUPTIBLE, so we don't absorb any wake events. That
+	 * cannot be done before the get_futex_key() of the next key,
+	 * because it calls get_user_pages(), which can sleep.  Thus, we
+	 * fetch the list of futex keys in two steps: first pin the
+	 * memory behind every key, and only then read
+	 * each key and queue the corresponding futex.
+	 */
+retry:
+	for (i = 0; i < count; i++) {
+		qs[i].key = FUTEX_KEY_INIT;
+		ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED,
+				    &qs[i].key, FUTEX_READ);
+		if (unlikely(ret)) {
+			for (--i; i >= 0; i--)
+				put_futex_key(&qs[i].key);
+			return ret;
+		}
+	}
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (i = 0; i < count; i++) {
+		struct futex_q *q = &qs[i];
+
+		hb = queue_lock(q);
+
+		ret = get_futex_value_locked(&uval, q->uaddr);
+		if (ret) {
+			/*
+			 * We need to try to handle the fault, which
+			 * cannot be done without sleep, so we need to
+			 * undo all the work already done, to make sure
+			 * we don't miss any wake ups.  Therefore, clean
+			 * up, handle the fault and retry from the
+			 * beginning.
+			 */
+			queue_unlock(hb);
+
+			/*
+			 * Keys 0..(i-1) are implicitly put
+			 * by unqueue_multiple().
+			 */
+			put_futex_key(&q->key);
+
+			*awaken = unqueue_multiple(qs, i);
+
+			__set_current_state(TASK_RUNNING);
+
+			/*
+			 * On a real fault, prioritize the error even if
+			 * some other futex was awoken.  Userspace gave
+			 * us a bad address, -EFAULT them.
+			 */
+			ret = get_user(uval, q->uaddr);
+			if (ret)
+				return ret;
+
+			/*
+			 * Even if the page fault was handled, if
+			 * something was already awoken we can safely
+			 * give up and succeed, giving userspace a hint
+			 * to acquire the right futex faster.
+			 */
+			if (*awaken >= 0)
+				return 1;
+
+			goto retry;
+		}
+
+		if (uval != q->uval) {
+			queue_unlock(hb);
+
+			put_futex_key(&qs[i].key);
+
+			/*
+			 * If something was already awoken, we can
+			 * safely ignore the error and succeed.
+			 */
+			*awaken = unqueue_multiple(qs, i);
+			__set_current_state(TASK_RUNNING);
+			if (*awaken >= 0)
+				return 1;
+
+			return -EWOULDBLOCK;
+		}
+
+		/*
+		 * The bucket lock can't be held while dealing with the
+		 * next futex. Queue each futex at this moment so hb can
+		 * be unlocked.
+		 */
+		queue_me(&qs[i], hb);
+	}
+	return 0;
+}
+
+/**
+ * futex_wait_multiple() - Prepare to wait on and enqueue several futexes
+ * @qs:		The list of futexes to wait on
+ * @op:		Operation code from futex's syscall
+ * @count:	The number of objects
+ * @abs_time:	Timeout before giving up and returning to userspace
+ *
+ * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function
+ * sleeps on a group of futexes and returns on the first futex that
+ * triggered, or after the timeout has elapsed.
+ *
+ * Return:
+ *  - >=0 - Index of the futex that was awoken, as a hint
+ *  - <0  - On error
+ */
+static int futex_wait_multiple(struct futex_q *qs, int op,
+			       u32 count, ktime_t *abs_time)
+{
+	struct hrtimer_sleeper timeout, *to;
+	int ret, flags = 0, hint = 0;
+	unsigned int i;
+
+	if (!(op & FUTEX_PRIVATE_FLAG))
+		flags |= FLAGS_SHARED;
+
+	if (op & FUTEX_CLOCK_REALTIME)
+		flags |= FLAGS_CLOCKRT;
+
+	to = futex_setup_timer(abs_time, &timeout, flags, 0);
+	while (1) {
+		ret = futex_wait_multiple_setup(qs, count, flags, &hint);
+		if (ret) {
+			if (ret > 0) {
+				/* A futex was awoken during setup */
+				ret = hint;
+			}
+			break;
+		}
+
+		if (to)
+			hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+
+		/*
+		 * Avoid sleeping if another thread already tried to
+		 * wake us.
+		 */
+		for (i = 0; i < count; i++) {
+			if (plist_node_empty(&qs[i].list))
+				break;
+		}
+
+		if (i == count && (!to || to->task))
+			freezable_schedule();
+
+		ret = unqueue_multiple(qs, count);
+
+		__set_current_state(TASK_RUNNING);
+
+		if (ret >= 0)
+			break;
+		if (to && !to->task) {
+			ret = -ETIMEDOUT;
+			break;
+		} else if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		/*
+		 * The only remaining case is a spurious wakeup;
+		 * just retry.
+		 */
+	}
+
+	if (to) {
+		hrtimer_cancel(&to->timer);
+		destroy_hrtimer_on_stack(&to->timer);
+	}
+
+	return ret;
+}
+
 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
 		      ktime_t *abs_time, u32 bitset)
 {
@@ -3833,6 +4065,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 	return -ENOSYS;
 }
 
+/**
+ * futex_read_wait_block - Read an array of futex_wait_block from userspace
+ * @uaddr:	Userspace address of the block
+ * @count:	Number of blocks to be read
+ *
+ * This function allocates an array of futex_q (zeroed to initialize the
+ * fields) and then, for each futex_wait_block element from userspace,
+ * fills a futex_q element with the proper values.
+ */
+inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count)
+{
+	unsigned int i;
+	struct futex_q *qs;
+	struct futex_wait_block fwb;
+	struct futex_wait_block __user *entry =
+		(struct futex_wait_block __user *)uaddr;
+
+	if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
+		return ERR_PTR(-EINVAL);
+
+	qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
+	if (!qs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
+			kfree(qs);
+			return ERR_PTR(-EFAULT);
+		}
+
+		qs[i].uaddr = fwb.uaddr;
+		qs[i].uval = fwb.val;
+		qs[i].bitset = fwb.bitset;
+	}
+
+	return qs;
+}
 
 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
@@ -3845,7 +4114,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 		      cmd == FUTEX_WAIT_BITSET ||
-		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+		      cmd == FUTEX_WAIT_REQUEUE_PI ||
+		      cmd == FUTEX_WAIT_MULTIPLE)) {
 		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
 			return -EFAULT;
 		if (get_timespec64(&ts, utime))
@@ -3854,7 +4124,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 			return -EINVAL;
 
 		t = timespec64_to_ktime(ts);
-		if (cmd == FUTEX_WAIT)
+		if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
 			t = ktime_add_safe(ktime_get(), t);
 		tp = &t;
 	}
@@ -3866,6 +4136,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
 		val2 = (u32) (unsigned long) utime;
 
+	if (cmd == FUTEX_WAIT_MULTIPLE) {
+		int ret;
+		struct futex_q *qs;
+
+#ifdef CONFIG_X86_X32
+		if (unlikely(in_x32_syscall()))
+			return -ENOSYS;
+#endif
+		qs = futex_read_wait_block(uaddr, val);
+
+		if (IS_ERR(qs))
+			return PTR_ERR(qs);
+
+		ret = futex_wait_multiple(qs, op, val, tp);
+		kfree(qs);
+
+		return ret;
+	}
+
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
@@ -4028,6 +4317,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
 #endif /* CONFIG_COMPAT */
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
+/**
+ * struct compat_futex_wait_block - Block of futexes to be waited for
+ * @uaddr:	User address of the futex (compatible pointer)
+ * @val:	Futex value expected by userspace
+ * @bitset:	Bitset for the optional bitmasked wakeup
+ */
+struct compat_futex_wait_block {
+	compat_uptr_t	uaddr;
+	__u32 pad;
+	__u32 val;
+	__u32 bitset;
+};
+
+/**
+ * compat_futex_read_wait_block - Read an array of futex_wait_block from
+ * userspace
+ * @uaddr:	Userspace address of the block
+ * @count:	Number of blocks to be read
+ *
+ * This function does the same as futex_read_wait_block(), except that it
+ * converts the pointer to the futex from the compat version to the regular one.
+ */
+inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr,
+						    u32 count)
+{
+	unsigned int i;
+	struct futex_q *qs;
+	struct compat_futex_wait_block fwb;
+	struct compat_futex_wait_block __user *entry =
+		(struct compat_futex_wait_block __user *)uaddr;
+
+	if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
+		return ERR_PTR(-EINVAL);
+
+	qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
+	if (!qs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
+			kfree(qs);
+			return ERR_PTR(-EFAULT);
+		}
+
+		qs[i].uaddr = compat_ptr(fwb.uaddr);
+		qs[i].uval = fwb.val;
+		qs[i].bitset = fwb.bitset;
+	}
+
+	return qs;
+}
+
 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
 		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
 		u32, val3)
@@ -4039,14 +4380,15 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
 
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 		      cmd == FUTEX_WAIT_BITSET ||
-		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+		      cmd == FUTEX_WAIT_REQUEUE_PI ||
+		      cmd == FUTEX_WAIT_MULTIPLE)) {
 		if (get_old_timespec32(&ts, utime))
 			return -EFAULT;
 		if (!timespec64_valid(&ts))
 			return -EINVAL;
 
 		t = timespec64_to_ktime(ts);
-		if (cmd == FUTEX_WAIT)
+		if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
 			t = ktime_add_safe(ktime_get(), t);
 		tp = &t;
 	}
@@ -4054,6 +4396,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
 	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
 		val2 = (int) (unsigned long) utime;
 
+	if (cmd == FUTEX_WAIT_MULTIPLE) {
+		int ret;
+		struct futex_q *qs = compat_futex_read_wait_block(uaddr, val);
+
+		if (IS_ERR(qs))
+			return PTR_ERR(qs);
+
+		ret = futex_wait_multiple(qs, op, val, tp);
+		kfree(qs);
+
+		return ret;
+	}
+
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 #endif /* CONFIG_COMPAT_32BIT_TIME */
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 20d501af4f2..f92cabe495b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -115,6 +115,23 @@ config GENERIC_IRQ_RESERVATION_MODE
 config IRQ_FORCED_THREADING
        bool
 
+config FORCE_IRQ_THREADING
+	bool "Make IRQ threading compulsory"
+	depends on IRQ_FORCED_THREADING
+	default n
+	---help---
+	  Make IRQ threading mandatory for any IRQ handlers that support it,
+	  rather than optional and requiring the threadirqs kernel parameter.
+	  Forced threading can then be disabled with the nothreadirqs kernel
+	  parameter.
+
+	  Enabling this may prevent some architectures from booting when
+	  combined with runqueue sharing and MuQSS.
+
+	  Enable this if you are building for a desktop or low latency
+	  system, otherwise say N.
+
 config SPARSE_IRQ
 	bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
 	---help---
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index dc58fd245e7..538443a0491 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -25,9 +25,20 @@
 #include "internals.h"
 
 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_FORCE_IRQ_THREADING
+__read_mostly bool force_irqthreads = true;
+#else
 __read_mostly bool force_irqthreads;
+#endif
 EXPORT_SYMBOL_GPL(force_irqthreads);
 
+static int __init setup_noforced_irqthreads(char *arg)
+{
+	force_irqthreads = false;
+	return 0;
+}
+early_param("nothreadirqs", setup_noforced_irqthreads);
+
 static int __init setup_forced_irqthreads(char *arg)
 {
 	force_irqthreads = true;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bfbfa481be3..f5942fb29ba 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -446,6 +446,34 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 }
 EXPORT_SYMBOL(kthread_bind);
 
+#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
+extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/*
+ * new_kthread_bind is a special variant of __kthread_bind_mask.
+ * For new threads to work on MuQSS we want to set the allowed cpumask without
+ * task_cpu being set and without the task being rescheduled until it is
+ * rescheduled on its own, so we call __do_set_cpus_allowed directly, which
+ * only changes the cpumask. This is particularly important for smpboot
+ * threads to work.
+ */
+static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	unsigned long flags;
+
+	if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
+		return;
+
+	/* It's safe because the task is inactive. */
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	__do_set_cpus_allowed(p, cpumask_of(cpu));
+	p->flags |= PF_NO_SETAFFINITY;
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+#else
+#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
+#endif
+
 /**
  * kthread_create_on_cpu - Create a cpu bound kthread
  * @threadfn: the function to run until signal_pending(current).
@@ -467,7 +495,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 				   cpu);
 	if (IS_ERR(p))
 		return p;
-	kthread_bind(p, cpu);
+	new_kthread_bind(p, cpu);
 	/* CPU hotplug need to bind once again when unparking the thread. */
 	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
 	to_kthread(p)->cpu = cpu;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ac10db66cc6..715ba9c1b91 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -169,7 +169,7 @@ static
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
 
-static inline struct lock_class *hlock_class(struct held_lock *hlock)
+inline struct lock_class *lockdep_hlock_class(struct held_lock *hlock)
 {
 	unsigned int class_idx = hlock->class_idx;
 
@@ -190,6 +190,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 	 */
 	return lock_classes + class_idx;
 }
+EXPORT_SYMBOL_GPL(lockdep_hlock_class);
+#define hlock_class(hlock) lockdep_hlock_class(hlock)
 
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 21fb5a5662b..a04ffebc6b7 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -16,15 +16,23 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
+ifdef CONFIG_SCHED_MUQSS
+obj-y += MuQSS.o clock.o cputime.o
+obj-y += idle.o
+obj-y += wait.o wait_bit.o swait.o completion.o
+
+obj-$(CONFIG_SMP) += topology.o
+else
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle.o fair.o rt.o deadline.o
 obj-y += wait.o wait_bit.o swait.o completion.o
 
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
-obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+endif
+obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_MEMBARRIER) += membarrier.o
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
new file mode 100644
index 00000000000..c856872ce3d
--- /dev/null
+++ b/kernel/sched/MuQSS.c
@@ -0,0 +1,7638 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  kernel/sched/MuQSS.c, was kernel/sched.c
+ *
+ *  Kernel scheduler and related syscalls
+ *
+ *  Copyright (C) 1991-2002  Linus Torvalds
+ *
+ *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
+ *		make semaphores SMP safe
+ *  1998-11-19	Implemented schedule_timeout() and related stuff
+ *		by Andrea Arcangeli
+ *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
+ *		hybrid priority-list and round-robin design with
+ *		an array-switch method of distributing timeslices
+ *		and per-CPU runqueues.  Cleanups and useful suggestions
+ *		by Davide Libenzi, preemptible kernel bits by Robert Love.
+ *  2003-09-03	Interactivity tuning by Con Kolivas.
+ *  2004-04-02	Scheduler domains code by Nick Piggin
+ *  2007-04-15  Work begun on replacing all interactivity tuning with a
+ *              fair scheduling design by Con Kolivas.
+ *  2007-05-05  Load balancing (smp-nice) and other improvements
+ *              by Peter Williams
+ *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
+ *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ *              Thomas Gleixner, Mike Kravetz
+ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
+ *              a whole lot of those previous things.
+ *  2016-10-01  Multiple Queue Skiplist Scheduler scalable evolution of BFS
+ * 		scheduler by Con Kolivas.
+ *  2019-08-31  LLC bits by Eduards Bezverhijs
+ */
+
+#include <linux/sched/isolation.h>
+#include <linux/sched/loadavg.h>
+
+#include <linux/binfmts.h>
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/context_tracking.h>
+#include <linux/cpuset.h>
+#include <linux/delayacct.h>
+#include <linux/init_task.h>
+#include <linux/kcov.h>
+#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/prefetch.h>
+#include <linux/profile.h>
+#include <linux/rcupdate_wait.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/skip_list.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+#include <linux/wait_bit.h>
+
+#include <asm/irq_regs.h>
+#include <asm/switch_to.h>
+#include <asm/tlb.h>
+
+#include "../workqueue_internal.h"
+#include "../../fs/io-wq.h"
+#include "../smpboot.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
+#include "MuQSS.h"
+
+#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p)		rt_prio((p)->prio)
+#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
+#define is_rt_policy(policy)	((policy) == SCHED_FIFO || \
+					(policy) == SCHED_RR)
+#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
+
+#define is_idle_policy(policy)	((policy) == SCHED_IDLEPRIO)
+#define idleprio_task(p)	unlikely(is_idle_policy((p)->policy))
+#define task_running_idle(p)	unlikely((p)->prio == IDLE_PRIO)
+
+#define is_iso_policy(policy)	((policy) == SCHED_ISO)
+#define iso_task(p)		unlikely(is_iso_policy((p)->policy))
+#define task_running_iso(p)	unlikely((p)->prio == ISO_PRIO)
+
+#define rq_idle(rq)		((rq)->rq_prio == PRIO_LIMIT)
+
+#define ISO_PERIOD		(5 * HZ)
+
+#define STOP_PRIO		(MAX_RT_PRIO - 1)
+
+/*
+ * Some helpers for converting to/from various scales. Use shifts to get
+ * approximate powers of ten for less overhead.
+ */
+#define APPROX_NS_PS		(1073741824) /* Approximate ns per second */
+#define JIFFIES_TO_NS(TIME)	((TIME) * (APPROX_NS_PS / HZ))
+#define JIFFY_NS		(APPROX_NS_PS / HZ)
+#define JIFFY_US		(1048576 / HZ)
+#define NS_TO_JIFFIES(TIME)	((TIME) / JIFFY_NS)
+#define HALF_JIFFY_NS		(APPROX_NS_PS / HZ / 2)
+#define HALF_JIFFY_US		(1048576 / HZ / 2)
+#define MS_TO_NS(TIME)		((TIME) << 20)
+#define MS_TO_US(TIME)		((TIME) << 10)
+#define NS_TO_MS(TIME)		((TIME) >> 20)
+#define NS_TO_US(TIME)		((TIME) >> 10)
+#define US_TO_NS(TIME)		((TIME) << 10)
+#define TICK_APPROX_NS		((APPROX_NS_PS+HZ/2)/HZ)
+
+#define RESCHED_US	(100) /* Reschedule if less than this many μs left */
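+
+/*
+ * Worked example of the shift approximations above (illustration only,
+ * using HZ=250 for concreteness): APPROX_NS_PS is 2^30, about 7% larger
+ * than the true 10^9 ns per second, so JIFFY_NS = 2^30 / 250 = 4294967 ns
+ * rather than the nominal 4000000, and JIFFY_US = 2^20 / 250 = 4194 us.
+ * Similarly MS_TO_NS(6) = 6 << 20 = 6291456, i.e. "6 ms" is treated as
+ * roughly 6.29e6 ns. As the comment above notes, the cheap shifts are
+ * preferred over exact conversions.
+ */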
+
+void print_scheduler_version(void)
+{
+	printk(KERN_INFO "MuQSS CPU scheduler v0.202 by Con Kolivas.\n");
+}
+
+/* Define RQ share levels */
+#define RQSHARE_NONE 0
+#define RQSHARE_SMT 1
+#define RQSHARE_MC 2
+#define RQSHARE_MC_LLC 3
+#define RQSHARE_SMP 4
+#define RQSHARE_ALL 5
+
+/* Define locality levels */
+#define LOCALITY_SAME 0
+#define LOCALITY_SMT 1
+#define LOCALITY_MC_LLC 2
+#define LOCALITY_MC 3
+#define LOCALITY_SMP 4
+#define LOCALITY_DISTANT 5
+
+/*
+ * This determines what level of runqueue sharing will be done and is
+ * configurable at boot time with the "rqshare=" boot parameter.
+ */
+static int rqshare __read_mostly = CONFIG_SHARERQ; /* Default RQSHARE_MC */
+
+static int __init set_rqshare(char *str)
+{
+	if (!strncmp(str, "none", 4)) {
+		rqshare = RQSHARE_NONE;
+		return 0;
+	}
+	if (!strncmp(str, "smt", 3)) {
+		rqshare = RQSHARE_SMT;
+		return 0;
+	}
+	if (!strncmp(str, "mc", 2)) {
+		rqshare = RQSHARE_MC;
+		return 0;
+	}
+	if (!strncmp(str, "llc", 3)) {
+		rqshare = RQSHARE_MC_LLC;
+		return 0;
+	}
+	if (!strncmp(str, "smp", 3)) {
+		rqshare = RQSHARE_SMP;
+		return 0;
+	}
+	if (!strncmp(str, "all", 3)) {
+		rqshare = RQSHARE_ALL;
+		return 0;
+	}
+	return 1;
+}
+__setup("rqshare=", set_rqshare);
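+
+/*
+ * Usage example (illustration only): booting with "rqshare=llc" selects
+ * RQSHARE_MC_LLC, i.e. runqueues are shared between CPUs that share a
+ * last-level cache, while "rqshare=none" gives every CPU its own runqueue.
+ * Without the parameter the CONFIG_SHARERQ default above applies.
+ */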
+
+/*
+ * This is the time over which all tasks within the same priority round robin.
+ * Value is in ms and defaults to 6ms (2ms with CONFIG_PCK_INTERACTIVE).
+ * Tunable via the /proc interface.
+ */
+#ifdef CONFIG_PCK_INTERACTIVE
+int rr_interval __read_mostly = 2;
+#else
+int rr_interval __read_mostly = 6;
+#endif
+
+/*
+ * Tunable to choose whether to prioritise latency or throughput, simple
+ * binary yes or no
+ */
+int sched_interactive __read_mostly = 1;
+
+/*
+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
+ * are allowed to run, as real time tasks, averaged over a rolling five
+ * second period. This is the total over all online cpus.
+ */
+#ifdef CONFIG_PCK_INTERACTIVE
+int sched_iso_cpu __read_mostly = 25;
+#else
+int sched_iso_cpu __read_mostly = 70;
+#endif
+
+/*
+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
+ * 0: No yield.
+ * 1: Yield only to better priority/deadline tasks. (default)
+ * 2: Expire timeslice and recalculate deadline.
+ */
+
+#ifdef CONFIG_PCK_INTERACTIVE
+int sched_yield_type __read_mostly = 0;
+#else
+int sched_yield_type __read_mostly = 1;
+#endif
+
+/*
+ * The relative length of deadline for each priority(nice) level.
+ */
+static int prio_ratios[NICE_WIDTH] __read_mostly;
+
+
+ * The quota handed out to tasks of all priority levels when refilling their
+ * time_slice.
+ */
+static inline int timeslice(void)
+{
+	return MS_TO_US(rr_interval);
+}
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#ifdef CONFIG_SMP
+/*
+ * Total number of runqueues. Equals number of CPUs when there is no runqueue
+ * sharing but is usually less with SMT/MC sharing of runqueues.
+ */
+static int total_runqueues __read_mostly = 1;
+
+static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
+
+struct rq *cpu_rq(int cpu)
+{
+	return &per_cpu(runqueues, (cpu));
+}
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+	return -cpu;
+}
+
+int __weak arch_sd_sibling_asym_packing(void)
+{
+       return 0*SD_ASYM_PACKING;
+}
+
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
+#endif
+
+#else
+struct rq *uprq;
+#endif /* CONFIG_SMP */
+
+#include "stats.h"
+
+/*
+ * All common locking functions performed on rq->lock. rq->clock is local to
+ * the CPU accessing it so it can be modified just with interrupts disabled
+ * when we're not updating niffies.
+ * Looking up task_rq must be done under rq->lock to be safe.
+ */
+
+/*
+ * RQ-clock updating methods:
+ */
+
+#ifdef HAVE_SCHED_AVG_IRQ
+static void update_irq_load_avg(struct rq *rq, long delta);
+#else
+static inline void update_irq_load_avg(struct rq *rq, long delta) {}
+#endif
+
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compiler should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+	s64 __maybe_unused steal = 0, irq_delta = 0;
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+	/*
+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
+	 * this case when a previous update_rq_clock() happened inside a
+	 * {soft,}irq region.
+	 *
+	 * When this happens, we stop ->clock_task and only update the
+	 * prev_irq_time stamp to account for the part that fit, so that a next
+	 * update will consume the rest. This ensures ->clock_task is
+	 * monotonic.
+	 *
+	 * It does however cause some slight miss-attribution of {soft,}irq
+	 * time, a more accurate solution would be to update the irq_time using
+	 * the current rq->clock timestamp, except that would require using
+	 * atomic ops.
+	 */
+	if (irq_delta > delta)
+		irq_delta = delta;
+
+	rq->prev_irq_time += irq_delta;
+	delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
+		steal = paravirt_steal_clock(cpu_of(rq));
+		steal -= rq->prev_steal_time_rq;
+
+		if (unlikely(steal > delta))
+			steal = delta;
+
+		rq->prev_steal_time_rq += steal;
+		delta -= steal;
+	}
+#endif
+	rq->clock_task += delta;
+
+#ifdef HAVE_SCHED_AVG_IRQ
+	if (irq_delta + steal)
+		update_irq_load_avg(rq, irq_delta + steal);
+#endif
+}
+
+static inline void update_rq_clock(struct rq *rq)
+{
+	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+
+	if (unlikely(delta < 0))
+		return;
+	rq->clock += delta;
+	update_rq_clock_task(rq, delta);
+}
+
+/*
+ * Niffies are a globally increasing nanosecond counter. They're only used by
+ * update_load_avg and time_slice_expired; however, deadlines are based on them
+ * across CPUs. Update them whenever we will call one of those functions, and
+ * synchronise them across CPUs whenever we hold both runqueue locks.
+ */
+static inline void update_clocks(struct rq *rq)
+{
+	s64 ndiff, minndiff;
+	long jdiff;
+
+	update_rq_clock(rq);
+	ndiff = rq->clock - rq->old_clock;
+	rq->old_clock = rq->clock;
+	jdiff = jiffies - rq->last_jiffy;
+
+	/* Subtract any niffies added by balancing with other rqs */
+	ndiff -= rq->niffies - rq->last_niffy;
+	minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
+	if (minndiff < 0)
+		minndiff = 0;
+	ndiff = max(ndiff, minndiff);
+	rq->niffies += ndiff;
+	rq->last_niffy = rq->niffies;
+	if (jdiff) {
+		rq->last_jiffy += jdiff;
+		rq->last_jiffy_niffies = rq->niffies;
+	}
+}
+
+/*
+ * Any time we have two runqueues locked we use that as an opportunity to
+ * synchronise niffies to the highest value as idle ticks may have artificially
+ * kept niffies low on one CPU and the truth can only be later.
+ */
+static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
+{
+	if (rq1->niffies > rq2->niffies)
+		rq2->niffies = rq1->niffies;
+	else
+		rq1->niffies = rq2->niffies;
+}
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+
+/* For when we know rq1 != rq2 */
+static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	if (rq1 < rq2) {
+		raw_spin_lock(rq1->lock);
+		raw_spin_lock_nested(rq2->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		raw_spin_lock(rq2->lock);
+		raw_spin_lock_nested(rq1->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	if (rq1->lock == rq2->lock) {
+		raw_spin_lock(rq1->lock);
+		__acquire(rq2->lock);	/* Fake it out ;) */
+	} else
+		__double_rq_lock(rq1, rq2);
+	synchronise_niffies(rq1, rq2);
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	raw_spin_unlock(rq1->lock);
+	if (rq1->lock != rq2->lock)
+		raw_spin_unlock(rq2->lock);
+	else
+		__release(rq2->lock);
+}
+
+static inline void lock_all_rqs(void)
+{
+	int cpu;
+
+	preempt_disable();
+	for_each_possible_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+
+		do_raw_spin_lock(rq->lock);
+	}
+}
+
+static inline void unlock_all_rqs(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+
+		do_raw_spin_unlock(rq->lock);
+	}
+	preempt_enable();
+}
+
+/* Specially nest trylock an rq */
+static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
+{
+	if (unlikely(!do_raw_spin_trylock(rq->lock)))
+		return false;
+	spin_acquire(&rq->lock->dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
+	synchronise_niffies(this_rq, rq);
+	return true;
+}
+
+/* Unlock a specially nested trylocked rq */
+static inline void unlock_rq(struct rq *rq)
+{
+	spin_release(&rq->lock->dep_map, _RET_IP_);
+	do_raw_spin_unlock(rq->lock);
+}
+
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)						\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		typeof(mask) _mask = (mask);				\
+		typeof(*_ptr) _old, _val = *_ptr;			\
+									\
+		for (;;) {						\
+			_old = cmpxchg(_ptr, _val, _val | _mask);	\
+			if (_old == _val)				\
+				break;					\
+			_val = _old;					\
+		}							\
+	_old;								\
+})
+
+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
+/*
+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * this avoids any races wrt polling state changes and thereby avoids
+ * spurious IPIs.
+ */
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+	struct thread_info *ti = task_thread_info(p);
+	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+}
+
+/*
+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
+ *
+ * If this returns true, then the idle task promises to call
+ * sched_ttwu_pending() and reschedule soon.
+ */
+static bool set_nr_if_polling(struct task_struct *p)
+{
+	struct thread_info *ti = task_thread_info(p);
+	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
+
+	for (;;) {
+		if (!(val & _TIF_POLLING_NRFLAG))
+			return false;
+		if (val & _TIF_NEED_RESCHED)
+			return true;
+		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
+		if (old == val)
+			break;
+		val = old;
+	}
+	return true;
+}
+
+#else
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+	set_tsk_need_resched(p);
+	return true;
+}
+
+#ifdef CONFIG_SMP
+static bool set_nr_if_polling(struct task_struct *p)
+{
+	return false;
+}
+#endif
+#endif
+
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+	struct wake_q_node *node = &task->wake_q;
+
+	/*
+	 * Atomically grab the task, if ->wake_q is !nil already it means
+	 * it's already queued (either by us or someone else) and will get the
+	 * wakeup due to that.
+	 *
+	 * In order to ensure that a pending wakeup will observe our pending
+	 * state, even in the failed case, an explicit smp_mb() must be used.
+	 */
+	smp_mb__before_atomic();
+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+		return false;
+
+	/*
+	 * The head is context local, there can be no concurrency.
+	 */
+	*head->lastp = node;
+	head->lastp = &node->next;
+	return true;
+}
+
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+	if (__wake_q_add(head, task))
+		get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+	if (!__wake_q_add(head, task))
+		put_task_struct(task);
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+	struct wake_q_node *node = head->first;
+
+	while (node != WAKE_Q_TAIL) {
+		struct task_struct *task;
+
+		task = container_of(node, struct task_struct, wake_q);
+		BUG_ON(!task);
+		/* Task can safely be re-inserted now */
+		node = node->next;
+		task->wake_q.next = NULL;
+
+		/*
+		 * wake_up_process() executes a full barrier, which pairs with
+		 * the queueing in wake_q_add() so as not to miss wakeups.
+		 */
+		wake_up_process(task);
+		put_task_struct(task);
+	}
+}
+
+static inline void smp_sched_reschedule(int cpu)
+{
+	if (likely(cpu_online(cpu)))
+		smp_send_reschedule(cpu);
+}
+
+/*
+ * resched_task - mark a task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+void resched_task(struct task_struct *p)
+{
+	int cpu;
+#ifdef CONFIG_LOCKDEP
+	/* Kernel threads call this when creating workqueues while still
+	 * inactive from __kthread_bind_mask, holding only the pi_lock */
+	if (!(p->flags & PF_KTHREAD)) {
+		struct rq *rq = task_rq(p);
+
+		lockdep_assert_held(rq->lock);
+	}
+#endif
+	if (test_tsk_need_resched(p))
+		return;
+
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id()) {
+		set_tsk_need_resched(p);
+		set_preempt_need_resched();
+		return;
+	}
+
+	if (set_nr_and_not_polling(p))
+		smp_sched_reschedule(cpu);
+	else
+		trace_sched_wake_idle_without_ipi(cpu);
+}
+
+/*
+ * A task that is not running or queued will not have a node set.
+ * A task that is queued but not running will have a node set.
+ * A task that is currently running will have ->on_cpu set but no node set.
+ */
+static inline bool task_queued(struct task_struct *p)
+{
+	return !skiplist_node_empty(&p->node);
+}
+
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
+static inline void resched_if_idle(struct rq *rq);
+
+static inline bool deadline_before(u64 deadline, u64 time)
+{
+	return (deadline < time);
+}
+
+/*
+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
+ * is the key to everything. It distributes cpu fairly amongst tasks of the
+ * same nice value, it proportions cpu according to nice level, it means the
+ * task that last woke up the longest ago has the earliest deadline, thus
+ * ensuring that interactive tasks get low latency on wake up. The CPU
+ * proportion works out to the square of the virtual deadline difference, so
+ * this equation gives a nice 19 task roughly 3% CPU compared to nice 0.
+ */
+static inline u64 prio_deadline_diff(int user_prio)
+{
+	return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
+}
+
+static inline u64 task_deadline_diff(struct task_struct *p)
+{
+	return prio_deadline_diff(TASK_USER_PRIO(p));
+}
+
+static inline u64 static_deadline_diff(int static_prio)
+{
+	return prio_deadline_diff(USER_PRIO(static_prio));
+}
+
+static inline int longest_deadline_diff(void)
+{
+	return prio_deadline_diff(39);
+}
+
+static inline int ms_longest_deadline_diff(void)
+{
+	return NS_TO_MS(longest_deadline_diff());
+}
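+
+/*
+ * Worked example (illustration only, assuming the prio_ratios entry for
+ * nice -20 is the base value of 128 used for the fast shifts here): since
+ * MS_TO_NS(1) / 128 = 8192, a nice -20 task with rr_interval = 6 gets a
+ * deadline offset of 128 * 6 * 8192 = 6291456 ns = MS_TO_NS(6), i.e.
+ * roughly 6 ms beyond "now" in niffies. Less favourable nice levels scale
+ * this offset up through their larger prio_ratios entries.
+ */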
+
+static inline bool rq_local(struct rq *rq);
+
+#ifndef SCHED_CAPACITY_SCALE
+#define SCHED_CAPACITY_SCALE 1024
+#endif
+
+static inline int rq_load(struct rq *rq)
+{
+	return rq->nr_running;
+}
+
+/*
+ * Update the load average for feeding into cpu frequency governors. Use a
+ * rough estimate of a rolling average with ~ time constant of 32ms.
+ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144
+ * Make sure a call to update_clocks has been made before calling this to get
+ * an updated rq->niffies.
+ */
+static void update_load_avg(struct rq *rq, unsigned int flags)
+{
+	long us_interval, load;
+
+	us_interval = NS_TO_US(rq->niffies - rq->load_update);
+	if (unlikely(us_interval <= 0))
+		return;
+
+	load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
+	if (unlikely(load < 0))
+		load = 0;
+	load += rq_load(rq) * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
+	rq->load_avg = load;
+
+	rq->load_update = rq->niffies;
+	update_irq_load_avg(rq, 0);
+	if (likely(rq_local(rq)))
+		cpufreq_trigger(rq, flags);
+}
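+
+/*
+ * Worked example of the constant above (illustration only): over a
+ * us_interval of 32768 us (~32 ms) the decay term removes
+ * 32768 * 5 / 262144 = 0.625 of the previous load_avg, matching the
+ * 80/128 ~ 0.63 figure in the comment and hence the ~32 ms time constant;
+ * shorter intervals decay proportionally less.
+ */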
+
+#ifdef HAVE_SCHED_AVG_IRQ
+/*
+ * IRQ variant of update_load_avg above. delta is actually time in nanoseconds
+ * here so we scale the load contribution to how long it's been since the last
+ * update.
+ */
+static void update_irq_load_avg(struct rq *rq, long delta)
+{
+	long us_interval, load;
+
+	us_interval = NS_TO_US(rq->niffies - rq->irq_load_update);
+	if (unlikely(us_interval <= 0))
+		return;
+
+	load = rq->irq_load_avg - (rq->irq_load_avg * us_interval * 5 / 262144);
+	if (unlikely(load < 0))
+		load = 0;
+	load += NS_TO_US(delta) * SCHED_CAPACITY_SCALE * 5 / 262144;
+	rq->irq_load_avg = load;
+
+	rq->irq_load_update = rq->niffies;
+}
+#endif
+
+/*
+ * Removing from the runqueue. Enter with rq locked. Deleting a task
+ * from the skip list is done via the stored node reference in the task struct
+ * and does not require a full look up. Thus it occurs in O(k) time where k
+ * is the "level" of the list the task was stored at - usually < 4, max 8.
+ */
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+{
+	skiplist_delete(rq->sl, &p->node);
+	rq->best_key = rq->node->next[0]->key;
+	update_clocks(rq);
+
+	if (!(flags & DEQUEUE_SAVE)) {
+		sched_info_dequeued(rq, p);
+		psi_dequeue(p, flags & DEQUEUE_SLEEP);
+	}
+	rq->nr_running--;
+	if (rt_task(p))
+		rq->rt_nr_running--;
+	update_load_avg(rq, flags);
+}
+
+#ifdef CONFIG_PREEMPT_RCU
+static bool rcu_read_critical(struct task_struct *p)
+{
+	return p->rcu_read_unlock_special.b.blocked;
+}
+#else /* CONFIG_PREEMPT_RCU */
+#define rcu_read_critical(p) (false)
+#endif /* CONFIG_PREEMPT_RCU */
+
+/*
+ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
+ * an idle task, we ensure none of the following conditions are met.
+ */
+static bool idleprio_suitable(struct task_struct *p)
+{
+	return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
+		!signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
+}
+
+/*
+ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
+ * that the iso_refractory flag is not set.
+ */
+static inline bool isoprio_suitable(struct rq *rq)
+{
+	return !rq->iso_refractory;
+}
+
+/*
+ * Adding to the runqueue. Enter with rq locked.
+ */
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
+{
+	unsigned int randseed, cflags = 0;
+	u64 sl_id;
+
+	if (!rt_task(p)) {
+		/* Check it hasn't gotten rt from PI */
+		if ((idleprio_task(p) && idleprio_suitable(p)) ||
+		   (iso_task(p) && isoprio_suitable(rq)))
+			p->prio = p->normal_prio;
+		else
+			p->prio = NORMAL_PRIO;
+	} else
+		rq->rt_nr_running++;
+	/*
+	 * The sl_id key passed to the skiplist generates a sorted list.
+	 * Realtime and sched iso tasks run FIFO so they only need be sorted
+	 * according to priority. The skiplist will put tasks of the same
+	 * key inserted later in FIFO order. Tasks of sched normal, batch
+	 * and idleprio are sorted according to their deadlines. Idleprio
+	 * tasks are offset by an impossibly large deadline value ensuring
+	 * they get sorted into last positions, but still according to their
+	 * own deadlines. This creates a "landscape" of skiplists running
+	 * from priority 0 realtime in first place to the lowest priority
+	 * idleprio tasks last. Skiplist insertion is an O(log n) process.
+	 */
+	if (p->prio <= ISO_PRIO) {
+		sl_id = p->prio;
+	} else {
+		sl_id = p->deadline;
+		if (idleprio_task(p)) {
+			if (p->prio == IDLE_PRIO)
+				sl_id |= 0xF000000000000000;
+			else
+				sl_id += longest_deadline_diff();
+		}
+	}
+	/*
+	 * Some architectures don't have better than microsecond resolution
+	 * so mask out ~microseconds as the random seed for skiplist insertion.
+	 */
+	update_clocks(rq);
+	if (!(flags & ENQUEUE_RESTORE)) {
+		sched_info_queued(rq, p);
+		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+	}
+
+	randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
+	skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
+	rq->best_key = rq->node->next[0]->key;
+	if (p->in_iowait)
+		cflags |= SCHED_CPUFREQ_IOWAIT;
+	rq->nr_running++;
+	update_load_avg(rq, cflags);
+}
+
+/*
+ * Returns the relative length of deadline compared to the shortest
+ * deadline, which is that of nice -20.
+ */
+static inline int task_prio_ratio(struct task_struct *p)
+{
+	return prio_ratios[TASK_USER_PRIO(p)];
+}
+
+/*
+ * task_timeslice - all tasks of all priorities get the exact same timeslice
+ * length. CPU distribution is handled by giving different deadlines to
+ * tasks of different priorities. Use 128 as the base value for fast shifts.
+ */
+static inline int task_timeslice(struct task_struct *p)
+{
+	return (rr_interval * task_prio_ratio(p) / 128);
+}
+
+#ifdef CONFIG_SMP
+/* Entered with rq locked */
+static inline void resched_if_idle(struct rq *rq)
+{
+	if (rq_idle(rq))
+		resched_task(rq->curr);
+}
+
+static inline bool rq_local(struct rq *rq)
+{
+	return (rq->cpu == smp_processor_id());
+}
+#ifdef CONFIG_SMT_NICE
+static const cpumask_t *thread_cpumask(int cpu);
+
+/* Find the best real time priority running on any SMT siblings of cpu and if
+ * none are running, the static priority of the best deadline task running.
+ * The lookups to the other runqueues are done locklessly as the occasional wrong
+ * value would be harmless. */
+static int best_smt_bias(struct rq *this_rq)
+{
+	int other_cpu, best_bias = 0;
+
+	for_each_cpu(other_cpu, &this_rq->thread_mask) {
+		struct rq *rq = cpu_rq(other_cpu);
+
+		if (rq_idle(rq))
+			continue;
+		if (unlikely(!rq->online))
+			continue;
+		if (!rq->rq_mm)
+			continue;
+		if (likely(rq->rq_smt_bias > best_bias))
+			best_bias = rq->rq_smt_bias;
+	}
+	return best_bias;
+}
+
+static int task_prio_bias(struct task_struct *p)
+{
+	if (rt_task(p))
+		return 1 << 30;
+	else if (task_running_iso(p))
+		return 1 << 29;
+	else if (task_running_idle(p))
+		return 0;
+	return MAX_PRIO - p->static_prio;
+}
+
+static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
+{
+	return true;
+}
+
+static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
+
+/* We've already decided p can run on CPU, now test if it shouldn't for SMT
+ * nice reasons. */
+static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
+{
+	int best_bias, task_bias;
+
+	/* Kernel threads always run */
+	if (unlikely(!p->mm))
+		return true;
+	if (rt_task(p))
+		return true;
+	if (!idleprio_suitable(p))
+		return true;
+	best_bias = best_smt_bias(this_rq);
+	/* The smt siblings are all idle or running IDLEPRIO */
+	if (best_bias < 1)
+		return true;
+	task_bias = task_prio_bias(p);
+	if (task_bias < 1)
+		return false;
+	if (task_bias >= best_bias)
+		return true;
+	/* Dither 25% cpu of normal tasks regardless of nice difference */
+	if (best_bias % 4 == 1)
+		return true;
+	/* Sorry, you lose */
+	return false;
+}
+#else /* CONFIG_SMT_NICE */
+#define smt_schedule(p, this_rq) (true)
+#endif /* CONFIG_SMT_NICE */
+
+static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
+{
+	set_bit(cpu, (volatile unsigned long *)cpumask);
+}
+
+/*
+ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
+ * allow easy lookup of whether any suitable idle CPUs are available.
+ * It's cheaper to check this map for any idle CPUs than to interrogate every
+ * runqueue when we are busy. The
+ * bits are set atomically but read locklessly as occasional false positive /
+ * negative is harmless.
+ */
+static inline void set_cpuidle_map(int cpu)
+{
+	if (likely(cpu_online(cpu)))
+		atomic_set_cpu(cpu, &cpu_idle_map);
+}
+
+static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
+{
+	clear_bit(cpu, (volatile unsigned long *)cpumask);
+}
+
+static inline void clear_cpuidle_map(int cpu)
+{
+	atomic_clear_cpu(cpu, &cpu_idle_map);
+}
+
+static bool suitable_idle_cpus(struct task_struct *p)
+{
+	return (cpumask_intersects(p->cpus_ptr, &cpu_idle_map));
+}
+
+/*
+ * Resched current on rq. We don't know if rq is local to this CPU nor if it
+ * is locked so we do not use an intermediate variable for the task to avoid
+ * having it dereferenced.
+ */
+static void resched_curr(struct rq *rq)
+{
+	int cpu;
+
+	if (test_tsk_need_resched(rq->curr))
+		return;
+
+	rq->preempt = rq->curr;
+	cpu = rq->cpu;
+
+	/* We're doing this without holding the rq lock if it's not task_rq */
+
+	if (cpu == smp_processor_id()) {
+		set_tsk_need_resched(rq->curr);
+		set_preempt_need_resched();
+		return;
+	}
+
+	if (set_nr_and_not_polling(rq->curr))
+		smp_sched_reschedule(cpu);
+	else
+		trace_sched_wake_idle_without_ipi(cpu);
+}
+
+#define CPUIDLE_DIFF_THREAD     (1)
+#define CPUIDLE_DIFF_CORE_LLC   (2)
+#define CPUIDLE_DIFF_CORE       (4)
+#define CPUIDLE_CACHE_BUSY      (8)
+#define CPUIDLE_DIFF_CPU        (16)
+#define CPUIDLE_THREAD_BUSY     (32)
+#define CPUIDLE_DIFF_NODE       (64)
+
+/*
+ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
+ * lowest value would give the most suitable CPU to schedule p onto next. The
+ * order works out to be the following:
+ *
+ * Same thread, idle or busy cache, idle or busy threads
+ * Other core, same cache, idle or busy cache, idle threads.
+ * Same node, other CPU, idle cache, idle threads.
+ * Same node, other CPU, busy cache, idle threads.
+ * Other core, same cache, busy threads.
+ * Same node, other CPU, busy threads.
+ * Other node, other CPU, idle cache, idle threads.
+ * Other node, other CPU, busy cache, idle threads.
+ * Other node, other CPU, busy threads.
+ */
+static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
+{
+	int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
+		CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
+		CPUIDLE_DIFF_CORE_LLC | CPUIDLE_DIFF_THREAD;
+	int cpu_tmp;
+
+	if (cpumask_test_cpu(best_cpu, tmpmask))
+		goto out;
+
+	for_each_cpu(cpu_tmp, tmpmask) {
+		int ranking, locality;
+		struct rq *tmp_rq;
+
+		ranking = 0;
+		tmp_rq = cpu_rq(cpu_tmp);
+
+		locality = rq->cpu_locality[cpu_tmp];
+#ifdef CONFIG_NUMA
+		if (locality > LOCALITY_SMP)
+			ranking |= CPUIDLE_DIFF_NODE;
+		else
+#endif
+			if (locality > LOCALITY_MC)
+				ranking |= CPUIDLE_DIFF_CPU;
+#ifdef CONFIG_SCHED_MC
+			else if (locality == LOCALITY_MC_LLC)
+				ranking |= CPUIDLE_DIFF_CORE_LLC;
+			else if (locality == LOCALITY_MC)
+				ranking |= CPUIDLE_DIFF_CORE;
+		if (!(tmp_rq->cache_idle(tmp_rq)))
+			ranking |= CPUIDLE_CACHE_BUSY;
+#endif
+#ifdef CONFIG_SCHED_SMT
+		if (locality == LOCALITY_SMT)
+			ranking |= CPUIDLE_DIFF_THREAD;
+#endif
+		if (ranking < best_ranking
+#ifdef CONFIG_SCHED_SMT
+			|| (ranking == best_ranking && (tmp_rq->siblings_idle(tmp_rq)))
+#endif
+		) {
+			best_cpu = cpu_tmp;
+			best_ranking = ranking;
+		}
+	}
+out:
+	return best_cpu;
+}
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	struct rq *this_rq = cpu_rq(this_cpu);
+
+	return (this_rq->cpu_locality[that_cpu] < LOCALITY_SMP);
+}
+
+/* As per resched_curr but only will resched idle task */
+static inline void resched_idle(struct rq *rq)
+{
+	if (test_tsk_need_resched(rq->idle))
+		return;
+
+	rq->preempt = rq->idle;
+
+	set_tsk_need_resched(rq->idle);
+
+	if (rq_local(rq)) {
+		set_preempt_need_resched();
+		return;
+	}
+
+	smp_sched_reschedule(rq->cpu);
+}
+
+DEFINE_PER_CPU(cpumask_t, idlemask);
+
+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
+{
+	cpumask_t *tmpmask = &(per_cpu(idlemask, cpu));
+	struct rq *rq;
+	int best_cpu;
+
+	cpumask_and(tmpmask, p->cpus_ptr, &cpu_idle_map);
+	best_cpu = best_mask_cpu(cpu, task_rq(p), tmpmask);
+	rq = cpu_rq(best_cpu);
+	if (!smt_schedule(p, rq))
+		return NULL;
+	rq->preempt = p;
+	resched_idle(rq);
+	return rq;
+}
+
+static inline void resched_suitable_idle(struct task_struct *p)
+{
+	if (suitable_idle_cpus(p))
+		resched_best_idle(p, task_cpu(p));
+}
+
+static inline struct rq *rq_order(struct rq *rq, int cpu)
+{
+	return rq->rq_order[cpu];
+}
+#else /* CONFIG_SMP */
+static inline void set_cpuidle_map(int cpu)
+{
+}
+
+static inline void clear_cpuidle_map(int cpu)
+{
+}
+
+static inline bool suitable_idle_cpus(struct task_struct *p)
+{
+	return uprq->curr == uprq->idle;
+}
+
+static inline void resched_suitable_idle(struct task_struct *p)
+{
+}
+
+static inline void resched_curr(struct rq *rq)
+{
+	resched_task(rq->curr);
+}
+
+static inline void resched_if_idle(struct rq *rq)
+{
+}
+
+static inline bool rq_local(struct rq *rq)
+{
+	return true;
+}
+
+static inline struct rq *rq_order(struct rq *rq, int cpu)
+{
+	return rq;
+}
+
+static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
+{
+	return true;
+}
+#endif /* CONFIG_SMP */
+
+static inline int normal_prio(struct task_struct *p)
+{
+	if (has_rt_policy(p))
+		return MAX_RT_PRIO - 1 - p->rt_priority;
+	if (idleprio_task(p))
+		return IDLE_PRIO;
+	if (iso_task(p))
+		return ISO_PRIO;
+	return NORMAL_PRIO;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks as it will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+	p->normal_prio = normal_prio(p);
+	/*
+	 * If we are RT tasks or we were boosted to RT priority,
+	 * keep the priority unchanged. Otherwise, update priority
+	 * to the normal priority:
+	 */
+	if (!rt_prio(p->prio))
+		return p->normal_prio;
+	return p->prio;
+}
+
+/*
+ * activate_task - move a task to the runqueue. Enter with rq locked.
+ */
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
+{
+	resched_if_idle(rq);
+
+	/*
+	 * Sleep time is in units of nanosecs, so shift by 20 to get a
+	 * milliseconds-range estimation of the amount of time that the task
+	 * spent sleeping:
+	 */
+	if (unlikely(prof_on == SLEEP_PROFILING)) {
+		if (p->state == TASK_UNINTERRUPTIBLE)
+			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+				     (rq->niffies - p->last_ran) >> 20);
+	}
+
+	p->prio = effective_prio(p);
+	if (task_contributes_to_load(p))
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, flags);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+}
+
+/*
+ * deactivate_task - If it's running, it's not on the runqueue and we can just
+ * decrement the nr_running. Enter with rq locked.
+ */
+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
+{
+	if (task_contributes_to_load(p))
+		rq->nr_uninterruptible++;
+
+	p->on_rq = 0;
+	sched_info_dequeued(rq, p);
+	/* deactivate_task is always DEQUEUE_SLEEP in muqss */
+	psi_dequeue(p, DEQUEUE_SLEEP);
+}
+
+#ifdef CONFIG_SMP
+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+{
+	struct rq *rq;
+
+	if (task_cpu(p) == new_cpu)
+		return;
+
+	/* Do NOT call set_task_cpu on a currently queued task as we will not
+	 * be reliably holding the rq lock after changing CPU. */
+	BUG_ON(task_queued(p));
+	rq = task_rq(p);
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock, when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+				      lockdep_is_held(rq->lock)));
+#endif
+
+	trace_sched_migrate_task(p, new_cpu);
+	rseq_migrate(p);
+	perf_event_task_migrate(p);
+
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+
+	p->wake_cpu = new_cpu;
+
+	if (task_running(rq, p)) {
+		/*
+		 * We should only be calling this on a running task if we're
+		 * holding rq lock.
+		 */
+		lockdep_assert_held(rq->lock);
+
+		/*
+		 * We can't change the task_thread_info CPU on a running task
+		 * as p will still be protected by the rq lock of the CPU it
+		 * is still running on so we only set the wake_cpu for it to be
+		 * lazily updated once off the CPU.
+		 */
+		return;
+	}
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	WRITE_ONCE(p->cpu, new_cpu);
+#else
+	WRITE_ONCE(task_thread_info(p)->cpu, new_cpu);
+#endif
+	/* We're no longer protecting p after this point since we're holding
+	 * the wrong runqueue lock. */
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Move a task off the runqueue and take it to a cpu where it will
+ * become the running task.
+ */
+static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
+{
+	struct rq *p_rq = task_rq(p);
+
+	dequeue_task(p_rq, p, DEQUEUE_SAVE);
+	if (p_rq != rq) {
+		sched_info_dequeued(p_rq, p);
+		sched_info_queued(rq, p);
+	}
+	set_task_cpu(p, cpu);
+}
+
+/*
+ * Returns a descheduling task to the runqueue unless it is being
+ * deactivated.
+ */
+static inline void return_task(struct task_struct *p, struct rq *rq,
+			       int cpu, bool deactivate)
+{
+	if (deactivate)
+		deactivate_task(p, rq);
+	else {
+#ifdef CONFIG_SMP
+		/*
+		 * set_task_cpu was called on the running task that doesn't
+		 * want to deactivate so it has to be enqueued to a different
+		 * CPU and we need its lock. Tag it to be moved as the
+		 * lock is dropped in finish_lock_switch.
+		 */
+		if (unlikely(p->wake_cpu != cpu))
+			WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
+		else
+#endif
+			enqueue_task(rq, p, ENQUEUE_RESTORE);
+	}
+}
+
+/* Enter with rq lock held. We know p is on the local cpu */
+static inline void __set_tsk_resched(struct task_struct *p)
+{
+	set_tsk_need_resched(p);
+	set_preempt_need_resched();
+}
+
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
+ */
+inline int task_curr(const struct task_struct *p)
+{
+	return cpu_curr(task_cpu(p)) == p;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+{
+	int running, queued;
+	struct rq_flags rf;
+	unsigned long ncsw;
+	struct rq *rq;
+
+	for (;;) {
+		rq = task_rq(p);
+
+		/*
+		 * If the task is actively running on another CPU
+		 * still, just relax and busy-wait without holding
+		 * any locks.
+		 *
+		 * NOTE! Since we don't hold any locks, it's not
+		 * even sure that "rq" stays as the right runqueue!
+		 * But we don't care, since this will return false
+		 * if the runqueue has changed and p is actually now
+		 * running somewhere else!
+		 */
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
+			cpu_relax();
+		}
+
+		/*
+		 * Ok, time to look more closely! We need the rq
+		 * lock now, to be *sure*. If we're wrong, we'll
+		 * just go back and repeat.
+		 */
+		rq = task_rq_lock(p, &rf);
+		trace_sched_wait_task(p);
+		running = task_running(rq, p);
+		queued = task_on_rq_queued(p);
+		ncsw = 0;
+		if (!match_state || p->state == match_state)
+			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+		task_rq_unlock(rq, p, &rf);
+
+		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
+		/*
+		 * Was it really running after all now that we
+		 * checked with the proper locks actually held?
+		 *
+		 * Oops. Go back and try again..
+		 */
+		if (unlikely(running)) {
+			cpu_relax();
+			continue;
+		}
+
+		/*
+		 * It's not enough that it's not actively running,
+		 * it must be off the runqueue _entirely_, and not
+		 * preempted!
+		 *
+		 * So if it was still runnable (but just not actively
+		 * running right now), it's preempted, and we should
+		 * yield - it could be a while.
+		 */
+		if (unlikely(queued)) {
+			ktime_t to = NSEC_PER_SEC / HZ;
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+			continue;
+		}
+
+		/*
+		 * Ahh, all good. It wasn't running, and it wasn't
+		 * runnable, which means that it will never become
+		 * running in the future either. We're all done!
+		 */
+		break;
+	}
+
+	return ncsw;
+}
+
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ *
+ * NOTE: this function doesn't have to take the runqueue lock,
+ * because all it wants to ensure is that the remote task enters
+ * the kernel. If the IPI races and the task has been migrated
+ * to another CPU then no harm is done and the purpose has been
+ * achieved as well.
+ */
+void kick_process(struct task_struct *p)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if ((cpu != smp_processor_id()) && task_curr(p))
+		smp_sched_reschedule(cpu);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kick_process);
+#endif
+
+/*
+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
+ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
+ * between themselves, they cooperatively multitask. An idle rq scores as
+ * prio PRIO_LIMIT so it is always preempted.
+ */
+static inline bool
+can_preempt(struct task_struct *p, int prio, u64 deadline)
+{
+	/* Better static priority RT task or better policy preemption */
+	if (p->prio < prio)
+		return true;
+	if (p->prio > prio)
+		return false;
+	if (p->policy == SCHED_BATCH)
+		return false;
+	/* SCHED_NORMAL and ISO will preempt based on deadline */
+	if (!deadline_before(p->deadline, deadline))
+		return false;
+	return true;
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
+/*
+ * Check to see if p can run on cpu, and if not, whether there are any online
+ * CPUs it can run on instead. This only happens with the hotplug threads that
+ * bring up the CPUs.
+ */
+static inline bool sched_other_cpu(struct task_struct *p, int cpu)
+{
+	if (likely(cpumask_test_cpu(cpu, p->cpus_ptr)))
+		return false;
+	if (p->nr_cpus_allowed == 1) {
+		cpumask_t valid_mask;
+
+		cpumask_and(&valid_mask, p->cpus_ptr, cpu_online_mask);
+		if (unlikely(cpumask_empty(&valid_mask)))
+			return false;
+	}
+	return true;
+}
+
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
+{
+	if (cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+	return true;
+}
+
+#define cpu_online_map		(*(cpumask_t *)cpu_online_mask)
+
+static void try_preempt(struct task_struct *p, struct rq *this_rq)
+{
+	int i, this_entries = rq_load(this_rq);
+	cpumask_t tmp;
+
+	if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
+		return;
+
+	/* IDLEPRIO tasks never preempt anything but idle */
+	if (p->policy == SCHED_IDLEPRIO)
+		return;
+
+	cpumask_and(&tmp, &cpu_online_map, p->cpus_ptr);
+
+	for (i = 0; i < num_online_cpus(); i++) {
+		struct rq *rq = this_rq->cpu_order[i];
+
+		if (!cpumask_test_cpu(rq->cpu, &tmp))
+			continue;
+
+		if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
+			continue;
+		if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
+			/* We set rq->preempting lockless, it's a hint only */
+			rq->preempting = p;
+			resched_curr(rq);
+			return;
+		}
+	}
+}
+
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check);
+#else /* CONFIG_SMP */
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
+{
+	return false;
+}
+
+static void try_preempt(struct task_struct *p, struct rq *this_rq)
+{
+	if (p->policy == SCHED_IDLEPRIO)
+		return;
+	if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
+		resched_curr(uprq);
+}
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+					 const struct cpumask *new_mask, bool check)
+{
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+#endif /* CONFIG_SMP */
+
+static void
+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
+{
+	struct rq *rq;
+
+	if (!schedstat_enabled())
+		return;
+
+	rq = this_rq();
+
+#ifdef CONFIG_SMP
+	if (cpu == rq->cpu) {
+		__schedstat_inc(rq->ttwu_local);
+	} else {
+		struct sched_domain *sd;
+
+		rcu_read_lock();
+		for_each_domain(rq->cpu, sd) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
+				__schedstat_inc(sd->ttwu_wake_remote);
+				break;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+#endif /* CONFIG_SMP */
+
+	__schedstat_inc(rq->ttwu_count);
+}
+
+/*
+ * Mark the task runnable and perform wakeup-preemption.
+ */
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+	/*
+	 * Sync wakeups (i.e. those types of wakeups where the waker
+	 * has indicated that it will leave the CPU in short order)
+	 * don't trigger a preemption if there are no idle cpus,
+	 * instead waiting for current to deschedule.
+	 */
+	if (wake_flags & WF_SYNC)
+		resched_suitable_idle(p);
+	else
+		try_preempt(p, rq);
+	p->state = TASK_RUNNING;
+	trace_sched_wakeup(p);
+}
+
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+	int en_flags = ENQUEUE_WAKEUP;
+
+	lockdep_assert_held(rq->lock);
+
+#ifdef CONFIG_SMP
+	if (p->sched_contributes_to_load)
+		rq->nr_uninterruptible--;
+
+	if (wake_flags & WF_MIGRATED)
+		en_flags |= ENQUEUE_MIGRATED;
+#endif
+
+	activate_task(rq, p, en_flags);
+	ttwu_do_wakeup(rq, p, wake_flags);
+}
+
+/*
+ * Called in case the task @p isn't fully descheduled from its runqueue,
+ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
+ * since all we need to do is flip p->state to TASK_RUNNING, since
+ * the task is still ->on_rq.
+ */
+static int ttwu_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p, NULL);
+	if (likely(task_on_rq_queued(p))) {
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq, NULL);
+
+	return ret;
+}
+
+#ifdef CONFIG_SMP
+void sched_ttwu_pending(void)
+{
+	struct rq *rq = this_rq();
+	struct llist_node *llist = llist_del_all(&rq->wake_list);
+	struct task_struct *p, *t;
+	struct rq_flags rf;
+
+	if (!llist)
+		return;
+
+	rq_lock_irqsave(rq, &rf);
+
+	llist_for_each_entry_safe(p, t, llist, wake_entry)
+		ttwu_do_activate(rq, p, 0);
+
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+void scheduler_ipi(void)
+{
+	/*
+	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+	 * TIF_NEED_RESCHED remotely (for the first time) will also send
+	 * this IPI.
+	 */
+	preempt_fold_need_resched();
+
+	if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
+		return;
+
+	/*
+	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+	 * traditionally all their work was done from the interrupt return
+	 * path. Now that we actually do some work, we need to make sure
+	 * we do call them.
+	 *
+	 * Some archs already do call them, luckily irq_enter/exit nest
+	 * properly.
+	 *
+	 * Arguably we should visit all archs and update all handlers,
+	 * however a fair share of IPIs are still resched only so this would
+	 * somewhat pessimize the simple resched case.
+	 */
+	irq_enter();
+	sched_ttwu_pending();
+	irq_exit();
+}
+
+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
+		if (!set_nr_if_polling(rq->idle))
+			smp_sched_reschedule(cpu);
+		else
+			trace_sched_wake_idle_without_ipi(cpu);
+	}
+}
+
+void wake_up_if_idle(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
+
+	rcu_read_lock();
+
+	if (!is_idle_task(rcu_dereference(rq->curr)))
+		goto out;
+
+	if (set_nr_if_polling(rq->idle)) {
+		trace_sched_wake_idle_without_ipi(cpu);
+	} else {
+		rq_lock_irqsave(rq, &rf);
+		if (likely(is_idle_task(rq->curr)))
+			smp_sched_reschedule(cpu);
+		/* Else cpu is not in idle, do nothing here */
+		rq_unlock_irqrestore(rq, &rf);
+	}
+
+out:
+	rcu_read_unlock();
+}
+
+static int valid_task_cpu(struct task_struct *p)
+{
+	cpumask_t valid_mask;
+
+	if (p->flags & PF_KTHREAD)
+		cpumask_and(&valid_mask, p->cpus_ptr, cpu_all_mask);
+	else
+		cpumask_and(&valid_mask, p->cpus_ptr, cpu_active_mask);
+
+	if (unlikely(!cpumask_weight(&valid_mask))) {
+		/* We shouldn't be hitting this any more */
+		printk(KERN_WARNING "SCHED: No cpumask for %s/%d weight %d\n", p->comm,
+		       p->pid, cpumask_weight(p->cpus_ptr));
+		return cpumask_any(p->cpus_ptr);
+	}
+	return cpumask_any(&valid_mask);
+}
+
+/*
+ * For a task that's just being woken up we have a valuable balancing
+ * opportunity, so choose the most lightly loaded runqueue that is nearest
+ * in cache terms.
+ * Entered with rq locked and returns with the chosen runqueue locked.
+ */
+static inline int select_best_cpu(struct task_struct *p)
+{
+	unsigned int idlest = ~0U;
+	struct rq *rq = NULL;
+	int i;
+
+	if (suitable_idle_cpus(p)) {
+		int cpu = task_cpu(p);
+
+		if (unlikely(needs_other_cpu(p, cpu)))
+			cpu = valid_task_cpu(p);
+		rq = resched_best_idle(p, cpu);
+		if (likely(rq))
+			return rq->cpu;
+	}
+
+	for (i = 0; i < num_online_cpus(); i++) {
+		struct rq *other_rq = task_rq(p)->cpu_order[i];
+		int entries;
+
+		if (!other_rq->online)
+			continue;
+		if (needs_other_cpu(p, other_rq->cpu))
+			continue;
+		entries = rq_load(other_rq);
+		if (entries >= idlest)
+			continue;
+		idlest = entries;
+		rq = other_rq;
+	}
+	if (unlikely(!rq))
+		return task_cpu(p);
+	return rq->cpu;
+}
+#else /* CONFIG_SMP */
+static int valid_task_cpu(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline int select_best_cpu(struct task_struct *p)
+{
+	return 0;
+}
+
+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
+{
+	return NULL;
+}
+#endif /* CONFIG_SMP */
+
+static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+#if defined(CONFIG_SMP)
+	if (!cpus_share_cache(smp_processor_id(), cpu)) {
+		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
+		ttwu_queue_remote(p, cpu, wake_flags);
+		return;
+	}
+#endif
+	rq_lock(rq);
+	ttwu_do_activate(rq, p, wake_flags);
+	rq_unlock(rq);
+}
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the thread to be awakened
+ * @state: the mask of task states that can be woken
+ * @wake_flags: wake modifier flags (WF_*)
+ *
+ * Put it on the run-queue if it's not already there. The "current"
+ * thread is always on the run-queue (except when the actual
+ * re-schedule is in progress), and as such you're allowed to do
+ * the simpler "current->state = TASK_RUNNING" to mark yourself
+ * runnable without the overhead of this.
+ *
+ * Return: %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
+ */
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+{
+	unsigned long flags;
+	int cpu, success = 0;
+
+	preempt_disable();
+	if (p == current) {
+		/*
+		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
+		 * == smp_processor_id()'. Together this means we can special
+		 * case the whole 'p->on_rq && ttwu_remote()' case below
+		 * without taking any locks.
+		 *
+		 * In particular:
+		 *  - we rely on Program-Order guarantees for all the ordering,
+		 *  - we're serialized against set_special_state() by virtue of
+		 *    it disabling IRQs (this allows not taking ->pi_lock).
+		 */
+		if (!(p->state & state))
+			goto out;
+
+		success = 1;
+		cpu = task_cpu(p);
+		trace_sched_waking(p);
+		p->state = TASK_RUNNING;
+		trace_sched_wakeup(p);
+		goto out;
+	}
+
+	/*
+	 * If we are going to wake up a thread waiting for CONDITION we
+	 * need to ensure that CONDITION=1 done by the caller can not be
+	 * reordered with p->state check below. This pairs with mb() in
+	 * set_current_state() the waiting thread does.
+	 */
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	smp_mb__after_spinlock();
+	if (!(p->state & state))
+		goto unlock;
+
+	trace_sched_waking(p);
+
+	/* We're going to change ->state: */
+	success = 1;
+	cpu = task_cpu(p);
+
+	/*
+	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
+	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
+	 * in smp_cond_load_acquire() below.
+	 *
+	 * sched_ttwu_pending()			try_to_wake_up()
+	 *   STORE p->on_rq = 1			  LOAD p->state
+	 *   UNLOCK rq->lock
+	 *
+	 * __schedule() (switch to task 'p')
+	 *   LOCK rq->lock			  smp_rmb();
+	 *   smp_mb__after_spinlock();
+	 *   UNLOCK rq->lock
+	 *
+	 * [task p]
+	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
+	 *
+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+	 * __schedule().  See the comment for smp_mb__after_spinlock().
+	 */
+	smp_rmb();
+	if (p->on_rq && ttwu_remote(p, wake_flags))
+		goto unlock;
+
+#ifdef CONFIG_SMP
+	/*
+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+	 * possible to, falsely, observe p->on_cpu == 0.
+	 *
+	 * One must be running (->on_cpu == 1) in order to remove oneself
+	 * from the runqueue.
+	 *
+	 * __schedule() (switch to task 'p')	try_to_wake_up()
+	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
+	 *   UNLOCK rq->lock
+	 *
+	 * __schedule() (put 'p' to sleep)
+	 *   LOCK rq->lock			  smp_rmb();
+	 *   smp_mb__after_spinlock();
+	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
+	 *
+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+	 * __schedule().  See the comment for smp_mb__after_spinlock().
+	 */
+	smp_rmb();
+
+	/*
+	 * If the owning (remote) CPU is still in the middle of schedule() with
+	 * this task as prev, wait until its done referencing the task.
+	 *
+	 * Pairs with the smp_store_release() in finish_task().
+	 *
+	 * This ensures that tasks getting woken will be fully ordered against
+	 * their previous state and preserve Program Order.
+	 */
+	smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+	p->sched_contributes_to_load = !!task_contributes_to_load(p);
+	p->state = TASK_WAKING;
+
+	if (p->in_iowait) {
+		delayacct_blkio_end(p);
+		atomic_dec(&task_rq(p)->nr_iowait);
+	}
+
+	cpu = select_best_cpu(p);
+	if (task_cpu(p) != cpu) {
+		wake_flags |= WF_MIGRATED;
+		psi_ttwu_dequeue(p);
+		set_task_cpu(p, cpu);
+	}
+
+#else /* CONFIG_SMP */
+
+	if (p->in_iowait) {
+		delayacct_blkio_end(p);
+		atomic_dec(&task_rq(p)->nr_iowait);
+	}
+
+#endif /* CONFIG_SMP */
+
+	ttwu_queue(p, cpu, wake_flags);
+unlock:
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+out:
+	if (success)
+		ttwu_stat(p, cpu, wake_flags);
+	preempt_enable();
+
+	return success;
+}
+
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * This function executes a full memory barrier before accessing the task state.
+ */
+int wake_up_process(struct task_struct *p)
+{
+	return try_to_wake_up(p, TASK_NORMAL, 0);
+}
+EXPORT_SYMBOL(wake_up_process);
+
+int wake_up_state(struct task_struct *p, unsigned int state)
+{
+	return try_to_wake_up(p, state, 0);
+}
+
+static void time_slice_expired(struct task_struct *p, struct rq *rq);
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
+{
+	unsigned long flags;
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+
+#ifdef CONFIG_COMPACTION
+	p->capture_control = NULL;
+#endif
+
+	/*
+	 * We mark the process as NEW here. This guarantees that
+	 * nobody will actually run it, and a signal or other external
+	 * event cannot wake it up and insert it on the runqueue either.
+	 * The state stays TASK_NEW until wake_up_new_task() is called.
+	 */
+	p->state = TASK_NEW;
+
+	/* Should be reset in fork.c but done here for ease of MuQSS patching */
+	p->on_cpu =
+	p->on_rq =
+	p->utime =
+	p->stime =
+	p->sched_time =
+	p->stime_ns =
+	p->utime_ns = 0;
+	skiplist_node_init(&p->node);
+
+	/*
+	 * Revert to default priority/policy on fork if requested.
+	 */
+	if (unlikely(p->sched_reset_on_fork)) {
+		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR ||
+		    p->policy == SCHED_ISO) {
+			p->policy = SCHED_NORMAL;
+			p->normal_prio = normal_prio(p);
+		}
+
+		if (PRIO_TO_NICE(p->static_prio) < 0) {
+			p->static_prio = NICE_TO_PRIO(0);
+			p->normal_prio = p->static_prio;
+		}
+
+		/*
+		 * We don't need the reset flag anymore after the fork. It has
+		 * fulfilled its duty:
+		 */
+		p->sched_reset_on_fork = 0;
+	}
+
+	/*
+	 * Silence PROVE_RCU.
+	 */
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	set_task_cpu(p, smp_processor_id());
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+#ifdef CONFIG_SCHED_INFO
+	if (unlikely(sched_info_on()))
+		memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif
+	init_task_preempt_count(p);
+
+	return 0;
+}
+
+#ifdef CONFIG_SCHEDSTATS
+
+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
+
+static void set_schedstats(bool enabled)
+{
+	if (enabled)
+		static_branch_enable(&sched_schedstats);
+	else
+		static_branch_disable(&sched_schedstats);
+}
+
+void force_schedstat_enabled(void)
+{
+	if (!schedstat_enabled()) {
+		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
+		static_branch_enable(&sched_schedstats);
+	}
+}
+
+static int __init setup_schedstats(char *str)
+{
+	int ret = 0;
+	if (!str)
+		goto out;
+
+	/*
+	 * This code is called before jump labels have been set up, so we can't
+	 * change the static branch directly just yet.  Instead set a temporary
+	 * variable so init_schedstats() can do it later.
+	 */
+	if (!strcmp(str, "enable")) {
+		__sched_schedstats = true;
+		ret = 1;
+	} else if (!strcmp(str, "disable")) {
+		__sched_schedstats = false;
+		ret = 1;
+	}
+out:
+	if (!ret)
+		pr_warn("Unable to parse schedstats=\n");
+
+	return ret;
+}
+__setup("schedstats=", setup_schedstats);
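+/*
+ * For example, booting with "schedstats=enable" on the kernel command line
+ * arranges for init_schedstats() below to flip the static branch on once
+ * jump labels are ready; "schedstats=disable" leaves it off, which is also
+ * the default since __sched_schedstats starts out false.
+ */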
+
+static void __init init_schedstats(void)
+{
+	set_schedstats(__sched_schedstats);
+}
+
+#ifdef CONFIG_PROC_SYSCTL
+int sysctl_schedstats(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table t;
+	int err;
+	int state = static_branch_likely(&sched_schedstats);
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	t = *table;
+	t.data = &state;
+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+	if (err < 0)
+		return err;
+	if (write)
+		set_schedstats(state);
+	return err;
+}
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
+
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
+
+static void account_task_cpu(struct rq *rq, struct task_struct *p)
+{
+	update_clocks(rq);
+	/* This isn't really a context switch but accounting is the same */
+	update_cpu_clock_switch(rq, p);
+	p->last_ran = rq->niffies;
+}
+
+bool sched_smp_initialized __read_mostly;
+
+static inline int hrexpiry_enabled(struct rq *rq)
+{
+	if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
+		return 0;
+	return hrtimer_is_hres_active(&rq->hrexpiry_timer);
+}
+
+/*
+ * Use HR-timers to deliver accurate preemption points.
+ */
+static inline void hrexpiry_clear(struct rq *rq)
+{
+	if (!hrexpiry_enabled(rq))
+		return;
+	if (hrtimer_active(&rq->hrexpiry_timer))
+		hrtimer_cancel(&rq->hrexpiry_timer);
+}
+
+/*
+ * High-resolution time_slice expiry.
+ * Runs from hardirq context with interrupts disabled.
+ */
+static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
+{
+	struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
+	struct task_struct *p;
+
+	/* This can happen during CPU hotplug / resume */
+	if (unlikely(cpu_of(rq) != smp_processor_id()))
+		goto out;
+
+	/*
+	 * We're doing this without the runqueue lock but this should always
+	 * be run on the local CPU. Time slice should run out in __schedule
+	 * but we set it to zero here in case niffies is slightly less.
+	 */
+	p = rq->curr;
+	p->time_slice = 0;
+	__set_tsk_resched(p);
+out:
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Called to set the hrexpiry timer state.
+ *
+ * called with irqs disabled from the local CPU only
+ */
+static void hrexpiry_start(struct rq *rq, u64 delay)
+{
+	if (!hrexpiry_enabled(rq))
+		return;
+
+	hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static void init_rq_hrexpiry(struct rq *rq)
+{
+	hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rq->hrexpiry_timer.function = hrexpiry;
+}
+
+static inline int rq_dither(struct rq *rq)
+{
+	if (!hrexpiry_enabled(rq))
+		return HALF_JIFFY_US;
+	return 0;
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+void wake_up_new_task(struct task_struct *p)
+{
+	struct task_struct *parent, *rq_curr;
+	struct rq *rq, *new_rq;
+	unsigned long flags;
+
+	parent = p->parent;
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	p->state = TASK_RUNNING;
+	/* Task_rq can't change yet on a new task */
+	new_rq = rq = task_rq(p);
+	if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
+		set_task_cpu(p, valid_task_cpu(p));
+		new_rq = task_rq(p);
+	}
+
+	double_rq_lock(rq, new_rq);
+	rq_curr = rq->curr;
+
+	/*
+	 * Make sure we do not leak PI boosting priority to the child.
+	 */
+	p->prio = rq_curr->normal_prio;
+
+	trace_sched_wakeup_new(p);
+
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness. If it's negative, it won't
+	 * matter since that's the same as being 0. rq->rq_deadline is only
+	 * modified within schedule() so it is always equal to
+	 * current->deadline.
+	 */
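+	/*
+	 * Illustrative numbers only: a parent holding a 2000us time_slice
+	 * keeps 1000us below and the child starts with the other 1000us; if
+	 * the halved slice falls below RESCHED_US the parent is rescheduled
+	 * instead and the child gets a fresh slice and deadline.
+	 */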
+	account_task_cpu(rq, rq_curr);
+	p->last_ran = rq_curr->last_ran;
+	if (likely(rq_curr->policy != SCHED_FIFO)) {
+		rq_curr->time_slice /= 2;
+		if (rq_curr->time_slice < RESCHED_US) {
+			/*
+			 * Forking task has run out of timeslice. Reschedule it and
+			 * start its child with a new time slice and deadline. The
+			 * child will end up running first because its deadline will
+			 * be slightly earlier.
+			 */
+			__set_tsk_resched(rq_curr);
+			time_slice_expired(p, new_rq);
+			if (suitable_idle_cpus(p))
+				resched_best_idle(p, task_cpu(p));
+			else if (unlikely(rq != new_rq))
+				try_preempt(p, new_rq);
+		} else {
+			p->time_slice = rq_curr->time_slice;
+			if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
+				/*
+				 * The VM isn't cloned, so we're in a good position to
+				 * do child-runs-first in anticipation of an exec. This
+				 * usually avoids a lot of COW overhead.
+				 */
+				__set_tsk_resched(rq_curr);
+			} else {
+				/*
+				 * Adjust the hrexpiry since rq_curr will keep
+				 * running and its timeslice has been shortened.
+				 */
+				hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
+				try_preempt(p, new_rq);
+			}
+		}
+	} else {
+		time_slice_expired(p, new_rq);
+		try_preempt(p, new_rq);
+	}
+	activate_task(new_rq, p, 0);
+	double_rq_unlock(rq, new_rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
+
+void preempt_notifier_inc(void)
+{
+	static_branch_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+	static_branch_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
+
+/**
+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
+ * @notifier: notifier struct to register
+ */
+void preempt_notifier_register(struct preempt_notifier *notifier)
+{
+	if (!static_branch_unlikely(&preempt_notifier_key))
+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
+	hlist_add_head(&notifier->link, &current->preempt_notifiers);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
+
+/**
+ * preempt_notifier_unregister - no longer interested in preemption notifications
+ * @notifier: notifier struct to unregister
+ *
+ * This is *not* safe to call from within a preemption notifier.
+ */
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
+{
+	hlist_del(&notifier->link);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
+
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+	struct preempt_notifier *notifier;
+
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+		notifier->ops->sched_in(notifier, raw_smp_processor_id());
+}
+
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+	if (static_branch_unlikely(&preempt_notifier_key))
+		__fire_sched_in_preempt_notifiers(curr);
+}
+
+static void
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+				 struct task_struct *next)
+{
+	struct preempt_notifier *notifier;
+
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+		notifier->ops->sched_out(notifier, next);
+}
+
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+				 struct task_struct *next)
+{
+	if (static_branch_unlikely(&preempt_notifier_key))
+		__fire_sched_out_preempt_notifiers(curr, next);
+}
+
+#else /* !CONFIG_PREEMPT_NOTIFIERS */
+
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+}
+
+static inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+				 struct task_struct *next)
+{
+}
+
+#endif /* CONFIG_PREEMPT_NOTIFIERS */
+
+static inline void prepare_task(struct task_struct *next)
+{
+	/*
+	 * Claim the task as running, we do this before switching to it
+	 * such that any running task will have this set.
+	 */
+	next->on_cpu = 1;
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+	 */
+	smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void
+prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+	/*
+	 * The runqueue lock will be released by the next
+	 * task (which is an invalid locking op but in the case
+	 * of the scheduler it's an obvious special-case), so we
+	 * do an early lockdep release here:
+	 */
+	spin_release(&rq->lock->dep_map, _THIS_IP_);
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock->owner = next;
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock->dep_map, 0, 0, _THIS_IP_);
+
+#ifdef CONFIG_SMP
+	/*
+	 * If prev was marked as migrating to another CPU in return_task, drop
+	 * the local runqueue lock but leave interrupts disabled and grab the
+	 * remote lock we're migrating it to before enabling them.
+	 */
+	if (unlikely(task_on_rq_migrating(prev))) {
+		sched_info_dequeued(rq, prev);
+		/*
+		 * We move the ownership of prev to the new cpu now. ttwu can't
+		 * activate prev to the wrong cpu since it has to grab this
+		 * runqueue in ttwu_remote.
+		 */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+		prev->cpu = prev->wake_cpu;
+#else
+		task_thread_info(prev)->cpu = prev->wake_cpu;
+#endif
+		raw_spin_unlock(rq->lock);
+
+		raw_spin_lock(&prev->pi_lock);
+		rq = __task_rq_lock(prev, NULL);
+		/* Check that someone else hasn't already queued prev */
+		if (likely(!task_queued(prev))) {
+			enqueue_task(rq, prev, 0);
+			prev->on_rq = TASK_ON_RQ_QUEUED;
+			/* Wake up the CPU if it's not already running */
+			resched_if_idle(rq);
+		}
+		raw_spin_unlock(&prev->pi_lock);
+	}
+#endif
+	rq_unlock(rq);
+	local_irq_enable();
+}
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev)	do { } while (0)
+#endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
+		    struct task_struct *next)
+{
+	kcov_prepare_switch(prev);
+	sched_info_switch(rq, prev, next);
+	perf_event_task_sched_out(prev, next);
+	rseq_preempt(prev);
+	fire_sched_out_preempt_notifiers(prev, next);
+	prepare_task(next);
+	prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock.  (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
+ */
+static void finish_task_switch(struct task_struct *prev)
+	__releases(rq->lock)
+{
+	struct rq *rq = this_rq();
+	struct mm_struct *mm = rq->prev_mm;
+	long prev_state;
+
+	/*
+	 * The previous task will have left us with a preempt_count of 2
+	 * because it left us after:
+	 *
+	 *	schedule()
+	 *	  preempt_disable();			// 1
+	 *	  __schedule()
+	 *	    raw_spin_lock_irq(rq->lock)	// 2
+	 *
+	 * Also, see FORK_PREEMPT_COUNT.
+	 */
+	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
+		      "corrupted preempt_count: %s/%d/0x%x\n",
+		      current->comm, current->pid, preempt_count()))
+		preempt_count_set(FORK_PREEMPT_COUNT);
+
+	rq->prev_mm = NULL;
+
+	/*
+	 * A task struct has one reference for the use as "current".
+	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+	 * schedule one last time. The schedule call will never return, and
+	 * the scheduled task must drop that reference.
+	 *
+	 * We must observe prev->state before clearing prev->on_cpu (in
+	 * finish_task), otherwise a concurrent wakeup can get prev
+	 * running on another CPU and we could race with its RUNNING -> DEAD
+	 * transition, resulting in a double drop.
+	 */
+	prev_state = prev->state;
+	vtime_task_switch(prev);
+	perf_event_task_sched_in(prev, current);
+	finish_task(prev);
+	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
+	kcov_finish_switch(current);
+
+	fire_sched_in_preempt_notifiers(current);
+	/*
+	 * When switching through a kernel thread, the loop in
+	 * membarrier_{private,global}_expedited() may have observed that
+	 * kernel thread and not issued an IPI. It is therefore possible to
+	 * schedule between user->kernel->user threads without passing through
+	 * switch_mm(). Membarrier requires a barrier after storing to
+	 * rq->curr, before returning to userspace, so provide them here:
+	 *
+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
+	 *   provided by mmdrop(),
+	 * - a sync_core for SYNC_CORE.
+	 */
+	if (mm) {
+		membarrier_mm_sync_core_before_usermode(mm);
+		mmdrop(mm);
+	}
+	if (unlikely(prev_state == TASK_DEAD)) {
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+		 */
+		kprobe_flush_task(prev);
+
+		/* Task is done with its stack. */
+		put_task_stack(prev);
+
+		put_task_struct_rcu_user(prev);
+	}
+}
+
+/**
+ * schedule_tail - first thing a freshly forked thread must call.
+ * @prev: the thread we just switched away from.
+ */
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
+{
+	/*
+	 * New tasks start with FORK_PREEMPT_COUNT, see there and
+	 * finish_task_switch() for details.
+	 *
+	 * finish_task_switch() will drop rq->lock() and lower preempt_count
+	 * and the preempt_enable() will end up enabling preemption (on
+	 * PREEMPT_COUNT kernels).
+	 */
+
+	finish_task_switch(prev);
+	preempt_enable();
+
+	if (current->set_child_tid)
+		put_user(task_pid_vnr(current), current->set_child_tid);
+
+	calculate_sigpending();
+}
+
+/*
+ * context_switch - switch to the new MM and the new thread's register state.
+ */
+static __always_inline void
+context_switch(struct rq *rq, struct task_struct *prev,
+	       struct task_struct *next)
+{
+	prepare_task_switch(rq, prev, next);
+
+	/*
+	 * For paravirt, this is coupled with an exit in switch_to to
+	 * combine the page table reload and the switch backend into
+	 * one hypercall.
+	 */
+	arch_start_context_switch(prev);
+
+	/*
+	 * kernel -> kernel   lazy + transfer active
+	 *   user -> kernel   lazy + mmgrab() active
+	 *
+	 * kernel ->   user   switch + mmdrop() active
+	 *   user ->   user   switch
+	 */
+	if (!next->mm) {                                // to kernel
+		enter_lazy_tlb(prev->active_mm, next);
+
+		next->active_mm = prev->active_mm;
+		if (prev->mm)                           // from user
+			mmgrab(prev->active_mm);
+		else
+			prev->active_mm = NULL;
+	} else {                                        // to user
+		membarrier_switch_mm(rq, prev->active_mm, next->mm);
+		/*
+		 * sys_membarrier() requires an smp_mb() between setting
+		 * rq->curr / membarrier_switch_mm() and returning to userspace.
+		 *
+		 * The below provides this either through switch_mm(), or in
+		 * case 'prev->active_mm == next->mm' through
+		 * finish_task_switch()'s mmdrop().
+		 */
+		switch_mm_irqs_off(prev->active_mm, next->mm, next);
+
+		if (!prev->mm) {                        // from kernel
+			/* will mmdrop() in finish_task_switch(). */
+			rq->prev_mm = prev->active_mm;
+			prev->active_mm = NULL;
+		}
+	}
+	prepare_lock_switch(rq, next);
+
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next, prev);
+	barrier();
+
+	finish_task_switch(prev);
+}
+
+/*
+ * nr_running, nr_uninterruptible and nr_context_switches:
+ *
+ * externally visible scheduler statistics: current number of runnable
+ * threads, total number of context switches performed since bootup.
+ */
+unsigned long nr_running(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->nr_running;
+
+	return sum;
+}
+
+static unsigned long nr_uninterruptible(void)
+{
+	unsigned long i, sum = 0;
+
+	for_each_online_cpu(i)
+		sum += cpu_rq(i)->nr_uninterruptible;
+
+	return sum;
+}
+
+/*
+ * Check if only the current task is running on the CPU.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race.  The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptible section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
+ */
+bool single_task_running(void)
+{
+	return rq_load(raw_rq()) == 1;
+}
+EXPORT_SYMBOL(single_task_running);
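+/*
+ * Illustrative usage sketch only (not a caller in this file): a polling loop
+ * on a thread bound to a single CPU might use it along the lines of
+ *
+ *	while (single_task_running() && !need_resched())
+ *		cpu_relax();
+ *
+ * so that polling stops as soon as another task becomes runnable locally.
+ */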
+
+unsigned long long nr_context_switches(void)
+{
+	int cpu;
+	unsigned long long sum = 0;
+
+	for_each_possible_cpu(cpu)
+		sum += cpu_rq(cpu)->nr_switches;
+
+	return sum;
+}
+
+/*
+ * Consumers of these two interfaces, like for example the cpufreq menu
+ * governor, are using nonsensical data: boosting frequency for a CPU that has
+ * IO-wait, which might not even end up running the task when it does become
+ * runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
+/*
+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
+ *
+ * The idea behind IO-wait accounting is to account the idle time that we could
+ * have spent running if it were not for IO. That is, if we were to improve the
+ * storage performance, we'd have a proportional reduction in IO-wait time.
+ *
+ * This all works nicely on UP, where, when a task blocks on IO, we account
+ * idle time as IO-wait, because if the storage were faster, it could've been
+ * running and we'd not be idle.
+ *
+ * This has been extended to SMP, by doing the same for each CPU. This however
+ * is broken.
+ *
+ * Imagine for instance the case where two tasks block on one CPU, only the one
+ * CPU will have IO-wait accounted, while the other has regular idle. Even
+ * though, if the storage were faster, both could've run at the same time,
+ * utilising both CPUs.
+ *
+ * This means, that when looking globally, the current IO-wait accounting on
+ * SMP is a lower bound, by reason of under-accounting.
+ *
+ * Worse, since the numbers are provided per CPU, they are sometimes
+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
+ * associated with any one particular CPU; it can wake up on a different CPU
+ * than the one it blocked on. This means the per-CPU IO-wait number is
+ * meaningless.
+ *
+ * Task CPU affinities can make all that even more 'interesting'.
+ */
+
+unsigned long nr_iowait(void)
+{
+	unsigned long cpu, sum = 0;
+
+	for_each_possible_cpu(cpu)
+		sum += nr_iowait_cpu(cpu);
+
+	return sum;
+}
+
+unsigned long nr_active(void)
+{
+	return nr_running() + nr_uninterruptible();
+}
+
+/* Variables and functions for calc_load */
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:	pointer to dest load array
+ * @offset:	offset to add
+ * @shift:	shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+	loads[0] = (avenrun[0] + offset) << shift;
+	loads[1] = (avenrun[1] + offset) << shift;
+	loads[2] = (avenrun[2] + offset) << shift;
+}
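+/*
+ * Illustrative consumer sketch (modelled on fs/proc/loadavg.c): the three
+ * fixed-point averages are typically read with a small rounding offset and
+ * split into integer and fractional parts for display, roughly:
+ *
+ *	unsigned long avnrun[3];
+ *
+ *	get_avenrun(avnrun, FIXED_1/200, 0);
+ *	pr_info("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
+ *		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+ *		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+ *		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
+ */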
+
+/*
+ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
+ */
+void calc_global_load(unsigned long ticks)
+{
+	long active;
+
+	if (time_before(jiffies, READ_ONCE(calc_load_update)))
+		return;
+	active = nr_active() * FIXED_1;
+
+	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+	calc_load_update = jiffies + LOAD_FREQ;
+}
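+/*
+ * Worked example (illustrative, assuming FIXED_1 = 2048 and EXP_1 = 1884):
+ * with a previous 1-minute average of 0.50 (1024 in fixed point) and three
+ * currently active tasks (active = 3 * 2048 = 6144), the update above gives
+ *
+ *	(1024 * 1884 + 6144 * (2048 - 1884)) / 2048 ~= 1434,
+ *
+ * i.e. the average moves from 0.50 towards 3.00 and now reads roughly 0.70.
+ */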
+
+/**
+ * fixed_power_int - compute: x^n, in O(log n) time
+ *
+ * @x:         base of the power
+ * @frac_bits: fractional bits of @x
+ * @n:         power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ */
+static unsigned long
+fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
+{
+	unsigned long result = 1UL << frac_bits;
+
+	if (n) {
+		for (;;) {
+			if (n & 1) {
+				result *= x;
+				result += 1UL << (frac_bits - 1);
+				result >>= frac_bits;
+			}
+			n >>= 1;
+			if (!n)
+				break;
+			x *= x;
+			x += 1UL << (frac_bits - 1);
+			x >>= frac_bits;
+		}
+	}
+
+	return result;
+}
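+/*
+ * Worked example (illustrative only): to raise x to the 22nd power, note that
+ * 22 = 10110 in binary, so x^22 = x^2 * x^4 * x^16.  The loop above squares x
+ * once per bit of n and folds it into the result only where a bit is set,
+ * giving 4 squarings plus 3 multiplies instead of 21 multiplies, with each
+ * step rounded and shifted back down to keep @frac_bits fractional bits.
+ */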
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ *
+ * a2 = a1 * e + a * (1 - e)
+ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
+ *    = a0 * e^2 + a * (1 - e) * (1 + e)
+ *
+ * a3 = a2 * e + a * (1 - e)
+ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
+ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
+ *
+ *  ...
+ *
+ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1)) [1]
+ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
+ *    = a0 * e^n + a * (1 - e^n)
+ *
+ * [1] application of the geometric series:
+ *
+ *              n         1 - x^(n+1)
+ *     S_n := \Sum x^i = -------------
+ *             i=0          1 - x
+ */
+unsigned long
+calc_load_n(unsigned long load, unsigned long exp,
+	    unsigned long active, unsigned int n)
+{
+	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
+}
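+/*
+ * Per the closed form above, calc_load_n() applies n successive update
+ * intervals in one step: e.g. for n = 3 it returns the same value as calling
+ * calc_load() three times in a row with the same 'active', without iterating.
+ */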
+
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
+
+#ifdef CONFIG_PARAVIRT
+static inline u64 steal_ticks(u64 steal)
+{
+	if (unlikely(steal > NSEC_PER_SEC))
+		return div_u64(steal, TICK_NSEC);
+
+	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
+}
+#endif
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
+#endif
+
+/*
+ * On each tick, add the number of nanoseconds to the unbanked variables and
+ * once one tick's worth has accumulated, account it allowing for accurate
+ * sub-tick accounting and totals. Use the TICK_APPROX_NS to match the way we
+ * deduct nanoseconds.
+ */
+static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
+{
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	unsigned long ticks;
+
+	if (atomic_read(&rq->nr_iowait) > 0) {
+		rq->iowait_ns += ns;
+		if (rq->iowait_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->iowait_ns);
+			cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->iowait_ns %= JIFFY_NS;
+		}
+	} else {
+		rq->idle_ns += ns;
+		if (rq->idle_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->idle_ns);
+			cpustat[CPUTIME_IDLE] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->idle_ns %= JIFFY_NS;
+		}
+	}
+	acct_update_integrals(idle);
+}
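+/*
+ * Illustrative numbers only (assuming HZ = 250, so JIFFY_NS = 4,000,000):
+ * three successive idle intervals of 1.5ms bank 4.5ms in rq->idle_ns, at
+ * which point one whole tick's worth (TICK_APPROX_NS) is charged to
+ * CPUTIME_IDLE and the remaining 0.5ms stays banked for the next interval.
+ */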
+
+static void pc_system_time(struct rq *rq, struct task_struct *p,
+			   int hardirq_offset, unsigned long ns)
+{
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	unsigned long ticks;
+
+	p->stime_ns += ns;
+	if (p->stime_ns >= JIFFY_NS) {
+		ticks = NS_TO_JIFFIES(p->stime_ns);
+		p->stime_ns %= JIFFY_NS;
+		p->stime += (__force u64)TICK_APPROX_NS * ticks;
+		account_group_system_time(p, TICK_APPROX_NS * ticks);
+	}
+	p->sched_time += ns;
+	account_group_exec_runtime(p, ns);
+
+	if (hardirq_count() - hardirq_offset) {
+		rq->irq_ns += ns;
+		if (rq->irq_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->irq_ns);
+			cpustat[CPUTIME_IRQ] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->irq_ns %= JIFFY_NS;
+		}
+	} else if (in_serving_softirq()) {
+		rq->softirq_ns += ns;
+		if (rq->softirq_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->softirq_ns);
+			cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->softirq_ns %= JIFFY_NS;
+		}
+	} else {
+		rq->system_ns += ns;
+		if (rq->system_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->system_ns);
+			cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->system_ns %= JIFFY_NS;
+		}
+	}
+	acct_update_integrals(p);
+}
+
+static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
+{
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	unsigned long ticks;
+
+	p->utime_ns += ns;
+	if (p->utime_ns >= JIFFY_NS) {
+		ticks = NS_TO_JIFFIES(p->utime_ns);
+		p->utime_ns %= JIFFY_NS;
+		p->utime += (__force u64)TICK_APPROX_NS * ticks;
+		account_group_user_time(p, TICK_APPROX_NS * ticks);
+	}
+	p->sched_time += ns;
+	account_group_exec_runtime(p, ns);
+
+	if (this_cpu_ksoftirqd() == p) {
+		/*
+		 * ksoftirqd time does not get accounted in cpu_softirq_time.
+		 * So, we have to handle it separately here.
+		 */
+		rq->softirq_ns += ns;
+		if (rq->softirq_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->softirq_ns);
+			cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->softirq_ns %= JIFFY_NS;
+		}
+	}
+
+	if (task_nice(p) > 0 || idleprio_task(p)) {
+		rq->nice_ns += ns;
+		if (rq->nice_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->nice_ns);
+			cpustat[CPUTIME_NICE] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->nice_ns %= JIFFY_NS;
+		}
+	} else {
+		rq->user_ns += ns;
+		if (rq->user_ns >= JIFFY_NS) {
+			ticks = NS_TO_JIFFIES(rq->user_ns);
+			cpustat[CPUTIME_USER] += (__force u64)TICK_APPROX_NS * ticks;
+			rq->user_ns %= JIFFY_NS;
+		}
+	}
+	acct_update_integrals(p);
+}
+
+/*
+ * This is called on clock ticks.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * CPU scheduler quota accounting is also performed here in microseconds.
+ */
+static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
+{
+	s64 account_ns = rq->niffies - p->last_ran;
+	struct task_struct *idle = rq->idle;
+
+	/* Accurate tick timekeeping */
+	if (user_mode(get_irq_regs()))
+		pc_user_time(rq, p, account_ns);
+	else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
+		pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
+	else
+		pc_idle_time(rq, idle, account_ns);
+
+	/* time_slice accounting is done in usecs to avoid overflow on 32bit */
+	if (p->policy != SCHED_FIFO && p != idle)
+		p->time_slice -= NS_TO_US(account_ns);
+
+	p->last_ran = rq->niffies;
+}
+
+/*
+ * This is called on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * CPU scheduler quota accounting is also performed here in microseconds.
+ */
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
+{
+	s64 account_ns = rq->niffies - p->last_ran;
+	struct task_struct *idle = rq->idle;
+
+	/* Accurate subtick timekeeping */
+	if (p != idle)
+		pc_user_time(rq, p, account_ns);
+	else
+		pc_idle_time(rq, idle, account_ns);
+
+	/* time_slice accounting is done in usecs to avoid overflow on 32bit */
+	if (p->policy != SCHED_FIFO && p != idle)
+		p->time_slice -= NS_TO_US(account_ns);
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been accounted in
+ * @p in case that task is currently running.
+ *
+ * Called with task_rq_lock(p) held.
+ */
+static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
+{
+	u64 ns = 0;
+
+	/*
+	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (p == rq->curr && task_on_rq_queued(p)) {
+		update_clocks(rq);
+		ns = rq->niffies - p->last_ran;
+	}
+
+	return ns;
+}
+
+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+void arch_set_thermal_pressure(struct cpumask *cpus,
+			       unsigned long th_pressure)
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpus)
+		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+
+/*
+ * Return accounted runtime for the task.
+ * Return separately the current's pending runtime that have not been
+ * accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+	u64 ns;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+	/*
+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
+	 * So we have an optimisation chance when the task's delta_exec is 0.
+	 * Reading ->on_cpu is racy, but this is ok.
+	 *
+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
+	 * If we race with it entering CPU, unaccounted time is 0. This is
+	 * indistinguishable from the read occurring a few cycles earlier.
+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+	 * been accounted, so we're correct here as well.
+	 */
+	if (!p->on_cpu || !task_on_rq_queued(p))
+		return tsk_seruntime(p);
+#endif
+
+	rq = task_rq_lock(p, &rf);
+	ns = p->sched_time + do_task_delta_exec(p, rq);
+	task_rq_unlock(rq, p, &rf);
+
+	return ns;
+}
+
+/*
+ * Functions to test for when SCHED_ISO tasks have used their allocated
+ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
+ * data is modified only by the local runqueue during scheduler_tick with
+ * interrupts disabled.
+ */
+
+/*
+ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
+ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
+ * slow division.
+ */
+static inline void iso_tick(struct rq *rq)
+{
+	rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
+	rq->iso_ticks += 100;
+	if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
+		rq->iso_refractory = true;
+		if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
+			rq->iso_ticks = ISO_PERIOD * 100;
+	}
+}
+
+/* No SCHED_ISO task was running so decrease rq->iso_ticks */
+static inline void no_iso_tick(struct rq *rq, int ticks)
+{
+	if (rq->iso_ticks > 0 || rq->iso_refractory) {
+		rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
+		if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
+			rq->iso_refractory = false;
+			if (unlikely(rq->iso_ticks < 0))
+				rq->iso_ticks = 0;
+		}
+	}
+}
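+/*
+ * Illustrative numbers only (assuming a sched_iso_cpu setting of 70): the
+ * refractory flag is set once iso_ticks exceeds ISO_PERIOD * 70 and is only
+ * cleared again once it decays below ISO_PERIOD * (70 * 115 / 128), i.e.
+ * ISO_PERIOD * 62, roughly 10% lower, with the divide by 128 reducing to a
+ * shift as described above.
+ */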
+
+/* This manages tasks that have run out of timeslice during a scheduler_tick */
+static void task_running_tick(struct rq *rq)
+{
+	struct task_struct *p = rq->curr;
+
+	/*
+	 * If a SCHED_ISO task is running we increment the iso_ticks. In
+	 * order to prevent SCHED_ISO tasks from causing starvation in the
+	 * presence of true RT tasks we account those as iso_ticks as well.
+	 */
+	if (rt_task(p) || task_running_iso(p))
+		iso_tick(rq);
+	else
+		no_iso_tick(rq, 1);
+
+	/* SCHED_FIFO tasks never run out of timeslice. */
+	if (p->policy == SCHED_FIFO)
+		return;
+
+	if (iso_task(p)) {
+		if (task_running_iso(p)) {
+			if (rq->iso_refractory) {
+				/*
+				 * SCHED_ISO task is running as RT and limit
+				 * has been hit. Force it to reschedule as
+				 * SCHED_NORMAL by zeroing its time_slice
+				 */
+				p->time_slice = 0;
+			}
+		} else if (!rq->iso_refractory) {
+			/* Can now run as ISO again. Reschedule to pick up prio */
+			goto out_resched;
+		}
+	}
+
+	/*
+	 * Tasks that were scheduled in the first half of a tick are not
+	 * allowed to run into the 2nd half of the next tick if they will
+	 * run out of time slice in the interim. Otherwise, if they have
+	 * less than RESCHED_US μs of time slice left they will be rescheduled.
+	 * Dither is used as a backup for when hrexpiry is disabled or high res
+	 * timers not configured in.
+	 */
+	if (p->time_slice - rq->dither >= RESCHED_US)
+		return;
+out_resched:
+	rq_lock(rq);
+	__set_tsk_resched(p);
+	rq_unlock(rq);
+}
+
+static inline void task_tick(struct rq *rq)
+{
+	if (!rq_idle(rq))
+		task_running_tick(rq);
+	else if (rq->last_jiffy > rq->last_scheduler_tick)
+		no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * We can stop the timer tick any time highres timers are active since
+ * we rely entirely on highres timeouts for task expiry rescheduling.
+ */
+static void sched_stop_tick(struct rq *rq, int cpu)
+{
+	if (!hrexpiry_enabled(rq))
+		return;
+	if (!tick_nohz_full_enabled())
+		return;
+	if (!tick_nohz_full_cpu(cpu))
+		return;
+	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+
+static inline void sched_start_tick(struct rq *rq, int cpu)
+{
+	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+
+struct tick_work {
+	int			cpu;
+	atomic_t		state;
+	struct delayed_work	work;
+};
+/* Values for ->state, see diagram below. */
+#define TICK_SCHED_REMOTE_OFFLINE	0
+#define TICK_SCHED_REMOTE_OFFLINING	1
+#define TICK_SCHED_REMOTE_RUNNING	2
+
+/*
+ * State diagram for ->state:
+ *
+ *
+ *          TICK_SCHED_REMOTE_OFFLINE
+ *                    |   ^
+ *                    |   |
+ *                    |   | sched_tick_remote()
+ *                    |   |
+ *                    |   |
+ *                    +--TICK_SCHED_REMOTE_OFFLINING
+ *                    |   ^
+ *                    |   |
+ * sched_tick_start() |   | sched_tick_stop()
+ *                    |   |
+ *                    V   |
+ *          TICK_SCHED_REMOTE_RUNNING
+ *
+ *
+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
+ * and sched_tick_start() are happy to leave the state in RUNNING.
+ */
+
+static struct tick_work __percpu *tick_work_cpu;
+
+static void sched_tick_remote(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
+	int cpu = twork->cpu;
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *curr;
+	u64 delta;
+	int os;
+
+	/*
+	 * Handle the tick only if it appears the remote CPU is running in full
+	 * dynticks mode. The check is racy by nature, but missing a tick or
+	 * having one too much is no big deal because the scheduler tick updates
+	 * statistics and checks timeslices in a time-independent way, regardless
+	 * of when exactly it is running.
+	 */
+	if (!tick_nohz_tick_stopped_cpu(cpu))
+		goto out_requeue;
+
+	rq_lock_irq(rq);
+	if (cpu_is_offline(cpu))
+		goto out_unlock;
+
+	curr = rq->curr;
+	update_rq_clock(rq);
+
+	if (!is_idle_task(curr)) {
+		/*
+		 * Make sure the next tick runs within a reasonable
+		 * amount of time.
+		 */
+		delta = rq_clock_task(rq) - curr->last_ran;
+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+	}
+	task_tick(rq);
+
+out_unlock:
+	rq_unlock_irq(rq, NULL);
+
+out_requeue:
+
+	/*
+	 * Run the remote tick once per second (1Hz). This arbitrary
+	 * frequency is large enough to avoid overload but short enough
+	 * to keep scheduler internal stats reasonably up to date.  But
+	 * first update state to reflect hotplug activity if required.
+	 */
+	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
+	if (os == TICK_SCHED_REMOTE_RUNNING)
+		queue_delayed_work(system_unbound_wq, dwork, HZ);
+}
+
+static void sched_tick_start(int cpu)
+{
+	struct tick_work *twork;
+	int os;
+
+	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+		return;
+
+	WARN_ON_ONCE(!tick_work_cpu);
+
+	twork = per_cpu_ptr(tick_work_cpu, cpu);
+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
+	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
+	if (os == TICK_SCHED_REMOTE_OFFLINE) {
+		twork->cpu = cpu;
+		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void sched_tick_stop(int cpu)
+{
+	struct tick_work *twork;
+	int os;
+
+	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+		return;
+
+	WARN_ON_ONCE(!tick_work_cpu);
+
+	twork = per_cpu_ptr(tick_work_cpu, cpu);
+	/* There cannot be competing actions, but don't rely on stop-machine. */
+	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+	/* Don't cancel, as this would mess up the state machine. */
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __init sched_tick_offload_init(void)
+{
+	tick_work_cpu = alloc_percpu(struct tick_work);
+	BUG_ON(!tick_work_cpu);
+	return 0;
+}
+
+#else /* !CONFIG_NO_HZ_FULL */
+static inline void sched_stop_tick(struct rq *rq, int cpu) {}
+static inline void sched_start_tick(struct rq *rq, int cpu) {}
+static inline void sched_tick_start(int cpu) { }
+static inline void sched_tick_stop(int cpu) { }
+#endif
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void scheduler_tick(void)
+{
+	int cpu __maybe_unused = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+
+	arch_scale_freq_tick();
+	sched_clock_tick();
+	update_clocks(rq);
+	update_load_avg(rq, 0);
+	update_cpu_clock_tick(rq, rq->curr);
+	task_tick(rq);
+	rq->last_scheduler_tick = rq->last_jiffy;
+	rq->last_tick = rq->clock;
+	psi_task_tick(rq);
+	perf_event_task_tick();
+	sched_stop_tick(rq, cpu);
+}
+
+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
+				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
+/*
+ * If the value passed in is equal to the current preempt count
+ * then we just disabled preemption. Start timing the latency.
+ */
+static inline void preempt_latency_start(int val)
+{
+	if (preempt_count() == val) {
+		unsigned long ip = get_lock_parent_ip();
+#ifdef CONFIG_DEBUG_PREEMPT
+		current->preempt_disable_ip = ip;
+#endif
+		trace_preempt_off(CALLER_ADDR0, ip);
+	}
+}
+
+void preempt_count_add(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	/*
+	 * Underflow?
+	 */
+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+		return;
+#endif
+	__preempt_count_add(val);
+#ifdef CONFIG_DEBUG_PREEMPT
+	/*
+	 * Spinlock count overflowing soon?
+	 */
+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+				PREEMPT_MASK - 10);
+#endif
+	preempt_latency_start(val);
+}
+EXPORT_SYMBOL(preempt_count_add);
+NOKPROBE_SYMBOL(preempt_count_add);
+
+/*
+ * If the value passed in is equal to the current preempt count
+ * then we just enabled preemption. Stop timing the latency.
+ */
+static inline void preempt_latency_stop(int val)
+{
+	if (preempt_count() == val)
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+
+void preempt_count_sub(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	/*
+	 * Underflow?
+	 */
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+		return;
+	/*
+	 * Is the spinlock portion underflowing?
+	 */
+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+			!(preempt_count() & PREEMPT_MASK)))
+		return;
+#endif
+
+	preempt_latency_stop(val);
+	__preempt_count_sub(val);
+}
+EXPORT_SYMBOL(preempt_count_sub);
+NOKPROBE_SYMBOL(preempt_count_sub);
+
+#else
+static inline void preempt_latency_start(int val) { }
+static inline void preempt_latency_stop(int val) { }
+#endif
+
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	return p->preempt_disable_ip;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * The time_slice is only refilled when it is empty and that is when we set a
+ * new deadline. Make sure update_clocks has been called recently to update
+ * rq->niffies.
+ */
+static void time_slice_expired(struct task_struct *p, struct rq *rq)
+{
+	p->time_slice = timeslice();
+	p->deadline = rq->niffies + task_deadline_diff(p);
+#ifdef CONFIG_SMT_NICE
+	if (!p->mm)
+		p->smt_bias = 0;
+	else if (rt_task(p))
+		p->smt_bias = 1 << 30;
+	else if (task_running_iso(p))
+		p->smt_bias = 1 << 29;
+	else if (idleprio_task(p)) {
+		if (task_running_idle(p))
+			p->smt_bias = 0;
+		else
+			p->smt_bias = 1;
+	} else if (--p->smt_bias < 1)
+		p->smt_bias = MAX_PRIO - p->static_prio;
+#endif
+}
+
+/*
+ * Timeslices below RESCHED_US are considered as good as expired as there's no
+ * point rescheduling when there's so little time left. SCHED_BATCH tasks
+ * have been flagged as not latency sensitive and likely to be fully CPU
+ * bound so every time they're rescheduled they have their time_slice
+ * refilled, but get a new later deadline to have little effect on
+ * SCHED_NORMAL tasks.
+ */
+static inline void check_deadline(struct task_struct *p, struct rq *rq)
+{
+	if (p->time_slice < RESCHED_US || batch_task(p))
+		time_slice_expired(p, rq);
+}
+
+/*
+ * Task selection with skiplists is a simple matter of picking off the first
+ * task in the sorted list, an O(1) operation. The lookup is amortised O(1)
+ * being bound to the number of processors.
+ *
+ * Runqueues are selectively locked based on their unlocked data and then
+ * unlocked if not needed. At most 3 locks will be held at any time and are
+ * released as soon as they're no longer needed. All balancing between CPUs
+ * is thus done here in an extremely simple first come best fit manner.
+ *
+ * This iterates over runqueues in cache locality order. In interactive mode
+ * it iterates over all CPUs and finds the task with the best key/deadline.
+ * In non-interactive mode it will only take a task if it's from the current
+ * runqueue or a runqueue with more tasks than the current one with a better
+ * key/deadline.
+ */
+#ifdef CONFIG_SMP
+static inline struct task_struct
+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
+{
+	struct rq *locked = NULL, *chosen = NULL;
+	struct task_struct *edt = idle;
+	int i, best_entries = 0;
+	u64 best_key = ~0ULL;
+
+	for (i = 0; i < total_runqueues; i++) {
+		struct rq *other_rq = rq_order(rq, i);
+		skiplist_node *next;
+		int entries;
+
+		entries = other_rq->sl->entries;
+		/*
+		 * Check for queued entries lockless first. The local runqueue
+		 * is locked so entries will always be accurate.
+		 */
+		if (!sched_interactive) {
+			/*
+			 * Don't reschedule balance across nodes unless the CPU
+			 * is idle.
+			 */
+			if (edt != idle && rq->cpu_locality[other_rq->cpu] > LOCALITY_SMP)
+				break;
+			if (entries <= best_entries)
+				continue;
+		} else if (!entries)
+			continue;
+
+		/* if (i) implies other_rq != rq */
+		if (i) {
+			/* Check for best key queued lockless first */
+			if (other_rq->best_key >= best_key)
+				continue;
+
+			if (unlikely(!trylock_rq(rq, other_rq)))
+				continue;
+
+			/* Need to reevaluate entries after locking */
+			entries = other_rq->sl->entries;
+			if (unlikely(!entries)) {
+				unlock_rq(other_rq);
+				continue;
+			}
+		}
+
+		next = other_rq->node;
+		/*
+		 * In interactive mode we check beyond the best entry on other
+		 * runqueues if we can't get the best for smt or affinity
+		 * reasons.
+		 */
+		while ((next = next->next[0]) != other_rq->node) {
+			struct task_struct *p;
+			u64 key = next->key;
+
+			/* Reevaluate key after locking */
+			if (key >= best_key)
+				break;
+
+			p = next->value;
+			if (!smt_schedule(p, rq)) {
+				if (i && !sched_interactive)
+					break;
+				continue;
+			}
+
+			if (sched_other_cpu(p, cpu)) {
+				if (sched_interactive || !i)
+					continue;
+				break;
+			}
+			/* Make sure affinity is ok */
+			if (i) {
+				/* From this point on p is the best so far */
+				if (locked)
+					unlock_rq(locked);
+				chosen = locked = other_rq;
+			}
+			best_entries = entries;
+			best_key = key;
+			edt = p;
+			break;
+		}
+		/*
+		 * rq->preempting is a hint only as the state may have changed
+		 * since it was set with the resched call but if we have met
+		 * the condition we can break out here.
+		 */
+		if (edt == rq->preempting)
+			break;
+		if (i && other_rq != chosen)
+			unlock_rq(other_rq);
+	}
+
+	if (likely(edt != idle))
+		take_task(rq, cpu, edt);
+
+	if (locked)
+		unlock_rq(locked);
+
+	rq->preempting = NULL;
+
+	return edt;
+}
+#else /* CONFIG_SMP */
+static inline struct task_struct
+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
+{
+	struct task_struct *edt;
+
+	if (unlikely(!rq->sl->entries))
+		return idle;
+	edt = rq->node->next[0]->value;
+	take_task(rq, cpu, edt);
+	return edt;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Print scheduling while atomic bug:
+ */
+static noinline void __schedule_bug(struct task_struct *prev)
+{
+	/* Save this before calling printk(), since that will clobber it */
+	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
+
+	if (oops_in_progress)
+		return;
+
+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+		prev->comm, prev->pid, preempt_count());
+
+	debug_show_held_locks(prev);
+	print_modules();
+	if (irqs_disabled())
+		print_irqtrace_events(prev);
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+	    && in_atomic_preempt_off()) {
+		pr_err("Preemption disabled at:");
+		print_ip_sym(preempt_disable_ip);
+		pr_cont("\n");
+	}
+	dump_stack();
+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+
+/*
+ * Various schedule()-time debugging checks and statistics:
+ */
+static inline void schedule_debug(struct task_struct *prev, bool preempt)
+{
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+	if (task_stack_end_corrupted(prev))
+		panic("corrupted stack end detected inside scheduler\n");
+#endif
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	if (!preempt && prev->state && prev->non_block_count) {
+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
+			prev->comm, prev->pid, prev->non_block_count);
+		dump_stack();
+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+	}
+#endif
+
+	if (unlikely(in_atomic_preempt_off())) {
+		__schedule_bug(prev);
+		preempt_count_set(PREEMPT_DISABLED);
+	}
+	rcu_sleep_check();
+
+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+	schedstat_inc(this_rq()->sched_count);
+}
+
+/*
+ * The currently running task's information is all stored in rq local data
+ * which is only modified by the local CPU.
+ */
+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
+{
+	if (p == rq->idle || p->policy == SCHED_FIFO)
+		hrexpiry_clear(rq);
+	else
+		hrexpiry_start(rq, US_TO_NS(p->time_slice));
+	if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
+		rq->dither = 0;
+	else
+		rq->dither = rq_dither(rq);
+
+	rq->rq_deadline = p->deadline;
+	rq->rq_prio = p->prio;
+#ifdef CONFIG_SMT_NICE
+	rq->rq_mm = p->mm;
+	rq->rq_smt_bias = p->smt_bias;
+#endif
+}
+
+#ifdef CONFIG_SMT_NICE
+static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
+static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
+static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
+static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
+
+/*
+ * Iterate over smt siblings when we've scheduled a process on cpu and decide
+ * whether they should continue running or be descheduled.
+ */
+static void check_smt_siblings(struct rq *this_rq)
+{
+	int other_cpu;
+
+	for_each_cpu(other_cpu, &this_rq->thread_mask) {
+		struct task_struct *p;
+		struct rq *rq;
+
+		rq = cpu_rq(other_cpu);
+		if (rq_idle(rq))
+			continue;
+		p = rq->curr;
+		if (!smt_schedule(p, this_rq))
+			resched_curr(rq);
+	}
+}
+
+static void wake_smt_siblings(struct rq *this_rq)
+{
+	int other_cpu;
+
+	for_each_cpu(other_cpu, &this_rq->thread_mask) {
+		struct rq *rq;
+
+		rq = cpu_rq(other_cpu);
+		if (rq_idle(rq))
+			resched_idle(rq);
+	}
+}
+#else
+static void check_siblings(struct rq __maybe_unused *this_rq) {}
+static void wake_siblings(struct rq __maybe_unused *this_rq) {}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ *
+ * The main means of driving the scheduler and thus entering this function are:
+ *
+ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
+ *
+ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
+ *      paths. For example, see arch/x86/entry_64.S.
+ *
+ *      To drive preemption between tasks, the scheduler sets the flag in timer
+ *      interrupt handler scheduler_tick().
+ *
+ *   3. Wakeups don't really cause entry into schedule(). They add a
+ *      task to the run-queue and that's it.
+ *
+ *      Now, if the new task added to the run-queue preempts the current
+ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
+ *      called on the nearest possible occasion:
+ *
+ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
+ *
+ *         - in syscall or exception context, at the next outmost
+ *           preempt_enable(). (this might be as soon as the wake_up()'s
+ *           spin_unlock()!)
+ *
+ *         - in IRQ context, return from interrupt-handler to
+ *           preemptible context
+ *
+ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
+ *         then at the next:
+ *
+ *          - cond_resched() call
+ *          - explicit schedule() call
+ *          - return from syscall or exception to user-space
+ *          - return from interrupt-handler to user-space
+ *
+ * WARNING: must be called with preemption disabled!
+ */
+static void __sched notrace __schedule(bool preempt)
+{
+	struct task_struct *prev, *next, *idle;
+	unsigned long *switch_count;
+	bool deactivate = false;
+	struct rq *rq;
+	u64 niffies;
+	int cpu;
+
+	cpu = smp_processor_id();
+	rq = cpu_rq(cpu);
+	prev = rq->curr;
+	idle = rq->idle;
+
+	schedule_debug(prev, preempt);
+
+	local_irq_disable();
+	rcu_note_context_switch(preempt);
+
+	/*
+	 * Make sure that signal_pending_state()->signal_pending() below
+	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+	 * done by the caller to avoid the race with signal_wake_up().
+	 *
+	 * The membarrier system call requires a full memory barrier
+	 * after coming from user-space, before storing to rq->curr.
+	 */
+	rq_lock(rq);
+	smp_mb__after_spinlock();
+#ifdef CONFIG_SMP
+	if (rq->preempt) {
+		/*
+		 * Make sure resched_curr hasn't triggered a preemption
+		 * locklessly on a task that has since scheduled away. Spurious
+		 * wakeup of idle is okay though.
+		 */
+		if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
+			rq->preempt = NULL;
+			clear_preempt_need_resched();
+			rq_unlock_irq(rq, NULL);
+			return;
+		}
+		rq->preempt = NULL;
+	}
+#endif
+
+	switch_count = &prev->nivcsw;
+	if (!preempt && prev->state) {
+		if (signal_pending_state(prev->state, prev)) {
+			prev->state = TASK_RUNNING;
+		} else {
+			deactivate = true;
+
+			if (prev->in_iowait) {
+				atomic_inc(&rq->nr_iowait);
+				delayacct_blkio_start();
+			}
+		}
+		switch_count = &prev->nvcsw;
+	}
+
+	/*
+	 * Store the niffy value here for use by the next task's last_ran
+	 * below to avoid losing niffies due to update_clocks being called
+	 * again after this point.
+	 */
+	update_clocks(rq);
+	niffies = rq->niffies;
+	update_cpu_clock_switch(rq, prev);
+
+	clear_tsk_need_resched(prev);
+	clear_preempt_need_resched();
+
+	if (idle != prev) {
+		check_deadline(prev, rq);
+		return_task(prev, rq, cpu, deactivate);
+	}
+
+	next = earliest_deadline_task(rq, cpu, idle);
+	if (likely(next->prio != PRIO_LIMIT))
+		clear_cpuidle_map(cpu);
+	else {
+		set_cpuidle_map(cpu);
+		update_load_avg(rq, 0);
+	}
+
+	set_rq_task(rq, next);
+	next->last_ran = niffies;
+
+	if (likely(prev != next)) {
+		/*
+		 * Don't reschedule an idle task or deactivated tasks
+		 */
+		if (prev == idle) {
+			rq->nr_running++;
+			if (rt_task(next))
+				rq->rt_nr_running++;
+		} else if (!deactivate)
+			resched_suitable_idle(prev);
+		if (unlikely(next == idle)) {
+			rq->nr_running--;
+			if (rt_task(prev))
+				rq->rt_nr_running--;
+			wake_siblings(rq);
+		} else
+			check_siblings(rq);
+		rq->nr_switches++;
+		/*
+		 * RCU users of rcu_dereference(rq->curr) may not see
+		 * changes to task_struct made by pick_next_task().
+		 */
+		RCU_INIT_POINTER(rq->curr, next);
+		/*
+		 * The membarrier system call requires each architecture
+		 * to have a full memory barrier after updating
+		 * rq->curr, before returning to user-space.
+		 *
+		 * Here are the schemes providing that barrier on the
+		 * various architectures:
+		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
+		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
+		 * - finish_lock_switch() for weakly-ordered
+		 *   architectures where spin_unlock is a full barrier,
+		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
+		 *   is a RELEASE barrier),
+		 */
+		++*switch_count;
+
+		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
+		trace_sched_switch(preempt, prev, next);
+		context_switch(rq, prev, next); /* unlocks the rq */
+	} else {
+		check_siblings(rq);
+		rq_unlock(rq);
+		local_irq_enable();
+	}
+}
+
+void __noreturn do_task_dead(void)
+{
+	/* Causes final put_task_struct in finish_task_switch(). */
+	set_special_state(TASK_DEAD);
+
+	/* Tell freezer to ignore us: */
+	current->flags |= PF_NOFREEZE;
+	__schedule(false);
+	BUG();
+
+	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
+	for (;;)
+		cpu_relax();
+}
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+
+	/*
+	 * If a worker went to sleep, notify and ask workqueue whether
+	 * it wants to wake up a task to maintain concurrency.
+	 * As this function is called inside the schedule() context,
+	 * we disable preemption to avoid it calling schedule() again
+	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
+	 * requires it.
+	 */
+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+		preempt_disable();
+		if (tsk->flags & PF_WQ_WORKER)
+			wq_worker_sleeping(tsk);
+		else
+			io_wq_worker_sleeping(tsk);
+		preempt_enable_no_resched();
+	}
+
+	if (tsk_is_pi_blocked(tsk))
+		return;
+
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+static inline void sched_update_worker(struct task_struct *tsk)
+{
+	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+		if (tsk->flags & PF_WQ_WORKER)
+			wq_worker_running(tsk);
+		else
+			io_wq_worker_running(tsk);
+	}
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	do {
+		preempt_disable();
+		__schedule(false);
+		sched_preempt_enable_no_resched();
+	} while (need_resched());
+	sched_update_worker(tsk);
+}
+EXPORT_SYMBOL(schedule);
+
+/*
+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
+ * state (have scheduled out non-voluntarily) by making sure that all
+ * tasks have either left the run queue or have gone into user space.
+ * As idle tasks do not do either, they must not ever be preempted
+ * (schedule out non-voluntarily).
+ *
+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
+ * never enables preemption because it does not call sched_submit_work().
+ */
+void __sched schedule_idle(void)
+{
+	/*
+	 * As this skips calling sched_submit_work(), which the idle task does
+	 * regardless because that function is a nop when the task is in a
+	 * TASK_RUNNING state, make sure this isn't used someplace that the
+	 * current task can be in any other state. Note, idle is always in the
+	 * TASK_RUNNING state.
+	 */
+	WARN_ON_ONCE(current->state);
+	do {
+		__schedule(false);
+	} while (need_resched());
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage __visible void __sched schedule_user(void)
+{
+	/*
+	 * If we come here after a random call to set_need_resched(),
+	 * or we have been woken up remotely but the IPI has not yet arrived,
+	 * we haven't yet exited the RCU idle mode. Do it here manually until
+	 * we find a better solution.
+	 *
+	 * NB: There are buggy callers of this function.  Ideally we
+	 * should warn if prev_state != IN_USER, but that will trigger
+	 * too frequently to make sense yet.
+	 */
+	enum ctx_state prev_state = exception_enter();
+	schedule();
+	exception_exit(prev_state);
+}
+#endif
+
+/**
+ * schedule_preempt_disabled - called with preemption disabled
+ *
+ * Returns with preemption disabled. Note: preempt_count must be 1
+ */
+void __sched schedule_preempt_disabled(void)
+{
+	sched_preempt_enable_no_resched();
+	schedule();
+	preempt_disable();
+}
+
+static void __sched notrace preempt_schedule_common(void)
+{
+	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
+		preempt_disable_notrace();
+		preempt_latency_start(1);
+		__schedule(true);
+		preempt_latency_stop(1);
+		preempt_enable_no_resched_notrace();
+
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
+	} while (need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+/*
+ * This is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule(void)
+{
+	/*
+	 * If there is a non-zero preempt_count or interrupts are disabled,
+	 * we do not want to preempt the current task. Just return..
+	 */
+	if (likely(!preemptible()))
+		return;
+
+	preempt_schedule_common();
+}
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
+
+/**
+ * preempt_schedule_notrace - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+{
+	enum ctx_state prev_ctx;
+
+	if (likely(!preemptible()))
+		return;
+
+	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
+		preempt_disable_notrace();
+		preempt_latency_start(1);
+		/*
+		 * Needs preempt disabled in case user_exit() is traced
+		 * and the tracer calls preempt_enable_notrace() causing
+		 * an infinite recursion.
+		 */
+		prev_ctx = exception_enter();
+		__schedule(true);
+		exception_exit(prev_ctx);
+
+		preempt_latency_stop(1);
+		preempt_enable_no_resched_notrace();
+	} while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
+
+#endif /* CONFIG_PREEMPTION */
+
+/*
+ * This is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note that this is called and returns with irqs disabled. This will
+ * protect us against recursive calling from irq.
+ */
+asmlinkage __visible void __sched preempt_schedule_irq(void)
+{
+	enum ctx_state prev_state;
+
+	/* Catch callers which need to be fixed */
+	BUG_ON(preempt_count() || !irqs_disabled());
+
+	prev_state = exception_enter();
+
+	do {
+		preempt_disable();
+		local_irq_enable();
+		__schedule(true);
+		local_irq_disable();
+		sched_preempt_enable_no_resched();
+	} while (need_resched());
+
+	exception_exit(prev_state);
+}
+
+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
+			  void *key)
+{
+	return try_to_wake_up(curr->private, mode, wake_flags);
+}
+EXPORT_SYMBOL(default_wake_function);
+
+#ifdef CONFIG_RT_MUTEXES
+
+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+{
+	if (pi_task)
+		prio = min(prio, pi_task->prio);
+
+	return prio;
+}
+
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+	struct task_struct *pi_task = rt_mutex_get_top_task(p);
+
+	return __rt_effective_prio(pi_task, prio);
+}
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task to boost
+ * @pi_task: donor task
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
+ */
+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+{
+	int prio, oldprio;
+	struct rq *rq;
+
+	/* XXX used to be waiter->prio, not waiter->task->prio */
+	prio = __rt_effective_prio(pi_task, p->normal_prio);
+
+	/*
+	 * If nothing changed; bail early.
+	 */
+	if (p->pi_top_task == pi_task && prio == p->prio)
+		return;
+
+	rq = __task_rq_lock(p, NULL);
+	update_rq_clock(rq);
+	/*
+	 * Set under pi_lock && rq->lock, such that the value can be used under
+	 * either lock.
+	 *
+	 * Note that there is a lot of trickiness required to make this pointer
+	 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work
+	 * together to ensure a task is de-boosted (pi_task is set to NULL)
+	 * before the task is allowed to run again (and can exit). This ensures
+	 * the pointer points to a blocked task -- which guarantees the task is
+	 * present.
+	 */
+	p->pi_top_task = pi_task;
+
+	/*
+	 * For FIFO/RR we only need to set prio, if that matches we're done.
+	 */
+	if (prio == p->prio)
+		goto out_unlock;
+
+	/*
+	 * Idle task boosting is a nono in general. There is one
+	 * exception, when PREEMPT_RT and NOHZ is active:
+	 *
+	 * The idle task calls get_next_timer_interrupt() and holds
+	 * the timer wheel base->lock on the CPU and another CPU wants
+	 * to access the timer (probably to cancel it). We can safely
+	 * ignore the boosting request, as the idle CPU runs this code
+	 * with interrupts disabled and will complete the lock
+	 * protected section without being interrupted. So there is no
+	 * real need to boost.
+	 */
+	if (unlikely(p == rq->idle)) {
+		WARN_ON(p != rq->curr);
+		WARN_ON(p->pi_blocked_on);
+		goto out_unlock;
+	}
+
+	trace_sched_pi_setprio(p, pi_task);
+	oldprio = p->prio;
+	p->prio = prio;
+	if (task_running(rq, p)) {
+		if (prio > oldprio)
+			resched_task(p);
+	} else if (task_queued(p)) {
+		dequeue_task(rq, p, DEQUEUE_SAVE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		if (prio < oldprio)
+			try_preempt(p, rq);
+	}
+out_unlock:
+	__task_rq_unlock(rq, NULL);
+}
+#else
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+	return prio;
+}
+#endif
+
+/*
+ * Adjust the deadline for when the priority is to change, before it's
+ * changed.
+ */
+static inline void adjust_deadline(struct task_struct *p, int new_prio)
+{
+	p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
+}
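+/*
+ * Illustrative note (assuming static_deadline_diff() grows with nice, which
+ * is not shown here): renicing a task to a higher nice value adds a positive
+ * difference and pushes its virtual deadline later, while renicing to a lower
+ * nice value pulls it earlier, relative to whatever deadline it already had.
+ */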
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+	int new_static, old_static;
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
+		return;
+	new_static = NICE_TO_PRIO(nice);
+	/*
+	 * We have to be careful, if called from sys_setpriority(),
+	 * the task might be in the middle of scheduling on another CPU.
+	 */
+	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
+
+	/*
+	 * The RT priorities are set via sched_setscheduler(), but we still
+	 * allow the 'normal' nice value to be set - but as expected
+	 * it won't have any effect on scheduling until the task returns
+	 * to a non-RT policy such as SCHED_NORMAL/SCHED_BATCH:
+	 */
+	if (has_rt_policy(p)) {
+		p->static_prio = new_static;
+		goto out_unlock;
+	}
+
+	adjust_deadline(p, new_static);
+	old_static = p->static_prio;
+	p->static_prio = new_static;
+	p->prio = effective_prio(p);
+
+	if (task_queued(p)) {
+		dequeue_task(rq, p, DEQUEUE_SAVE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		if (new_static < old_static)
+			try_preempt(p, rq);
+	} else if (task_running(rq, p)) {
+		set_rq_task(rq, p);
+		if (old_static < new_static)
+			resched_task(p);
+	}
+out_unlock:
+	task_rq_unlock(rq, p, &rf);
+}
+EXPORT_SYMBOL(set_user_nice);
+
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const struct task_struct *p, const int nice)
+{
+	/* Convert nice value [19,-20] to rlimit style value [1,40] */
+	int nice_rlim = nice_to_rlimit(nice);
+
+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+		capable(CAP_SYS_NICE));
+}
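+/*
+ * Worked example (illustrative only), using the mapping noted above where
+ * nice 19 corresponds to 1 and nice -20 corresponds to 40: a task with
+ * RLIMIT_NICE set to 30 may lower its nice value down to -10, since
+ * nice_to_rlimit(-10) == 30.
+ */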
+
+#ifdef __ARCH_WANT_SYS_NICE
+
+/*
+ * sys_nice - change the priority of the current process.
+ * @increment: priority increment
+ *
+ * sys_setpriority is a more generic, but much slower function that
+ * does similar things.
+ */
+SYSCALL_DEFINE1(nice, int, increment)
+{
+	long nice, retval;
+
+	/*
+	 * Setpriority might change our priority at the same moment.
+	 * We don't have to worry. Conceptually one call occurs first
+	 * and we have a single winner.
+	 */
+
+	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
+	nice = task_nice(current) + increment;
+
+	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
+	if (increment < 0 && !can_nice(current, nice))
+		return -EPERM;
+
+	retval = security_task_setnice(current, nice);
+	if (retval)
+		return retval;
+
+	set_user_nice(current, nice);
+	return 0;
+}
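+/*
+ * Example (illustrative only): nice(5) from a task currently at nice 0 clamps
+ * the increment to the nice range, computes 0 + 5 = 5, and applies it via
+ * set_user_nice(); a negative increment additionally has to pass can_nice().
+ */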
+
+#endif
+
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The priority value as seen by users in /proc.
+ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
+ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
+ */
+int task_prio(const struct task_struct *p)
+{
+	int delta, prio = p->prio - MAX_RT_PRIO;
+
+	/* rt tasks and iso tasks */
+	if (prio <= 0)
+		goto out;
+
+	/* Convert to ms to avoid overflows */
+	delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
+	if (unlikely(delta < 0))
+		delta = 0;
+	delta = delta * 40 / ms_longest_deadline_diff();
+	if (delta <= 80)
+		prio += delta;
+	if (idleprio_task(p))
+		prio += 40;
+out:
+	return prio;
+}
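+/*
+ * Worked example (illustrative only): for a non-RT, non-idleprio task whose
+ * remaining deadline is half of ms_longest_deadline_diff(), delta works out
+ * to 40 * 1/2 = 20, so 20 is added on top of the base value; an idleprio
+ * task would get a further +40.
+ */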
+
+/**
+ * idle_cpu - is a given CPU idle currently?
+ * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int idle_cpu(int cpu)
+{
+	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
+
+/**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int available_idle_cpu(int cpu)
+{
+	if (!idle_cpu(cpu))
+		return 0;
+
+	if (vcpu_is_preempted(cpu))
+		return 0;
+
+	return 1;
+}
+
+/**
+ * idle_task - return the idle task for a given CPU.
+ * @cpu: the processor in question.
+ *
+ * Return: The idle task for the CPU @cpu.
+ */
+struct task_struct *idle_task(int cpu)
+{
+	return cpu_rq(cpu)->idle;
+}
+
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_vpid(pid) : current;
+}
+
+/* Actually do priority change: must hold rq lock. */
+static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
+			   int prio, const struct sched_attr *attr,
+			   bool keep_boost)
+{
+	int oldrtprio, oldprio;
+
+	/*
+	 * If params can't change scheduling class changes aren't allowed
+	 * If params can't change, scheduling class changes aren't allowed
+	 */
+	if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
+		return;
+
+	p->policy = policy;
+	oldrtprio = p->rt_priority;
+	p->rt_priority = prio;
+	p->normal_prio = normal_prio(p);
+	oldprio = p->prio;
+	/*
+	 * Keep a potential priority boosting if called from
+	 * sched_setscheduler().
+	 */
+	p->prio = normal_prio(p);
+	if (keep_boost)
+		p->prio = rt_effective_prio(p, p->prio);
+
+	if (task_running(rq, p)) {
+		set_rq_task(rq, p);
+		resched_task(p);
+	} else if (task_queued(p)) {
+		dequeue_task(rq, p, DEQUEUE_SAVE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		if (p->prio < oldprio || p->rt_priority > oldrtprio)
+			try_preempt(p, rq);
+	}
+}
+
+/*
+ * Check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+	const struct cred *cred = current_cred(), *pcred;
+	bool match;
+
+	rcu_read_lock();
+	pcred = __task_cred(p);
+	match = (uid_eq(cred->euid, pcred->euid) ||
+		 uid_eq(cred->euid, pcred->uid));
+	rcu_read_unlock();
+	return match;
+}
+
+static int __sched_setscheduler(struct task_struct *p,
+				const struct sched_attr *attr,
+				bool user, bool pi)
+{
+	int retval, policy = attr->sched_policy, oldpolicy = -1, priority = attr->sched_priority;
+	unsigned long rlim_rtprio = 0;
+	struct rq_flags rf;
+	int reset_on_fork;
+	struct rq *rq;
+
+	/* The pi code expects interrupts enabled */
+	BUG_ON(pi && in_interrupt());
+
+	if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
+		unsigned long lflags;
+
+		if (!lock_task_sighand(p, &lflags))
+			return -ESRCH;
+		rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
+		unlock_task_sighand(p, &lflags);
+		if (rlim_rtprio)
+			goto recheck;
+		/*
+		 * If the caller requested an RT policy without having the
+		 * necessary rights, we downgrade the policy to SCHED_ISO.
+		 * We also set the parameter to zero to pass the checks.
+		 */
+		policy = SCHED_ISO;
+		priority = 0;
+	}
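+	/*
+	 * Illustrative example (no functional change): an unprivileged
+	 * userspace caller requesting an RT policy, e.g.
+	 *
+	 *	struct sched_param sp = { .sched_priority = 10 };
+	 *	sched_setscheduler(0, SCHED_RR, &sp);
+	 *
+	 * with an RLIMIT_RTPRIO of zero reaches this point with the policy
+	 * downgraded to SCHED_ISO and the priority forced to 0 instead of the
+	 * request being rejected outright.
+	 */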
+recheck:
+	/* Double check policy once rq lock held */
+	if (policy < 0) {
+		reset_on_fork = p->sched_reset_on_fork;
+		policy = oldpolicy = p->policy;
+	} else {
+		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
+		policy &= ~SCHED_RESET_ON_FORK;
+
+		if (!SCHED_RANGE(policy))
+			return -EINVAL;
+	}
+
+	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
+		return -EINVAL;
+
+	/*
+	 * Valid priorities for SCHED_FIFO and SCHED_RR are
+	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
+	 * SCHED_BATCH is 0.
+	 */
+	if (priority < 0 ||
+	    (p->mm && priority > MAX_USER_RT_PRIO - 1) ||
+	    (!p->mm && priority > MAX_RT_PRIO - 1))
+		return -EINVAL;
+	if (is_rt_policy(policy) != (priority != 0))
+		return -EINVAL;
+
+	/*
+	 * Allow unprivileged RT tasks to decrease priority:
+	 */
+	if (user && !capable(CAP_SYS_NICE)) {
+		if (is_rt_policy(policy)) {
+			unsigned long rlim_rtprio =
+					task_rlimit(p, RLIMIT_RTPRIO);
+
+			/* Can't set/change the rt policy */
+			if (policy != p->policy && !rlim_rtprio)
+				return -EPERM;
+
+			/* Can't increase priority */
+			if (priority > p->rt_priority &&
+			    priority > rlim_rtprio)
+				return -EPERM;
+		} else {
+			switch (p->policy) {
+				/*
+				 * Can only downgrade policies but not back to
+				 * SCHED_NORMAL
+				 */
+				case SCHED_ISO:
+					if (policy == SCHED_ISO)
+						goto out;
+					if (policy != SCHED_NORMAL)
+						return -EPERM;
+					break;
+				case SCHED_BATCH:
+					if (policy == SCHED_BATCH)
+						goto out;
+					if (policy != SCHED_IDLEPRIO)
+						return -EPERM;
+					break;
+				case SCHED_IDLEPRIO:
+					if (policy == SCHED_IDLEPRIO)
+						goto out;
+					return -EPERM;
+				default:
+					break;
+			}
+		}
+
+		/* Can't change other user's priorities */
+		if (!check_same_owner(p))
+			return -EPERM;
+
+		/* Normal users shall not reset the sched_reset_on_fork flag: */
+		if (p->sched_reset_on_fork && !reset_on_fork)
+			return -EPERM;
+	}
+
+	if (user) {
+		retval = security_task_setscheduler(p);
+		if (retval)
+			return retval;
+	}
+
+	if (pi)
+		cpuset_read_lock();
+
+	/*
+	 * Make sure no PI-waiters arrive (or leave) while we are
+	 * changing the priority of the task:
+	 *
+	 * To be able to change p->policy safely, the runqueue lock must be
+	 * held.
+	 */
+	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
+
+	/*
+	 * Changing the policy of the stop threads is a very bad idea:
+	 */
+	if (p == rq->stop) {
+		retval = -EINVAL;
+		goto unlock;
+	}
+
+	/*
+	 * If not changing anything there's no need to proceed further:
+	 */
+	if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
+	    priority == p->rt_priority))) {
+		retval = 0;
+		goto unlock;
+	}
+
+	/* Re-check policy now with rq lock held */
+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+		policy = oldpolicy = -1;
+		task_rq_unlock(rq, p, &rf);
+		if (pi)
+			cpuset_read_unlock();
+		goto recheck;
+	}
+	p->sched_reset_on_fork = reset_on_fork;
+
+	__setscheduler(p, rq, policy, priority, attr, pi);
+
+	/* Avoid rq from going away on us: */
+	preempt_disable();
+	task_rq_unlock(rq, p, &rf);
+
+	if (pi) {
+		cpuset_read_unlock();
+		rt_mutex_adjust_pi(p);
+	}
+	preempt_enable();
+out:
+	return 0;
+
+unlock:
+	task_rq_unlock(rq, p, &rf);
+	if (pi)
+		cpuset_read_unlock();
+	return retval;
+}
+
+static int _sched_setscheduler(struct task_struct *p, int policy,
+			       const struct sched_param *param, bool check)
+{
+	struct sched_attr attr = {
+		.sched_policy   = policy,
+		.sched_priority = param->sched_priority,
+		.sched_nice	= PRIO_TO_NICE(p->static_prio),
+	};
+
+	return __sched_setscheduler(p, &attr, check, true);
+}
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ *
+ * NOTE that the task may be already dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+		       const struct sched_param *param)
+{
+	return _sched_setscheduler(p, policy, param, true);
+}
+EXPORT_SYMBOL_GPL(sched_setscheduler);
+
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+	return __sched_setscheduler(p, attr, true, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr);
+
+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
+{
+	return __sched_setscheduler(p, attr, false, true);
+}
+
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission.  For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+			       const struct sched_param *param)
+{
+	return _sched_setscheduler(p, policy, param, false);
+}
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
+
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+	struct sched_param lparam;
+	struct task_struct *p;
+	int retval;
+
+	if (!param || pid < 0)
+		return -EINVAL;
+	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+		return -EFAULT;
+
+	rcu_read_lock();
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (likely(p))
+		get_task_struct(p);
+	rcu_read_unlock();
+
+	if (likely(p)) {
+		retval = sched_setscheduler(p, policy, &lparam);
+		put_task_struct(p);
+	}
+
+	return retval;
+}
+
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr,
+			   struct sched_attr *attr)
+{
+	u32 size;
+	int ret;
+
+	/* Zero the full structure, so that a short copy will be nice: */
+	memset(attr, 0, sizeof(*attr));
+
+	ret = get_user(size, &uattr->size);
+	if (ret)
+		return ret;
+
+	/* ABI compatibility quirk: */
+	if (!size)
+		size = SCHED_ATTR_SIZE_VER0;
+
+	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
+		goto err_size;
+
+	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+	if (ret) {
+		if (ret == -E2BIG)
+			goto err_size;
+		return ret;
+	}
+
+	/*
+	 * XXX: Do we want to be lenient like existing syscalls; or do we want
+	 * to be strict and return an error on out-of-bounds values?
+	 */
+	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+
+	/* sched/core.c uses zero here but we already know ret is zero */
+	return 0;
+
+err_size:
+	put_user(sizeof(*attr), &uattr->size);
+	return -E2BIG;
+}
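+/*
+ * Userspace sketch (illustrative only, not part of this file): callers are
+ * expected to fill in attr.size before invoking the syscall, e.g.
+ *
+ *	struct sched_attr attr = {
+ *		.size		= sizeof(attr),
+ *		.sched_policy	= SCHED_NORMAL,
+ *		.sched_nice	= 5,
+ *	};
+ *	syscall(__NR_sched_setattr, 0, &attr, 0);
+ *
+ * A size of zero is accepted and treated as SCHED_ATTR_SIZE_VER0 for ABI
+ * compatibility, as handled above.
+ */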
+
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY	-1
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
+{
+	if (policy < 0)
+		return -EINVAL;
+
+	return do_sched_setscheduler(pid, policy, param);
+}
+
+/**
+ * sys_sched_setparam - set/change the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+{
+	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
+}
+
+/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+			       unsigned int, flags)
+{
+	struct sched_attr attr;
+	struct task_struct *p;
+	int retval;
+
+	if (!uattr || pid < 0 || flags)
+		return -EINVAL;
+
+	retval = sched_copy_attr(uattr, &attr);
+	if (retval)
+		return retval;
+
+	if ((int)attr.sched_policy < 0)
+		return -EINVAL;
+	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
+		attr.sched_policy = SETPARAM_POLICY;
+
+	rcu_read_lock();
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (likely(p))
+		get_task_struct(p);
+	rcu_read_unlock();
+
+	if (likely(p)) {
+		retval = sched_setattr(p, &attr);
+		put_task_struct(p);
+	}
+
+	return retval;
+}
+
+/**
+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+ * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
+ */
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+{
+	struct task_struct *p;
+	int retval = -EINVAL;
+
+	if (pid < 0)
+		goto out_nounlock;
+
+	retval = -ESRCH;
+	rcu_read_lock();
+	p = find_process_by_pid(pid);
+	if (p) {
+		retval = security_task_getscheduler(p);
+		if (!retval)
+			retval = p->policy;
+	}
+	rcu_read_unlock();
+
+out_nounlock:
+	return retval;
+}
+
+/**
+ * sys_sched_getparam - get the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
+ */
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+{
+	struct sched_param lp = { .sched_priority = 0 };
+	struct task_struct *p;
+	int retval = -EINVAL;
+
+	if (!param || pid < 0)
+		goto out_nounlock;
+
+	rcu_read_lock();
+	p = find_process_by_pid(pid);
+	retval = -ESRCH;
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	if (has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
+	rcu_read_unlock();
+
+	/*
+	 * This one might sleep, we cannot do it with a spinlock held ...
+	 */
+	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+	return retval;
+
+out_unlock:
+	rcu_read_unlock();
+	return retval;
+}
+
+/*
+ * Copy the kernel size attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+			struct sched_attr *kattr,
+			unsigned int usize)
+{
+	unsigned int ksize = sizeof(*kattr);
+
+	if (!access_ok(uattr, usize))
+		return -EFAULT;
+
+	/*
+	 * sched_getattr() ABI forwards and backwards compatibility:
+	 *
+	 * If usize == ksize then we just copy everything to user-space and all is good.
+	 *
+	 * If usize < ksize then we only copy as much as user-space has space for,
+	 * this keeps ABI compatibility as well. We skip the rest.
+	 *
+	 * If usize > ksize then user-space is using a newer version of the ABI,
+ * parts of which the kernel doesn't know about. Just ignore those - tooling can
+	 * detect the kernel's knowledge of attributes from the attr->size value
+	 * which is set to ksize in this case.
+	 */
+	kattr->size = min(usize, ksize);
+
+	if (copy_to_user(uattr, kattr, kattr->size))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @usize: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, usize, unsigned int, flags)
+{
+	struct sched_attr kattr = { };
+	struct task_struct *p;
+	int retval;
+
+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
+		return -EINVAL;
+
+	rcu_read_lock();
+	p = find_process_by_pid(pid);
+	retval = -ESRCH;
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	kattr.sched_policy = p->policy;
+	if (rt_task(p))
+		kattr.sched_priority = p->rt_priority;
+	else
+		kattr.sched_nice = task_nice(p);
+
+	rcu_read_unlock();
+
+	return sched_attr_copy_to_user(uattr, &kattr, usize);
+
+out_unlock:
+	rcu_read_unlock();
+	return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+	cpumask_var_t cpus_allowed, new_mask;
+	struct task_struct *p;
+	int retval;
+
+	rcu_read_lock();
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		rcu_read_unlock();
+		return -ESRCH;
+	}
+
+	/* Prevent p going away */
+	get_task_struct(p);
+	rcu_read_unlock();
+
+	if (p->flags & PF_NO_SETAFFINITY) {
+		retval = -EINVAL;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
+	retval = -EPERM;
+	if (!check_same_owner(p)) {
+		rcu_read_lock();
+		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+			rcu_read_unlock();
+			goto out_unlock;
+		}
+		rcu_read_unlock();
+	}
+
+	retval = security_task_setscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	cpuset_cpus_allowed(p, cpus_allowed);
+	cpumask_and(new_mask, in_mask, cpus_allowed);
+again:
+	retval = __set_cpus_allowed_ptr(p, new_mask, true);
+
+	if (!retval) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(new_mask, cpus_allowed)) {
+			/*
+			 * We must have raced with a concurrent cpuset
+			 * update. Just reset the cpus_allowed to the
+			 * cpuset's cpus_allowed
+			 */
+			cpumask_copy(new_mask, cpus_allowed);
+			goto again;
+		}
+	}
+out_unlock:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
+	put_task_struct(p);
+	return retval;
+}
+
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+			     cpumask_t *new_mask)
+{
+	if (len < cpumask_size())
+		cpumask_clear(new_mask);
+	else if (len > cpumask_size())
+		len = cpumask_size();
+
+	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
+
+/**
+ * sys_sched_setaffinity - set the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new CPU mask
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
+{
+	cpumask_var_t new_mask;
+	int retval;
+
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+	if (retval == 0)
+		retval = sched_setaffinity(pid, new_mask);
+	free_cpumask_var(new_mask);
+	return retval;
+}
+
+long sched_getaffinity(pid_t pid, cpumask_t *mask)
+{
+	struct task_struct *p;
+	unsigned long flags;
+	int retval;
+
+	get_online_cpus();
+	rcu_read_lock();
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+out_unlock:
+	rcu_read_unlock();
+	put_online_cpus();
+
+	return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
+ *
+ * Return: size of the CPU mask copied to user_mask_ptr on success. An error
+ * code otherwise.
+ */
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
+{
+	int ret;
+	cpumask_var_t mask;
+
+	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = sched_getaffinity(pid, mask);
+	if (ret == 0) {
+		unsigned int retlen = min(len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
+			ret = -EFAULT;
+		else
+			ret = retlen;
+	}
+	free_cpumask_var(mask);
+
+	return ret;
+}
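+/*
+ * Userspace sketch (illustrative only): note that the raw syscall returns the
+ * number of mask bytes copied rather than 0, e.g.
+ *
+ *	cpu_set_t set;
+ *	long n = syscall(__NR_sched_getaffinity, 0, sizeof(set), &set);
+ *
+ * where, on success, n is min(sizeof(set), cpumask_size()) in bytes.
+ */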
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. It does this by
+ * scheduling away the current task. If it still has the earliest deadline
+ * it will be scheduled again as the next task.
+ *
+ * Return: 0.
+ */
+static void do_sched_yield(void)
+{
+	struct rq *rq;
+
+	if (!sched_yield_type)
+		return;
+
+	local_irq_disable();
+	rq = this_rq();
+	rq_lock(rq);
+
+	if (sched_yield_type > 1)
+		time_slice_expired(current, rq);
+	schedstat_inc(rq->yld_count);
+
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt or enable interrupts:
+	 */
+	preempt_disable();
+	rq_unlock(rq);
+	sched_preempt_enable_no_resched();
+
+	schedule();
+}
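+/*
+ * Behaviour summary of the above (descriptive only): sched_yield_type 0 turns
+ * sched_yield() into a no-op, 1 simply reschedules the caller, and any higher
+ * value also expires the caller's current timeslice before rescheduling.
+ */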
+
+SYSCALL_DEFINE0(sched_yield)
+{
+	do_sched_yield();
+	return 0;
+}
+
+#ifndef CONFIG_PREEMPTION
+int __sched _cond_resched(void)
+{
+	if (should_resched(0)) {
+		preempt_schedule_common();
+		return 1;
+	}
+	rcu_all_qs();
+	return 0;
+}
+EXPORT_SYMBOL(_cond_resched);
+#endif
+
+/*
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+int __cond_resched_lock(spinlock_t *lock)
+{
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
+	int ret = 0;
+
+	lockdep_assert_held(lock);
+
+	if (spin_needbreak(lock) || resched) {
+		spin_unlock(lock);
+		if (resched)
+			preempt_schedule_common();
+		else
+			cpu_relax();
+		ret = 1;
+		spin_lock(lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__cond_resched_lock);
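+/*
+ * Typical use (illustrative sketch): long loops run under a spinlock call
+ * cond_resched_lock(&lock) in the loop body so the lock is dropped and the
+ * CPU yielded whenever a reschedule is pending or the lock is contended,
+ * then reacquired before continuing.
+ */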
+
+/**
+ * yield - yield the current processor to other threads.
+ *
+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
+ *
+ * The scheduler is at all times free to pick the calling task as the most
+ * eligible task to run, if removing the yield() call from your code breaks
+ * it, it's already broken.
+ *
+ * Typical broken usage is:
+ *
+ * while (!event)
+ *	yield();
+ *
+ * where one assumes that yield() will let 'the other' process run that will
+ * make event true. If the current task is a SCHED_FIFO task that will never
+ * happen. Never use yield() as a progress guarantee!!
+ *
+ * If you want to use yield() to wait for something, use wait_event().
+ * If you want to use yield() to be 'nice' for others, use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+void __sched yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	do_sched_yield();
+}
+EXPORT_SYMBOL(yield);
+
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Return:
+ *	true (>0) if we indeed boosted the target task.
+ *	false (0) if we failed to boost the target.
+ *	-ESRCH if there's no task to yield to.
+ */
+int __sched yield_to(struct task_struct *p, bool preempt)
+{
+	struct task_struct *rq_p;
+	struct rq *rq, *p_rq;
+	unsigned long flags;
+	int yielded = 0;
+
+	local_irq_save(flags);
+	rq = this_rq();
+
+again:
+	p_rq = task_rq(p);
+	/*
+	 * If the target task is already running, or is not runnable at all,
+	 * there is no task to yield to.
+	 */
+	if (task_running(p_rq, p) || p->state) {
+		yielded = -ESRCH;
+		goto out_irq;
+	}
+
+	double_rq_lock(rq, p_rq);
+	if (unlikely(task_rq(p) != p_rq)) {
+		double_rq_unlock(rq, p_rq);
+		goto again;
+	}
+
+	yielded = 1;
+	schedstat_inc(rq->yld_count);
+	rq_p = rq->curr;
+	if (p->deadline > rq_p->deadline)
+		p->deadline = rq_p->deadline;
+	p->time_slice += rq_p->time_slice;
+	if (p->time_slice > timeslice())
+		p->time_slice = timeslice();
+	time_slice_expired(rq_p, rq);
+	if (preempt && rq != p_rq)
+		resched_task(p_rq->curr);
+	double_rq_unlock(rq, p_rq);
+out_irq:
+	local_irq_restore(flags);
+
+	if (yielded > 0)
+		schedule();
+	return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
+int io_schedule_prepare(void)
+{
+	int old_iowait = current->in_iowait;
+
+	current->in_iowait = 1;
+	blk_schedule_flush_plug(current);
+
+	return old_iowait;
+}
+
+void io_schedule_finish(int token)
+{
+	current->in_iowait = token;
+}
+
+/*
+ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+long __sched io_schedule_timeout(long timeout)
+{
+	int token;
+	long ret;
+
+	token = io_schedule_prepare();
+	ret = schedule_timeout(timeout);
+	io_schedule_finish(token);
+
+	return ret;
+}
+EXPORT_SYMBOL(io_schedule_timeout);
+
+void __sched io_schedule(void)
+{
+	int token;
+
+	token = io_schedule_prepare();
+	schedule();
+	io_schedule_finish(token);
+}
+EXPORT_SYMBOL(io_schedule);
+
+/**
+ * sys_sched_get_priority_max - return maximum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+{
+	int ret = -EINVAL;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+		ret = MAX_USER_RT_PRIO-1;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_BATCH:
+	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
+/**
+ * sys_sched_get_priority_min - return minimum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+{
+	int ret = -EINVAL;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+		ret = 1;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_BATCH:
+	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+{
+	struct task_struct *p;
+	unsigned int time_slice;
+	struct rq_flags rf;
+	struct rq *rq;
+	int retval;
+
+	if (pid < 0)
+		return -EINVAL;
+
+	retval = -ESRCH;
+	rcu_read_lock();
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = security_task_getscheduler(p);
+	if (retval)
+		goto out_unlock;
+
+	rq = task_rq_lock(p, &rf);
+	time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
+	task_rq_unlock(rq, p, &rf);
+
+	rcu_read_unlock();
+	*t = ns_to_timespec64(time_slice);
+	return 0;
+
+out_unlock:
+	rcu_read_unlock();
+	return retval;
+}
+
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * this syscall writes the default timeslice value of a given process
+ * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct __kernel_timespec __user *, interval)
+{
+	struct timespec64 t;
+	int retval = sched_rr_get_interval(pid, &t);
+
+	if (retval == 0)
+		retval = put_timespec64(&t, interval);
+
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
+		struct old_timespec32 __user *, interval)
+{
+	struct timespec64 t;
+	int retval = sched_rr_get_interval(pid, &t);
+
+	if (retval == 0)
+		retval = put_old_timespec32(&t, interval);
+	return retval;
+}
+#endif
+
+void sched_show_task(struct task_struct *p)
+{
+	unsigned long free = 0;
+	int ppid;
+
+	if (!try_get_task_stack(p))
+		return;
+
+	printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
+
+	if (p->state == TASK_RUNNING)
+		printk(KERN_CONT "  running task    ");
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	free = stack_not_used(p);
+#endif
+	ppid = 0;
+	rcu_read_lock();
+	if (pid_alive(p))
+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
+	rcu_read_unlock();
+	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+		task_pid_nr(p), ppid,
+		(unsigned long)task_thread_info(p)->flags);
+
+	print_worker_info(KERN_INFO, p);
+	show_stack(p, NULL);
+	put_task_stack(p);
+}
+EXPORT_SYMBOL_GPL(sched_show_task);
+
+static inline bool
+state_filter_match(unsigned long state_filter, struct task_struct *p)
+{
+	/* no filter, everything matches */
+	if (!state_filter)
+		return true;
+
+	/* filter, but doesn't match */
+	if (!(p->state & state_filter))
+		return false;
+
+	/*
+	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
+	 * TASK_KILLABLE).
+	 */
+	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
+		return false;
+
+	return true;
+}
+
+void show_state_filter(unsigned long state_filter)
+{
+	struct task_struct *g, *p;
+
+#if BITS_PER_LONG == 32
+	printk(KERN_INFO
+		"  task                PC stack   pid father\n");
+#else
+	printk(KERN_INFO
+		"  task                        PC stack   pid father\n");
+#endif
+	rcu_read_lock();
+	for_each_process_thread(g, p) {
+		/*
+		 * reset the NMI-timeout, listing all files on a slow
+		 * console might take a lot of time:
+		 * Also, reset softlockup watchdogs on all CPUs, because
+		 * another CPU might be blocked waiting for us to process
+		 * an IPI.
+		 */
+		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
+		if (state_filter_match(state_filter, p))
+			sched_show_task(p);
+	}
+
+	rcu_read_unlock();
+	/*
+	 * Only show locks if all tasks are dumped:
+	 */
+	if (!state_filter)
+		debug_show_all_locks();
+}
+
+void dump_cpu_task(int cpu)
+{
+	pr_info("Task dump for CPU %d:\n", cpu);
+	sched_show_task(cpu_curr(cpu));
+}
+
+#ifdef CONFIG_SMP
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{
+	cpumask_copy(&p->cpus_mask, new_mask);
+	p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	struct rq *rq = task_rq(p);
+
+	lockdep_assert_held(&p->pi_lock);
+
+	cpumask_copy(&p->cpus_mask, new_mask);
+
+	if (task_queued(p)) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(rq->lock);
+	}
+}
+
+/*
+ * do_set_cpus_allowed() may be called from outside the scheduler code, but it
+ * should not be called on a running or queued task. We should be holding
+ * pi_lock.
+ */
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	__do_set_cpus_allowed(p, new_mask);
+	if (needs_other_cpu(p, task_cpu(p))) {
+		struct rq *rq;
+
+		rq = __task_rq_lock(p, NULL);
+		set_task_cpu(p, valid_task_cpu(p));
+		resched_task(p);
+		__task_rq_unlock(rq, NULL);
+	}
+}
+#endif
+
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
+void init_idle(struct task_struct *idle, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+	raw_spin_lock(rq->lock);
+	idle->last_ran = rq->niffies;
+	time_slice_expired(idle, rq);
+	idle->state = TASK_RUNNING;
+	/* Setting prio to illegal value shouldn't matter when never queued */
+	idle->prio = PRIO_LIMIT;
+	idle->flags |= PF_IDLE;
+
+	kasan_unpoison_task_stack(idle);
+
+#ifdef CONFIG_SMP
+	/*
+	 * It's possible that init_idle() gets called multiple times on a task,
+	 * in that case do_set_cpus_allowed() will not do the right thing.
+	 *
+	 * And since this is boot we can forgo the serialisation.
+	 */
+	set_cpus_allowed_common(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMT_NICE
+	idle->smt_bias = 0;
+#endif
+#endif
+	set_rq_task(rq, idle);
+
+	/* Silence PROVE_RCU */
+	rcu_read_lock();
+	set_task_cpu(idle, cpu);
+	rcu_read_unlock();
+
+	rq->idle = idle;
+	rcu_assign_pointer(rq->curr, idle);
+	idle->on_rq = TASK_ON_RQ_QUEUED;
+	raw_spin_unlock(rq->lock);
+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
+
+	/* Set the preempt count _outside_ the spinlocks! */
+	init_idle_preempt_count(idle, cpu);
+
+	ftrace_graph_init_idle_task(idle, cpu);
+	vtime_init_idle(idle, cpu);
+#ifdef CONFIG_SMP
+	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
+}
+
+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
+			      const struct cpumask __maybe_unused *trial)
+{
+	return 1;
+}
+
+int task_can_attach(struct task_struct *p,
+		    const struct cpumask *cs_cpus_allowed)
+{
+	int ret = 0;
+
+	/*
+	 * Kthreads which disallow setaffinity shouldn't be moved
+	 * to a new cpuset; we don't want to change their CPU
+	 * affinity and isolating such threads by their set of
+	 * allowed nodes is unnecessary.  Thus, cpusets are not
+	 * applicable for such threads.  This prevents checking for
+	 * success of set_cpus_allowed_ptr() on all attached tasks
+	 * before cpus_mask may be changed.
+	 */
+	if (p->flags & PF_NO_SETAFFINITY)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+void resched_cpu(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (cpu_online(cpu) || cpu == smp_processor_id())
+		resched_curr(rq);
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ_COMMON
+void select_nohz_load_balancer(int stop_tick)
+{
+}
+
+void set_cpu_sd_state_idle(void) {}
+void nohz_balance_enter_idle(int cpu) {}
+
+/*
+ * In the semi idle case, use the nearest busy CPU for migrating timers
+ * from an idle CPU.  This is good for power-savings.
+ *
+ * We don't do a similar optimization for a completely idle system, as
+ * selecting an idle CPU will add more delays to the timers than intended
+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
+ */
+int get_nohz_timer_target(void)
+{
+	int i, cpu = smp_processor_id(), default_cpu = -1;
+	struct sched_domain *sd;
+
+	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
+		if (!idle_cpu(cpu))
+			return cpu;
+		default_cpu = cpu;
+	}
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd) {
+		for_each_cpu_and(i, sched_domain_span(sd),
+			housekeeping_cpumask(HK_FLAG_TIMER)) {
+			if (cpu == i)
+				continue;
+
+			if (!idle_cpu(i)) {
+				cpu = i;
+				goto unlock;
+			}
+		}
+	}
+
+	if (default_cpu == -1)
+		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
+	cpu = default_cpu;
+unlock:
+	rcu_read_unlock();
+	return cpu;
+}
+
+/*
+ * When add_timer_on() enqueues a timer into the timer wheel of an
+ * idle CPU then this timer might expire before the next timer event
+ * which is scheduled to wake up that CPU. In case of a completely
+ * idle system the next event might even be infinite time into the
+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
+ * leaves the inner idle loop so the newly added timer is taken into
+ * account when the CPU goes back to idle and evaluates the timer
+ * wheel for the next timer event.
+ */
+void wake_up_idle_cpu(int cpu)
+{
+	if (cpu == smp_processor_id())
+		return;
+
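+	/*
+	 * Set NEED_RESCHED on the remote idle task; an IPI is only needed if
+	 * that CPU is not already polling need_resched, otherwise it will
+	 * notice the flag by itself.
+	 */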
+	if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
+		smp_sched_reschedule(cpu);
+	else
+		trace_sched_wake_idle_without_ipi(cpu);
+}
+
+static bool wake_up_full_nohz_cpu(int cpu)
+{
+	/*
+	 * We just need the target to call irq_exit() and re-evaluate
+	 * the next tick. The nohz full kick at least implies that.
+	 * If needed we can still optimize that later with an
+	 * empty IRQ.
+	 */
+	if (cpu_is_offline(cpu))
+		return true;  /* Don't try to wake offline CPUs. */
+	if (tick_nohz_full_cpu(cpu)) {
+		if (cpu != smp_processor_id() ||
+		    tick_nohz_tick_stopped())
+			tick_nohz_full_kick_cpu(cpu);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Wake up the specified CPU.  If the CPU is going offline, it is the
+ * caller's responsibility to deal with the lost wakeup, for example,
+ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
+ */
+void wake_up_nohz_cpu(int cpu)
+{
+	if (!wake_up_full_nohz_cpu(cpu))
+		wake_up_idle_cpu(cpu);
+}
+#endif /* CONFIG_NO_HZ_COMMON */
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task; the
+ * task must not exit() and deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
+{
+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
+	bool queued = false, running_wrong = false, kthread;
+	unsigned int dest_cpu;
+	struct rq_flags rf;
+	struct rq *rq;
+	int ret = 0;
+
+	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
+
+	kthread = !!(p->flags & PF_KTHREAD);
+	if (kthread) {
+		/*
+		 * Kernel threads are allowed on online && !active CPUs
+		 */
+		cpu_valid_mask = cpu_online_mask;
+	}
+
+	/*
+	 * Must re-check here, to close a race against __kthread_bind(),
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+	 */
+	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (cpumask_equal(&p->cpus_mask, new_mask))
+		goto out;
+
+	/*
+	 * Picking a ~random cpu helps in cases where we are changing affinity
+	 * for groups of tasks (ie. cpuset), so that load balancing is not
+	 * immediately required to distribute the tasks within their new mask.
+	 */
+	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
+	if (dest_cpu >= nr_cpu_ids) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	queued = task_queued(p);
+	__do_set_cpus_allowed(p, new_mask);
+
+	if (kthread) {
+		/*
+		 * For kernel threads that do indeed end up on online &&
+		 * !active we want to ensure they are strict per-CPU threads.
+		 */
+		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
+			!cpumask_intersects(new_mask, cpu_active_mask) &&
+			p->nr_cpus_allowed != 1);
+	}
+
+	/* Can the task run on the task's current CPU? If so, we're done */
+	if (cpumask_test_cpu(task_cpu(p), new_mask))
+		goto out;
+
+	if (task_running(rq, p)) {
+		/* Task is running on the wrong cpu now, reschedule it. */
+		if (rq == this_rq()) {
+			set_task_cpu(p, dest_cpu);
+			set_tsk_need_resched(p);
+			running_wrong = true;
+		} else
+			resched_task(p);
+	} else {
+		if (queued) {
+			/*
+			 * Switch runqueue locks after dequeueing the task
+			 * here while still holding the pi_lock to be holding
+			 * the correct lock for enqueueing.
+			 */
+			dequeue_task(rq, p, 0);
+			rq_unlock(rq);
+
+			rq = cpu_rq(dest_cpu);
+			rq_lock(rq);
+		}
+		set_task_cpu(p, dest_cpu);
+		if (queued)
+			enqueue_task(rq, p, 0);
+	}
+	if (queued)
+		try_preempt(p, rq);
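+	/*
+	 * If the task is the one currently running on this CPU, disable
+	 * preemption before dropping the locks so that __schedule() below can
+	 * switch away from it on this CPU.
+	 */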
+	if (running_wrong)
+		preempt_disable();
+out:
+	task_rq_unlock(rq, p, &rf);
+
+	if (running_wrong) {
+		__schedule(true);
+		preempt_enable();
+	}
+
+	return ret;
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return __set_cpus_allowed_ptr(p, new_mask, false);
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Run through the task list and find tasks affined to the dead cpu, then
+ * remove that cpu from their allowed mask, enable cpu0 and set the zerobound
+ * flag. Must hold cpu 0's and src_cpu's runqueue locks. We should be holding
+ * both the rq lock and pi_lock to change cpus_mask, but it's not going to
+ * matter here.
+ */
+static void bind_zero(int src_cpu)
+{
+	struct task_struct *p, *t;
+	struct rq *rq0;
+	int bound = 0;
+
+	if (src_cpu == 0)
+		return;
+
+	rq0 = cpu_rq(0);
+
+	do_each_thread(t, p) {
+		if (cpumask_test_cpu(src_cpu, p->cpus_ptr)) {
+			bool local = (task_cpu(p) == src_cpu);
+			struct rq *rq = task_rq(p);
+
+			/* A task still running here is the cpu stopper thread; skip it */
+			if (local && task_running(rq, p))
+				continue;
+			atomic_clear_cpu(src_cpu, &p->cpus_mask);
+			atomic_set_cpu(0, &p->cpus_mask);
+			p->zerobound = true;
+			bound++;
+			if (local) {
+				bool queued = task_queued(p);
+
+				if (queued)
+					dequeue_task(rq, p, 0);
+				set_task_cpu(p, 0);
+				if (queued)
+					enqueue_task(rq0, p, 0);
+			}
+		}
+	} while_each_thread(t, p);
+
+	if (bound) {
+		printk(KERN_INFO "MuQSS removed affinity for %d processes to cpu %d\n",
+		       bound, src_cpu);
+	}
+}
+
+/*
+ * Find processes with the zerobound flag and re-enable their affinity for the
+ * CPU coming alive.
+ */
+static void unbind_zero(int src_cpu)
+{
+	int unbound = 0, zerobound = 0;
+	struct task_struct *p, *t;
+
+	if (src_cpu == 0)
+		return;
+
+	do_each_thread(t, p) {
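+		/* Kernel threads (no mm) simply drop the zerobound flag here */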
+		if (!p->mm)
+			p->zerobound = false;
+		if (p->zerobound) {
+			unbound++;
+			cpumask_set_cpu(src_cpu, &p->cpus_mask);
+			/*
+			 * Once every CPU affinity has been re-enabled, remove
+			 * the zerobound flag.
+			 */
+			if (cpumask_subset(cpu_possible_mask, p->cpus_ptr)) {
+				p->zerobound = false;
+				zerobound++;
+			}
+		}
+	} while_each_thread(t, p);
+
+	if (unbound) {
+		printk(KERN_INFO "MuQSS added affinity for %d processes to cpu %d\n",
+		       unbound, src_cpu);
+	}
+	if (zerobound) {
+		printk(KERN_INFO "MuQSS released forced binding to cpu0 for %d processes\n",
+		       zerobound);
+	}
+}
+
+/*
+ * Ensure that the idle task is using init_mm right before its cpu goes
+ * offline.
+ */
+void idle_task_exit(void)
+{
+	struct mm_struct *mm = current->active_mm;
+
+	BUG_ON(cpu_online(smp_processor_id()));
+
+	if (mm != &init_mm) {
+		switch_mm(mm, &init_mm, current);
+		current->active_mm = &init_mm;
+		finish_arch_post_lock_switch();
+	}
+	mmdrop(mm);
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void unbind_zero(int src_cpu) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
+	struct sched_param start_param = { .sched_priority = 0 };
+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+	if (stop) {
+		/*
+		 * Make it appear like a SCHED_FIFO task; it's something
+		 * userspace knows about and won't get confused by.
+		 *
+		 * Also, it will make PI more or less work without too
+		 * much confusion -- but then, stop work should not
+		 * rely on PI working anyway.
+		 */
+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
+	}
+
+	cpu_rq(cpu)->stop = stop;
+
+	if (old_stop) {
+		/*
+		 * Reset it back to a normal scheduling policy so that
+		 * it can die in pieces.
+		 */
+		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
+	}
+}
+
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+static struct ctl_table sd_ctl_dir[] = {
+	{
+		.procname	= "sched_domain",
+		.mode		= 0555,
+	},
+	{}
+};
+
+static struct ctl_table sd_ctl_root[] = {
+	{
+		.procname	= "kernel",
+		.mode		= 0555,
+		.child		= sd_ctl_dir,
+	},
+	{}
+};
+
+static struct ctl_table *sd_alloc_ctl_entry(int n)
+{
+	struct ctl_table *entry =
+		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+	return entry;
+}
+
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+	struct ctl_table *entry;
+
+	/*
+	 * In the intermediate directories, both the child directory and
+	 * procname are dynamically allocated and could fail but the mode
+	 * will always be set. In the lowest directory the names are
+	 * static strings and all have proc handlers.
+	 */
+	for (entry = *tablep; entry->mode; entry++) {
+		if (entry->child)
+			sd_free_ctl_entry(&entry->child);
+		if (entry->proc_handler == NULL)
+			kfree(entry->procname);
+	}
+
+	kfree(*tablep);
+	*tablep = NULL;
+}
+
+static void
+set_table_entry(struct ctl_table *entry,
+		const char *procname, void *data, int maxlen,
+		umode_t mode, proc_handler *proc_handler)
+{
+	entry->procname = procname;
+	entry->data = data;
+	entry->maxlen = maxlen;
+	entry->mode = mode;
+	entry->proc_handler = proc_handler;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_domain_table(struct sched_domain *sd)
+{
+	struct ctl_table *table = sd_alloc_ctl_entry(9);
+
+	if (table == NULL)
+		return NULL;
+
+	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
+	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
+	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
+	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0644, proc_dointvec_minmax);
+	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
+	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
+	/* &table[8] is terminator */
+
+	return table;
+}
+
+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+{
+	struct ctl_table *entry, *table;
+	struct sched_domain *sd;
+	int domain_num = 0, i;
+	char buf[32];
+
+	for_each_domain(cpu, sd)
+		domain_num++;
+	entry = table = sd_alloc_ctl_entry(domain_num + 1);
+	if (table == NULL)
+		return NULL;
+
+	i = 0;
+	for_each_domain(cpu, sd) {
+		snprintf(buf, 32, "domain%d", i);
+		entry->procname = kstrdup(buf, GFP_KERNEL);
+		entry->mode = 0555;
+		entry->child = sd_alloc_ctl_domain_table(sd);
+		entry++;
+		i++;
+	}
+	return table;
+}
+
+static cpumask_var_t sd_sysctl_cpus;
+static struct ctl_table_header *sd_sysctl_header;
+
+void register_sched_domain_sysctl(void)
+{
+	static struct ctl_table *cpu_entries;
+	static struct ctl_table **cpu_idx;
+	char buf[32];
+	int i;
+
+	if (!cpu_entries) {
+		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
+		if (!cpu_entries)
+			return;
+
+		WARN_ON(sd_ctl_dir[0].child);
+		sd_ctl_dir[0].child = cpu_entries;
+	}
+
+	if (!cpu_idx) {
+		struct ctl_table *e = cpu_entries;
+
+		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
+		if (!cpu_idx)
+			return;
+
+		/* deal with sparse possible map */
+		for_each_possible_cpu(i) {
+			cpu_idx[i] = e;
+			e++;
+		}
+	}
+
+	if (!cpumask_available(sd_sysctl_cpus)) {
+		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
+			return;
+
+		/* Initialise to the possible mask so there are no holes in @cpu_entries */
+		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
+	}
+
+	for_each_cpu(i, sd_sysctl_cpus) {
+		struct ctl_table *e = cpu_idx[i];
+
+		if (e->child)
+			sd_free_ctl_entry(&e->child);
+
+		if (!e->procname) {
+			snprintf(buf, 32, "cpu%d", i);
+			e->procname = kstrdup(buf, GFP_KERNEL);
+		}
+		e->mode = 0555;
+		e->child = sd_alloc_ctl_cpu_table(i);
+
+		__cpumask_clear_cpu(i, sd_sysctl_cpus);
+	}
+
+	WARN_ON(sd_sysctl_header);
+	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+}
+
+void dirty_sched_domain_sysctl(int cpu)
+{
+	if (cpumask_available(sd_sysctl_cpus))
+		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
+}
+
+/* may be called multiple times per register */
+void unregister_sched_domain_sysctl(void)
+{
+	unregister_sysctl_table(sd_sysctl_header);
+	sd_sysctl_header = NULL;
+}
+#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
+
+void set_rq_online(struct rq *rq)
+{
+	if (!rq->online) {
+		cpumask_set_cpu(cpu_of(rq), rq->rd->online);
+		rq->online = true;
+	}
+}
+
+void set_rq_offline(struct rq *rq)
+{
+	if (rq->online) {
+		int cpu = cpu_of(rq);
+
+		cpumask_clear_cpu(cpu, rq->rd->online);
+		rq->online = false;
+		clear_cpuidle_map(cpu);
+	}
+}
+
+/*
+ * used to mark begin/end of suspend/resume:
+ */
+static int num_cpus_frozen;
+
+/*
+ * Update cpusets according to cpu_active mask.  If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore them to their original state upon resume anyway.
+ */
+static void cpuset_cpu_active(void)
+{
+	if (cpuhp_tasks_frozen) {
+		/*
+		 * num_cpus_frozen tracks how many CPUs are involved in suspend
+		 * resume sequence. As long as this is not the last online
+		 * operation in the resume sequence, just build a single sched
+		 * domain, ignoring cpusets.
+		 */
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
+			return;
+		/*
+		 * This is the last CPU online operation. So fall through and
+		 * restore the original sched domains by considering the
+		 * cpuset configurations.
+		 */
+		cpuset_force_rebuild();
+	}
+
+	cpuset_update_active_cpus();
+}
+
+static int cpuset_cpu_inactive(unsigned int cpu)
+{
+	if (!cpuhp_tasks_frozen) {
+		cpuset_update_active_cpus();
+	} else {
+		num_cpus_frozen++;
+		partition_sched_domains(1, NULL, NULL);
+	}
+	return 0;
+}
+
+int sched_cpu_activate(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
+
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going up, increment the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+	set_cpu_active(cpu, true);
+
+	if (sched_smp_initialized) {
+		sched_domains_numa_masks_set(cpu);
+		cpuset_cpu_active();
+	}
+
+	/*
+	 * Put the rq online, if not already. This happens:
+	 *
+	 * 1) In the early boot process, because we build the real domains
+	 *    after all CPUs have been brought up.
+	 *
+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+	 *    domains.
+	 */
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	unbind_zero(cpu);
+	rq_unlock_irqrestore(rq, &rf);
+
+	return 0;
+}
+
+int sched_cpu_deactivate(unsigned int cpu)
+{
+	int ret;
+
+	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * Do sync before park smpboot threads to take care the rcu boost case.
+	 */
+	synchronize_rcu();
+
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
+	if (!sched_smp_initialized)
+		return 0;
+
+	ret = cpuset_cpu_inactive(cpu);
+	if (ret) {
+		set_cpu_active(cpu, true);
+		return ret;
+	}
+	sched_domains_numa_masks_clear(cpu);
+	return 0;
+}
+
+int sched_cpu_starting(unsigned int cpu)
+{
+	sched_tick_start(cpu);
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int sched_cpu_dying(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Handle pending wakeups and then migrate everything off */
+	sched_ttwu_pending();
+	sched_tick_stop(cpu);
+
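+	/*
+	 * Hold this CPU's and CPU 0's runqueue locks together while marking
+	 * the runqueue offline and rebinding every task still affine to the
+	 * dying CPU onto CPU 0.
+	 */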
+	local_irq_save(flags);
+	double_rq_lock(rq, cpu_rq(0));
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	bind_zero(cpu);
+	double_rq_unlock(rq, cpu_rq(0));
+	sched_start_tick(rq, cpu);
+	hrexpiry_clear(rq);
+	local_irq_restore(flags);
+
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+/*
+ * Cheaper version of the functions below, used when SMT and/or MC support is
+ * compiled in but the CPUs have no siblings.
+ */
+static bool sole_cpu_idle(struct rq *rq)
+{
+	return rq_idle(rq);
+}
+#endif
+#ifdef CONFIG_SCHED_SMT
+static const cpumask_t *thread_cpumask(int cpu)
+{
+	return topology_sibling_cpumask(cpu);
+}
+/* All this CPU's SMT siblings are idle */
+static bool siblings_cpu_idle(struct rq *rq)
+{
+	return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
+}
+#endif
+#ifdef CONFIG_SCHED_MC
+static const cpumask_t *core_cpumask(int cpu)
+{
+	return topology_core_cpumask(cpu);
+}
+/* All this CPU's shared cache siblings are idle */
+static bool cache_cpu_idle(struct rq *rq)
+{
+	return cpumask_subset(&rq->core_mask, &cpu_idle_map);
+}
+/* MC siblings CPU mask which share the same LLC */
+static const cpumask_t *llc_core_cpumask(int cpu)
+{
+#ifdef CONFIG_X86
+	return per_cpu(cpu_llc_shared_map, cpu);
+#else
+	return topology_core_cpumask(cpu);
+#endif
+}
+#endif
+
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_BOOK,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+/*
+ * Set up the relative cache distance of each online cpu from each
+ * other in a simple array for quick lookup. Locality is determined
+ * by the closest sched_domain that CPUs are separated by. CPUs with
+ * shared cache in SMT and MC are treated as local. Physically separate
+ * CPUs within the same node, whether in the same package or not, are
+ * treated as not local. CPUs not even in the same domain (different
+ * nodes) are treated as very distant.
+ */
+static void __init select_leaders(void)
+{
+	struct rq *rq, *other_rq, *leader;
+	struct sched_domain *sd;
+	int cpu, other_cpu;
+#ifdef CONFIG_SCHED_SMT
+	bool smt_threads = false;
+#endif
+
+	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+		rq = cpu_rq(cpu);
+		leader = NULL;
+		/* First check if this cpu is in the same node */
+		for_each_domain(cpu, sd) {
+			if (sd->level > SD_LV_MC)
+				continue;
+			if (rqshare != RQSHARE_ALL)
+				leader = NULL;
+			/* Set locality to local node if not already found lower */
+			for_each_cpu(other_cpu, sched_domain_span(sd)) {
+				if (rqshare >= RQSHARE_SMP) {
+					other_rq = cpu_rq(other_cpu);
+
+					/* Set the smp_leader to the first CPU */
+					if (!leader)
+						leader = rq;
+					if (!other_rq->smp_leader)
+						other_rq->smp_leader = leader;
+				}
+				if (rq->cpu_locality[other_cpu] > LOCALITY_SMP)
+					rq->cpu_locality[other_cpu] = LOCALITY_SMP;
+			}
+		}
+
+		/*
+		 * Each runqueue has its own idle-check function in case it has
+		 * no siblings of its own, allowing mixed topologies.
+		 */
+#ifdef CONFIG_SCHED_MC
+		leader = NULL;
+		if (cpumask_weight(core_cpumask(cpu)) > 1) {
+			cpumask_copy(&rq->core_mask, llc_core_cpumask(cpu));
+			cpumask_clear_cpu(cpu, &rq->core_mask);
+			for_each_cpu(other_cpu, core_cpumask(cpu)) {
+				if (rqshare == RQSHARE_MC ||
+					(rqshare == RQSHARE_MC_LLC && cpumask_test_cpu(other_cpu, llc_core_cpumask(cpu)))) {
+					other_rq = cpu_rq(other_cpu);
+
+					/* Set the mc_leader to the first CPU */
+					if (!leader)
+						leader = rq;
+					if (!other_rq->mc_leader)
+						other_rq->mc_leader = leader;
+				}
+				if (rq->cpu_locality[other_cpu] > LOCALITY_MC) {
+					/* this is to get LLC into play even in case LLC sharing is not used */
+					if (cpumask_test_cpu(other_cpu, llc_core_cpumask(cpu)))
+						rq->cpu_locality[other_cpu] = LOCALITY_MC_LLC;
+					else
+						rq->cpu_locality[other_cpu] = LOCALITY_MC;
+				}
+			}
+			rq->cache_idle = cache_cpu_idle;
+		}
+#endif
+#ifdef CONFIG_SCHED_SMT
+		leader = NULL;
+		if (cpumask_weight(thread_cpumask(cpu)) > 1) {
+			cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
+			cpumask_clear_cpu(cpu, &rq->thread_mask);
+			for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+				if (rqshare == RQSHARE_SMT) {
+					other_rq = cpu_rq(other_cpu);
+
+					/* Set the smt_leader to the first CPU */
+					if (!leader)
+						leader = rq;
+					if (!other_rq->smt_leader)
+						other_rq->smt_leader = leader;
+				}
+				if (rq->cpu_locality[other_cpu] > LOCALITY_SMT)
+					rq->cpu_locality[other_cpu] = LOCALITY_SMT;
+			}
+			rq->siblings_idle = siblings_cpu_idle;
+			smt_threads = true;
+		}
+#endif
+	}
+
+#ifdef CONFIG_SMT_NICE
+	if (smt_threads) {
+		check_siblings = &check_smt_siblings;
+		wake_siblings = &wake_smt_siblings;
+		smt_schedule = &smt_should_schedule;
+	}
+#endif
+
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		for_each_online_cpu(other_cpu) {
+			printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
+		}
+	}
+}
+
+/* FIXME freeing locked spinlock */
+static void __init share_and_free_rq(struct rq *leader, struct rq *rq)
+{
+	WARN_ON(rq->nr_running > 0);
+
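+	/*
+	 * Free this runqueue's own skiplist node, skiplist and lock, and point
+	 * them at the leader's copies so both CPUs serialise on the same lock.
+	 */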
+	kfree(rq->node);
+	kfree(rq->sl);
+	kfree(rq->lock);
+	rq->node = leader->node;
+	rq->sl = leader->sl;
+	rq->lock = leader->lock;
+	rq->is_leader = false;
+	barrier();
+	/* To make up for not unlocking the freed runlock */
+	preempt_enable();
+}
+
+static void __init share_rqs(void)
+{
+	struct rq *rq, *leader;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		leader = rq->smp_leader;
+
+		rq_lock(rq);
+		if (leader && rq != leader) {
+			printk(KERN_INFO "MuQSS sharing SMP runqueue from CPU %d to CPU %d\n",
+			       leader->cpu, rq->cpu);
+			share_and_free_rq(leader, rq);
+		} else
+			rq_unlock(rq);
+	}
+
+#ifdef CONFIG_SCHED_MC
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		leader = rq->mc_leader;
+
+		rq_lock(rq);
+		if (leader && rq != leader) {
+			printk(KERN_INFO "MuQSS sharing MC runqueue from CPU %d to CPU %d\n",
+			       leader->cpu, rq->cpu);
+			share_and_free_rq(leader, rq);
+		} else
+			rq_unlock(rq);
+	}
+#endif /* CONFIG_SCHED_MC */
+
+#ifdef CONFIG_SCHED_SMT
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		leader = rq->smt_leader;
+
+		rq_lock(rq);
+		if (leader && rq != leader) {
+			printk(KERN_INFO "MuQSS sharing SMT runqueue from CPU %d to CPU %d\n",
+			       leader->cpu, rq->cpu);
+			share_and_free_rq(leader, rq);
+		} else
+			rq_unlock(rq);
+	}
+#endif /* CONFIG_SCHED_SMT */
+}
+
+static void __init setup_rq_orders(void)
+{
+	int *selected_cpus, *ordered_cpus;
+	struct rq *rq, *other_rq;
+	int cpu, other_cpu, i;
+
+	selected_cpus = kmalloc(sizeof(int) * NR_CPUS, GFP_ATOMIC);
+	ordered_cpus = kmalloc(sizeof(int) * NR_CPUS, GFP_ATOMIC);
+
+	total_runqueues = 0;
+	for_each_online_cpu(cpu) {
+		int locality, total_rqs = 0, total_cpus = 0;
+
+		rq = cpu_rq(cpu);
+		if (rq->is_leader)
+			total_runqueues++;
+
+		for (locality = LOCALITY_SAME; locality <= LOCALITY_DISTANT; locality++) {
+			int selected_cpu_cnt, selected_cpu_idx, test_cpu_idx, cpu_idx, best_locality, test_cpu;
+			int ordered_cpus_idx;
+
+			ordered_cpus_idx = -1;
+			selected_cpu_cnt = 0;
+
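+			/*
+			 * Walk the online CPUs starting from this CPU, counting
+			 * upwards for CPUs in the lower half of the CPU space
+			 * and downwards otherwise, wrapping around the online
+			 * range so that nearby CPU numbers are visited first.
+			 */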
+			for_each_online_cpu(test_cpu) {
+				if (cpu < num_online_cpus() / 2)
+					other_cpu = cpu + test_cpu;
+				else
+					other_cpu = cpu - test_cpu;
+				if (other_cpu < 0)
+					other_cpu += num_online_cpus();
+				else
+					other_cpu %= num_online_cpus();
+				/* gather CPUs of the same locality */
+				if (rq->cpu_locality[other_cpu] == locality) {
+					selected_cpus[selected_cpu_cnt] = other_cpu;
+					selected_cpu_cnt++;
+				}
+			}
+
+			/* reserve first CPU as starting point */
+			if (selected_cpu_cnt > 0) {
+				ordered_cpus_idx++;
+				ordered_cpus[ordered_cpus_idx] = selected_cpus[ordered_cpus_idx];
+				selected_cpus[ordered_cpus_idx] = -1;
+			}
+
+			/* take each CPU and sort it within the same locality based on each inter-CPU localities */
+			for(test_cpu_idx = 1; test_cpu_idx < selected_cpu_cnt; test_cpu_idx++) {
+				/* starting point with worst locality and current CPU */
+				best_locality = LOCALITY_DISTANT;
+				selected_cpu_idx = test_cpu_idx;
+
+				/* try to find the best locality within group */
+				for(cpu_idx = 1; cpu_idx < selected_cpu_cnt; cpu_idx++) {
+					/* if CPU has not been used and locality is better */
+					if (selected_cpus[cpu_idx] > -1) {
+						other_rq = cpu_rq(ordered_cpus[ordered_cpus_idx]);
+						if (best_locality > other_rq->cpu_locality[selected_cpus[cpu_idx]]) {
+							/* assign best locality and best CPU idx in array */
+							best_locality = other_rq->cpu_locality[selected_cpus[cpu_idx]];
+							selected_cpu_idx = cpu_idx;
+						}
+					}
+				}
+
+				/* add our next best CPU to ordered list */
+				ordered_cpus_idx++;
+				ordered_cpus[ordered_cpus_idx] = selected_cpus[selected_cpu_idx];
+				/* mark this CPU as used */
+				selected_cpus[selected_cpu_idx] =  -1;
+			}
+
+			/* set up RQ and CPU orders */
+			for (test_cpu = 0; test_cpu <= ordered_cpus_idx; test_cpu++) {
+				other_rq = cpu_rq(ordered_cpus[test_cpu]);
+				/* set up cpu orders */
+				rq->cpu_order[total_cpus++] = other_rq;
+				if (other_rq->is_leader) {
+					/* set up RQ orders */
+					rq->rq_order[total_rqs++] = other_rq;
+				}
+			}
+		}
+	}
+
+	kfree(selected_cpus);
+	kfree(ordered_cpus);
+
+#ifdef CONFIG_X86
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		for (i = 0; i < total_runqueues; i++) {
+			printk(KERN_DEBUG "MuQSS CPU %d llc %d RQ order %d RQ %d llc %d\n", cpu, per_cpu(cpu_llc_id, cpu), i,
+			       rq->rq_order[i]->cpu, per_cpu(cpu_llc_id, rq->rq_order[i]->cpu));
+		}
+	}
+
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		for (i = 0; i < num_online_cpus(); i++) {
+			printk(KERN_DEBUG "MuQSS CPU %d llc %d CPU order %d RQ %d llc %d\n", cpu, per_cpu(cpu_llc_id, cpu), i,
+			       rq->cpu_order[i]->cpu, per_cpu(cpu_llc_id, rq->cpu_order[i]->cpu));
+		}
+	}
+#endif
+}
+
+void __init sched_init_smp(void)
+{
+	sched_init_numa();
+
+	/*
+	 * There's no userspace yet to cause hotplug operations; hence all the
+	 * cpu masks are stable and all blatant races in the below code cannot
+	 * happen.
+	 */
+	mutex_lock(&sched_domains_mutex);
+	sched_init_domains(cpu_active_mask);
+	mutex_unlock(&sched_domains_mutex);
+
+	/* Move init over to a non-isolated CPU */
+	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
+		BUG();
+
+	local_irq_disable();
+	mutex_lock(&sched_domains_mutex);
+	lock_all_rqs();
+
+	printk(KERN_INFO "MuQSS possible/present/online CPUs: %d/%d/%d\n",
+		num_possible_cpus(), num_present_cpus(), num_online_cpus());
+
+	select_leaders();
+
+	unlock_all_rqs();
+	mutex_unlock(&sched_domains_mutex);
+
+	share_rqs();
+
+	local_irq_enable();
+
+	setup_rq_orders();
+
+	switch (rqshare) {
+		case RQSHARE_ALL:
+			/* This should only ever read 1 */
+			printk(KERN_INFO "MuQSS runqueue share type ALL total runqueues: %d\n",
+			       total_runqueues);
+			break;
+		case RQSHARE_SMP:
+			printk(KERN_INFO "MuQSS runqueue share type SMP total runqueues: %d\n",
+			       total_runqueues);
+			break;
+		case RQSHARE_MC:
+			printk(KERN_INFO "MuQSS runqueue share type MC total runqueues: %d\n",
+			       total_runqueues);
+			break;
+		case RQSHARE_MC_LLC:
+			printk(KERN_INFO "MuQSS runqueue share type LLC total runqueues: %d\n",
+			       total_runqueues);
+			break;
+		case RQSHARE_SMT:
+			printk(KERN_INFO "MuQSS runqueue share type SMT total runqueues: %d\n",
+			       total_runqueues);
+			break;
+		case RQSHARE_NONE:
+			printk(KERN_INFO "MuQSS runqueue share type NONE total runqueues: %d\n",
+			       total_runqueues);
+			break;
+	}
+
+	sched_smp_initialized = true;
+}
+#else
+void __init sched_init_smp(void)
+{
+	sched_smp_initialized = true;
+}
+#endif /* CONFIG_SMP */
+
+int in_sched_functions(unsigned long addr)
+{
+	return in_lock_functions(addr) ||
+		(addr >= (unsigned long)__sched_text_start
+		&& addr < (unsigned long)__sched_text_end);
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+/* task group related information */
+struct task_group {
+	struct cgroup_subsys_state css;
+
+	struct rcu_head rcu;
+	struct list_head list;
+
+	struct task_group *parent;
+	struct list_head siblings;
+	struct list_head children;
+};
+
+/*
+ * Default task group.
+ * Every task in system belongs to this group at bootup.
+ */
+struct task_group root_task_group;
+LIST_HEAD(task_groups);
+
+/* Cacheline aligned slab cache for task_group */
+static struct kmem_cache *task_group_cache __read_mostly;
+#endif /* CONFIG_CGROUP_SCHED */
+
+void __init sched_init(void)
+{
+#ifdef CONFIG_SMP
+	int cpu_ids;
+#endif
+	int i;
+	struct rq *rq;
+
+	wait_bit_init();
+
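+	/*
+	 * prio_ratios[] grows by roughly 10% per nice level (11/10 in integer
+	 * arithmetic), starting from 128 at the top of the nice range.
+	 */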
+	prio_ratios[0] = 128;
+	for (i = 1 ; i < NICE_WIDTH ; i++)
+		prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
+
+	skiplist_node_init(&init_task.node);
+
+#ifdef CONFIG_SMP
+	init_defrootdomain();
+	cpumask_clear(&cpu_idle_map);
+#else
+	uprq = &per_cpu(runqueues, 0);
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+	task_group_cache = KMEM_CACHE(task_group, 0);
+
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
+	INIT_LIST_HEAD(&root_task_group.siblings);
+#endif /* CONFIG_CGROUP_SCHED */
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
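+		/*
+		 * Each CPU initially gets its own skiplist, node and lock;
+		 * share_rqs() may later point several CPUs at a single
+		 * leader's copies.
+		 */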
+		rq->node = kmalloc(sizeof(skiplist_node), GFP_ATOMIC);
+		skiplist_init(rq->node);
+		rq->sl = new_skiplist(rq->node);
+		rq->lock = kmalloc(sizeof(raw_spinlock_t), GFP_ATOMIC);
+		raw_spin_lock_init(rq->lock);
+		rq->nr_running = 0;
+		rq->nr_uninterruptible = 0;
+		rq->nr_switches = 0;
+		rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
+		rq->last_jiffy = jiffies;
+		rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
+			      rq->iowait_ns = rq->idle_ns = 0;
+		rq->dither = 0;
+		set_rq_task(rq, &init_task);
+		rq->iso_ticks = 0;
+		rq->iso_refractory = false;
+#ifdef CONFIG_SMP
+		rq->is_leader = true;
+		rq->smp_leader = NULL;
+#ifdef CONFIG_SCHED_MC
+		rq->mc_leader = NULL;
+#endif
+#ifdef CONFIG_SCHED_SMT
+		rq->smt_leader = NULL;
+#endif
+		rq->sd = NULL;
+		rq->rd = NULL;
+		rq->online = false;
+		rq->cpu = i;
+		rq_attach_root(rq, &def_root_domain);
+#endif
+		init_rq_hrexpiry(rq);
+		atomic_set(&rq->nr_iowait, 0);
+	}
+
+#ifdef CONFIG_SMP
+	cpu_ids = i;
+	/*
+	 * Set the base locality for cpu cache distance calculation to
+	 * "distant" (3). Make sure the distance from a CPU to itself is 0.
+	 */
+	for_each_possible_cpu(i) {
+		int j;
+
+		rq = cpu_rq(i);
+#ifdef CONFIG_SCHED_SMT
+		rq->siblings_idle = sole_cpu_idle;
+#endif
+#ifdef CONFIG_SCHED_MC
+		rq->cache_idle = sole_cpu_idle;
+#endif
+		rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC);
+		for_each_possible_cpu(j) {
+			if (i == j)
+				rq->cpu_locality[j] = LOCALITY_SAME;
+			else
+				rq->cpu_locality[j] = LOCALITY_DISTANT;
+		}
+		rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
+		rq->cpu_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
+		rq->rq_order[0] = rq->cpu_order[0] = rq;
+		for (j = 1; j < cpu_ids; j++)
+			rq->rq_order[j] = rq->cpu_order[j] = cpu_rq(j);
+	}
+#endif
+
+	/*
+	 * The boot idle thread does lazy MMU switching as well:
+	 */
+	mmgrab(&init_mm);
+	enter_lazy_tlb(&init_mm, current);
+
+	/*
+	 * Make us the idle thread. Technically, schedule() should not be
+	 * called from this thread; however, somewhere below it might be.
+	 * Because we are the idle thread, we just pick up running again
+	 * when this runqueue becomes "idle".
+	 */
+	init_idle(current, smp_processor_id());
+
+#ifdef CONFIG_SMP
+	idle_thread_set_boot_cpu();
+#endif /* SMP */
+
+	init_schedstats();
+
+	psi_init();
+}
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+static inline int preempt_count_equals(int preempt_offset)
+{
+	int nested = preempt_count() + rcu_preempt_depth();
+
+	return (nested == preempt_offset);
+}
+
+void __might_sleep(const char *file, int line, int preempt_offset)
+{
+	/*
+	 * Blocking primitives will set (and therefore destroy) current->state;
+	 * since we will exit with TASK_RUNNING, make sure we enter with it,
+	 * otherwise we will destroy state.
+	 */
+	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+			"do not call blocking ops when !TASK_RUNNING; "
+			"state=%lx set at [<%p>] %pS\n",
+			current->state,
+			(void *)current->task_state_change,
+			(void *)current->task_state_change);
+
+	___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void __cant_sleep(const char *file, int line, int preempt_offset)
+{
+	static unsigned long prev_jiffy;
+
+	if (irqs_disabled())
+		return;
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return;
+
+	if (preempt_count() > preempt_offset)
+		return;
+
+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+		return;
+	prev_jiffy = jiffies;
+
+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+			in_atomic(), irqs_disabled(),
+			current->pid, current->comm);
+
+	debug_show_held_locks(current);
+	dump_stack();
+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
+{
+	/* Ratelimiting timestamp: */
+	static unsigned long prev_jiffy;
+
+	unsigned long preempt_disable_ip;
+
+	/* WARN_ON_ONCE() by default, no rate limit required: */
+	rcu_sleep_check();
+
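+	/*
+	 * Bail out silently when the context is actually allowed to sleep
+	 * (no unexpected preempt count, IRQs enabled, not the idle task, not
+	 * in a non-block section) or when the system is booting, shutting
+	 * down or oopsing.
+	 */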
+	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+	     !is_idle_task(current) && !current->non_block_count) ||
+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+	    oops_in_progress)
+		return;
+
+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+		return;
+	prev_jiffy = jiffies;
+
+	/* Save this before calling printk(), since that will clobber it: */
+	preempt_disable_ip = get_preempt_disable_ip(current);
+
+	printk(KERN_ERR
+		"BUG: sleeping function called from invalid context at %s:%d\n",
+			file, line);
+	printk(KERN_ERR
+		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+			in_atomic(), irqs_disabled(), current->non_block_count,
+			current->pid, current->comm);
+
+	if (task_stack_end_corrupted(current))
+		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
+	debug_show_held_locks(current);
+	if (irqs_disabled())
+		print_irqtrace_events(current);
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+	    && !preempt_count_equals(preempt_offset)) {
+		pr_err("Preemption disabled at:");
+		print_ip_sym(preempt_disable_ip);
+		pr_cont("\n");
+	}
+	dump_stack();
+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL(___might_sleep);
+#endif
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static inline void normalise_rt_tasks(void)
+{
+	struct sched_attr attr = {};
+	struct task_struct *g, *p;
+	struct rq_flags rf;
+	struct rq *rq;
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, p) {
+		/*
+		 * Only normalize user tasks:
+		 */
+		if (p->flags & PF_KTHREAD)
+			continue;
+
+		if (!rt_task(p) && !iso_task(p))
+			continue;
+
+		rq = task_rq_lock(p, &rf);
+		__setscheduler(p, rq, SCHED_NORMAL, 0, &attr, false);
+		task_rq_unlock(rq, p, &rf);
+	}
+	read_unlock(&tasklist_lock);
+}
+
+void normalize_rt_tasks(void)
+{
+	normalise_rt_tasks();
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
+/*
+ * These functions are only useful for the IA64 MCA handling, or kdb.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given CPU.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
+ */
+struct task_struct *curr_task(int cpu)
+{
+	return cpu_curr(cpu);
+}
+
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
+/**
+ * ia64_set_curr_task - set the current task for a given CPU.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack.  It allows the architecture to switch the
+ * notion of the current task on a CPU in a non-blocking manner.  This function
+ * must be called with all CPUs synchronised and interrupts disabled; the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before re-enabling interrupts and
+ * restarting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void ia64_set_curr_task(int cpu, struct task_struct *p)
+{
+	cpu_curr(cpu) = p;
+}
+
+#endif
+
+void init_idle_bootup_task(struct task_struct *idle)
+{}
+
+#ifdef CONFIG_SCHED_DEBUG
+__read_mostly bool sched_debug_enabled;
+
+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+			  struct seq_file *m)
+{
+	seq_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
+		   get_nr_threads(p));
+}
+
+void proc_sched_set_task(struct task_struct *p)
+{}
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+static void sched_free_group(struct task_group *tg)
+{
+	kmem_cache_free(task_group_cache, tg);
+}
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(struct task_group *parent)
+{
+	struct task_group *tg;
+
+	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
+	if (!tg)
+		return ERR_PTR(-ENOMEM);
+
+	return tg;
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+}
+
+/* rcu callback to free various structures associated with a task group */
+static void sched_free_group_rcu(struct rcu_head *rhp)
+{
+	/* Now it should be safe to free those cfs_rqs */
+	sched_free_group(container_of(rhp, struct task_group, rcu));
+}
+
+void sched_destroy_group(struct task_group *tg)
+{
+	/* Wait for possible concurrent references to cfs_rqs to complete */
+	call_rcu(&tg->rcu, sched_free_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
+{
+}
+
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+	return css ? container_of(css, struct task_group, css) : NULL;
+}
+
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+	struct task_group *parent = css_tg(parent_css);
+	struct task_group *tg;
+
+	if (!parent) {
+		/* This is early initialization for the top cgroup */
+		return &root_task_group.css;
+	}
+
+	tg = sched_create_group(parent);
+	if (IS_ERR(tg))
+		return ERR_PTR(-ENOMEM);
+	return &tg->css;
+}
+
+/* Expose task group only after completing cgroup initialization */
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css->parent);
+
+	if (parent)
+		sched_online_group(tg, parent);
+	return 0;
+}
+
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+	struct task_group *tg = css_tg(css);
+
+	sched_offline_group(tg);
+}
+
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+{
+	struct task_group *tg = css_tg(css);
+
+	/*
+	 * Relies on the RCU grace period between css_released() and this.
+	 */
+	sched_free_group(tg);
+}
+
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+	return 0;
+}
+
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+{
+}
+
+static struct cftype cpu_legacy_files[] = {
+	{ }	/* Terminate */
+};
+
+static struct cftype cpu_files[] = {
+	{ }	/* terminate */
+};
+
+static int cpu_extra_stat_show(struct seq_file *sf,
+			       struct cgroup_subsys_state *css)
+{
+	return 0;
+}
+
+struct cgroup_subsys cpu_cgrp_subsys = {
+	.css_alloc	= cpu_cgroup_css_alloc,
+	.css_online	= cpu_cgroup_css_online,
+	.css_released	= cpu_cgroup_css_released,
+	.css_free	= cpu_cgroup_css_free,
+	.css_extra_stat_show = cpu_extra_stat_show,
+	.fork		= cpu_cgroup_fork,
+	.can_attach	= cpu_cgroup_can_attach,
+	.attach		= cpu_cgroup_attach,
+	.legacy_cftypes	= cpu_legacy_files,
+	.dfl_cftypes	= cpu_files,
+	.early_init	= true,
+	.threaded	= true,
+};
+#endif	/* CONFIG_CGROUP_SCHED */
+
+#undef CREATE_TRACE_POINTS
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
new file mode 100644
index 00000000000..b34f2797e44
--- /dev/null
+++ b/kernel/sched/MuQSS.h
@@ -0,0 +1,1051 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MUQSS_SCHED_H
+#define MUQSS_SCHED_H
+
+#include <linux/sched/clock.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/cputime.h>
+#include <linux/sched/deadline.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/init.h>
+#include <linux/sched/isolation.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/nohz.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
+#include <linux/sched/stat.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/topology.h>
+#include <linux/sched/wake_q.h>
+
+#include <uapi/linux/sched/types.h>
+
+#include <linux/cgroup.h>
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
+#include <linux/cpuset.h>
+#include <linux/ctype.h>
+#include <linux/energy_model.h>
+#include <linux/freezer.h>
+#include <linux/kernel_stat.h>
+#include <linux/kthread.h>
+#include <linux/membarrier.h>
+#include <linux/livepatch.h>
+#include <linux/proc_fs.h>
+#include <linux/psi.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/skip_list.h>
+#include <linux/stop_machine.h>
+#include <linux/suspend.h>
+#include <linux/swait.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+#include <linux/tsacct_kern.h>
+#include <linux/u64_stats_sync.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
+
+#include "cpupri.h"
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
+#else
+# define SCHED_WARN_ON(x)	((void)(x))
+#endif
+
+/*
+ * wake flags
+ */
+#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
+#define WF_FORK		0x02		/* child wakeup after fork */
+#define WF_MIGRATED	0x04		/* internal use, task got migrated */
+
+/* task_struct::on_rq states: */
+#define TASK_ON_RQ_QUEUED	1
+#define TASK_ON_RQ_MIGRATING	2
+
+struct rq;
+
+#ifdef CONFIG_SMP
+
+static inline bool sched_asym_prefer(int a, int b)
+{
+	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
+struct perf_domain {
+	struct em_perf_domain *em_pd;
+	struct perf_domain *next;
+	struct rcu_head rcu;
+};
+
+/* Scheduling group status flags */
+#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
+#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+	atomic_t refcount;
+	atomic_t rto_count;
+	struct rcu_head rcu;
+	cpumask_var_t span;
+	cpumask_var_t online;
+
+	/*
+	 * Indicate pullable load on at least one CPU, e.g:
+	 * - More than one runnable task
+	 * - Running task is misfit
+	 */
+	int			overload;
+
+	/* Indicate one or more cpus over-utilized (tipping point) */
+	int			overutilized;
+
+	/*
+	 * The bit corresponding to a CPU gets set here if such CPU has more
+	 * than one runnable -deadline task (as it is below for RT tasks).
+	 */
+	cpumask_var_t dlo_mask;
+	atomic_t dlo_count;
+	/* Replace unused CFS structures with void */
+	//struct dl_bw dl_bw;
+	//struct cpudl cpudl;
+	void *dl_bw;
+	void *cpudl;
+
+	/*
+	 * The "RT overload" flag: it gets set if a CPU has more than
+	 * one runnable RT task.
+	 */
+	cpumask_var_t rto_mask;
+	//struct cpupri cpupri;
+	void *cpupri;
+
+	unsigned long max_cpu_capacity;
+
+	/*
+	 * NULL-terminated list of performance domains intersecting with the
+	 * CPUs of the rd. Protected by RCU.
+	 */
+	struct perf_domain	*pd;
+};
+
+extern void init_defrootdomain(void);
+extern int sched_init_domains(const struct cpumask *cpu_map);
+extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
+
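+/*
+ * cpupri, cpudl and dl_bw are unused under MuQSS (see root_domain above);
+ * these no-op stubs presumably keep the shared root-domain setup code
+ * building unchanged.
+ */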
+static inline void cpupri_cleanup(void __maybe_unused *cpupri)
+{
+}
+
+static inline void cpudl_cleanup(void __maybe_unused *cpudl)
+{
+}
+
+static inline void init_dl_bw(void __maybe_unused *dl_bw)
+{
+}
+
+static inline int cpudl_init(void __maybe_unused *dl_bw)
+{
+	return 0;
+}
+
+static inline int cpupri_init(void __maybe_unused *cpupri)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ * This data should only be modified by the local cpu.
+ */
+struct rq {
+	raw_spinlock_t *lock;
+	raw_spinlock_t *orig_lock;
+
+	struct task_struct __rcu	*curr;
+	struct task_struct	*idle;
+	struct task_struct	*stop;
+	struct mm_struct *prev_mm;
+
+	unsigned int nr_running;
+	/*
+	 * This is part of a global counter where only the total sum
+	 * over all CPUs matters. A task can increase this counter on
+	 * one CPU and if it got migrated afterwards it may decrease
+	 * it on another CPU. Always updated under the runqueue lock:
+	 */
+	unsigned long nr_uninterruptible;
+	u64 nr_switches;
+
+	/* Stored data about rq->curr to work outside rq lock */
+	u64 rq_deadline;
+	int rq_prio;
+
+	/* Best queued id for use outside lock */
+	u64 best_key;
+
+	unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
+	unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
+	u64 niffies; /* Last time this RQ updated rq clock */
+	u64 last_niffy; /* Last niffies as updated by local clock */
+	u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
+
+	u64 load_update; /* When we last updated load */
+	unsigned long load_avg; /* Rolling load average */
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+	u64 irq_load_update; /* When we last updated IRQ load */
+	unsigned long irq_load_avg; /* Rolling IRQ load average */
+#endif
+#ifdef CONFIG_SMT_NICE
+	struct mm_struct *rq_mm;
+	int rq_smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+	/* Accurate timekeeping data */
+	unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
+		iowait_ns, idle_ns;
+	atomic_t nr_iowait;
+
+#ifdef CONFIG_MEMBARRIER
+	int membarrier_state;
+#endif
+
+	skiplist_node *node;
+	skiplist *sl;
+#ifdef CONFIG_SMP
+	struct task_struct *preempt; /* Preempt triggered on this task */
+	struct task_struct *preempting; /* Hint only, what task is preempting */
+
+	int cpu;		/* cpu of this runqueue */
+	bool online;
+
+	struct root_domain *rd;
+	struct sched_domain *sd;
+
+	unsigned long cpu_capacity_orig;
+
+	int *cpu_locality; /* CPU relative cache distance */
+	struct rq **rq_order; /* Shared RQs ordered by relative cache distance */
+	struct rq **cpu_order; /* RQs of discrete CPUs ordered by distance */
+
+	bool is_leader;
+	struct rq *smp_leader; /* First physical CPU per node */
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+	struct sched_avg	avg_thermal;
+#endif /* CONFIG_SCHED_THERMAL_PRESSURE */
+#ifdef CONFIG_SCHED_SMT
+	struct rq *smt_leader; /* First logical CPU in SMT siblings */
+	cpumask_t thread_mask;
+	bool (*siblings_idle)(struct rq *rq);
+	/* See if all smt siblings are idle */
+#endif /* CONFIG_SCHED_SMT */
+#ifdef CONFIG_SCHED_MC
+	struct rq *mc_leader; /* First logical CPU in MC siblings */
+	cpumask_t core_mask;
+	bool (*cache_idle)(struct rq *rq);
+	/* See if all cache siblings are idle */
+#endif /* CONFIG_SCHED_MC */
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#ifdef CONFIG_PARAVIRT
+	u64 prev_steal_time;
+#endif /* CONFIG_PARAVIRT */
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	u64 prev_steal_time_rq;
+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
+
+	u64 clock, old_clock, last_tick;
+	/* Ensure that all clocks are in the same cache line */
+	u64 clock_task ____cacheline_aligned;
+	int dither;
+
+	int iso_ticks;
+	bool iso_refractory;
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+	struct hrtimer hrexpiry_timer;
+#endif
+
+	int rt_nr_running; /* Number real time tasks running */
+#ifdef CONFIG_SCHEDSTATS
+
+	/* latency stats */
+	struct sched_info rq_sched_info;
+	unsigned long long rq_cpu_time;
+	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+	/* sys_sched_yield() stats */
+	unsigned int yld_count;
+
+	/* schedule() stats */
+	unsigned int sched_switch;
+	unsigned int sched_count;
+	unsigned int sched_goidle;
+
+	/* try_to_wake_up() stats */
+	unsigned int ttwu_count;
+	unsigned int ttwu_local;
+#endif /* CONFIG_SCHEDSTATS */
+
+#ifdef CONFIG_SMP
+	struct llist_head wake_list;
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+	/* Must be inspected within a rcu lock section */
+	struct cpuidle_state *idle_state;
+#endif
+};
+
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return READ_ONCE(rq->clock);
+}
+
+static inline u64 rq_clock(struct rq *rq)
+{
+	lockdep_assert_held(rq->lock);
+
+	return rq->clock;
+}
+
+static inline u64 rq_clock_task(struct rq *rq)
+{
+	lockdep_assert_held(rq->lock);
+
+	return rq->clock_task;
+}
+
+/**
+ * By default the decay is the default pelt decay period.
+ * The decay shift can change the decay period in
+ * multiples of 32.
+ *  Decay shift		Decay period(ms)
+ *	0			32
+ *	1			64
+ *	2			128
+ *	3			256
+ *	4			512
+ */
+extern int sched_thermal_decay_shift;
+
+static inline u64 rq_clock_thermal(struct rq *rq)
+{
+	return rq_clock_task(rq) >> sched_thermal_decay_shift;
+}
+
+struct rq_flags {
+	unsigned long flags;
+};
+
+#ifdef CONFIG_SMP
+struct rq *cpu_rq(int cpu);
+#endif
+
+#ifndef CONFIG_SMP
+extern struct rq *uprq;
+#define cpu_rq(cpu)	(uprq)
+#define this_rq()	(uprq)
+#define raw_rq()	(uprq)
+#define task_rq(p)	(uprq)
+#define cpu_curr(cpu)	((uprq)->curr)
+#else /* CONFIG_SMP */
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#define this_rq()		this_cpu_ptr(&runqueues)
+#define raw_rq()		raw_cpu_ptr(&runqueues)
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#endif /* CONFIG_SMP */
+
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+	return rq->curr == p;
+}
+
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+	return p->on_cpu;
+#else
+	return task_current(rq, p);
+#endif
+}
+
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+	return p->on_rq == TASK_ON_RQ_QUEUED;
+}
+
+static inline int task_on_rq_migrating(struct task_struct *p)
+{
+	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
+}
+
+static inline void rq_lock(struct rq *rq)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(rq->lock);
+}
+
+static inline void rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(rq->lock);
+}
+
+static inline void rq_lock_irq(struct rq *rq)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(rq->lock);
+}
+
+static inline void rq_unlock_irq(struct rq *rq, struct rq_flags __always_unused *rf)
+	__releases(rq->lock)
+{
+	raw_spin_unlock_irq(rq->lock);
+}
+
+static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(rq->lock, rf->flags);
+}
+
+static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	raw_spin_unlock_irqrestore(rq->lock, rf->flags);
+}
+
+static inline struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
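+	/*
+	 * Retry until the runqueue we locked is still the task's runqueue;
+	 * the task may migrate between the task_rq() lookup and taking the
+	 * lock.
+	 */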
+	while (42) {
+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
+		rq = task_rq(p);
+		raw_spin_lock(rq->lock);
+		if (likely(rq == task_rq(p)))
+			break;
+		raw_spin_unlock(rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+	}
+	return rq;
+}
+
+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	rq_unlock(rq);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags __always_unused *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	while (42) {
+		rq = task_rq(p);
+		raw_spin_lock(rq->lock);
+		if (likely(rq == task_rq(p)))
+			break;
+		raw_spin_unlock(rq->lock);
+	}
+	return rq;
+}
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags __always_unused *rf)
+{
+	rq_unlock(rq);
+}
+
+static inline struct rq *
+this_rq_lock_irq(struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	rq_lock(rq);
+	return rq;
+}
+
+/*
+ * {de,en}queue flags: Most not used on MuQSS.
+ *
+ * DEQUEUE_SLEEP  - task is no longer runnable
+ * ENQUEUE_WAKEUP - task just became runnable
+ *
+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
+ *                are in a known state which allows modification. Such pairs
+ *                should preserve as much state as possible.
+ *
+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
+ *        in the runqueue.
+ *
+ * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
+ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
+ * ENQUEUE_MIGRATED  - the task was migrated during wakeup
+ *
+ */
+
+#define DEQUEUE_SLEEP		0x01
+#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
+
+#define ENQUEUE_WAKEUP		0x01
+#define ENQUEUE_RESTORE		0x02
+
+#ifdef CONFIG_SMP
+#define ENQUEUE_MIGRATED	0x40
+#else
+#define ENQUEUE_MIGRATED	0x00
+#endif
+
+#ifdef CONFIG_NUMA
+enum numa_topology_type {
+	NUMA_DIRECT,
+	NUMA_GLUELESS_MESH,
+	NUMA_BACKPLANE,
+};
+extern enum numa_topology_type sched_numa_topology_type;
+extern int sched_max_numa_distance;
+extern bool find_numa_distance(int distance);
+extern void sched_init_numa(void);
+extern void sched_domains_numa_masks_set(unsigned int cpu);
+extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
+#else
+static inline void sched_init_numa(void) { }
+static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
+static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+	return nr_cpu_ids;
+}
+#endif
+
+extern struct mutex sched_domains_mutex;
+extern struct static_key_false sched_schedstats;
+
+#define rcu_dereference_check_sched_domain(p) \
+	rcu_dereference_check((p), \
+			      lockdep_is_held(&sched_domains_mutex))
+
+#ifdef CONFIG_SMP
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See destroy_sched_domains: call_rcu for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, __sd) \
+	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
+			__sd; __sd = __sd->parent)
+
+/**
+ * highest_flag_domain - Return highest sched_domain containing flag.
+ * @cpu:	The cpu whose highest level of sched domain is to
+ *		be returned.
+ * @flag:	The flag to check for the highest sched_domain
+ *		for the given cpu.
+ *
+ * Returns the highest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd, *hsd = NULL;
+
+	for_each_domain(cpu, sd) {
+		if (!(sd->flags & flag))
+			break;
+		hsd = sd;
+	}
+
+	return hsd;
+}
+
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd;
+
+	for_each_domain(cpu, sd) {
+		if (sd->flags & flag)
+			break;
+	}
+
+	return sd;
+}
+
+DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(int, sd_llc_size);
+DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+
+struct sched_group_capacity {
+	atomic_t ref;
+	/*
+	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
+	 * for a single CPU.
+	 */
+	unsigned long		capacity;
+	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
+	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
+	unsigned long		next_update;
+	int			imbalance;		/* XXX unrelated to capacity but shared group state */
+
+#ifdef CONFIG_SCHED_DEBUG
+	int id;
+#endif
+
+	unsigned long cpumask[0]; /* balance mask */
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
+	unsigned int group_weight;
+	struct sched_group_capacity *sgc;
+	int asym_prefer_cpu;		/* cpu of highest priority in group */
+
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 */
+	unsigned long cpumask[0];
+};
+
+static inline struct cpumask *sched_group_span(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
+/*
+ * See build_balance_mask().
+ */
+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgc->cpumask);
+}
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_span(group));
+}
+
+
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+void register_sched_domain_sysctl(void);
+void dirty_sched_domain_sysctl(int cpu);
+void unregister_sched_domain_sysctl(void);
+#else
+static inline void register_sched_domain_sysctl(void)
+{
+}
+static inline void dirty_sched_domain_sysctl(int cpu)
+{
+}
+static inline void unregister_sched_domain_sysctl(void)
+{
+}
+#endif
+
+extern void sched_ttwu_pending(void);
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+extern void set_rq_online(struct rq *rq);
+extern void set_rq_offline(struct rq *rq);
+extern bool sched_smp_initialized;
+
+static inline void update_group_capacity(struct sched_domain *sd, int cpu)
+{
+}
+
+static inline void trigger_load_balance(struct rq *rq)
+{
+}
+
+#define sched_feat(x) 0
+
+#else /* CONFIG_SMP */
+
+static inline void sched_ttwu_pending(void) { }
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_CPU_IDLE
+static inline void idle_set_state(struct rq *rq,
+				  struct cpuidle_state *idle_state)
+{
+	rq->idle_state = idle_state;
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+	SCHED_WARN_ON(!rcu_read_lock_held());
+	return rq->idle_state;
+}
+#else
+static inline void idle_set_state(struct rq *rq,
+				  struct cpuidle_state *idle_state)
+{
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_SCHED_DEBUG
+extern bool sched_debug_enabled;
+#endif
+
+extern void schedule_idle(void);
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+	u64			total;
+	u64			tick_delta;
+	u64			irq_start_time;
+	struct u64_stats_sync	sync;
+};
+
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
+
+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
+ * subtracted and would never move forward.
+ */
+static inline u64 irq_time_read(int cpu)
+{
+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+	unsigned int seq;
+	u64 total;
+
+	do {
+		seq = __u64_stats_fetch_begin(&irqtime->sync);
+		total = irqtime->total;
+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
+
+	return total;
+}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
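
For completeness, a sketch of the matching writer side (not shown in this hunk): per-CPU irqtime updates are published under u64_stats_update_begin()/u64_stats_update_end() so that the 32-bit retry loop in irq_time_read() above observes a consistent 64-bit total. The helper name and the delta argument are illustrative.

    static inline void irqtime_add_delta(struct irqtime *irqtime, u64 delta)
    {
        u64_stats_update_begin(&irqtime->sync);
        irqtime->total += delta;
        irqtime->tick_delta += delta;
        u64_stats_update_end(&irqtime->sync);
    }
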
+
+static inline bool sched_stop_runnable(struct rq *rq)
+{
+	return rq->stop && task_on_rq_queued(rq->stop);
+}
+
+#ifdef CONFIG_SMP
+static inline int cpu_of(struct rq *rq)
+{
+	return rq->cpu;
+}
+#else /* CONFIG_SMP */
+static inline int cpu_of(struct rq *rq)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+
+static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
+{
+	struct update_util_data *data;
+
+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+						  cpu_of(rq)));
+
+	if (data)
+		data->func(data, rq->niffies, flags);
+}
+#else
+static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
+{
+}
+#endif /* CONFIG_CPU_FREQ */
+
+static __always_inline
+unsigned int uclamp_rq_util_with(struct rq __maybe_unused *rq, unsigned int util,
+			      struct task_struct __maybe_unused *p)
+{
+	return util;
+}
+
+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant()	(true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant()	(false)
+#endif
+
+#ifdef CONFIG_64BIT
+static inline u64 read_sum_exec_runtime(struct task_struct *t)
+{
+	return tsk_seruntime(t);
+}
+#else
+static inline u64 read_sum_exec_runtime(struct task_struct *t)
+{
+	struct rq_flags rf;
+	u64 ns;
+	struct rq *rq;
+
+	rq = task_rq_lock(t, &rf);
+	ns = tsk_seruntime(t);
+	task_rq_unlock(rq, t, &rf);
+
+	return ns;
+}
+#endif
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+extern bool sched_can_stop_tick(struct rq *rq);
+extern int __init sched_tick_offload_init(void);
+
+/*
+ * The tick may be needed by tasks in the runqueue depending on their policy
+ * and requirements. If the tick is needed, send the target CPU an IPI to kick
+ * it out of nohz mode if necessary.
+ */
+static inline void sched_update_tick_dependency(struct rq *rq)
+{
+	int cpu;
+
+	if (!tick_nohz_full_enabled())
+		return;
+
+	cpu = cpu_of(rq);
+
+	if (!tick_nohz_full_cpu(cpu))
+		return;
+
+	if (sched_can_stop_tick(rq))
+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+	else
+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+#else
+static inline int sched_tick_offload_init(void) { return 0; }
+static inline void sched_update_tick_dependency(struct rq *rq) { }
+#endif
+
+#define SCHED_FLAG_SUGOV	0x10000000
+
+static inline bool rt_rq_is_runnable(struct rq *rt_rq)
+{
+	return rt_rq->rt_nr_running;
+}
+
+/**
+ * enum schedutil_type - CPU utilization type
+ * @FREQUENCY_UTIL:	Utilization used to select frequency
+ * @ENERGY_UTIL:	Utilization used during energy calculation
+ *
+ * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
+ * need to be aggregated differently depending on the usage made of them. This
+ * enum is used within schedutil_freq_util() to differentiate the types of
+ * utilization expected by the callers, and adjust the aggregation accordingly.
+ */
+enum schedutil_type {
+	FREQUENCY_UTIL,
+	ENERGY_UTIL,
+};
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+
+unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
+				 unsigned long max, enum schedutil_type type,
+				 struct task_struct *p);
+
+static inline unsigned long cpu_bw_dl(struct rq *rq)
+{
+	return 0;
+}
+
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+	return 0;
+}
+
+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+	unsigned long ret = READ_ONCE(rq->load_avg);
+
+	if (ret > SCHED_CAPACITY_SCALE)
+		ret = SCHED_CAPACITY_SCALE;
+	return ret;
+}
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+	unsigned long ret = READ_ONCE(rq->rt_nr_running);
+
+	if (ret > SCHED_CAPACITY_SCALE)
+		ret = SCHED_CAPACITY_SCALE;
+	return ret;
+}
+
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+	unsigned long ret = READ_ONCE(rq->irq_load_avg);
+
+	if (ret > SCHED_CAPACITY_SCALE)
+		ret = SCHED_CAPACITY_SCALE;
+	return ret;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	util *= (max - irq);
+	util /= max;
+
+	return util;
+
+}
+#else
+static inline unsigned long cpu_util_irq(struct rq *rq)
+{
+	return 0;
+}
+
+static inline
+unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
+{
+	return util;
+}
+#endif
+#endif
+
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
+
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
+
+static inline bool sched_energy_enabled(void)
+{
+	return static_branch_unlikely(&sched_energy_present);
+}
+
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+
+#define perf_domain_span(pd) NULL
+static inline bool sched_energy_enabled(void) { return false; }
+
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+
+#ifdef CONFIG_MEMBARRIER
+/*
+ * The scheduler provides memory barriers required by membarrier between:
+ * - prior user-space memory accesses and store to rq->membarrier_state,
+ * - store to rq->membarrier_state and following user-space memory accesses.
+ * In the same way it provides those guarantees around store to rq->curr.
+ */
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+	int membarrier_state;
+
+	if (prev_mm == next_mm)
+		return;
+
+	membarrier_state = atomic_read(&next_mm->membarrier_state);
+	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
+		return;
+
+	WRITE_ONCE(rq->membarrier_state, membarrier_state);
+}
+#else
+static inline void membarrier_switch_mm(struct rq *rq,
+					struct mm_struct *prev_mm,
+					struct mm_struct *next_mm)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+#endif
+
+void swake_up_all_locked(struct swait_queue_head *q);
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* pelt.h compatibility stubs: CONFIG_SCHED_THERMAL_PRESSURE is not possible with MuQSS */
+static inline int
+update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+{
+	return 0;
+}
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return 0;
+}
+
+#endif /* MUQSS_SCHED_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a7ef76a6269..620780354ff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -57,7 +57,11 @@ const_debug unsigned int sysctl_sched_features =
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+const_debug unsigned int sysctl_sched_nr_migrate = 128;
+#else
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#endif
 
 /*
  * period over which we measure -rt task CPU usage in us.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 7fbaee24c82..15d274af9b1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -183,6 +183,12 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
+#ifdef CONFIG_SCHED_MUQSS
+#define rt_rq_runnable(rq) rt_rq_is_runnable(rq)
+#else
+#define rt_rq_runnable(rq) rt_rq_is_runnable(&(rq)->rt)
+#endif
+
 /*
  * This function computes an effective utilization for the given CPU, to be
  * used for frequency selection given the linear relation: f = u * f_max.
@@ -211,7 +217,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 	struct rq *rq = cpu_rq(cpu);
 
 	if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
-	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
+	    type == FREQUENCY_UTIL && rt_rq_runnable(rq)) {
 		return max;
 	}
 
@@ -656,7 +662,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 	struct task_struct *thread;
 	struct sched_attr attr = {
 		.size		= sizeof(struct sched_attr),
+#ifdef CONFIG_SCHED_MUQSS
+		.sched_policy	= SCHED_RR,
+#else
 		.sched_policy	= SCHED_DEADLINE,
+#endif
 		.sched_flags	= SCHED_FLAG_SUGOV,
 		.sched_nice	= 0,
 		.sched_priority	= 0,
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index efbb492bb94..f0288c32ab1 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -17,6 +17,7 @@ struct cpupri {
 	int			*cpu_to_pri;
 };
 
+#ifndef CONFIG_SCHED_MUQSS
 #ifdef CONFIG_SMP
 int  cpupri_find(struct cpupri *cp, struct task_struct *p,
 		 struct cpumask *lowest_mask);
@@ -27,3 +28,4 @@ void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int  cpupri_init(struct cpupri *cp);
 void cpupri_cleanup(struct cpupri *cp);
 #endif
+#endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ff9435dee1d..d7bd67204d6 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -266,26 +266,6 @@ static inline u64 account_other_time(u64 max)
 	return accounted;
 }
 
-#ifdef CONFIG_64BIT
-static inline u64 read_sum_exec_runtime(struct task_struct *t)
-{
-	return t->se.sum_exec_runtime;
-}
-#else
-static u64 read_sum_exec_runtime(struct task_struct *t)
-{
-	u64 ns;
-	struct rq_flags rf;
-	struct rq *rq;
-
-	rq = task_rq_lock(t, &rf);
-	ns = t->se.sum_exec_runtime;
-	task_rq_unlock(rq, t, &rf);
-
-	return ns;
-}
-#endif
-
 /*
  * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
  * tasks (sum on group iteration) belonging to @tsk's group.
@@ -658,7 +638,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
 	struct task_cputime cputime = {
-		.sum_exec_runtime = p->se.sum_exec_runtime,
+		.sum_exec_runtime = tsk_seruntime(p),
 	};
 
 	task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5c31875a7d9..49745ab7d1b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+unsigned int sysctl_sched_latency			= 4000000ULL;
+static unsigned int normalized_sysctl_sched_latency	= 4000000ULL;
+#else
 unsigned int sysctl_sched_latency			= 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
+#endif
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+unsigned int sysctl_sched_min_granularity			= 400000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity	= 400000ULL;
+#else
 unsigned int sysctl_sched_min_granularity			= 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
+#endif
 
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+static unsigned int sched_nr_latency = 10;
+#else
 static unsigned int sched_nr_latency = 8;
+#endif
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+unsigned int sysctl_sched_wakeup_granularity			= 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity	= 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost	= 250000UL;
+#else
 unsigned int sysctl_sched_wakeup_granularity			= 1000000UL;
 static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
+#endif
 
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+unsigned int sysctl_sched_cfs_bandwidth_slice		= 3000UL;
+#else
 unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
 #endif
+#endif
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index b743bf38f08..c769795d726 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -361,6 +361,7 @@ void cpu_startup_entry(enum cpuhp_state state)
 		do_idle();
 }
 
+#ifndef CONFIG_SCHED_MUQSS
 /*
  * idle-task scheduling class.
  */
@@ -481,3 +482,4 @@ const struct sched_class idle_sched_class = {
 	.switched_to		= switched_to_idle,
 	.update_curr		= update_curr_idle,
 };
+#endif /* CONFIG_SCHED_MUQSS */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f58677a8f2..68ea6e09b59 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,19 @@
 /*
  * Scheduler internal types and methods:
  */
+#ifdef CONFIG_SCHED_MUQSS
+#include "MuQSS.h"
+
+/* Begin compatibility wrappers for MuQSS/CFS differences */
+#define rq_rt_nr_running(rq) ((rq)->rt_nr_running)
+#define rq_h_nr_running(rq) ((rq)->nr_running)
+
+#else /* CONFIG_SCHED_MUQSS */
+
+#define rq_rt_nr_running(rq) ((rq)->rt.rt_nr_running)
+#define rq_h_nr_running(rq) ((rq)->cfs.h_nr_running)
+
+
 #include <linux/sched.h>
 
 #include <linux/sched/autogroup.h>
@@ -2548,3 +2561,25 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
 
 void swake_up_all_locked(struct swait_queue_head *q);
 void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* MuQSS compatibility functions */
+#ifdef CONFIG_64BIT
+static inline u64 read_sum_exec_runtime(struct task_struct *t)
+{
+	return t->se.sum_exec_runtime;
+}
+#else
+static inline u64 read_sum_exec_runtime(struct task_struct *t)
+{
+	u64 ns;
+	struct rq_flags rf;
+	struct rq *rq;
+
+	rq = task_rq_lock(t, &rf);
+	ns = t->se.sum_exec_runtime;
+	task_rq_unlock(rq, t, &rf);
+
+	return ns;
+}
+#endif
+#endif /* CONFIG_SCHED_MUQSS */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8344757bba6..d819af35a77 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -450,7 +450,11 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
+#ifdef CONFIG_SCHED_MUQSS
+	raw_spin_lock_irqsave(rq->lock, flags);
+#else
 	raw_spin_lock_irqsave(&rq->lock, flags);
+#endif
 
 	if (rq->rd) {
 		old_rd = rq->rd;
@@ -476,7 +480,11 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
+#ifdef CONFIG_SCHED_MUQSS
+	raw_spin_unlock_irqrestore(rq->lock, flags);
+#else
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
+#endif
 
 	if (old_rd)
 		call_rcu(&old_rd->rcu, free_rootdomain);
diff --git a/kernel/skip_list.c b/kernel/skip_list.c
new file mode 100644
index 00000000000..bf5c6e97e13
--- /dev/null
+++ b/kernel/skip_list.c
@@ -0,0 +1,148 @@
+/*
+  Copyright (C) 2011,2016 Con Kolivas.
+
+  Code based on example originally by William Pugh.
+
+Skip Lists are a probabilistic alternative to balanced trees, as
+described in the June 1990 issue of CACM and were invented by
+William Pugh in 1987.
+
+A couple of comments about this implementation:
+The routine randomLevel has been hard-coded to generate random
+levels using p=0.25. It can be easily changed.
+
+The insertion routine has been implemented so as to use the
+dirty hack described in the CACM paper: if a random level is
+generated that is more than the current maximum level, the
+current maximum level plus one is used instead.
+
+Levels start at zero and go up to MaxLevel (which is equal to
+MaxNumberOfLevels-1).
+
+The routines defined in this file are:
+
+skiplist_init: initializes the header node slnode
+
+new_skiplist: returns a new, empty list
+
+randomLevel: returns a random level based on a random seed passed to it.
+In MuQSS, the "niffy" time is used for this purpose.
+
+skiplist_insert(l, node, key, value, randseed): inserts the binding
+(key, value) into l at the given node. This operation occurs in O(log n)
+time.
+
+skiplist_delete(l, node): deletes any binding of key from l based on the
+actual node value. This operation occurs in O(k) time, where k is the
+number of levels of the node in question (max 8). The original delete
+function ran in O(log n) time and involved a search.
+
+MuQSS Notes: In this implementation of skiplists, there are bidirectional
+next/prev pointers and the caller supplies the node in which the value is
+stored, so no allocation happens at insert time. The key is chosen by the
+scheduler so as to sort tasks according to the priority list requirements
+and is no longer used by the scheduler after insertion. The scheduler
+lookup, however, occurs in O(1) time because the wanted entry is always the
+first item in the level 0 linked list. Since the task struct stores a copy
+of the node pointer upon skiplist_insert, it can also remove it much faster
+than the original implementation, with the aid of prev<->next pointer
+manipulation and no searching. A usage sketch follows the end of this file.
+
+*/
+
+#include <linux/slab.h>
+#include <linux/skip_list.h>
+
+#define MaxNumberOfLevels 8
+#define MaxLevel (MaxNumberOfLevels - 1)
+
+void skiplist_init(skiplist_node *slnode)
+{
+	int i;
+
+	slnode->key = 0xFFFFFFFFFFFFFFFF;
+	slnode->level = 0;
+	slnode->value = NULL;
+	for (i = 0; i < MaxNumberOfLevels; i++)
+		slnode->next[i] = slnode->prev[i] = slnode;
+}
+
+skiplist *new_skiplist(skiplist_node *slnode)
+{
+	skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
+
+	BUG_ON(!l);
+	l->header = slnode;
+	return l;
+}
+
+void free_skiplist(skiplist *l)
+{
+	skiplist_node *p, *q;
+
+	p = l->header;
+	do {
+		q = p->next[0];
+		p->next[0]->prev[0] = q->prev[0];
+		skiplist_node_init(p);
+		p = q;
+	} while (p != l->header);
+	kfree(l);
+}
+
+void skiplist_node_init(skiplist_node *node)
+{
+	memset(node, 0, sizeof(skiplist_node));
+}
+
+static inline unsigned int randomLevel(const long unsigned int randseed)
+{
+	return find_first_bit(&randseed, MaxLevel) / 2;
+}
+
+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
+{
+	skiplist_node *update[MaxNumberOfLevels];
+	skiplist_node *p, *q;
+	int k = l->level;
+
+	p = l->header;
+	do {
+		while (q = p->next[k], q->key <= key)
+			p = q;
+		update[k] = p;
+	} while (--k >= 0);
+
+	++l->entries;
+	k = randomLevel(randseed);
+	if (k > l->level) {
+		k = ++l->level;
+		update[k] = l->header;
+	}
+
+	node->level = k;
+	node->key = key;
+	node->value = value;
+	do {
+		p = update[k];
+		node->next[k] = p->next[k];
+		p->next[k] = node;
+		node->prev[k] = p;
+		node->next[k]->prev[k] = node;
+	} while (--k >= 0);
+}
+
+void skiplist_delete(skiplist *l, skiplist_node *node)
+{
+	int k, m = node->level;
+
+	for (k = 0; k <= m; k++) {
+		node->prev[k]->next[k] = node->next[k];
+		node->next[k]->prev[k] = node->prev[k];
+	}
+	skiplist_node_init(node);
+	if (m == l->level) {
+		while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
+			m--;
+		l->level = m;
+	}
+	l->entries--;
+}
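
A usage sketch of the API above (illustrative only, not part of the patch). The keyType and valueType typedefs live in <linux/skip_list.h>, added elsewhere in this series; the sketch assumes the key is an integer type and the value a pointer, as MuQSS uses them, and uses jiffies as a cheap source of randomness for the level selection.

    #include <linux/jiffies.h>
    #include <linux/skip_list.h>

    static skiplist_node head;

    static void skiplist_example(void)
    {
        skiplist *sl;
        skiplist_node node;
        int payload = 42;

        skiplist_init(&head);
        sl = new_skiplist(&head);
        skiplist_node_init(&node);

        /* Insert one (key, value) binding; the level comes from the seed. */
        skiplist_insert(sl, &node, 100, &payload, (unsigned int)jiffies);

        /* O(1) lookup: the lowest key is always header->next[0]. */
        /* int *lowest = sl->header->next[0]->value; */

        skiplist_delete(sl, &node);
        free_skiplist(sl);
    }
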
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8a176d8727a..63273ba902c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -110,6 +110,9 @@ extern int core_uses_pid;
 extern char core_pattern[];
 extern unsigned int core_pipe_limit;
 #endif
+#ifdef CONFIG_USER_NS
+extern int unprivileged_userns_clone;
+#endif
 extern int pid_max;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
@@ -132,7 +135,17 @@ static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
 static int one_thousand = 1000;
-#ifdef CONFIG_PRINTK
+#ifdef CONFIG_SCHED_MUQSS
+static int zero = 0;
+static int one = 1;
+extern int rr_interval;
+extern int sched_interactive;
+extern int sched_iso_cpu;
+extern int sched_yield_type;
+extern int hrtimer_granularity_us;
+extern int hrtimeout_min_us;
+#endif
+#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
 static int ten_thousand = 10000;
 #endif
 #ifdef CONFIG_PERF_EVENTS
@@ -288,7 +301,7 @@ static struct ctl_table sysctl_base_table[] = {
 	{ }
 };
 
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
 static int min_sched_granularity_ns = 100000;		/* 100 usecs */
 static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
 static int min_wakeup_granularity_ns;			/* 0 usecs */
@@ -305,6 +318,7 @@ static int max_extfrag_threshold = 1000;
 #endif
 
 static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_MUQSS
 	{
 		.procname	= "sched_child_runs_first",
 		.data		= &sysctl_sched_child_runs_first,
@@ -486,6 +500,7 @@ static struct ctl_table kern_table[] = {
 		.extra2		= SYSCTL_ONE,
 	},
 #endif
+#endif /* !CONFIG_SCHED_MUQSS */
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",
@@ -534,6 +549,15 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_USER_NS
+	{
+		.procname	= "unprivileged_userns_clone",
+		.data		= &unprivileged_userns_clone,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+#endif
 #ifdef CONFIG_PROC_SYSCTL
 	{
 		.procname	= "tainted",
@@ -1049,6 +1073,62 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_SCHED_MUQSS
+	{
+		.procname	= "rr_interval",
+		.data		= &rr_interval,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &one_thousand,
+	},
+	{
+		.procname	= "interactive",
+		.data		= &sched_interactive,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "iso_cpu",
+		.data		= &sched_iso_cpu,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "yield_type",
+		.data		= &sched_yield_type,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two,
+	},
+	{
+		.procname	= "hrtimer_granularity_us",
+		.data		= &hrtimer_granularity_us,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &ten_thousand,
+	},
+	{
+		.procname	= "hrtimeout_min_us",
+		.data		= &hrtimeout_min_us,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &ten_thousand,
+	},
+#endif
 #if defined(CONFIG_S390) && defined(CONFIG_SMP)
 	{
 		.procname	= "spin_retry",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 5c0848ca128..87f09e62784 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -138,3 +138,4 @@ void task_work_run(void)
 		} while (work);
 	}
 }
+EXPORT_SYMBOL_GPL(task_work_run);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index fcc42353f12..60b4fd254c8 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -123,7 +123,7 @@ config CONTEXT_TRACKING
 
 config CONTEXT_TRACKING_FORCE
 	bool "Force context tracking"
-	depends on CONTEXT_TRACKING
+	depends on CONTEXT_TRACKING && !SCHED_MUQSS
 	default y if !NO_HZ_FULL
 	help
 	  The major pre-requirement for full dynticks to work is to
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index f5490222e13..3b3bf431f14 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -190,8 +190,14 @@ int clockevents_tick_resume(struct clock_event_device *dev)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
 
+#ifdef CONFIG_SCHED_MUQSS
+int __read_mostly hrtimer_granularity_us = 100;
+/* Limit min_delta to 100us */
+#define MIN_DELTA_LIMIT		(hrtimer_granularity_us * NSEC_PER_USEC)
+#else
 /* Limit min_delta to a jiffie */
 #define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
+#endif
 
 /**
  * clockevents_increase_min_delta - raise minimum delta of a clock event device
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d89da1c7e00..537df71e7d9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2216,3 +2216,115 @@ int __sched schedule_hrtimeout(ktime_t *expires,
 	return schedule_hrtimeout_range(expires, 0, mode);
 }
 EXPORT_SYMBOL_GPL(schedule_hrtimeout);
+
+#ifdef CONFIG_SCHED_MUQSS
+/*
+ * As per schedule_hrtimeout() but takes a millisecond value and returns how
+ * many milliseconds are left.
+ */
+long __sched schedule_msec_hrtimeout(long timeout)
+{
+	struct hrtimer_sleeper t;
+	int delta, jiffs;
+	ktime_t expires;
+
+	if (!timeout) {
+		__set_current_state(TASK_RUNNING);
+		return 0;
+	}
+
+	jiffs = msecs_to_jiffies(timeout);
+	/*
+	 * If regular timer resolution is adequate or hrtimer resolution is not
+	 * (yet) better than Hz, as would occur during startup, use regular
+	 * timers.
+	 */
+	if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
+		return schedule_timeout(jiffs);
+
+	delta = (timeout % 1000) * NSEC_PER_MSEC;
+	expires = ktime_set(0, delta);
+
+	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_set_expires_range_ns(&t.timer, expires, delta);
+
+	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
+
+	if (likely(t.task))
+		schedule();
+
+	hrtimer_cancel(&t.timer);
+	destroy_hrtimer_on_stack(&t.timer);
+
+	__set_current_state(TASK_RUNNING);
+
+	expires = hrtimer_expires_remaining(&t.timer);
+	timeout = ktime_to_ms(expires);
+	return timeout < 0 ? 0 : timeout;
+}
+
+EXPORT_SYMBOL(schedule_msec_hrtimeout);
+
+#define USECS_PER_SEC 1000000
+extern int hrtimer_granularity_us;
+
+static inline long schedule_usec_hrtimeout(long timeout)
+{
+	struct hrtimer_sleeper t;
+	ktime_t expires;
+	int delta;
+
+	if (!timeout) {
+		__set_current_state(TASK_RUNNING);
+		return 0;
+	}
+
+	if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
+		return schedule_timeout(usecs_to_jiffies(timeout));
+
+	if (timeout < hrtimer_granularity_us)
+		timeout = hrtimer_granularity_us;
+	delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
+	expires = ktime_set(0, delta);
+
+	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer_set_expires_range_ns(&t.timer, expires, delta);
+
+	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
+
+	if (likely(t.task))
+		schedule();
+
+	hrtimer_cancel(&t.timer);
+	destroy_hrtimer_on_stack(&t.timer);
+
+	__set_current_state(TASK_RUNNING);
+
+	expires = hrtimer_expires_remaining(&t.timer);
+	timeout = ktime_to_us(expires);
+	return timeout < 0 ? 0 : timeout;
+}
+
+int __read_mostly hrtimeout_min_us = 500;
+
+long __sched schedule_min_hrtimeout(void)
+{
+	return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
+}
+
+EXPORT_SYMBOL(schedule_min_hrtimeout);
+
+long __sched schedule_msec_hrtimeout_interruptible(long timeout)
+{
+	__set_current_state(TASK_INTERRUPTIBLE);
+	return schedule_msec_hrtimeout(timeout);
+}
+EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
+
+long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
+{
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	return schedule_msec_hrtimeout(timeout);
+}
+EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
+#endif /* CONFIG_SCHED_MUQSS */
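
A minimal caller sketch (hypothetical, mirroring the msleep() change later in this patch): loop on the returned remainder to sleep roughly 5 ms even on a HZ=100 kernel, where schedule_timeout(1) would round up to a full 10 ms tick.

    static void short_settle_delay(void)
    {
        long left = 5;

        while (left)
            left = schedule_msec_hrtimeout_uninterruptible(left);
    }
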
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2fd3b3fa68b..1202d7fe5d8 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -236,7 +236,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
 	u64 stime, utime;
 
 	task_cputime(p, &utime, &stime);
-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
+	store_samples(samples, stime, utime, tsk_seruntime(p));
 }
 
 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
@@ -855,7 +855,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
 	if (soft != RLIM_INFINITY) {
 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
+		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
 
 		/* At the hard limit, send SIGKILL. No further action. */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 03c9fc395ab..9305d48e3d2 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -43,6 +43,9 @@
 #include <linux/sched/debug.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#ifdef CONFIG_SCHED_MUQSS
+#include <linux/freezer.h>
+#endif
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -1579,7 +1582,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
  * Check, if the next hrtimer event is before the next timer wheel
  * event:
  */
-static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
+static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
 {
 	u64 nextevt = hrtimer_get_next_event();
 
@@ -1597,6 +1600,11 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 	if (nextevt <= basem)
 		return basem;
 
+#ifdef CONFIG_SCHED_MUQSS
+	if (nextevt < expires && nextevt - basem <= TICK_NSEC)
+		base->is_idle = false;
+#endif
+
 	/*
 	 * Round up to the next jiffie. High resolution timers are
 	 * off, so the hrtimers are expired in the tick and we need to
@@ -1666,7 +1674,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	}
 	raw_spin_unlock(&base->lock);
 
-	return cmp_next_hrtimer_event(basem, expires);
+	return cmp_next_hrtimer_event(base, basem, expires);
 }
 
 /**
@@ -1903,6 +1911,18 @@ signed long __sched schedule_timeout(signed long timeout)
 
 	expire = timeout + jiffies;
 
+#if defined(CONFIG_HIGH_RES_TIMERS) && defined(CONFIG_SCHED_MUQSS)
+	if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+		/*
+		 * Special case 1 as being a request for the minimum timeout
+		 * and use highres timers to timeout after 1ms to workaround
+		 * the granularity of low Hz tick timers.
+		 */
+		if (!schedule_min_hrtimeout())
+			return 0;
+		goto out_timeout;
+	}
+#endif
 	timer.task = current;
 	timer_setup_on_stack(&timer.timer, process_timeout, 0);
 	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
@@ -1912,6 +1932,9 @@ signed long __sched schedule_timeout(signed long timeout)
 	/* Remove the timer from the object tracker */
 	destroy_timer_on_stack(&timer.timer);
 
+#if defined(CONFIG_HIGH_RES_TIMERS) && defined(CONFIG_SCHED_MUQSS)
+out_timeout:
+#endif
 	timeout = expire - jiffies;
 
  out:
@@ -2058,6 +2081,18 @@ void msleep(unsigned int msecs)
 {
 	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
+#ifdef CONFIG_SCHED_MUQSS
+	/*
+	 * Use high resolution timers where the resolution of tick based
+	 * timers is inadequate.
+	 */
+	if (timeout < 6 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+		while (msecs)
+			msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
+		return;
+	}
+#endif
+
 	while (timeout)
 		timeout = schedule_timeout_uninterruptible(timeout);
 }
@@ -2072,6 +2107,14 @@ unsigned long msleep_interruptible(unsigned int msecs)
 {
 	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
+#ifdef CONFIG_SCHED_MUQSS
+	if (timeout < 6 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
+		while (msecs && !signal_pending(current))
+			msecs = schedule_msec_hrtimeout_interruptible(msecs);
+		return msecs;
+	}
+#endif
+
 	while (timeout && !signal_pending(current))
 		timeout = schedule_timeout_interruptible(timeout);
 	return jiffies_to_msecs(timeout);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b5e3496cf80..68930e7f4d2 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1048,10 +1048,15 @@ static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a -deadline thread */
 	static const struct sched_attr attr = {
+#ifdef CONFIG_SCHED_MUQSS
+		/* No deadline on MuQSS, use RR */
+		.sched_policy = SCHED_RR,
+#else
 		.sched_policy = SCHED_DEADLINE,
 		.sched_runtime = 100000ULL,
 		.sched_deadline = 10000000ULL,
 		.sched_period = 10000000ULL
+#endif
 	};
 	struct wakeup_test_data *x = data;
 
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8eadadc478f..c36ecd19562 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -21,6 +21,13 @@
 #include <linux/bsearch.h>
 #include <linux/sort.h>
 
+/* sysctl */
+#ifdef CONFIG_USER_NS_UNPRIVILEGED
+int unprivileged_userns_clone = 1;
+#else
+int unprivileged_userns_clone;
+#endif
+
 static struct kmem_cache *user_ns_cachep __read_mostly;
 static DEFINE_MUTEX(userns_state_mutex);
 
diff --git a/lib/Makefile b/lib/Makefile
index 685aee60de1..e9584575b51 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
 endif
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
-	 rbtree.o radix-tree.o timerqueue.o xarray.o \
+	 rbtree.o radix-tree.o sradix-tree.o timerqueue.o xarray.o \
 	 idr.o extable.o sha1.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c
new file mode 100644
index 00000000000..ab21e6309b9
--- /dev/null
+++ b/lib/sradix-tree.c
@@ -0,0 +1,476 @@
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/gcd.h>
+#include <linux/sradix-tree.h>
+
+static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node)
+{
+	return node->fulls == root->stores_size ||
+		(node->height == 1 && node->count == root->stores_size);
+}
+
+/*
+ *	Extend a sradix tree so it can store key @index.
+ */
+static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index)
+{
+	struct sradix_tree_node *node;
+	unsigned int height;
+
+	if (unlikely(root->rnode == NULL)) {
+		if (!(node = root->alloc()))
+			return -ENOMEM;
+
+		node->height = 1;
+		root->rnode = node;
+		root->height = 1;
+	}
+
+	/* Figure out what the height should be.  */
+	height = root->height;
+	index >>= root->shift * height;
+
+	while (index) {
+		index >>= root->shift;
+		height++;
+	}
+
+	while (height > root->height) {
+		unsigned int newheight;
+
+		if (!(node = root->alloc()))
+			return -ENOMEM;
+
+		/* Increase the height.  */
+		node->stores[0] = root->rnode;
+		root->rnode->parent = node;
+		if (root->extend)
+			root->extend(node, root->rnode);
+
+		newheight = root->height + 1;
+		node->height = newheight;
+		node->count = 1;
+		if (sradix_node_full(root, root->rnode))
+			node->fulls = 1;
+
+		root->rnode = node;
+		root->height = newheight;
+	}
+
+	return 0;
+}
+
+/*
+ * Search for the next item from the current node that is not NULL
+ * and satisfies the given iter() callback.
+ */
+void *sradix_tree_next(struct sradix_tree_root *root,
+		       struct sradix_tree_node *node, unsigned long index,
+		       int (*iter)(void *item, unsigned long height))
+{
+	unsigned long offset;
+	void *item;
+
+	if (unlikely(node == NULL)) {
+		node = root->rnode;
+		for (offset = 0; offset < root->stores_size; offset++) {
+			item = node->stores[offset];
+			if (item && (!iter || iter(item, node->height)))
+				break;
+		}
+
+		if (unlikely(offset >= root->stores_size))
+			return NULL;
+
+		if (node->height == 1)
+			return item;
+		else
+			goto go_down;
+	}
+
+	while (node) {
+		offset = (index & root->mask) + 1;
+		for (; offset < root->stores_size; offset++) {
+			item = node->stores[offset];
+			if (item && (!iter || iter(item, node->height)))
+				break;
+		}
+
+		if (offset < root->stores_size)
+			break;
+
+		node = node->parent;
+		index >>= root->shift;
+	}
+
+	if (!node)
+		return NULL;
+
+	while (node->height > 1) {
+go_down:
+		node = item;
+		for (offset = 0; offset < root->stores_size; offset++) {
+			item = node->stores[offset];
+			if (item && (!iter || iter(item, node->height)))
+				break;
+		}
+
+		if (unlikely(offset >= root->stores_size))
+			return NULL;
+	}
+
+	BUG_ON(offset > root->stores_size);
+
+	return item;
+}
+
+/*
+ * Blindly insert the item into the tree. Typically, we reuse the
+ * first empty store slot.
+ */
+int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num)
+{
+	unsigned long index;
+	unsigned int height;
+	struct sradix_tree_node *node, *tmp = NULL;
+	int offset, offset_saved;
+	void **store = NULL;
+	int error, i, j, shift;
+
+go_on:
+	index = root->min;
+
+	if (root->enter_node && !sradix_node_full(root, root->enter_node)) {
+		node = root->enter_node;
+		BUG_ON((index >> (root->shift * root->height)));
+	} else {
+		node = root->rnode;
+		if (node == NULL || (index >> (root->shift * root->height))
+		    || sradix_node_full(root, node)) {
+			error = sradix_tree_extend(root, index);
+			if (error)
+				return error;
+
+			node = root->rnode;
+		}
+	}
+
+
+	height = node->height;
+	shift = (height - 1) * root->shift;
+	offset = (index >> shift) & root->mask;
+	while (shift > 0) {
+		offset_saved = offset;
+		for (; offset < root->stores_size; offset++) {
+			store = &node->stores[offset];
+			tmp = *store;
+
+			if (!tmp || !sradix_node_full(root, tmp))
+				break;
+		}
+		BUG_ON(offset >= root->stores_size);
+
+		if (offset != offset_saved) {
+			index += (offset - offset_saved) << shift;
+			index &= ~((1UL << shift) - 1);
+		}
+
+		if (!tmp) {
+			if (!(tmp = root->alloc()))
+				return -ENOMEM;
+
+			tmp->height = shift / root->shift;
+			*store = tmp;
+			tmp->parent = node;
+			node->count++;
+//			if (root->extend)
+//				root->extend(node, tmp);
+		}
+
+		node = tmp;
+		shift -= root->shift;
+		offset = (index >> shift) & root->mask;
+	}
+
+	BUG_ON(node->height != 1);
+
+
+	store = &node->stores[offset];
+	for (i = 0, j = 0;
+	      j < root->stores_size - node->count &&
+	      i < root->stores_size - offset && j < num; i++) {
+		if (!store[i]) {
+			store[i] = item[j];
+			if (root->assign)
+				root->assign(node, index + i, item[j]);
+			j++;
+		}
+	}
+
+	node->count += j;
+	root->num += j;
+	num -= j;
+
+	while (sradix_node_full(root, node)) {
+		node = node->parent;
+		if (!node)
+			break;
+
+		node->fulls++;
+	}
+
+	if (unlikely(!node)) {
+		/* All nodes are full */
+		root->min = 1 << (root->height * root->shift);
+		root->enter_node = NULL;
+	} else {
+		root->min = index + i - 1;
+		root->min |= (1UL << (node->height - 1)) - 1;
+		root->min++;
+		root->enter_node = node;
+	}
+
+	if (num) {
+		item += j;
+		goto go_on;
+	}
+
+	return 0;
+}
+
+
+/**
+ * sradix_tree_shrink - shrink the height of a sradix tree to the minimum
+ * @root: sradix tree root
+ */
+static inline void sradix_tree_shrink(struct sradix_tree_root *root)
+{
+	/* try to shrink tree height */
+	while (root->height > 1) {
+		struct sradix_tree_node *to_free = root->rnode;
+
+		/*
+		 * If the candidate node has more than one child, or its only
+		 * child is not in the leftmost store slot, we cannot shrink.
+		 */
+		if (to_free->count != 1 || !to_free->stores[0])
+			break;
+
+		root->rnode = to_free->stores[0];
+		root->rnode->parent = NULL;
+		root->height--;
+		if (unlikely(root->enter_node == to_free))
+			root->enter_node = NULL;
+		root->free(to_free);
+	}
+}
+
+/*
+ * Delete the item at the known leaf node and index.
+ */
+void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
+				  struct sradix_tree_node *node, unsigned long index)
+{
+	unsigned int offset;
+	struct sradix_tree_node *start, *end;
+
+	BUG_ON(node->height != 1);
+
+	start = node;
+	while (node && !(--node->count))
+		node = node->parent;
+
+	end = node;
+	if (!node) {
+		root->rnode = NULL;
+		root->height = 0;
+		root->min = 0;
+		root->num = 0;
+		root->enter_node = NULL;
+	} else {
+		offset = (index >> (root->shift * (node->height - 1))) & root->mask;
+		if (root->rm)
+			root->rm(node, offset);
+		node->stores[offset] = NULL;
+		root->num--;
+		if (root->min > index) {
+			root->min = index;
+			root->enter_node = node;
+		}
+	}
+
+	if (start != end) {
+		do {
+			node = start;
+			start = start->parent;
+			if (unlikely(root->enter_node == node))
+				root->enter_node = end;
+			root->free(node);
+		} while (start != end);
+
+		/*
+		 * Note that shrink may free "end", so enter_node still needs
+		 * to be checked inside sradix_tree_shrink().
+		 */
+		sradix_tree_shrink(root);
+	} else if (node->count == root->stores_size - 1) {
+		/* It WAS a full leaf node. Update the ancestors */
+		node = node->parent;
+		while (node) {
+			node->fulls--;
+			if (node->fulls != root->stores_size - 1)
+				break;
+
+			node = node->parent;
+		}
+	}
+}
+
+void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index)
+{
+	unsigned int height, offset;
+	struct sradix_tree_node *node;
+	int shift;
+
+	node = root->rnode;
+	if (node == NULL || (index >> (root->shift * root->height)))
+		return NULL;
+
+	height = root->height;
+	shift = (height - 1) * root->shift;
+
+	do {
+		offset = (index >> shift) & root->mask;
+		node = node->stores[offset];
+		if (!node)
+			return NULL;
+
+		shift -= root->shift;
+	} while (shift >= 0);
+
+	return node;
+}
+
+/*
+ * Return the item if it exists, otherwise create it in place
+ * and return the created item.
+ */
+void *sradix_tree_lookup_create(struct sradix_tree_root *root,
+			unsigned long index, void *(*item_alloc)(void))
+{
+	unsigned int height, offset;
+	struct sradix_tree_node *node, *tmp;
+	void *item;
+	int shift, error;
+
+	if (root->rnode == NULL || (index >> (root->shift * root->height))) {
+		if (item_alloc) {
+			error = sradix_tree_extend(root, index);
+			if (error)
+				return NULL;
+		} else {
+			return NULL;
+		}
+	}
+
+	node = root->rnode;
+	height = root->height;
+	shift = (height - 1) * root->shift;
+
+	do {
+		offset = (index >> shift) & root->mask;
+		if (!node->stores[offset]) {
+			if (!(tmp = root->alloc()))
+				return NULL;
+
+			tmp->height = shift / root->shift;
+			node->stores[offset] = tmp;
+			tmp->parent = node;
+			node->count++;
+			node = tmp;
+		} else {
+			node = node->stores[offset];
+		}
+
+		shift -= root->shift;
+	} while (shift > 0);
+
+	BUG_ON(node->height != 1);
+	offset = index & root->mask;
+	if (node->stores[offset]) {
+		return node->stores[offset];
+	} else if (item_alloc) {
+		if (!(item = item_alloc()))
+			return NULL;
+
+		node->stores[offset] = item;
+
+		/*
+		 * NOTE: we do NOT call root->assign here, since the item we
+		 * just created carries no meaning yet. The caller can invoke
+		 * it afterwards if necessary.
+		 */
+
+		node->count++;
+		root->num++;
+
+		while (sradix_node_full(root, node)) {
+			node = node->parent;
+			if (!node)
+				break;
+
+			node->fulls++;
+		}
+
+		if (unlikely(!node)) {
+			/* All nodes are full */
+			root->min = 1 << (root->height * root->shift);
+		} else {
+			if (root->min == index) {
+				root->min |= (1UL << (node->height - 1)) - 1;
+				root->min++;
+				root->enter_node = node;
+			}
+		}
+
+		return item;
+	} else {
+		return NULL;
+	}
+
+}
+
+int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index)
+{
+	unsigned int height, offset;
+	struct sradix_tree_node *node;
+	int shift;
+
+	node = root->rnode;
+	if (node == NULL || (index >> (root->shift * root->height)))
+		return -ENOENT;
+
+	height = root->height;
+	shift = (height - 1) * root->shift;
+
+	do {
+		offset = (index >> shift) & root->mask;
+		node = node->stores[offset];
+		if (!node)
+			return -ENOENT;
+
+		shift -= root->shift;
+	} while (shift > 0);
+
+	offset = index & root->mask;
+	if (!node->stores[offset])
+		return -ENOENT;
+
+	sradix_tree_delete_from_leaf(root, node, index);
+
+	return 0;
+}
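
An illustrative setup of a sradix tree root (not part of the patch). The struct layouts come from <linux/sradix-tree.h>, added elsewhere in this series; only the fields and callbacks that the code above dereferences (shift, mask, stores_size, alloc, free) are filled in, the node wrapper and helper names are invented for the example, and stores[] is assumed to be a flexible array at the end of struct sradix_tree_node.

    #define EX_SHIFT 4                          /* 16 slots per node */

    struct ex_node {
        struct sradix_tree_node snode;
        void *slots[1 << EX_SHIFT];             /* backing for snode.stores[] */
    };

    static struct sradix_tree_node *ex_alloc(void)
    {
        struct ex_node *n = kzalloc(sizeof(*n), GFP_ATOMIC);

        return n ? &n->snode : NULL;
    }

    static void ex_free(struct sradix_tree_node *node)
    {
        kfree(container_of(node, struct ex_node, snode));
    }

    static void ex_demo(void)
    {
        struct sradix_tree_root root = {
            .shift       = EX_SHIFT,
            .mask        = (1 << EX_SHIFT) - 1,
            .stores_size = 1 << EX_SHIFT,
            .alloc       = ex_alloc,
            .free        = ex_free,
        };
        void *item = &root;                     /* any pointer will do */

        sradix_tree_enter(&root, &item, 1);     /* stored at the first free index */
        WARN_ON(sradix_tree_lookup(&root, 0) != item);
        sradix_tree_delete(&root, 0);
    }
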
diff --git a/mm/Kconfig b/mm/Kconfig
index c1acc34c1c3..ecdcd88f527 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -316,6 +316,32 @@ config KSM
 	  See Documentation/vm/ksm.rst for more information: KSM is inactive
 	  until a program has madvised that an area is MADV_MERGEABLE, and
 	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+
+choice
+	prompt "Choose UKSM/KSM strategy"
+	default UKSM
+	depends on KSM
+	help
+	  This option allows you to select a UKSM/KSM strategy.
+
+config UKSM
+	bool "Ultra-KSM for page merging"
+	depends on KSM
+	help
+	  UKSM is inspired by the Linux kernel's KSM (Kernel Samepage
+	  Merging), but with a fundamentally rewritten core algorithm. With
+	  an advanced algorithm, UKSM can transparently scan all anonymously
+	  mapped user-space applications with significantly improved scan
+	  speed and CPU efficiency. Since KVM is friendly to KSM, KVM can
+	  also benefit from UKSM. UKSM has had its first stable release and
+	  its first real-world enterprise user. For more information, see
+	  the project page (github.com/dolohow/uksm).
+
+config KSM_LEGACY
+	bool "Legacy KSM implementation"
+	depends on KSM
+	help
+	  The legacy KSM implementation from Red Hat.
+endchoice
 
 config DEFAULT_MMAP_MIN_ADDR
 	int "Low address space to protect from user allocation"
diff --git a/mm/Makefile b/mm/Makefile
index fccd3756b25..ad2b211ad1b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -44,7 +44,7 @@ obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o gup.o $(mmu-y)
+			   prfile.o debug.o gup.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
@@ -68,7 +68,8 @@ obj-$(CONFIG_SPARSEMEM)	+= sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
-obj-$(CONFIG_KSM) += ksm.o
+obj-$(CONFIG_KSM_LEGACY) += ksm.o
+obj-$(CONFIG_UKSM) += uksm.o
 obj-$(CONFIG_PAGE_POISONING) += page_poison.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
diff --git a/mm/filemap.c b/mm/filemap.c
index 23a051a7ef0..463a4d51462 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -116,8 +116,8 @@
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
-static void page_cache_delete(struct address_space *mapping,
-				   struct page *page, void *shadow)
+static bool __must_check page_cache_delete(struct address_space *mapping,
+					   struct page *page, void *shadow)
 {
 	XA_STATE(xas, &mapping->i_pages, page->index);
 	unsigned int nr = 1;
@@ -151,6 +151,8 @@ static void page_cache_delete(struct address_space *mapping,
 		smp_wmb();
 	}
 	mapping->nrpages -= nr;
+
+	return mapping_empty(mapping);
 }
 
 static void unaccount_page_cache_page(struct address_space *mapping,
@@ -227,15 +229,18 @@ static void unaccount_page_cache_page(struct address_space *mapping,
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
  * is safe.  The caller must hold the i_pages lock.
+ *
+ * If this returns true, the caller must call inode_pages_clear()
+ * after dropping the i_pages lock.
  */
-void __delete_from_page_cache(struct page *page, void *shadow)
+bool __must_check __delete_from_page_cache(struct page *page, void *shadow)
 {
 	struct address_space *mapping = page->mapping;
 
 	trace_mm_filemap_delete_from_page_cache(page);
 
 	unaccount_page_cache_page(mapping, page);
-	page_cache_delete(mapping, page, shadow);
+	return page_cache_delete(mapping, page, shadow);
 }
 
 static void page_cache_free_page(struct address_space *mapping,
@@ -267,12 +272,16 @@ void delete_from_page_cache(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 	unsigned long flags;
+	bool empty;
 
 	BUG_ON(!PageLocked(page));
 	xa_lock_irqsave(&mapping->i_pages, flags);
-	__delete_from_page_cache(page, NULL);
+	empty = __delete_from_page_cache(page, NULL);
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
+	if (empty)
+		inode_pages_clear(mapping->host);
+
 	page_cache_free_page(mapping, page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
@@ -291,8 +300,8 @@ EXPORT_SYMBOL(delete_from_page_cache);
  *
  * The function expects the i_pages lock to be held.
  */
-static void page_cache_delete_batch(struct address_space *mapping,
-			     struct pagevec *pvec)
+static bool __must_check page_cache_delete_batch(struct address_space *mapping,
+						 struct pagevec *pvec)
 {
 	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
@@ -337,12 +346,15 @@ static void page_cache_delete_batch(struct address_space *mapping,
 		total_pages++;
 	}
 	mapping->nrpages -= total_pages;
+
+	return mapping_empty(mapping);
 }
 
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec)
 {
 	int i;
+	bool empty;
 	unsigned long flags;
 
 	if (!pagevec_count(pvec))
@@ -354,9 +366,12 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 
 		unaccount_page_cache_page(mapping, pvec->pages[i]);
 	}
-	page_cache_delete_batch(mapping, pvec);
+	empty = page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
+	if (empty)
+		inode_pages_clear(mapping->host);
+
 	for (i = 0; i < pagevec_count(pvec); i++)
 		page_cache_free_page(mapping, pvec->pages[i]);
 }
@@ -831,9 +846,10 @@ static int __add_to_page_cache_locked(struct page *page,
 				      void **shadowp)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
+	int error;
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
-	int error;
+	bool populated = false;
 	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -860,6 +876,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		if (xas_error(&xas))
 			goto unlock;
 
+		populated = mapping_empty(mapping);
 		if (xa_is_value(old)) {
 			mapping->nrexceptional--;
 			if (shadowp)
@@ -880,6 +897,10 @@ static int __add_to_page_cache_locked(struct page *page,
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false, false);
 	trace_mm_filemap_add_to_page_cache(page);
+
+	if (populated)
+		inode_pages_set(mapping->host);
+
 	return 0;
 error:
 	page->mapping = NULL;
@@ -2660,7 +2681,7 @@ vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
 	sb_start_pagefault(inode->i_sb);
-	file_update_time(vmf->vma->vm_file);
+	vma_file_update_time(vmf->vma);
 	lock_page(page);
 	if (page->mapping != inode->i_mapping) {
 		unlock_page(page);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index dddc863b3cb..8a7b0e52042 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
 	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
 #endif
+#ifdef CONFIG_PCK_INTERACTIVE
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
+#else
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
+#endif
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
@@ -2632,7 +2636,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 		/* Some pages can be beyond i_size: drop them from page cache */
 		if (head[i].index >= end) {
 			ClearPageDirty(head + i);
-			__delete_from_page_cache(head + i, NULL);
+			/* We know we're not removing the last page */
+			(void)__delete_from_page_cache(head + i, NULL);
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);
 			put_page(head + i);
diff --git a/mm/internal.h b/mm/internal.h
index b5634e78f01..96372dd70ab 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -189,6 +189,7 @@ extern void prep_compound_page(struct page *page, unsigned int order);
 extern void post_alloc_hook(struct page *page, unsigned int order,
 					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
+extern atomic_long_t kswapd_waiters;
 
 extern void zone_pcp_update(struct zone *zone);
 extern void zone_pcp_reset(struct zone *zone);
diff --git a/mm/ksm.c b/mm/ksm.c
index 281c00129a2..48d9ac62433 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -857,17 +857,6 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
 	return err;
 }
 
-static inline struct stable_node *page_stable_node(struct page *page)
-{
-	return PageKsm(page) ? page_rmapping(page) : NULL;
-}
-
-static inline void set_page_stable_node(struct page *page,
-					struct stable_node *stable_node)
-{
-	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
-}
-
 #ifdef CONFIG_SYSFS
 /*
  * Only called through the sysfs control interface:
diff --git a/mm/memory.c b/mm/memory.c
index 22d218bc56c..bfabe125434 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -144,6 +144,25 @@ EXPORT_SYMBOL(zero_pfn);
 
 unsigned long highest_memmap_pfn __read_mostly;
 
+#ifdef CONFIG_UKSM
+unsigned long uksm_zero_pfn __read_mostly;
+EXPORT_SYMBOL_GPL(uksm_zero_pfn);
+struct page *empty_uksm_zero_page;
+
+static int __init setup_uksm_zero_page(void)
+{
+	empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
+	if (!empty_uksm_zero_page)
+		panic("Oh boy, that early out of memory?");
+
+	SetPageReserved(empty_uksm_zero_page);
+	uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
+
+	return 0;
+}
+core_initcall(setup_uksm_zero_page);
+#endif
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
@@ -159,6 +178,7 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 	trace_rss_stat(mm, member, count);
 }
 
+
 #if defined(SPLIT_RSS_COUNTING)
 
 void sync_mm_rss(struct mm_struct *mm)
@@ -802,8 +822,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		get_page(page);
 		page_dup_rmap(page, false);
 		rss[mm_counter(page)]++;
+
+		/* Should return NULL in vm_normal_page() */
+		uksm_bugon_zeropage(pte);
 	} else if (pte_devmap(pte)) {
 		page = pte_page(pte);
+	} else {
+		uksm_map_zero_page(pte);
 	}
 
 out_set_pte:
@@ -1076,8 +1101,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
-			if (unlikely(!page))
+			if (unlikely(!page)) {
+				uksm_unmap_zero_page(ptent);
 				continue;
+			}
 
 			if (!PageAnon(page)) {
 				if (pte_dirty(ptent)) {
@@ -2413,6 +2440,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 
 	if (likely(src)) {
 		copy_user_highpage(dst, src, addr, vma);
+		uksm_cow_page(vma, src);
 		return true;
 	}
 
@@ -2659,6 +2687,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 							      vmf->address);
 		if (!new_page)
 			goto oom;
+		uksm_cow_pte(vma, vmf->orig_pte);
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				vmf->address);
@@ -2700,7 +2729,9 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 						mm_counter_file(old_page));
 				inc_mm_counter_fast(mm, MM_ANONPAGES);
 			}
+			uksm_bugon_zeropage(vmf->orig_pte);
 		} else {
+			uksm_unmap_zero_page(vmf->orig_pte);
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
diff --git a/mm/mmap.c b/mm/mmap.c
index f609e9ec4a2..57c10c16a96 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -46,6 +46,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/ksm.h>
 #include <linux/sched/mm.h>
 
 #include <linux/uaccess.h>
@@ -179,8 +180,9 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
-		fput(vma->vm_file);
+		vma_fput(vma);
 	mpol_put(vma_policy(vma));
+	uksm_remove_vma(vma);
 	vm_area_free(vma);
 	return next;
 }
@@ -708,9 +710,16 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	long adjust_next = 0;
 	int remove_next = 0;
 
+/*
+ * to avoid deadlock, uksm_remove_vma() must be called before any spin_lock
+ * is acquired
+ */
+	uksm_remove_vma(vma);
+
 	if (next && !insert) {
 		struct vm_area_struct *exporter = NULL, *importer = NULL;
 
+		uksm_remove_vma(next);
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -841,6 +850,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		end_changed = true;
 	}
 	vma->vm_pgoff = pgoff;
+
 	if (adjust_next) {
 		next->vm_start += adjust_next << PAGE_SHIFT;
 		next->vm_pgoff += adjust_next;
@@ -910,7 +920,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (remove_next) {
 		if (file) {
 			uprobe_munmap(next, next->vm_start, next->vm_end);
-			fput(file);
+			vma_fput(vma);
 		}
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
@@ -946,6 +956,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		if (remove_next == 2) {
 			remove_next = 1;
 			end = next->vm_end;
+			uksm_remove_vma(next);
 			goto again;
 		}
 		else if (next)
@@ -972,10 +983,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 */
 			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
+	} else {
+		if (next && !insert)
+			uksm_vma_add_new(next);
 	}
 	if (insert && file)
 		uprobe_mmap(insert);
 
+	uksm_vma_add_new(vma);
 	validate_mm(mm);
 
 	return 0;
@@ -1434,6 +1449,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+	/* If uksm is enabled, we add VM_MERGEABLE to new VMAs. */
+	uksm_vm_flags_mod(&vm_flags);
+
 	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
@@ -1801,6 +1819,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 			allow_write_access(file);
 	}
 	file = vma->vm_file;
+	uksm_vma_add_new(vma);
 out:
 	perf_event_mmap(vma);
 
@@ -1831,8 +1850,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return addr;
 
 unmap_and_free_vma:
+	vma_fput(vma);
 	vma->vm_file = NULL;
-	fput(file);
 
 	/* Undo any partial mapping done by a device driver. */
 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
@@ -1843,6 +1862,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	if (vm_flags & VM_DENYWRITE)
 		allow_write_access(file);
 free_vma:
+	uksm_remove_vma(vma);
 	vm_area_free(vma);
 unacct_error:
 	if (charged)
@@ -2683,7 +2703,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_free_mpol;
 
 	if (new->vm_file)
-		get_file(new->vm_file);
+		vma_get_file(new);
 
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
@@ -2694,6 +2714,8 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
+	uksm_vma_add_new(new);
+
 	/* Success. */
 	if (!err)
 		return 0;
@@ -2702,7 +2724,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (new->vm_ops && new->vm_ops->close)
 		new->vm_ops->close(new);
 	if (new->vm_file)
-		fput(new->vm_file);
+		vma_fput(new);
 	unlink_anon_vmas(new);
  out_free_mpol:
 	mpol_put(vma_policy(new));
@@ -2894,7 +2916,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	struct vm_area_struct *vma;
 	unsigned long populate = 0;
 	unsigned long ret = -EINVAL;
-	struct file *file;
+	struct file *file, *prfile;
 
 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
 		     current->comm, current->pid);
@@ -2969,10 +2991,27 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 		}
 	}
 
-	file = get_file(vma->vm_file);
+	vma_get_file(vma);
+	file = vma->vm_file;
+	prfile = vma->vm_prfile;
 	ret = do_mmap_pgoff(vma->vm_file, start, size,
 			prot, flags, pgoff, &populate, NULL);
+	if (!IS_ERR_VALUE(ret) && file && prfile) {
+		struct vm_area_struct *new_vma;
+
+		new_vma = find_vma(mm, ret);
+		if (!new_vma->vm_prfile)
+			new_vma->vm_prfile = prfile;
+		if (new_vma != vma)
+			get_file(prfile);
+	}
+	/*
+	 * two fput()s instead of vma_fput(vma),
+	 * because vma may not be available anymore.
+	 */
 	fput(file);
+	if (prfile)
+		fput(prfile);
 out:
 	up_write(&mm->mmap_sem);
 	if (populate)
@@ -3000,6 +3039,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	if ((flags & (~VM_EXEC)) != 0)
 		return -EINVAL;
 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+	uksm_vm_flags_mod(&flags);
 
 	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
 	if (IS_ERR_VALUE(mapped_addr))
@@ -3050,6 +3090,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	vma->vm_flags = flags;
 	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
+	uksm_vma_add_new(vma);
 out:
 	perf_event_mmap(vma);
 	mm->total_vm += len >> PAGE_SHIFT;
@@ -3127,6 +3168,12 @@ void exit_mmap(struct mm_struct *mm)
 		up_write(&mm->mmap_sem);
 	}
 
+	/*
+	 * Taking the write lock on mmap_sem does not harm others,
+	 * but it's crucial for uksm to avoid races.
+	 */
+	down_write(&mm->mmap_sem);
+
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
@@ -3161,6 +3208,11 @@ void exit_mmap(struct mm_struct *mm)
 		vma = remove_vma(vma);
 	}
 	vm_unacct_memory(nr_accounted);
+
+	mm->mmap = NULL;
+	mm->mm_rb = RB_ROOT;
+	vmacache_invalidate(mm);
+	up_write(&mm->mmap_sem);
 }
 
 /* Insert vm structure into process list sorted by address
@@ -3263,11 +3315,12 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		if (anon_vma_clone(new_vma, vma))
 			goto out_free_mempol;
 		if (new_vma->vm_file)
-			get_file(new_vma->vm_file);
+			vma_get_file(new_vma);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
 		vma_link(mm, new_vma, prev, rb_link, rb_parent);
 		*need_rmap_locks = false;
+		uksm_vma_add_new(new_vma);
 	}
 	return new_vma;
 
@@ -3420,6 +3473,7 @@ static struct vm_area_struct *__install_special_mapping(
 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
 
 	perf_event_mmap(vma);
+	uksm_vma_add_new(vma);
 
 	return vma;
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 318df4e236c..7f051e86ea1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -567,7 +567,7 @@ static void __put_nommu_region(struct vm_region *region)
 		up_write(&nommu_region_sem);
 
 		if (region->vm_file)
-			fput(region->vm_file);
+			vmr_fput(region);
 
 		/* IO memory and memory shared directly out of the pagecache
 		 * from ramfs/tmpfs mustn't be released here */
@@ -699,7 +699,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
-		fput(vma->vm_file);
+		vma_fput(vma);
 	put_nommu_region(vma->vm_region);
 	vm_area_free(vma);
 }
@@ -1222,7 +1222,7 @@ unsigned long do_mmap(struct file *file,
 					goto error_just_free;
 				}
 			}
-			fput(region->vm_file);
+			vmr_fput(region);
 			kmem_cache_free(vm_region_jar, region);
 			region = pregion;
 			result = start;
@@ -1299,10 +1299,10 @@ unsigned long do_mmap(struct file *file,
 	up_write(&nommu_region_sem);
 error:
 	if (region->vm_file)
-		fput(region->vm_file);
+		vmr_fput(region);
 	kmem_cache_free(vm_region_jar, region);
 	if (vma->vm_file)
-		fput(vma->vm_file);
+		vma_fput(vma);
 	vm_area_free(vma);
 	return ret;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7326b54ab72..a934925362d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
 /*
  * Start background writeback (via writeback threads) at this percentage
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+int dirty_background_ratio = 20;
+#else
 int dirty_background_ratio = 10;
+#endif
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
+#ifdef CONFIG_PCK_INTERACTIVE
+int vm_dirty_ratio = 50;
+#else
 int vm_dirty_ratio = 20;
+#endif
 
 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d0c0d9364aa..667f34a27ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -76,6 +76,8 @@
 #include "shuffle.h"
 #include "page_reporting.h"
 
+atomic_long_t kswapd_waiters = ATOMIC_LONG_INIT(0);
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -315,20 +317,7 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
-#ifdef CONFIG_DISCONTIGMEM
-/*
- * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
- * are not on separate NUMA nodes. Functionally this works but with
- * watermark_boost_factor, it can reclaim prematurely as the ranges can be
- * quite small. By default, do not boost watermarks on discontigmem as in
- * many cases very high-order allocations like THP are likely to be
- * unsupported and the premature reclaim offsets the advantage of long-term
- * fragmentation avoidance.
- */
 int watermark_boost_factor __read_mostly;
-#else
-int watermark_boost_factor __read_mostly = 15000;
-#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -2460,9 +2449,11 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	 * likelihood of future fallbacks. Wake kswapd now as the node
 	 * may be balanced overall and kswapd will not wake naturally.
 	 */
-	boost_watermark(zone);
-	if (alloc_flags & ALLOC_KSWAPD)
-		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+	if (alloc_flags & ALLOC_KSWAPD) {
+		boost_watermark(zone);
+		if (zone->watermark_boost)
+			set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+	}
 
 	/* We are not allowed to try stealing from the whole block */
 	if (!whole_block)
@@ -4477,6 +4468,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int no_progress_loops;
 	unsigned int cpuset_mems_cookie;
 	int reserve_flags;
+	bool woke_kswapd = false;
 
 	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
@@ -4510,8 +4502,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (alloc_flags & ALLOC_KSWAPD) {
+		if (!woke_kswapd) {
+			atomic_long_inc(&kswapd_waiters);
+			woke_kswapd = true;
+		}
 		wake_all_kswapds(order, gfp_mask, ac);
+	}
 
 	/*
 	 * The adjusted alloc_flags might result in immediate success, so try
@@ -4716,9 +4713,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto retry;
 	}
 fail:
-	warn_alloc(gfp_mask, ac->nodemask,
-			"page allocation failure: order:%u", order);
 got_pg:
+	if (woke_kswapd)
+		atomic_long_dec(&kswapd_waiters);
+	if (!page)
+		warn_alloc(gfp_mask, ac->nodemask,
+				"page allocation failure: order:%u", order);
 	return page;
 }
 
diff --git a/mm/prfile.c b/mm/prfile.c
new file mode 100644
index 00000000000..00d51187c32
--- /dev/null
+++ b/mm/prfile.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mainly for aufs, which mmap(2)s a different file and wants to print a
+ * different path in /proc/PID/maps.
+ * Call these functions via macros defined in linux/mm.h.
+ *
+ * See Documentation/filesystems/aufs/design/06mmap.txt
+ *
+ * Copyright (c) 2014-2020 Junjiro R. Okajima
+ * Copyright (c) 2014 Ian Campbell
+ */
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+/* #define PRFILE_TRACE */
+static inline void prfile_trace(struct file *f, struct file *pr,
+			      const char func[], int line, const char func2[])
+{
+#ifdef PRFILE_TRACE
+	if (pr)
+		pr_info("%s:%d: %s, %pD2\n", func, line, func2, f);
+#endif
+}
+
+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[],
+			     int line)
+{
+	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	file_update_time(f);
+	if (f && pr)
+		file_update_time(pr);
+}
+
+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[],
+			       int line)
+{
+	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	return (f && pr) ? pr : f;
+}
+
+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line)
+{
+	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	get_file(f);
+	if (f && pr)
+		get_file(pr);
+}
+
+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line)
+{
+	struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	fput(f);
+	if (f && pr)
+		fput(pr);
+}
+
+#ifndef CONFIG_MMU
+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[],
+			       int line)
+{
+	struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	return (f && pr) ? pr : f;
+}
+
+void vmr_do_fput(struct vm_region *region, const char func[], int line)
+{
+	struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+	prfile_trace(f, pr, func, line, __func__);
+	fput(f);
+	if (f && pr)
+		fput(pr);
+}
+#endif /* !CONFIG_MMU */
diff --git a/mm/truncate.c b/mm/truncate.c
index dd9ebc1da35..8fb6c2f762b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -31,24 +31,31 @@
  * itself locked.  These unlocked entries need verification under the tree
  * lock.
  */
-static inline void __clear_shadow_entry(struct address_space *mapping,
-				pgoff_t index, void *entry)
+static bool __must_check __clear_shadow_entry(struct address_space *mapping,
+					      pgoff_t index, void *entry)
 {
 	XA_STATE(xas, &mapping->i_pages, index);
 
 	xas_set_update(&xas, workingset_update_node);
 	if (xas_load(&xas) != entry)
-		return;
+		return false;
 	xas_store(&xas, NULL);
 	mapping->nrexceptional--;
+
+	return mapping_empty(mapping);
 }
 
 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 			       void *entry)
 {
+	bool empty;
+
 	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
+	empty = __clear_shadow_entry(mapping, index, entry);
 	xa_unlock_irq(&mapping->i_pages);
+
+	if (empty)
+		inode_pages_clear(mapping->host);
 }
 
 /*
@@ -61,7 +68,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 				pgoff_t end)
 {
 	int i, j;
-	bool dax, lock;
+	bool dax, lock, empty = false;
 
 	/* Handled by shmem itself */
 	if (shmem_mapping(mapping))
@@ -96,11 +103,16 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 			continue;
 		}
 
-		__clear_shadow_entry(mapping, index, page);
+		if (__clear_shadow_entry(mapping, index, page))
+			empty = true;
 	}
 
 	if (lock)
 		xa_unlock_irq(&mapping->i_pages);
+
+	if (empty)
+		inode_pages_clear(mapping->host);
+
 	pvec->nr = j;
 }
 
@@ -300,7 +312,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t		index;
 	int		i;
 
-	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
+	if (mapping_empty(mapping))
 		goto out;
 
 	/* Offsets within partial pages */
@@ -636,6 +648,7 @@ static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)
 {
 	unsigned long flags;
+	bool empty;
 
 	if (page->mapping != mapping)
 		return 0;
@@ -648,9 +661,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 		goto failed;
 
 	BUG_ON(page_has_private(page));
-	__delete_from_page_cache(page, NULL);
+	empty = __delete_from_page_cache(page, NULL);
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 
+	if (empty)
+		inode_pages_clear(mapping->host);
+
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
 
@@ -692,7 +708,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	int ret2 = 0;
 	int did_range_unmap = 0;
 
-	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
+	if (mapping_empty(mapping))
 		goto out;
 
 	pagevec_init(&pvec);
diff --git a/mm/uksm.c b/mm/uksm.c
new file mode 100644
index 00000000000..ef068d5dc30
--- /dev/null
+++ b/mm/uksm.c
@@ -0,0 +1,5613 @@
+/*
+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
+ *
+ * This is an improvement upon KSM. Some basic data structures and routines
+ * are borrowed from ksm.c.
+ *
+ * Its new features:
+ * 1. Full system scan:
+ *      It automatically scans all user processes' anonymous VMAs. Kernel-user
+ *      interaction to submit a memory area to KSM is no longer needed.
+ *
+ * 2. Rich area detection:
+ *      It automatically detects rich areas containing abundant duplicated
+ *      pages. Rich areas are scanned at full speed, while poor areas are
+ *      sampled at a reasonable speed with very low CPU consumption.
+ *
+ * 3. Ultra Per-page scan speed improvement:
+ *      A new hash algorithm is proposed. As a result, on a machine with
+ *      Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHz DDR2 main memory, it
+ *      can scan memory areas that do not contain duplicated pages at a speed
+ *      of 627MB/sec ~ 2445MB/sec and can merge duplicated areas at a speed of
+ *      477MB/sec ~ 923MB/sec.
+ *
+ * 4. Thrashing area avoidance:
+ *      Thrashing areas (VMAs that have frequent KSM page break-outs) can be
+ *      filtered out. My benchmark shows it's more efficient than KSM's
+ *      per-page hash value based volatile page detection.
+ *
+ *
+ * 5. Misc changes upon KSM:
+ *      * It has a fully x86-optimized memcmp dedicated to 4-byte-aligned page
+ *        comparison. It's much faster than the default C version on x86.
+ *      * rmap_item now has a struct page *page member to loosely cache an
+ *        address-->page mapping, which avoids many costly calls to
+ *        follow_page().
+ *      * The VMA creation/exit procedures are hooked to let Ultra KSM know.
+ *      * try_to_merge_two_pages() can now revert a pte if it fails, so no
+ *        break_ksm is needed for this case.
+ *
+ * 6. Full Zero Page consideration (contributed by Figo Zhang):
+ *    uksmd now considers full zero pages as special pages and merges them
+ *    into a special unswappable uksm zero page.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/coredump.h>
+#include <linux/sched/cputime.h>
+#include <linux/rwsem.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/spinlock.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/memory.h>
+#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
+#include <linux/ksm.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <linux/random.h>
+#include <linux/math64.h>
+#include <linux/gcd.h>
+#include <linux/freezer.h>
+#include <linux/oom.h>
+#include <linux/numa.h>
+#include <linux/sradix-tree.h>
+
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+#ifdef CONFIG_X86
+#undef memcmp
+
+#ifdef CONFIG_X86_32
+#define memcmp memcmpx86_32
+/*
+ * Compare 4-byte-aligned address s1 and s2, with length n
+ */
+int memcmpx86_32(void *s1, void *s2, size_t n)
+{
+	size_t num = n / 4;
+	register int res;
+
+	__asm__ __volatile__
+	(
+	 "testl %3,%3\n\t"
+	 "repe; cmpsd\n\t"
+	 "je        1f\n\t"
+	 "sbbl      %0,%0\n\t"
+	 "orl       $1,%0\n"
+	 "1:"
+	 : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
+	 : "0" (0)
+	 : "cc");
+
+	return res;
+}
+
+/*
+ * Check whether the page is all zero.
+ */
+static int is_full_zero(const void *s1, size_t len)
+{
+	unsigned char same;
+
+	len /= 4;
+
+	__asm__ __volatile__
+	("repe; scasl;"
+	 "sete %0"
+	 : "=qm" (same), "+D" (s1), "+c" (len)
+	 : "a" (0)
+	 : "cc");
+
+	return same;
+}
+
+
+#elif defined(CONFIG_X86_64)
+#define memcmp memcmpx86_64
+/*
+ * Compare 8-byte-aligned address s1 and s2, with length n
+ */
+int memcmpx86_64(void *s1, void *s2, size_t n)
+{
+	size_t num = n / 8;
+	register int res;
+
+	__asm__ __volatile__
+	(
+	 "testq %q3,%q3\n\t"
+	 "repe; cmpsq\n\t"
+	 "je        1f\n\t"
+	 "sbbq      %q0,%q0\n\t"
+	 "orq       $1,%q0\n"
+	 "1:"
+	 : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
+	 : "0" (0)
+	 : "cc");
+
+	return res;
+}
+
+static int is_full_zero(const void *s1, size_t len)
+{
+	unsigned char same;
+
+	len /= 8;
+
+	__asm__ __volatile__
+	("repe; scasq;"
+	 "sete %0"
+	 : "=qm" (same), "+D" (s1), "+c" (len)
+	 : "a" (0)
+	 : "cc");
+
+	return same;
+}
+
+#endif
+#else
+static int is_full_zero(const void *s1, size_t len)
+{
+	const unsigned long *src = s1;
+	int i;
+
+	len /= sizeof(*src);
+
+	for (i = 0; i < len; i++) {
+		if (src[i])
+			return 0;
+	}
+
+	return 1;
+}
+#endif
+
+#define UKSM_RUNG_ROUND_FINISHED  (1 << 0)
+#define TIME_RATIO_SCALE	10000
+
+#define SLOT_TREE_NODE_SHIFT	8
+#define SLOT_TREE_NODE_STORE_SIZE	(1UL << SLOT_TREE_NODE_SHIFT)
+struct slot_tree_node {
+	unsigned long size;
+	struct sradix_tree_node snode;
+	void *stores[SLOT_TREE_NODE_STORE_SIZE];
+};
+
+static struct kmem_cache *slot_tree_node_cachep;
+
+static struct sradix_tree_node *slot_tree_node_alloc(void)
+{
+	struct slot_tree_node *p;
+
+	p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
+			      __GFP_NORETRY | __GFP_NOWARN);
+	if (!p)
+		return NULL;
+
+	return &p->snode;
+}
+
+static void slot_tree_node_free(struct sradix_tree_node *node)
+{
+	struct slot_tree_node *p;
+
+	p = container_of(node, struct slot_tree_node, snode);
+	kmem_cache_free(slot_tree_node_cachep, p);
+}
+
+static void slot_tree_node_extend(struct sradix_tree_node *parent,
+				  struct sradix_tree_node *child)
+{
+	struct slot_tree_node *p, *c;
+
+	p = container_of(parent, struct slot_tree_node, snode);
+	c = container_of(child, struct slot_tree_node, snode);
+
+	p->size += c->size;
+}
+
+void slot_tree_node_assign(struct sradix_tree_node *node,
+			   unsigned int index, void *item)
+{
+	struct vma_slot *slot = item;
+	struct slot_tree_node *cur;
+
+	slot->snode = node;
+	slot->sindex = index;
+
+	while (node) {
+		cur = container_of(node, struct slot_tree_node, snode);
+		cur->size += slot->pages;
+		node = node->parent;
+	}
+}
+
+void slot_tree_node_rm(struct sradix_tree_node *node, unsigned int offset)
+{
+	struct vma_slot *slot;
+	struct slot_tree_node *cur;
+	unsigned long pages;
+
+	if (node->height == 1) {
+		slot = node->stores[offset];
+		pages = slot->pages;
+	} else {
+		cur = container_of(node->stores[offset],
+				   struct slot_tree_node, snode);
+		pages = cur->size;
+	}
+
+	while (node) {
+		cur = container_of(node, struct slot_tree_node, snode);
+		cur->size -= pages;
+		node = node->parent;
+	}
+}
+
+unsigned long slot_iter_index;
+int slot_iter(void *item, unsigned long height)
+{
+	struct slot_tree_node *node;
+	struct vma_slot *slot;
+
+	if (height == 1) {
+		slot = item;
+		if (slot_iter_index < slot->pages) {
+			/*in this one*/
+			return 1;
+		} else {
+			slot_iter_index -= slot->pages;
+			return 0;
+		}
+
+	} else {
+		node = container_of(item, struct slot_tree_node, snode);
+		if (slot_iter_index < node->size) {
+			/*in this one*/
+			return 1;
+		} else {
+			slot_iter_index -= node->size;
+			return 0;
+		}
+	}
+}
+
+
+static inline void slot_tree_init_root(struct sradix_tree_root *root)
+{
+	init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT);
+	root->alloc = slot_tree_node_alloc;
+	root->free = slot_tree_node_free;
+	root->extend = slot_tree_node_extend;
+	root->assign = slot_tree_node_assign;
+	root->rm = slot_tree_node_rm;
+}
+
+void slot_tree_init(void)
+{
+	slot_tree_node_cachep = kmem_cache_create("slot_tree_node",
+				sizeof(struct slot_tree_node), 0,
+				SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+				NULL);
+}
+
+
+/* Each rung of this ladder is a list of VMAs having a same scan ratio */
+struct scan_rung {
+	//struct list_head scanned_list;
+	struct sradix_tree_root vma_root;
+	struct sradix_tree_root vma_root2;
+
+	struct vma_slot *current_scan;
+	unsigned long current_offset;
+
+	/*
+	 * The initial value for current_offset; it should loop over
+	 * [0, step - 1] so that every slot gets a chance to be scanned.
+	 */
+	unsigned long offset_init;
+	unsigned long step; /* dynamic step for current_offset */
+	unsigned int flags;
+	unsigned long pages_to_scan;
+	//unsigned long fully_scanned_slots;
+	/*
+	 * a little bit tricky - if cpu_ratio > 0, then the value is the
+	 * cpu time ratio it can spend in rung_i for every scan
+	 * period. If < 0, then it is the cpu time ratio relative to the
+	 * max cpu percentage the user specified. Both are in units of
+	 * 1/TIME_RATIO_SCALE.
+	 */
+	int cpu_ratio;
+
+	/*
+	 * How long it will take for all slots in this rung to be fully
+	 * How long will it take for all slots in this rung to be fully
+	 * it's fully scanned.
+	 */
+	unsigned int cover_msecs;
+	//unsigned long vma_num;
+	//unsigned long pages; /* Sum of all slot's pages in rung */
+};
+
+/**
+ * node of either the stable or unstable rbtree
+ *
+ */
+struct tree_node {
+	struct rb_node node; /* link in the main (un)stable rbtree */
+	struct rb_root sub_root; /* rb_root for sublevel collision rbtree */
+	u32 hash;
+	unsigned long count; /* TODO: merged with sub_root */
+	struct list_head all_list; /* all tree nodes in stable/unstable tree */
+};
+
+/**
+ * struct stable_node - node of the stable rbtree
+ * @node: rb node of this ksm page in the stable tree
+ * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
+ */
+struct stable_node {
+	struct rb_node node; /* link in sub-rbtree */
+	struct tree_node *tree_node; /* its tree_node root in the stable tree, NULL if it's in the hell list */
+	struct hlist_head hlist;
+	unsigned long kpfn;
+	u32 hash_max; /* if ==0 then it's not been calculated yet */
+	struct list_head all_list; /* in a list for all stable nodes */
+};
+
+/**
+ * struct node_vma - group rmap_items linked in the same stable
+ * node together.
+ */
+struct node_vma {
+	union {
+		struct vma_slot *slot;
+		unsigned long key;  /* slot is used as key sorted on hlist */
+	};
+	struct hlist_node hlist;
+	struct hlist_head rmap_hlist;
+	struct stable_node *head;
+};
+
+/**
+ * struct rmap_item - reverse mapping item for virtual addresses
+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
+ * @mm: the memory structure this rmap_item is pointing into
+ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
+ * @node: rb node of this rmap_item in the unstable tree
+ * @head: pointer to stable_node heading this list in the stable tree
+ * @hlist: link into hlist of rmap_items hanging off that stable_node
+ */
+struct rmap_item {
+	struct vma_slot *slot;
+	struct page *page;
+	unsigned long address;	/* + low bits used for flags below */
+	unsigned long hash_round;
+	unsigned long entry_index;
+	union {
+		struct {/* when in unstable tree */
+			struct rb_node node;
+			struct tree_node *tree_node;
+			u32 hash_max;
+		};
+		struct { /* when in stable tree */
+			struct node_vma *head;
+			struct hlist_node hlist;
+			struct anon_vma *anon_vma;
+		};
+	};
+} __aligned(4);
+
+struct rmap_list_entry {
+	union {
+		struct rmap_item *item;
+		unsigned long addr;
+	};
+	/* lowest bit is used for is_addr tag */
+} __aligned(4); /* 4-aligned to fit into pages */
+
+
+/* Basic data structure definition ends */
+
+
+/*
+ * Flags for rmap_item to judge if it's listed in the stable/unstable tree.
+ * The flags use the low bits of rmap_item.address
+ */
+#define UNSTABLE_FLAG	0x1
+#define STABLE_FLAG	0x2
+#define get_rmap_addr(x)	((x)->address & PAGE_MASK)
+
+/*
+ * rmap_list_entry helpers
+ */
+#define IS_ADDR_FLAG	1
+#define is_addr(ptr)		((unsigned long)(ptr) & IS_ADDR_FLAG)
+#define set_is_addr(ptr)	((ptr) |= IS_ADDR_FLAG)
+#define get_clean_addr(ptr)	(((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG))
+
+
+/*
+ * High speed caches for frequently allocated and freed structs
+ */
+static struct kmem_cache *rmap_item_cache;
+static struct kmem_cache *stable_node_cache;
+static struct kmem_cache *node_vma_cache;
+static struct kmem_cache *vma_slot_cache;
+static struct kmem_cache *tree_node_cache;
+#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\
+		sizeof(struct __struct), __alignof__(struct __struct),\
+		(__flags), NULL)
+
+/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */
+#define SCAN_LADDER_SIZE 4
+static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE];
+
+/* The evaluation rounds uksmd has finished */
+static unsigned long long uksm_eval_round = 1;
+
+/*
+ * we add 1 to this var when we consider we should rebuild the whole
+ * unstable tree.
+ */
+static unsigned long uksm_hash_round = 1;
+
+/*
+ * How many times the whole memory is scanned.
+ */
+static unsigned long long fully_scanned_round = 1;
+
+/* The total number of virtual pages of all vma slots */
+static u64 uksm_pages_total;
+
+/* The number of pages that have been scanned since startup */
+static u64 uksm_pages_scanned;
+
+static u64 scanned_virtual_pages;
+
+/* The number of pages that have been scanned since the last encode_benefit call */
+static u64 uksm_pages_scanned_last;
+
+/* If the scanned number is too large, we encode it here */
+static u64 pages_scanned_stored;
+
+static unsigned long pages_scanned_base;
+
+/* The number of nodes in the stable tree */
+static unsigned long uksm_pages_shared;
+
+/* The number of page slots additionally sharing those nodes */
+static unsigned long uksm_pages_sharing;
+
+/* The number of nodes in the unstable tree */
+static unsigned long uksm_pages_unshared;
+
+/*
+ * Milliseconds ksmd should sleep between scans,
+ * >= 100ms to be consistent with
+ * scan_time_to_sleep_msec()
+ */
+static unsigned int uksm_sleep_jiffies;
+
+/* The real value for the uksmd next sleep */
+static unsigned int uksm_sleep_real;
+
+/* Saved value for user input uksm_sleep_jiffies when it's enlarged */
+static unsigned int uksm_sleep_saved;
+
+/* Max percentage of cpu utilization ksmd can take to scan in one batch */
+static unsigned int uksm_max_cpu_percentage;
+
+static int uksm_cpu_governor;
+
+static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" };
+
+struct uksm_cpu_preset_s {
+	int cpu_ratio[SCAN_LADDER_SIZE];
+	unsigned int cover_msecs[SCAN_LADDER_SIZE];
+	unsigned int max_cpu; /* percentage */
+};
+
+struct uksm_cpu_preset_s uksm_cpu_preset[4] = {
+	{ {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95},
+	{ {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50},
+	{ {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20},
+	{ {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1},
+};
+
+/* The default value for uksm_ema_page_time if it's not initialized */
+#define UKSM_PAGE_TIME_DEFAULT	500
+
+/* cost to scan one page by exponential moving average, in nsecs */
+static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
+
+/* The exponential moving average alpha weight, in percentage. */
+#define EMA_ALPHA	20
+
+/*
+ * The threshold used to filter out thrashing areas.
+ * If it is 0, filtering is disabled; otherwise it is the upper-bound
+ * percentage of the thrashing ratio of all areas. Any area with a bigger
+ * thrashing ratio will be considered as having a zero duplication ratio.
+ */
+static unsigned int uksm_thrash_threshold = 50;
+
+/* How much dedup ratio is considered to be abundant */
+static unsigned int uksm_abundant_threshold = 10;
+
+/* All slots having merged pages in this eval round. */
+struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup);
+
+/* How many times the ksmd has slept since startup */
+static unsigned long long uksm_sleep_times;
+
+#define UKSM_RUN_STOP	0
+#define UKSM_RUN_MERGE	1
+static unsigned int uksm_run = 1;
+
+static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait);
+static DEFINE_MUTEX(uksm_thread_mutex);
+
+/*
+ * List vma_slot_new is for newly created vma_slot waiting to be added by
+ * ksmd. If one cannot be added (e.g. because it's too small), it's moved to
+ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding
+ * VMA has been removed/freed.
+ */
+struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new);
+struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd);
+struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del);
+static DEFINE_SPINLOCK(vma_slot_list_lock);
+
+/* The unstable tree heads */
+static struct rb_root root_unstable_tree = RB_ROOT;
+
+/*
+ * All tree_nodes are in a list to be freed at once when unstable tree is
+ * freed after each scan round.
+ */
+static struct list_head unstable_tree_node_list =
+				LIST_HEAD_INIT(unstable_tree_node_list);
+
+/* List contains all stable nodes */
+static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list);
+
+/*
+ * When the hash strength is changed, the stable tree must be delta_hashed and
+ * re-structured. We use two set of below structs to speed up the
+ * re-structuring of stable tree.
+ */
+static struct list_head
+stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]),
+			    LIST_HEAD_INIT(stable_tree_node_list[1])};
+
+static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0];
+static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT};
+static struct rb_root *root_stable_treep = &root_stable_tree[0];
+static unsigned long stable_tree_index;
+
+/* The hash strength needed to hash a full page */
+#define HASH_STRENGTH_FULL		(PAGE_SIZE / sizeof(u32))
+
+/* The hash strength needed for loop-back hashing */
+#define HASH_STRENGTH_MAX		(HASH_STRENGTH_FULL + 10)
+
+/* The random offsets in a page */
+static u32 *random_nums;
+
+/* The hash strength */
+static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4;
+
+/* The delta value each time the hash strength increases or decreases */
+static unsigned long hash_strength_delta;
+#define HASH_STRENGTH_DELTA_MAX	5
+
+/* The time we have saved due to random_sample_hash */
+static u64 rshash_pos;
+
+/* The time we have wasted due to hash collision */
+static u64 rshash_neg;
+
+struct uksm_benefit {
+	u64 pos;
+	u64 neg;
+	u64 scanned;
+	unsigned long base;
+} benefit;
+
+/*
+ * The relative cost of memcmp, compared to 1 time unit of random sample
+ * hash, this value is tested when ksm module is initialized
+ */
+static unsigned long memcmp_cost;
+
+static unsigned long  rshash_neg_cont_zero;
+static unsigned long  rshash_cont_obscure;
+
+/* The possible states of hash strength adjustment heuristic */
+enum rshash_states {
+		RSHASH_STILL,
+		RSHASH_TRYUP,
+		RSHASH_TRYDOWN,
+		RSHASH_NEW,
+		RSHASH_PRE_STILL,
+};
+
+/* The possible direction we are about to adjust hash strength */
+enum rshash_direct {
+	GO_UP,
+	GO_DOWN,
+	OBSCURE,
+	STILL,
+};
+
+/* random sampling hash state machine */
+static struct {
+	enum rshash_states state;
+	enum rshash_direct pre_direct;
+	u8 below_count;
+	/* Keep a lookup window of size 5; if above_count/below_count > 3
+	 * in this window, we stop trying.
+	 */
+	u8 lookup_window_index;
+	u64 stable_benefit;
+	unsigned long turn_point_down;
+	unsigned long turn_benefit_down;
+	unsigned long turn_point_up;
+	unsigned long turn_benefit_up;
+	unsigned long stable_point;
+} rshash_state;
+
+/* zero page hash table, hash_strength [0 ~ HASH_STRENGTH_MAX] */
+static u32 *zero_hash_table;
+
+static inline struct node_vma *alloc_node_vma(void)
+{
+	struct node_vma *node_vma;
+
+	node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
+				     __GFP_NORETRY | __GFP_NOWARN);
+	if (node_vma) {
+		INIT_HLIST_HEAD(&node_vma->rmap_hlist);
+		INIT_HLIST_NODE(&node_vma->hlist);
+	}
+	return node_vma;
+}
+
+static inline void free_node_vma(struct node_vma *node_vma)
+{
+	kmem_cache_free(node_vma_cache, node_vma);
+}
+
+
+static inline struct vma_slot *alloc_vma_slot(void)
+{
+	struct vma_slot *slot;
+
+	/*
+	 * In case uksm is not initialized by now.
+	 * Oops, we need to consider the call site of uksm_init() in the future.
+	 */
+	if (!vma_slot_cache)
+		return NULL;
+
+	slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
+				 __GFP_NORETRY | __GFP_NOWARN);
+	if (slot) {
+		INIT_LIST_HEAD(&slot->slot_list);
+		INIT_LIST_HEAD(&slot->dedup_list);
+		slot->flags |= UKSM_SLOT_NEED_RERAND;
+	}
+	return slot;
+}
+
+static inline void free_vma_slot(struct vma_slot *vma_slot)
+{
+	kmem_cache_free(vma_slot_cache, vma_slot);
+}
+
+
+
+static inline struct rmap_item *alloc_rmap_item(void)
+{
+	struct rmap_item *rmap_item;
+
+	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+				      __GFP_NORETRY | __GFP_NOWARN);
+	if (rmap_item) {
+		/* BUG if the lowest bit is not clear; it is reserved for flag use */
+		BUG_ON(is_addr(rmap_item));
+	}
+	return rmap_item;
+}
+
+static inline void free_rmap_item(struct rmap_item *rmap_item)
+{
+	rmap_item->slot = NULL;	/* debug safety */
+	kmem_cache_free(rmap_item_cache, rmap_item);
+}
+
+static inline struct stable_node *alloc_stable_node(void)
+{
+	struct stable_node *node;
+
+	node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
+				__GFP_NORETRY | __GFP_NOWARN);
+	if (!node)
+		return NULL;
+
+	INIT_HLIST_HEAD(&node->hlist);
+	list_add(&node->all_list, &stable_node_list);
+	return node;
+}
+
+static inline void free_stable_node(struct stable_node *stable_node)
+{
+	list_del(&stable_node->all_list);
+	kmem_cache_free(stable_node_cache, stable_node);
+}
+
+static inline struct tree_node *alloc_tree_node(struct list_head *list)
+{
+	struct tree_node *node;
+
+	node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
+				 __GFP_NORETRY | __GFP_NOWARN);
+	if (!node)
+		return NULL;
+
+	list_add(&node->all_list, list);
+	return node;
+}
+
+static inline void free_tree_node(struct tree_node *node)
+{
+	list_del(&node->all_list);
+	kmem_cache_free(tree_node_cache, node);
+}
+
+static void uksm_drop_anon_vma(struct rmap_item *rmap_item)
+{
+	struct anon_vma *anon_vma = rmap_item->anon_vma;
+
+	put_anon_vma(anon_vma);
+}
+
+
+/**
+ * Remove a stable node from the stable tree; may unlink it from its tree_node
+ * and may remove its parent tree_node if no other stable node is pending.
+ *
+ * @stable_node	    The node that needs to be removed
+ * @unlink_rb	    Will this node be unlinked from the rbtree?
+ * @remove_tree_node  Will its tree_node be removed if empty?
+ */
+static void remove_node_from_stable_tree(struct stable_node *stable_node,
+					 int unlink_rb,  int remove_tree_node)
+{
+	struct node_vma *node_vma;
+	struct rmap_item *rmap_item;
+	struct hlist_node *n;
+
+	if (!hlist_empty(&stable_node->hlist)) {
+		hlist_for_each_entry_safe(node_vma, n,
+					  &stable_node->hlist, hlist) {
+			hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
+				uksm_pages_sharing--;
+
+				uksm_drop_anon_vma(rmap_item);
+				rmap_item->address &= PAGE_MASK;
+			}
+			free_node_vma(node_vma);
+			cond_resched();
+		}
+
+		/* the last one is counted as shared */
+		uksm_pages_shared--;
+		uksm_pages_sharing++;
+	}
+
+	if (stable_node->tree_node && unlink_rb) {
+		rb_erase(&stable_node->node,
+			 &stable_node->tree_node->sub_root);
+
+		if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) &&
+		    remove_tree_node) {
+			rb_erase(&stable_node->tree_node->node,
+				 root_stable_treep);
+			free_tree_node(stable_node->tree_node);
+		} else {
+			stable_node->tree_node->count--;
+		}
+	}
+
+	free_stable_node(stable_node);
+}
+
+
+/*
+ * get_uksm_page: checks if the page indicated by the stable node
+ * is still its ksm page, despite having held no reference to it.
+ * In which case we can trust the content of the page, and it
+ * returns the gotten page; but if the page has now been zapped,
+ * remove the stale node from the stable tree and return NULL.
+ *
+ * You would expect the stable_node to hold a reference to the ksm page.
+ * But if it increments the page's count, swapping out has to wait for
+ * ksmd to come around again before it can free the page, which may take
+ * seconds or even minutes: much too unresponsive.  So instead we use a
+ * "keyhole reference": access to the ksm page from the stable node peeps
+ * out through its keyhole to see if that page still holds the right key,
+ * pointing back to this stable node.  This relies on freeing a PageAnon
+ * page to reset its page->mapping to NULL, and relies on no other use of
+ * a page to put something that might look like our key in page->mapping.
+ *
+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
+ * but this is different - made simpler by uksm_thread_mutex being held, but
+ * interesting for assuming that no other use of the struct page could ever
+ * put our expected_mapping into page->mapping (or a field of the union which
+ * coincides with page->mapping).  The RCU calls are not for KSM at all, but
+ * to keep the page_count protocol described with page_cache_get_speculative.
+ *
+ * Note: it is possible that get_uksm_page() will return NULL one moment,
+ * then page the next, if the page is in between page_freeze_refs() and
+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
+ * is on its way to being freed; but it is an anomaly to bear in mind.
+ *
+ * @unlink_rb:			if the removal of this node will first unlink it from
+ * its rbtree. stable_node_reinsert will prevent this when restructuring the
+ * node from its old tree.
+ *
+ * @remove_tree_node:	if this is the last one of its tree_node, will the
+ * tree_node be freed? If we are inserting a stable node, this tree_node may
+ * be reused, so don't free it.
+ */
+static struct page *get_uksm_page(struct stable_node *stable_node,
+				 int unlink_rb, int remove_tree_node)
+{
+	struct page *page;
+	void *expected_mapping;
+	unsigned long kpfn;
+
+	expected_mapping = (void *)((unsigned long)stable_node |
+				    PAGE_MAPPING_KSM);
+again:
+	kpfn = READ_ONCE(stable_node->kpfn);
+	page = pfn_to_page(kpfn);
+
+	/*
+	 * page is computed from kpfn, so on most architectures reading
+	 * page->mapping is naturally ordered after reading node->kpfn,
+	 * but on Alpha we need to be more careful.
+	 */
+	smp_read_barrier_depends();
+
+	if (READ_ONCE(page->mapping) != expected_mapping)
+		goto stale;
+
+	/*
+	 * We cannot do anything with the page while its refcount is 0.
+	 * Usually 0 means free, or tail of a higher-order page: in which
+	 * case this node is no longer referenced, and should be freed;
+	 * however, it might mean that the page is under page_freeze_refs().
+	 * The __remove_mapping() case is easy, again the node is now stale;
+	 * but if page is swapcache in migrate_page_move_mapping(), it might
+	 * still be our page, in which case it's essential to keep the node.
+	 */
+	while (!get_page_unless_zero(page)) {
+		/*
+		 * Another check for page->mapping != expected_mapping would
+		 * work here too.  We have chosen the !PageSwapCache test to
+		 * optimize the common case, when the page is or is about to
+		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
+		 * in the freeze_refs section of __remove_mapping(); but Anon
+		 * page->mapping reset to NULL later, in free_pages_prepare().
+		 */
+		if (!PageSwapCache(page))
+			goto stale;
+		cpu_relax();
+	}
+
+	if (READ_ONCE(page->mapping) != expected_mapping) {
+		put_page(page);
+		goto stale;
+	}
+
+	lock_page(page);
+	if (READ_ONCE(page->mapping) != expected_mapping) {
+		unlock_page(page);
+		put_page(page);
+		goto stale;
+	}
+	unlock_page(page);
+	return page;
+stale:
+	/*
+	 * We come here from above when page->mapping or !PageSwapCache
+	 * suggests that the node is stale; but it might be under migration.
+	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
+	 * before checking whether node->kpfn has been changed.
+	 */
+	smp_rmb();
+	if (stable_node->kpfn != kpfn)
+		goto again;
+
+	remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node);
+
+	return NULL;
+}
+
+/*
+ * Removing rmap_item from stable or unstable tree.
+ * This function will clean the information from the stable/unstable tree.
+ */
+static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+{
+	if (rmap_item->address & STABLE_FLAG) {
+		struct stable_node *stable_node;
+		struct node_vma *node_vma;
+		struct page *page;
+
+		node_vma = rmap_item->head;
+		stable_node = node_vma->head;
+		page = get_uksm_page(stable_node, 1, 1);
+		if (!page)
+			goto out;
+
+		/*
+		 * page lock is needed because it's racing with
+		 * try_to_unmap_ksm(), etc.
+		 */
+		lock_page(page);
+		hlist_del(&rmap_item->hlist);
+
+		if (hlist_empty(&node_vma->rmap_hlist)) {
+			hlist_del(&node_vma->hlist);
+			free_node_vma(node_vma);
+		}
+		unlock_page(page);
+
+		put_page(page);
+		if (hlist_empty(&stable_node->hlist)) {
+			/* do NOT call remove_node_from_stable_tree() here,
+			 * it's possible for a forked rmap_item to remain
+			 * outside the stable tree while the in-tree
+			 * rmap_items were deleted.
+			 */
+			uksm_pages_shared--;
+		} else
+			uksm_pages_sharing--;
+
+
+		uksm_drop_anon_vma(rmap_item);
+	} else if (rmap_item->address & UNSTABLE_FLAG) {
+		if (rmap_item->hash_round == uksm_hash_round) {
+
+			rb_erase(&rmap_item->node,
+				 &rmap_item->tree_node->sub_root);
+			if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) {
+				rb_erase(&rmap_item->tree_node->node,
+					 &root_unstable_tree);
+
+				free_tree_node(rmap_item->tree_node);
+			} else
+				rmap_item->tree_node->count--;
+		}
+		uksm_pages_unshared--;
+	}
+
+	rmap_item->address &= PAGE_MASK;
+	rmap_item->hash_max = 0;
+
+out:
+	cond_resched();		/* we're called from many long loops */
+}
+
+static inline int slot_in_uksm(struct vma_slot *slot)
+{
+	return list_empty(&slot->slot_list);
+}
+
+/*
+ * Test if the mm is exiting
+ */
+static inline bool uksm_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
+static inline unsigned long vma_pool_size(struct vma_slot *slot)
+{
+	return round_up(sizeof(struct rmap_list_entry) * slot->pages,
+			PAGE_SIZE) >> PAGE_SHIFT;
+}
+
+#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta))
+
+/* must be done with sem locked */
+static int slot_pool_alloc(struct vma_slot *slot)
+{
+	unsigned long pool_size;
+
+	if (slot->rmap_list_pool)
+		return 0;
+
+	pool_size = vma_pool_size(slot);
+	slot->rmap_list_pool = kcalloc(pool_size, sizeof(struct page *),
+				       GFP_KERNEL);
+	if (!slot->rmap_list_pool)
+		return -ENOMEM;
+
+	slot->pool_counts = kcalloc(pool_size, sizeof(unsigned int),
+				    GFP_KERNEL);
+	if (!slot->pool_counts) {
+		kfree(slot->rmap_list_pool);
+		return -ENOMEM;
+	}
+
+	slot->pool_size = pool_size;
+	BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages));
+	slot->flags |= UKSM_SLOT_IN_UKSM;
+	uksm_pages_total += slot->pages;
+
+	return 0;
+}
+
+/*
+ * Called after vma is unlinked from its mm
+ */
+void uksm_remove_vma(struct vm_area_struct *vma)
+{
+	struct vma_slot *slot;
+
+	if (!vma->uksm_vma_slot)
+		return;
+
+	spin_lock(&vma_slot_list_lock);
+	slot = vma->uksm_vma_slot;
+	if (!slot)
+		goto out;
+
+	if (slot_in_uksm(slot)) {
+		/**
+		 * This slot has been added by ksmd, so move it to the del list
+		 * and wait for ksmd to free it.
+		 */
+		list_add_tail(&slot->slot_list, &vma_slot_del);
+	} else {
+		/**
+		 * It's still on the new list. It's OK to free the slot directly.
+		 */
+		list_del(&slot->slot_list);
+		free_vma_slot(slot);
+	}
+out:
+	vma->uksm_vma_slot = NULL;
+	spin_unlock(&vma_slot_list_lock);
+}
+
+/**
+ * Need to do two things:
+ * 1. check if the slot was moved to the del list
+ * 2. make sure the mmap_sem is manipulated under a valid vma.
+ *
+ * My concern here is that in some cases, this may cause
+ * vma_slot_list_lock() waiters to be serialized further by some
+ * sem->wait_lock; can this really be expensive?
+ *
+ *
+ * @return
+ * 0: if successfully locked mmap_sem
+ * -ENOENT: this slot was moved to del list
+ * -EBUSY: vma lock failed
+ */
+static int try_down_read_slot_mmap_sem(struct vma_slot *slot)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	struct rw_semaphore *sem;
+
+	spin_lock(&vma_slot_list_lock);
+
+	/* the slot_list was removed and re-inited from the new list when it
+	 * entered uksm; if not empty now, it was moved to the del list
+	 */
+	if (!slot_in_uksm(slot)) {
+		spin_unlock(&vma_slot_list_lock);
+		return -ENOENT;
+	}
+
+	BUG_ON(slot->pages != vma_pages(slot->vma));
+	/* Ok, vma still valid */
+	vma = slot->vma;
+	mm = vma->vm_mm;
+	sem = &mm->mmap_sem;
+
+	if (uksm_test_exit(mm)) {
+		spin_unlock(&vma_slot_list_lock);
+		return -ENOENT;
+	}
+
+	if (down_read_trylock(sem)) {
+		spin_unlock(&vma_slot_list_lock);
+		if (slot_pool_alloc(slot)) {
+			uksm_remove_vma(vma);
+			up_read(sem);
+			return -ENOENT;
+		}
+		return 0;
+	}
+
+	spin_unlock(&vma_slot_list_lock);
+	return -EBUSY;
+}
+
+static inline unsigned long
+vma_page_address(struct page *page, struct vm_area_struct *vma)
+{
+	pgoff_t pgoff = page->index;
+	unsigned long address;
+
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
+		/* page should be within @vma mapping range */
+		return -EFAULT;
+	}
+	return address;
+}
+
+
+/* return 0 on success with the item's mmap_sem locked */
+static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
+{
+	struct mm_struct *mm;
+	struct vma_slot *slot = item->slot;
+	int err = -EINVAL;
+
+	struct page *page;
+
+	/*
+	 * try_down_read_slot_mmap_sem() returns non-zero if the slot
+	 * has been removed by uksm_remove_vma().
+	 */
+	if (try_down_read_slot_mmap_sem(slot))
+		return -EBUSY;
+
+	mm = slot->vma->vm_mm;
+
+	if (uksm_test_exit(mm))
+		goto failout_up;
+
+	page = item->page;
+	rcu_read_lock();
+	if (!get_page_unless_zero(page)) {
+		rcu_read_unlock();
+		goto failout_up;
+	}
+
+	/* No need to consider huge page here. */
+	if (item->slot->vma->anon_vma != page_anon_vma(page) ||
+	    vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
+		/*
+		 * TODO:
+		 * should we release this item because of its stale page
+		 * mapping?
+		 */
+		put_page(page);
+		rcu_read_unlock();
+		goto failout_up;
+	}
+	rcu_read_unlock();
+	return 0;
+
+failout_up:
+	up_read(&mm->mmap_sem);
+	return err;
+}
+
+/*
+ * What kind of VMA is considered ?
+ */
+static inline int vma_can_enter(struct vm_area_struct *vma)
+{
+	return uksm_flags_can_scan(vma->vm_flags);
+}
+
+/*
+ * Called whenever a fresh new vma is created. A new vma_slot is
+ * created and inserted into a global list. Must be called after
+ * the vma is inserted into its mm.
+ */
+void uksm_vma_add_new(struct vm_area_struct *vma)
+{
+	struct vma_slot *slot;
+
+	if (!vma_can_enter(vma)) {
+		vma->uksm_vma_slot = NULL;
+		return;
+	}
+
+	slot = alloc_vma_slot();
+	if (!slot) {
+		vma->uksm_vma_slot = NULL;
+		return;
+	}
+
+	vma->uksm_vma_slot = slot;
+	vma->vm_flags |= VM_MERGEABLE;
+	slot->vma = vma;
+	slot->mm = vma->vm_mm;
+	slot->ctime_j = jiffies;
+	slot->pages = vma_pages(vma);
+	spin_lock(&vma_slot_list_lock);
+	list_add_tail(&slot->slot_list, &vma_slot_new);
+	spin_unlock(&vma_slot_list_lock);
+}
+
+/*   32/3 < they < 32/2 */
+#define shiftl	8
+#define shiftr	12
+
+#define HASH_FROM_TO(from, to)			\
+for (index = from; index < to; index++) {	\
+	pos = random_nums[index];		\
+	hash += key[pos];			\
+	hash += (hash << shiftl);		\
+	hash ^= (hash >> shiftr);		\
+}
+
+
+#define HASH_FROM_DOWN_TO(from, to)		\
+for (index = from - 1; index >= to; index--) {	\
+	hash ^= (hash >> shiftr);		\
+	hash ^= (hash >> (shiftr*2));		\
+	hash -= (hash << shiftl);		\
+	hash += (hash << (shiftl*2));		\
+	pos = random_nums[index];		\
+	hash -= key[pos];			\
+}
+
+/*
+ * The main random sample hash function.
+ */
+static u32 random_sample_hash(void *addr, u32 hash_strength)
+{
+	u32 hash = 0xdeadbeef;
+	int index, pos, loop = hash_strength;
+	u32 *key = (u32 *)addr;
+
+	if (loop > HASH_STRENGTH_FULL)
+		loop = HASH_STRENGTH_FULL;
+
+	HASH_FROM_TO(0, loop);
+
+	if (hash_strength > HASH_STRENGTH_FULL) {
+		loop = hash_strength - HASH_STRENGTH_FULL;
+		HASH_FROM_TO(0, loop);
+	}
+
+	return hash;
+}
+
+
+/**
+ * It's used when hash strength is adjusted
+ *
+ * @addr The page's virtual address
+ * @from The original hash strength
+ * @to   The hash strength changed to
+ * @hash The hash value generated with the "from" hash strength
+ *
+ * return the hash value
+ */
+static u32 delta_hash(void *addr, int from, int to, u32 hash)
+{
+	u32 *key = (u32 *)addr;
+	int index, pos; /* make sure they are int type */
+
+	if (to > from) {
+		if (from >= HASH_STRENGTH_FULL) {
+			from -= HASH_STRENGTH_FULL;
+			to -= HASH_STRENGTH_FULL;
+			HASH_FROM_TO(from, to);
+		} else if (to <= HASH_STRENGTH_FULL) {
+			HASH_FROM_TO(from, to);
+		} else {
+			HASH_FROM_TO(from, HASH_STRENGTH_FULL);
+			HASH_FROM_TO(0, to - HASH_STRENGTH_FULL);
+		}
+	} else {
+		if (from <= HASH_STRENGTH_FULL) {
+			HASH_FROM_DOWN_TO(from, to);
+		} else if (to >= HASH_STRENGTH_FULL) {
+			from -= HASH_STRENGTH_FULL;
+			to -= HASH_STRENGTH_FULL;
+			HASH_FROM_DOWN_TO(from, to);
+		} else {
+			HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0);
+			HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to);
+		}
+	}
+
+	return hash;
+}
+
+/**
+ *
+ * Called when rshash_pos or rshash_neg is about to overflow, or a scan round
+ * has finished.
+ *
+ * return 0 if no page has been scanned since the last call, 1 otherwise.
+ */
+static inline int encode_benefit(void)
+{
+	u64 scanned_delta, pos_delta, neg_delta;
+	unsigned long base = benefit.base;
+
+	scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last;
+
+	if (!scanned_delta)
+		return 0;
+
+	scanned_delta >>= base;
+	pos_delta = rshash_pos >> base;
+	neg_delta = rshash_neg >> base;
+
+	if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) ||
+	    CAN_OVERFLOW_U64(benefit.neg, neg_delta) ||
+	    CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) {
+		benefit.scanned >>= 1;
+		benefit.neg >>= 1;
+		benefit.pos >>= 1;
+		benefit.base++;
+		scanned_delta >>= 1;
+		pos_delta >>= 1;
+		neg_delta >>= 1;
+	}
+
+	benefit.pos += pos_delta;
+	benefit.neg += neg_delta;
+	benefit.scanned += scanned_delta;
+
+	BUG_ON(!benefit.scanned);
+
+	rshash_pos = rshash_neg = 0;
+	uksm_pages_scanned_last = uksm_pages_scanned;
+
+	return 1;
+}
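+
+/*
+ * The overflow handling above keeps the three counters comparable by
+ * tracking a shared scale: a value v stored at scale 'base' stands for
+ * roughly v << base.  A self-contained userspace sketch of the core of the
+ * scheme for a single counter (struct scaled and scaled_add() are
+ * illustrative names only):
+ *
+ *	#include <stdint.h>
+ *
+ *	struct scaled { uint64_t val; unsigned int base; };
+ *
+ *	static void scaled_add(struct scaled *s, uint64_t delta)
+ *	{
+ *		delta >>= s->base;
+ *		if (delta > UINT64_MAX - s->val) {	// about to overflow
+ *			s->val >>= 1;
+ *			delta >>= 1;
+ *			s->base++;
+ *		}
+ *		s->val += delta;
+ *	}
+ */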
+
+static inline void reset_benefit(void)
+{
+	benefit.pos = 0;
+	benefit.neg = 0;
+	benefit.base = 0;
+	benefit.scanned = 0;
+}
+
+static inline void inc_rshash_pos(unsigned long delta)
+{
+	if (CAN_OVERFLOW_U64(rshash_pos, delta))
+		encode_benefit();
+
+	rshash_pos += delta;
+}
+
+static inline void inc_rshash_neg(unsigned long delta)
+{
+	if (CAN_OVERFLOW_U64(rshash_neg, delta))
+		encode_benefit();
+
+	rshash_neg += delta;
+}
+
+
+static inline u32 page_hash(struct page *page, unsigned long hash_strength,
+			    int cost_accounting)
+{
+	u32 val;
+	unsigned long delta;
+
+	void *addr = kmap_atomic(page);
+
+	val = random_sample_hash(addr, hash_strength);
+	kunmap_atomic(addr);
+
+	if (cost_accounting) {
+		if (hash_strength < HASH_STRENGTH_FULL)
+			delta = HASH_STRENGTH_FULL - hash_strength;
+		else
+			delta = 0;
+
+		inc_rshash_pos(delta);
+	}
+
+	return val;
+}
+
+static int memcmp_pages_with_cost(struct page *page1, struct page *page2,
+			int cost_accounting)
+{
+	char *addr1, *addr2;
+	int ret;
+
+	addr1 = kmap_atomic(page1);
+	addr2 = kmap_atomic(page2);
+	ret = memcmp(addr1, addr2, PAGE_SIZE);
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
+
+	if (cost_accounting)
+		inc_rshash_neg(memcmp_cost);
+
+	return ret;
+}
+
+static inline int pages_identical_with_cost(struct page *page1, struct page *page2)
+{
+	return !memcmp_pages_with_cost(page1, page2, 0);
+}
+
+static inline int is_page_full_zero(struct page *page)
+{
+	char *addr;
+	int ret;
+
+	addr = kmap_atomic(page);
+	ret = is_full_zero(addr, PAGE_SIZE);
+	kunmap_atomic(addr);
+
+	return ret;
+}
+
+static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+			      pte_t *orig_pte, pte_t *old_pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+	};
+	struct mmu_notifier_range range;
+	int swapped;
+	int err = -EFAULT;
+
+	pvmw.address = page_address_in_vma(page, vma);
+	if (pvmw.address == -EFAULT)
+		goto out;
+
+	BUG_ON(PageTransCompound(page));
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, pvmw.address,
+				pvmw.address + PAGE_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+
+	if (!page_vma_mapped_walk(&pvmw))
+		goto out_mn;
+	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
+		goto out_unlock;
+
+	if (old_pte)
+		*old_pte = *pvmw.pte;
+
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
+	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || mm_tlb_flush_pending(mm)) {
+		pte_t entry;
+
+		swapped = PageSwapCache(page);
+		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+		/*
+		 * Ok this is tricky: when get_user_pages_fast() runs it doesn't
+		 * take any lock, therefore the check that we are going to make
+		 * with the page count against the map count is racy and
+		 * O_DIRECT can happen right after the check.
+		 * So we clear the pte and flush the tlb before the check;
+		 * this assures us that no O_DIRECT can happen after the check
+		 * or in the middle of the check.
+		 */
+		entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+		/*
+		 * Check that no O_DIRECT or similar I/O is in progress on the
+		 * page
+		 */
+		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
+			goto out_unlock;
+		}
+		if (pte_dirty(entry))
+			set_page_dirty(page);
+
+		if (pte_protnone(entry))
+			entry = pte_mkclean(pte_clear_savedwrite(entry));
+		else
+			entry = pte_mkclean(pte_wrprotect(entry));
+
+		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
+	}
+	*orig_pte = *pvmw.pte;
+	err = 0;
+
+out_unlock:
+	page_vma_mapped_walk_done(&pvmw);
+out_mn:
+	mmu_notifier_invalidate_range_end(&range);
+out:
+	return err;
+}
+
+#define MERGE_ERR_PGERR		1 /* the page is invalid, cannot continue */
+#define MERGE_ERR_COLLI		2 /* there is a collision */
+#define MERGE_ERR_COLLI_MAX	3 /* collision at the max hash strength */
+#define MERGE_ERR_CHANGED	4 /* the page has changed since last hash */
+
+
+/**
+ * replace_page - replace page in vma by new ksm page
+ * @vma:      vma that holds the pte pointing to page
+ * @page:     the page we are replacing by kpage
+ * @kpage:    the ksm page we replace page by
+ * @orig_pte: the original value of the pte
+ *
+ * Returns 0 on success, MERGE_ERR_PGERR on failure.
+ */
+static int replace_page(struct vm_area_struct *vma, struct page *page,
+			struct page *kpage, pte_t orig_pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct mmu_notifier_range range;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+	spinlock_t *ptl;
+	pte_t entry;
+
+	unsigned long addr;
+	int err = MERGE_ERR_PGERR;
+
+	addr = page_address_in_vma(page, vma);
+	if (addr == -EFAULT)
+		goto out;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	p4d = p4d_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, addr);
+	BUG_ON(pmd_trans_huge(*pmd));
+	if (!pmd_present(*pmd))
+		goto out;
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
+				addr + PAGE_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+
+	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!pte_same(*ptep, orig_pte)) {
+		pte_unmap_unlock(ptep, ptl);
+		goto out_mn;
+	}
+
+	flush_cache_page(vma, addr, pte_pfn(*ptep));
+	ptep_clear_flush_notify(vma, addr, ptep);
+	entry = mk_pte(kpage, vma->vm_page_prot);
+
+	/* special treatment is needed for zero_page */
+	if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
+				(page_to_pfn(kpage) == zero_pfn)) {
+		entry = pte_mkspecial(entry);
+		dec_mm_counter(mm, MM_ANONPAGES);
+		inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
+	} else {
+		get_page(kpage);
+		page_add_anon_rmap(kpage, vma, addr, false);
+	}
+
+	set_pte_at_notify(mm, addr, ptep, entry);
+
+	page_remove_rmap(page, false);
+	if (!page_mapped(page))
+		try_to_free_swap(page);
+	put_page(page);
+
+	pte_unmap_unlock(ptep, ptl);
+	err = 0;
+out_mn:
+	mmu_notifier_invalidate_range_end(&range);
+out:
+	return err;
+}
+
+
+/**
+ * page_hash_max() - fully hash a page with HASH_STRENGTH_MAX and return a
+ * non-zero hash value.  A zero hash value at HASH_STRENGTH_MAX is used to
+ * indicate that the hash_max member has not been calculated yet.
+ *
+ * @page:     the page to be hashed
+ * @hash_old: the hash value calculated with the current hash strength
+ *
+ * Return: the new hash value calculated at HASH_STRENGTH_MAX
+ */
+static inline u32 page_hash_max(struct page *page, u32 hash_old)
+{
+	u32 hash_max = 0;
+	void *addr;
+
+	addr = kmap_atomic(page);
+	hash_max = delta_hash(addr, hash_strength,
+			      HASH_STRENGTH_MAX, hash_old);
+
+	kunmap_atomic(addr);
+
+	if (!hash_max)
+		hash_max = 1;
+
+	inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
+	return hash_max;
+}
+
+/*
+ * We compare the hash again, to ensure that it is really a hash collision
+ * instead of being caused by a page write.
+ */
+static inline int check_collision(struct rmap_item *rmap_item,
+				  u32 hash)
+{
+	int err;
+	struct page *page = rmap_item->page;
+
+	/* If this rmap_item has already been hash_maxed, then the collision
+	 * must have appeared in the second-level rbtree search. In this case
+	 * we check whether its hash_max value has changed. Otherwise, the
+	 * collision happened in the first-level rbtree search, so we check
+	 * against its current hash value.
+	 */
+	if (rmap_item->hash_max) {
+		inc_rshash_neg(memcmp_cost);
+		inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
+
+		if (rmap_item->hash_max == page_hash_max(page, hash))
+			err = MERGE_ERR_COLLI;
+		else
+			err = MERGE_ERR_CHANGED;
+	} else {
+		inc_rshash_neg(memcmp_cost + hash_strength);
+
+		if (page_hash(page, hash_strength, 0) == hash)
+			err = MERGE_ERR_COLLI;
+		else
+			err = MERGE_ERR_CHANGED;
+	}
+
+	return err;
+}
+
+/**
+ * try_to_merge_with_uksm_page() - try to merge rmap_item->page with a kpage
+ * in a stable node.  The kpage must already be a ksm page.
+ *
+ * @return 0 if the pages were merged, a MERGE_ERR_* code otherwise.
+ */
+static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item,
+				      struct page *kpage, u32 hash)
+{
+	struct vm_area_struct *vma = rmap_item->slot->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	pte_t orig_pte = __pte(0);
+	int err = MERGE_ERR_PGERR;
+	struct page *page;
+
+	if (uksm_test_exit(mm))
+		goto out;
+
+	page = rmap_item->page;
+
+	if (page == kpage) { /* ksm page forked */
+		err = 0;
+		goto out;
+	}
+
+	/*
+	 * We need the page lock to read a stable PageSwapCache in
+	 * write_protect_page().  We use trylock_page() instead of
+	 * lock_page() because we don't want to wait here - we
+	 * prefer to continue scanning and merging different pages,
+	 * then come back to this page when it is unlocked.
+	 */
+	if (!trylock_page(page))
+		goto out;
+
+	if (!PageAnon(page) || !PageKsm(kpage))
+		goto out_unlock;
+
+	if (PageTransCompound(page)) {
+		err = split_huge_page(page);
+		if (err)
+			goto out_unlock;
+	}
+
+	/*
+	 * If this anonymous page is mapped only here, its pte may need
+	 * to be write-protected.  If it's mapped elsewhere, all of its
+	 * ptes are necessarily already write-protected.  But in either
+	 * case, we need to lock and check page_count is not raised.
+	 */
+	if (write_protect_page(vma, page, &orig_pte, NULL) == 0) {
+		if (pages_identical_with_cost(page, kpage))
+			err = replace_page(vma, page, kpage, orig_pte);
+		else
+			err = check_collision(rmap_item, hash);
+	}
+
+	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
+		munlock_vma_page(page);
+		if (!PageMlocked(kpage)) {
+			unlock_page(page);
+			lock_page(kpage);
+			mlock_vma_page(kpage);
+			page = kpage;		/* for final unlock */
+		}
+	}
+
+out_unlock:
+	unlock_page(page);
+out:
+	return err;
+}
+
+
+
+/**
+ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance
+ * to restore a page mapping that has been changed in try_to_merge_two_pages.
+ *
+ * @return 0 on success.
+ */
+static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr,
+			     pte_t orig_pte, pte_t wprt_pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+	spinlock_t *ptl;
+
+	int err = -EFAULT;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	p4d = p4d_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, addr);
+	if (!pmd_present(*pmd))
+		goto out;
+
+	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!pte_same(*ptep, wprt_pte)) {
+		/* already copied, let it be */
+		pte_unmap_unlock(ptep, ptl);
+		goto out;
+	}
+
+	/*
+	 * Good, still here. While we still hold a reference on the ksm page,
+	 * it cannot return to the free page pool, so there is no way a pte was
+	 * changed to another page and then back to this page. And remember
+	 * that ksm pages are not reused in do_wp_page(). So it is safe to
+	 * restore the original pte.
+	 */
+	flush_cache_page(vma, addr, pte_pfn(*ptep));
+	ptep_clear_flush_notify(vma, addr, ptep);
+	set_pte_at_notify(mm, addr, ptep, orig_pte);
+
+	pte_unmap_unlock(ptep, ptl);
+	err = 0;
+out:
+	return err;
+}
+
+/**
+ * try_to_merge_two_pages() - take two identical pages and prepare
+ * them to be merged into one page (rmap_item->page)
+ *
+ * @return 0 if we successfully merged two identical pages into
+ *         one ksm page. MERGE_ERR_COLLI if it is only a hash collision
+ *         found during the rbtree search. MERGE_ERR_CHANGED if rmap_item
+ *         has been changed since it was hashed. MERGE_ERR_PGERR otherwise.
+ *
+ */
+static int try_to_merge_two_pages(struct rmap_item *rmap_item,
+				  struct rmap_item *tree_rmap_item,
+				  u32 hash)
+{
+	pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0);
+	pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0);
+	struct vm_area_struct *vma1 = rmap_item->slot->vma;
+	struct vm_area_struct *vma2 = tree_rmap_item->slot->vma;
+	struct page *page = rmap_item->page;
+	struct page *tree_page = tree_rmap_item->page;
+	int err = MERGE_ERR_PGERR;
+	struct address_space *saved_mapping;
+
+
+	if (rmap_item->page == tree_rmap_item->page)
+		goto out;
+
+	if (!trylock_page(page))
+		goto out;
+
+	if (!PageAnon(page))
+		goto out_unlock;
+
+	if (PageTransCompound(page)) {
+		err = split_huge_page(page);
+		if (err)
+			goto out_unlock;
+	}
+
+	if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) {
+		unlock_page(page);
+		goto out;
+	}
+
+	/*
+	 * While we hold page lock, upgrade page from
+	 * PageAnon+anon_vma to PageKsm+NULL stable_node:
+	 * stable_tree_insert() will update stable_node.
+	 */
+	saved_mapping = page->mapping;
+	set_page_stable_node(page, NULL);
+	mark_page_accessed(page);
+	if (!PageDirty(page))
+		SetPageDirty(page);
+
+	unlock_page(page);
+
+	if (!trylock_page(tree_page))
+		goto restore_out;
+
+	if (!PageAnon(tree_page)) {
+		unlock_page(tree_page);
+		goto restore_out;
+	}
+
+	if (PageTransCompound(tree_page)) {
+		err = split_huge_page(tree_page);
+		if (err) {
+			unlock_page(tree_page);
+			goto restore_out;
+		}
+	}
+
+	if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) {
+		unlock_page(tree_page);
+		goto restore_out;
+	}
+
+	if (pages_identical_with_cost(page, tree_page)) {
+		err = replace_page(vma2, tree_page, page, wprt_pte2);
+		if (err) {
+			unlock_page(tree_page);
+			goto restore_out;
+		}
+
+		if (vma2->vm_flags & VM_LOCKED) {
+			munlock_vma_page(tree_page);
+			if (!PageMlocked(page)) {
+				unlock_page(tree_page);
+				lock_page(page);
+				mlock_vma_page(page);
+				tree_page = page; /* for final unlock */
+			}
+		}
+
+		unlock_page(tree_page);
+
+		goto out; /* success */
+
+	} else {
+		if (tree_rmap_item->hash_max &&
+		    tree_rmap_item->hash_max == rmap_item->hash_max) {
+			err = MERGE_ERR_COLLI_MAX;
+		} else if (page_hash(page, hash_strength, 0) ==
+		    page_hash(tree_page, hash_strength, 0)) {
+			inc_rshash_neg(memcmp_cost + hash_strength * 2);
+			err = MERGE_ERR_COLLI;
+		} else {
+			err = MERGE_ERR_CHANGED;
+		}
+
+		unlock_page(tree_page);
+	}
+
+restore_out:
+	lock_page(page);
+	if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item),
+				  orig_pte1, wprt_pte1))
+		page->mapping = saved_mapping;
+
+out_unlock:
+	unlock_page(page);
+out:
+	return err;
+}
+
+static inline int hash_cmp(u32 new_val, u32 node_val)
+{
+	if (new_val > node_val)
+		return 1;
+	else if (new_val < node_val)
+		return -1;
+	else
+		return 0;
+}
+
+static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash)
+{
+	u32 hash_max = item->hash_max;
+
+	if (!hash_max) {
+		hash_max = page_hash_max(item->page, hash);
+
+		item->hash_max = hash_max;
+	}
+
+	return hash_max;
+}
+
+
+
+/**
+ * stable_tree_search() - search the stable tree for a page
+ *
+ * @item:	the rmap_item we are comparing with
+ * @hash:	the hash value of this item->page already calculated
+ *
+ * @return	the page we have found, NULL otherwise. The returned page has
+ *		had its reference count raised.
+ */
+static struct page *stable_tree_search(struct rmap_item *item, u32 hash)
+{
+	struct rb_node *node = root_stable_treep->rb_node;
+	struct tree_node *tree_node;
+	unsigned long hash_max;
+	struct page *page = item->page;
+	struct stable_node *stable_node;
+
+	stable_node = page_stable_node(page);
+	if (stable_node) {
+		/* ksm page forked, that is
+		 * if (PageKsm(page) && !in_stable_tree(rmap_item))
+		 * its reference was actually taken once outside.
+		 */
+		get_page(page);
+		return page;
+	}
+
+	while (node) {
+		int cmp;
+
+		tree_node = rb_entry(node, struct tree_node, node);
+
+		cmp = hash_cmp(hash, tree_node->hash);
+
+		if (cmp < 0)
+			node = node->rb_left;
+		else if (cmp > 0)
+			node = node->rb_right;
+		else
+			break;
+	}
+
+	if (!node)
+		return NULL;
+
+	if (tree_node->count == 1) {
+		stable_node = rb_entry(tree_node->sub_root.rb_node,
+				       struct stable_node, node);
+		BUG_ON(!stable_node);
+
+		goto get_page_out;
+	}
+
+	/*
+	 * ok, we have to search the second
+	 * level subtree, hash the page to a
+	 * full strength.
+	 */
+	node = tree_node->sub_root.rb_node;
+	BUG_ON(!node);
+	hash_max = rmap_item_hash_max(item, hash);
+
+	while (node) {
+		int cmp;
+
+		stable_node = rb_entry(node, struct stable_node, node);
+
+		cmp = hash_cmp(hash_max, stable_node->hash_max);
+
+		if (cmp < 0)
+			node = node->rb_left;
+		else if (cmp > 0)
+			node = node->rb_right;
+		else
+			goto get_page_out;
+	}
+
+	return NULL;
+
+get_page_out:
+	page = get_uksm_page(stable_node, 1, 1);
+	return page;
+}
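+
+/*
+ * Note on the layout searched above: the stable tree is two-level.  The
+ * first level is an rbtree keyed by the sampled hash (tree_node->hash);
+ * each tree_node owns a sub-rbtree keyed by the full-strength hash
+ * (stable_node->hash_max), which is only computed lazily when a
+ * first-level collision forces the second-level search.  Stripped of
+ * kernel details, the lookup is:
+ *
+ *	node = rb_lookup(first_level, hash);
+ *	if (!node)
+ *		return NULL;
+ *	if (node->count == 1)
+ *		return only_entry(node);	// no full-strength hash needed
+ *	return rb_lookup(node->sub_root, hash_max(page));
+ *
+ * rb_lookup() and only_entry() are shorthand for the rb_node walks above,
+ * not real helpers.
+ */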
+
+static int try_merge_rmap_item(struct rmap_item *item,
+			       struct page *kpage,
+			       struct page *tree_page)
+{
+	struct vm_area_struct *vma = item->slot->vma;
+	struct page_vma_mapped_walk pvmw = {
+		.page = kpage,
+		.vma = vma,
+	};
+
+	pvmw.address = get_rmap_addr(item);
+	if (!page_vma_mapped_walk(&pvmw))
+		return 0;
+
+	if (pte_write(*pvmw.pte)) {
+		/* has changed, abort! */
+		page_vma_mapped_walk_done(&pvmw);
+		return 0;
+	}
+
+	get_page(tree_page);
+	page_add_anon_rmap(tree_page, vma, pvmw.address, false);
+
+	flush_cache_page(vma, pvmw.address, page_to_pfn(kpage));
+	ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+	set_pte_at_notify(vma->vm_mm, pvmw.address, pvmw.pte,
+			  mk_pte(tree_page, vma->vm_page_prot));
+
+	page_remove_rmap(kpage, false);
+	put_page(kpage);
+
+	page_vma_mapped_walk_done(&pvmw);
+
+	return 1;
+}
+
+/**
+ * try_merge_with_stable() - when two rmap_items need to be inserted into the
+ * stable tree and the page was found to be identical to a stable ksm page,
+ * this is the last chance we have to merge them into one.
+ *
+ * @item1:	the rmap_item holding the page which we wanted to insert
+ *		into the stable tree.
+ * @item2:	the other rmap_item we found during the unstable tree search
+ * @kpage:	the page currently mapped by the two rmap_items
+ * @tree_page:	the page we found identical in the stable tree node
+ * @success1:	set to 1 if item1 is successfully merged
+ * @success2:	set to 1 if item2 is successfully merged
+ */
+static void try_merge_with_stable(struct rmap_item *item1,
+				  struct rmap_item *item2,
+				  struct page **kpage,
+				  struct page *tree_page,
+				  int *success1, int *success2)
+{
+	struct vm_area_struct *vma1 = item1->slot->vma;
+	struct vm_area_struct *vma2 = item2->slot->vma;
+	*success1 = 0;
+	*success2 = 0;
+
+	if (unlikely(*kpage == tree_page)) {
+		/* I don't think this can really happen */
+		pr_warn("UKSM: unexpected condition detected in %s -- *kpage == tree_page !\n",
+			__func__);
+		*success1 = 1;
+		*success2 = 1;
+		return;
+	}
+
+	if (!PageAnon(*kpage) || !PageKsm(*kpage))
+		goto failed;
+
+	if (!trylock_page(tree_page))
+		goto failed;
+
+	/* If the old page is still ksm, still pointed
+	 * to in the right place, and still write protected,
+	 * we are confident it has not changed, so there is
+	 * no need to memcmp again.
+	 * Beware: we cannot take nested pte locks here,
+	 * that would be a deadlock risk.
+	 */
+	if (!try_merge_rmap_item(item1, *kpage, tree_page))
+		goto unlock_failed;
+
+	/* ok, then vma2, remind that pte1 already set */
+	if (!try_merge_rmap_item(item2, *kpage, tree_page))
+		goto success_1;
+
+	*success2 = 1;
+success_1:
+	*success1 = 1;
+
+
+	if ((*success1 && vma1->vm_flags & VM_LOCKED) ||
+	    (*success2 && vma2->vm_flags & VM_LOCKED)) {
+		munlock_vma_page(*kpage);
+		if (!PageMlocked(tree_page))
+			mlock_vma_page(tree_page);
+	}
+
+	/*
+	 * We do not need oldpage any more in the caller, so can break the lock
+	 * now.
+	 */
+	unlock_page(*kpage);
+	*kpage = tree_page; /* Get unlocked outside. */
+	return;
+
+unlock_failed:
+	unlock_page(tree_page);
+failed:
+	return;
+}
+
+static inline void stable_node_hash_max(struct stable_node *node,
+					 struct page *page, u32 hash)
+{
+	u32 hash_max = node->hash_max;
+
+	if (!hash_max) {
+		hash_max = page_hash_max(page, hash);
+		node->hash_max = hash_max;
+	}
+}
+
+static inline
+struct stable_node *new_stable_node(struct tree_node *tree_node,
+				    struct page *kpage, u32 hash_max)
+{
+	struct stable_node *new_stable_node;
+
+	new_stable_node = alloc_stable_node();
+	if (!new_stable_node)
+		return NULL;
+
+	new_stable_node->kpfn = page_to_pfn(kpage);
+	new_stable_node->hash_max = hash_max;
+	new_stable_node->tree_node = tree_node;
+	set_page_stable_node(kpage, new_stable_node);
+
+	return new_stable_node;
+}
+
+static inline
+struct stable_node *first_level_insert(struct tree_node *tree_node,
+				       struct rmap_item *rmap_item,
+				       struct rmap_item *tree_rmap_item,
+				       struct page **kpage, u32 hash,
+				       int *success1, int *success2)
+{
+	int cmp;
+	struct page *tree_page;
+	u32 hash_max = 0;
+	struct stable_node *stable_node, *new_snode;
+	struct rb_node *parent = NULL, **new;
+
+	/* this tree node contains no sub-tree yet */
+	stable_node = rb_entry(tree_node->sub_root.rb_node,
+			       struct stable_node, node);
+
+	tree_page = get_uksm_page(stable_node, 1, 0);
+	if (tree_page) {
+		cmp = memcmp_pages_with_cost(*kpage, tree_page, 1);
+		if (!cmp) {
+			try_merge_with_stable(rmap_item, tree_rmap_item, kpage,
+					      tree_page, success1, success2);
+			put_page(tree_page);
+			if (!*success1 && !*success2)
+				goto failed;
+
+			return stable_node;
+
+		} else {
+			/*
+			 * Collision at the first level; try to create a
+			 * subtree, so a new stable node needs to be created.
+			 * Drop the tree_page reference only after its last
+			 * use in stable_node_hash_max().
+			 */
+			stable_node_hash_max(stable_node, tree_page,
+					     tree_node->hash);
+			put_page(tree_page);
+
+			hash_max = rmap_item_hash_max(rmap_item, hash);
+			cmp = hash_cmp(hash_max, stable_node->hash_max);
+
+			parent = &stable_node->node;
+			if (cmp < 0)
+				new = &parent->rb_left;
+			else if (cmp > 0)
+				new = &parent->rb_right;
+			else
+				goto failed;
+		}
+
+	} else {
+		/* The only stable_node was deleted; we reuse its tree_node. */
+		parent = NULL;
+		new = &tree_node->sub_root.rb_node;
+	}
+
+	new_snode = new_stable_node(tree_node, *kpage, hash_max);
+	if (!new_snode)
+		goto failed;
+
+	rb_link_node(&new_snode->node, parent, new);
+	rb_insert_color(&new_snode->node, &tree_node->sub_root);
+	tree_node->count++;
+	*success1 = *success2 = 1;
+
+	return new_snode;
+
+failed:
+	return NULL;
+}
+
+static inline
+struct stable_node *stable_subtree_insert(struct tree_node *tree_node,
+					  struct rmap_item *rmap_item,
+					  struct rmap_item *tree_rmap_item,
+					  struct page **kpage, u32 hash,
+					  int *success1, int *success2)
+{
+	struct page *tree_page;
+	u32 hash_max;
+	struct stable_node *stable_node, *new_snode;
+	struct rb_node *parent, **new;
+
+research:
+	parent = NULL;
+	new = &tree_node->sub_root.rb_node;
+	BUG_ON(!*new);
+	hash_max = rmap_item_hash_max(rmap_item, hash);
+	while (*new) {
+		int cmp;
+
+		stable_node = rb_entry(*new, struct stable_node, node);
+
+		cmp = hash_cmp(hash_max, stable_node->hash_max);
+
+		if (cmp < 0) {
+			parent = *new;
+			new = &parent->rb_left;
+		} else if (cmp > 0) {
+			parent = *new;
+			new = &parent->rb_right;
+		} else {
+			tree_page = get_uksm_page(stable_node, 1, 0);
+			if (tree_page) {
+				cmp = memcmp_pages_with_cost(*kpage, tree_page, 1);
+				if (!cmp) {
+					try_merge_with_stable(rmap_item,
+						tree_rmap_item, kpage,
+						tree_page, success1, success2);
+
+					put_page(tree_page);
+					if (!*success1 && !*success2)
+						goto failed;
+					/*
+					 * successfully merged with a stable
+					 * node
+					 */
+					return stable_node;
+				} else {
+					put_page(tree_page);
+					goto failed;
+				}
+			} else {
+				/*
+				 * The stable node may have been deleted
+				 * and the subtree may have been
+				 * restructured; we cannot
+				 * continue, so re-search it.
+				 */
+				if (tree_node->count) {
+					goto research;
+				} else {
+					/* reuse the tree node*/
+					parent = NULL;
+					new = &tree_node->sub_root.rb_node;
+				}
+			}
+		}
+	}
+
+	new_snode = new_stable_node(tree_node, *kpage, hash_max);
+	if (!new_snode)
+		goto failed;
+
+	rb_link_node(&new_snode->node, parent, new);
+	rb_insert_color(&new_snode->node, &tree_node->sub_root);
+	tree_node->count++;
+	*success1 = *success2 = 1;
+
+	return new_snode;
+
+failed:
+	return NULL;
+}
+
+
+/**
+ * stable_tree_insert() - try to insert a page merged in the unstable tree
+ * into the stable tree
+ *
+ * @kpage:		the page to be inserted
+ * @hash:		the current hash of this page
+ * @rmap_item:		the rmap_item being scanned
+ * @tree_rmap_item:	the rmap_item found in the unstable tree
+ * @success1:		set to 1 if rmap_item is merged
+ * @success2:		set to 1 if tree_rmap_item is merged
+ *
+ * @return		the stable_node on stable tree if at least one
+ *			rmap_item is inserted into stable tree, NULL
+ *			otherwise.
+ */
+static struct stable_node *
+stable_tree_insert(struct page **kpage, u32 hash,
+		   struct rmap_item *rmap_item,
+		   struct rmap_item *tree_rmap_item,
+		   int *success1, int *success2)
+{
+	struct rb_node **new = &root_stable_treep->rb_node;
+	struct rb_node *parent = NULL;
+	struct stable_node *stable_node;
+	struct tree_node *tree_node;
+	u32 hash_max = 0;
+
+	*success1 = *success2 = 0;
+
+	while (*new) {
+		int cmp;
+
+		tree_node = rb_entry(*new, struct tree_node, node);
+
+		cmp = hash_cmp(hash, tree_node->hash);
+
+		if (cmp < 0) {
+			parent = *new;
+			new = &parent->rb_left;
+		} else if (cmp > 0) {
+			parent = *new;
+			new = &parent->rb_right;
+		} else
+			break;
+	}
+
+	if (*new) {
+		if (tree_node->count == 1) {
+			stable_node = first_level_insert(tree_node, rmap_item,
+						tree_rmap_item, kpage,
+						hash, success1, success2);
+		} else {
+			stable_node = stable_subtree_insert(tree_node,
+					rmap_item, tree_rmap_item, kpage,
+					hash, success1, success2);
+		}
+	} else {
+
+		/* no tree node found */
+		tree_node = alloc_tree_node(stable_tree_node_listp);
+		if (!tree_node) {
+			stable_node = NULL;
+			goto out;
+		}
+
+		stable_node = new_stable_node(tree_node, *kpage, hash_max);
+		if (!stable_node) {
+			free_tree_node(tree_node);
+			goto out;
+		}
+
+		tree_node->hash = hash;
+		rb_link_node(&tree_node->node, parent, new);
+		rb_insert_color(&tree_node->node, root_stable_treep);
+		parent = NULL;
+		new = &tree_node->sub_root.rb_node;
+
+		rb_link_node(&stable_node->node, parent, new);
+		rb_insert_color(&stable_node->node, &tree_node->sub_root);
+		tree_node->count++;
+		*success1 = *success2 = 1;
+	}
+
+out:
+	return stable_node;
+}
+
+
+/**
+ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem
+ *
+ * @return	0 on success, -EBUSY if unable to lock the mmap_sem,
+ *		-EINVAL if the page mapping has been changed.
+ */
+static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item)
+{
+	int err;
+
+	err = get_mergeable_page_lock_mmap(tree_rmap_item);
+
+	if (err == -EINVAL) {
+		/* its page mapping has been changed, remove it */
+		remove_rmap_item_from_tree(tree_rmap_item);
+	}
+
+	/* On success, the page reference is held and mmap_sem is locked. */
+	return err;
+}
+
+
+/**
+ * unstable_tree_search_insert() - search an unstable tree rmap_item with the
+ * same hash value. Get its page and trylock the mmap_sem
+ */
+static inline
+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+					      u32 hash)
+
+{
+	struct rb_node **new = &root_unstable_tree.rb_node;
+	struct rb_node *parent = NULL;
+	struct tree_node *tree_node;
+	u32 hash_max;
+	struct rmap_item *tree_rmap_item;
+
+	while (*new) {
+		int cmp;
+
+		tree_node = rb_entry(*new, struct tree_node, node);
+
+		cmp = hash_cmp(hash, tree_node->hash);
+
+		if (cmp < 0) {
+			parent = *new;
+			new = &parent->rb_left;
+		} else if (cmp > 0) {
+			parent = *new;
+			new = &parent->rb_right;
+		} else
+			break;
+	}
+
+	if (*new) {
+		/* got the tree_node */
+		if (tree_node->count == 1) {
+			tree_rmap_item = rb_entry(tree_node->sub_root.rb_node,
+						  struct rmap_item, node);
+			BUG_ON(!tree_rmap_item);
+
+			goto get_page_out;
+		}
+
+		/* well, search the collision subtree */
+		new = &tree_node->sub_root.rb_node;
+		BUG_ON(!*new);
+		hash_max = rmap_item_hash_max(rmap_item, hash);
+
+		while (*new) {
+			int cmp;
+
+			tree_rmap_item = rb_entry(*new, struct rmap_item,
+						  node);
+
+			cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
+			parent = *new;
+			if (cmp < 0)
+				new = &parent->rb_left;
+			else if (cmp > 0)
+				new = &parent->rb_right;
+			else
+				goto get_page_out;
+		}
+	} else {
+		/* alloc a new tree_node */
+		tree_node = alloc_tree_node(&unstable_tree_node_list);
+		if (!tree_node)
+			return NULL;
+
+		tree_node->hash = hash;
+		rb_link_node(&tree_node->node, parent, new);
+		rb_insert_color(&tree_node->node, &root_unstable_tree);
+		parent = NULL;
+		new = &tree_node->sub_root.rb_node;
+	}
+
+	/* not found even in the sub-tree */
+	rmap_item->tree_node = tree_node;
+	rmap_item->address |= UNSTABLE_FLAG;
+	rmap_item->hash_round = uksm_hash_round;
+	rb_link_node(&rmap_item->node, parent, new);
+	rb_insert_color(&rmap_item->node, &tree_node->sub_root);
+
+	uksm_pages_unshared++;
+	return NULL;
+
+get_page_out:
+	if (tree_rmap_item->page == rmap_item->page)
+		return NULL;
+
+	if (get_tree_rmap_item_page(tree_rmap_item))
+		return NULL;
+
+	return tree_rmap_item;
+}
+
+static void hold_anon_vma(struct rmap_item *rmap_item,
+			  struct anon_vma *anon_vma)
+{
+	rmap_item->anon_vma = anon_vma;
+	get_anon_vma(anon_vma);
+}
+
+
+/**
+ * stable_tree_append() - append a rmap_item to a stable node. Deduplication
+ * ratio statistics are updated in this function.
+ *
+ */
+static void stable_tree_append(struct rmap_item *rmap_item,
+			       struct stable_node *stable_node, int logdedup)
+{
+	struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL;
+	unsigned long key = (unsigned long)rmap_item->slot;
+	unsigned long factor = rmap_item->slot->rung->step;
+
+	BUG_ON(!stable_node);
+	rmap_item->address |= STABLE_FLAG;
+
+	if (hlist_empty(&stable_node->hlist)) {
+		uksm_pages_shared++;
+		goto node_vma_new;
+	} else {
+		uksm_pages_sharing++;
+	}
+
+	hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
+		if (node_vma->key >= key)
+			break;
+
+		if (logdedup) {
+			node_vma->slot->pages_bemerged += factor;
+			if (list_empty(&node_vma->slot->dedup_list))
+				list_add(&node_vma->slot->dedup_list,
+					 &vma_slot_dedup);
+		}
+	}
+
+	if (node_vma) {
+		if (node_vma->key == key) {
+			node_vma_cont = hlist_entry_safe(node_vma->hlist.next,
+							 struct node_vma, hlist);
+			goto node_vma_ok;
+		} else if (node_vma->key > key) {
+			node_vma_cont = node_vma;
+		}
+	}
+
+node_vma_new:
+	/* no same vma already in node, alloc a new node_vma */
+	new_node_vma = alloc_node_vma();
+	BUG_ON(!new_node_vma);
+	new_node_vma->head = stable_node;
+	new_node_vma->slot = rmap_item->slot;
+
+	if (!node_vma) {
+		hlist_add_head(&new_node_vma->hlist, &stable_node->hlist);
+	} else if (node_vma->key != key) {
+		if (node_vma->key < key)
+			hlist_add_behind(&new_node_vma->hlist, &node_vma->hlist);
+		else {
+			hlist_add_before(&new_node_vma->hlist,
+					 &node_vma->hlist);
+		}
+
+	}
+	node_vma = new_node_vma;
+
+node_vma_ok: /* ok, ready to add to the list */
+	rmap_item->head = node_vma;
+	hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist);
+	hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma);
+	if (logdedup) {
+		rmap_item->slot->pages_merged++;
+		if (node_vma_cont) {
+			node_vma = node_vma_cont;
+			hlist_for_each_entry_continue(node_vma, hlist) {
+				node_vma->slot->pages_bemerged += factor;
+				if (list_empty(&node_vma->slot->dedup_list))
+					list_add(&node_vma->slot->dedup_list,
+						 &vma_slot_dedup);
+			}
+		}
+	}
+}
+
+/*
+ * We use break_ksm to break COW on a ksm page: it's a stripped down
+ *
+ *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
+ *		put_page(page);
+ *
+ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
+ * in case the application has unmapped and remapped mm,addr meanwhile.
+ * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
+ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
+ */
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct page *page;
+	int ret = 0;
+
+	do {
+		cond_resched();
+		page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
+		if (IS_ERR_OR_NULL(page))
+			break;
+		if (PageKsm(page)) {
+			ret = handle_mm_fault(vma, addr,
+					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
+		} else
+			ret = VM_FAULT_WRITE;
+		put_page(page);
+	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+	/*
+	 * We must loop because handle_mm_fault() may back out if there's
+	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
+	 *
+	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
+	 * COW has been broken, even if the vma does not permit VM_WRITE;
+	 * but note that a concurrent fault might break PageKsm for us.
+	 *
+	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
+	 * backing file, which also invalidates anonymous pages: that's
+	 * okay, that truncation will have unmapped the PageKsm for us.
+	 *
+	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
+	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
+	 * current task has TIF_MEMDIE set, and will be OOM killed on return
+	 * to user; and ksmd, having no mm, would never be chosen for that.
+	 *
+	 * But if the mm is in a limited mem_cgroup, then the fault may fail
+	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
+	 * even ksmd can fail in this way - though it's usually breaking ksm
+	 * just to undo a merge it made a moment before, so unlikely to oom.
+	 *
+	 * That's a pity: we might therefore have more kernel pages allocated
+	 * than we're counting as nodes in the stable tree; but uksm_do_scan
+	 * will retry to break_cow on each pass, so should recover the page
+	 * in due course.  The important thing is to not let VM_MERGEABLE
+	 * be cleared while any such pages might remain in the area.
+	 */
+	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
+}
+
+static void break_cow(struct rmap_item *rmap_item)
+{
+	struct vm_area_struct *vma = rmap_item->slot->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr = get_rmap_addr(rmap_item);
+
+	if (uksm_test_exit(mm))
+		goto out;
+
+	break_ksm(vma, addr);
+out:
+	return;
+}
+
+/*
+ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ */
+inline int unmerge_uksm_pages(struct vm_area_struct *vma,
+		      unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	int err = 0;
+
+	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
+		if (uksm_test_exit(vma->vm_mm))
+			break;
+		if (signal_pending(current))
+			err = -ERESTARTSYS;
+		else
+			err = break_ksm(vma, addr);
+	}
+	return err;
+}
+
+static inline void inc_uksm_pages_scanned(void)
+{
+	u64 delta;
+
+
+	if (uksm_pages_scanned == U64_MAX) {
+		encode_benefit();
+
+		delta = uksm_pages_scanned >> pages_scanned_base;
+
+		if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) {
+			pages_scanned_stored >>= 1;
+			delta >>= 1;
+			pages_scanned_base++;
+		}
+
+		pages_scanned_stored += delta;
+
+		uksm_pages_scanned = uksm_pages_scanned_last = 0;
+	}
+
+	uksm_pages_scanned++;
+}
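+
+/*
+ * With this scheme the lifetime total of scanned pages is approximately
+ *
+ *	(pages_scanned_stored << pages_scanned_base) + uksm_pages_scanned
+ *
+ * where pages_scanned_base grows each time the stored value would
+ * otherwise overflow, trading precision for range.
+ */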
+
+static inline int find_zero_page_hash(int strength, u32 hash)
+{
+	return (zero_hash_table[strength] == hash);
+}
+
+static
+int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page)
+{
+	struct page *zero_page = empty_uksm_zero_page;
+	struct mm_struct *mm = vma->vm_mm;
+	pte_t orig_pte = __pte(0);
+	int err = -EFAULT;
+
+	if (uksm_test_exit(mm))
+		goto out;
+
+	if (!trylock_page(page))
+		goto out;
+
+	if (!PageAnon(page))
+		goto out_unlock;
+
+	if (PageTransCompound(page)) {
+		err = split_huge_page(page);
+		if (err)
+			goto out_unlock;
+	}
+
+	if (write_protect_page(vma, page, &orig_pte, NULL) == 0) {
+		if (is_page_full_zero(page))
+			err = replace_page(vma, page, zero_page, orig_pte);
+	}
+
+out_unlock:
+	unlock_page(page);
+out:
+	return err;
+}
+
+/*
+ * cmp_and_merge_page() - first see if page can be merged into the stable
+ * tree; if not, compare hash to previous and if it's the same, see if page
+ * can be inserted into the unstable tree, or merged with a page already there
+ * and both transferred to the stable tree.
+ *
+ * @page: the page that we are searching identical page to.
+ * @rmap_item: the reverse mapping into the virtual address of this page
+ */
+static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash)
+{
+	struct rmap_item *tree_rmap_item;
+	struct page *page;
+	struct page *kpage = NULL;
+	u32 hash_max;
+	int err;
+	unsigned int success1, success2;
+	struct stable_node *snode;
+	int cmp;
+	struct rb_node *parent = NULL, **new;
+
+	remove_rmap_item_from_tree(rmap_item);
+	page = rmap_item->page;
+
+	/* We first start with searching the page inside the stable tree */
+	kpage = stable_tree_search(rmap_item, hash);
+	if (kpage) {
+		err = try_to_merge_with_uksm_page(rmap_item, kpage,
+						 hash);
+		if (!err) {
+			/*
+			 * The page was successfully merged, add
+			 * its rmap_item to the stable tree.
+			 * page lock is needed because it's
+			 * racing with try_to_unmap_ksm(), etc.
+			 */
+			lock_page(kpage);
+			snode = page_stable_node(kpage);
+			stable_tree_append(rmap_item, snode, 1);
+			unlock_page(kpage);
+			put_page(kpage);
+			return; /* success */
+		}
+		put_page(kpage);
+
+		/*
+		 * If it is a collision and it has already been searched in the
+		 * sub-rbtree (hash_max != 0), we want to abort, because if it
+		 * were successfully merged into the unstable tree, the
+		 * collision would tend to happen again.
+		 */
+		if (err == MERGE_ERR_COLLI && rmap_item->hash_max)
+			return;
+	}
+
+	tree_rmap_item =
+		unstable_tree_search_insert(rmap_item, hash);
+	if (tree_rmap_item) {
+		err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash);
+		/*
+		 * As soon as we merge this page, we want to remove the
+		 * rmap_item of the page we have merged with from the unstable
+		 * tree, and insert it instead as new node in the stable tree.
+		 */
+		if (!err) {
+			kpage = page;
+			remove_rmap_item_from_tree(tree_rmap_item);
+			lock_page(kpage);
+			snode = stable_tree_insert(&kpage, hash,
+						   rmap_item, tree_rmap_item,
+						   &success1, &success2);
+
+			/*
+			 * Do not log dedup for tree item, it's not counted as
+			 * scanned in this round.
+			 */
+			if (success2)
+				stable_tree_append(tree_rmap_item, snode, 0);
+
+			/*
+			 * The order of these two stable append is important:
+			 * we are scanning rmap_item.
+			 */
+			if (success1)
+				stable_tree_append(rmap_item, snode, 1);
+
+			/*
+			 * The original kpage may be unlocked inside
+			 * stable_tree_insert() already. This page
+			 * should be unlocked before doing
+			 * break_cow().
+			 */
+			unlock_page(kpage);
+
+			if (!success1)
+				break_cow(rmap_item);
+
+			if (!success2)
+				break_cow(tree_rmap_item);
+
+		} else if (err == MERGE_ERR_COLLI) {
+			BUG_ON(tree_rmap_item->tree_node->count > 1);
+
+			rmap_item_hash_max(tree_rmap_item,
+					   tree_rmap_item->tree_node->hash);
+
+			hash_max = rmap_item_hash_max(rmap_item, hash);
+			cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
+			parent = &tree_rmap_item->node;
+			if (cmp < 0)
+				new = &parent->rb_left;
+			else if (cmp > 0)
+				new = &parent->rb_right;
+			else
+				goto put_up_out;
+
+			rmap_item->tree_node = tree_rmap_item->tree_node;
+			rmap_item->address |= UNSTABLE_FLAG;
+			rmap_item->hash_round = uksm_hash_round;
+			rb_link_node(&rmap_item->node, parent, new);
+			rb_insert_color(&rmap_item->node,
+					&tree_rmap_item->tree_node->sub_root);
+			rmap_item->tree_node->count++;
+		} else {
+			/*
+			 * Either one of the pages has changed or they collide
+			 * at the max hash strength; consider them ill items.
+			 */
+			remove_rmap_item_from_tree(tree_rmap_item);
+		}
+put_up_out:
+		put_page(tree_rmap_item->page);
+		up_read(&tree_rmap_item->slot->vma->vm_mm->mmap_sem);
+	}
+}
+
+
+
+
+static inline unsigned long get_pool_index(struct vma_slot *slot,
+					   unsigned long index)
+{
+	unsigned long pool_index;
+
+	pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT;
+	if (pool_index >= slot->pool_size)
+		BUG();
+	return pool_index;
+}
+
+static inline unsigned long index_page_offset(unsigned long index)
+{
+	return offset_in_page(sizeof(struct rmap_list_entry *) * index);
+}
+
+static inline
+struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot,
+					    unsigned long index, int need_alloc)
+{
+	unsigned long pool_index;
+	struct page *page;
+	void *addr;
+
+
+	pool_index = get_pool_index(slot, index);
+	if (!slot->rmap_list_pool[pool_index]) {
+		if (!need_alloc)
+			return NULL;
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
+		if (!page)
+			return NULL;
+
+		slot->rmap_list_pool[pool_index] = page;
+	}
+
+	addr = kmap(slot->rmap_list_pool[pool_index]);
+	addr += index_page_offset(index);
+
+	return addr;
+}
+
+static inline void put_rmap_list_entry(struct vma_slot *slot,
+				       unsigned long index)
+{
+	unsigned long pool_index;
+
+	pool_index = get_pool_index(slot, index);
+	BUG_ON(!slot->rmap_list_pool[pool_index]);
+	kunmap(slot->rmap_list_pool[pool_index]);
+}
+
+static inline int entry_is_new(struct rmap_list_entry *entry)
+{
+	return !entry->item;
+}
+
+static inline unsigned long get_index_orig_addr(struct vma_slot *slot,
+						unsigned long index)
+{
+	return slot->vma->vm_start + (index << PAGE_SHIFT);
+}
+
+static inline unsigned long get_entry_address(struct rmap_list_entry *entry)
+{
+	unsigned long addr;
+
+	if (is_addr(entry->addr))
+		addr = get_clean_addr(entry->addr);
+	else if (entry->item)
+		addr = get_rmap_addr(entry->item);
+	else
+		BUG();
+
+	return addr;
+}
+
+static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry)
+{
+	if (is_addr(entry->addr))
+		return NULL;
+
+	return entry->item;
+}
+
+static inline void inc_rmap_list_pool_count(struct vma_slot *slot,
+					    unsigned long index)
+{
+	unsigned long pool_index;
+
+	pool_index = get_pool_index(slot, index);
+	BUG_ON(!slot->rmap_list_pool[pool_index]);
+	slot->pool_counts[pool_index]++;
+}
+
+static inline void dec_rmap_list_pool_count(struct vma_slot *slot,
+					    unsigned long index)
+{
+	unsigned long pool_index;
+
+	pool_index = get_pool_index(slot, index);
+	BUG_ON(!slot->rmap_list_pool[pool_index]);
+	BUG_ON(!slot->pool_counts[pool_index]);
+	slot->pool_counts[pool_index]--;
+}
+
+static inline int entry_has_rmap(struct rmap_list_entry *entry)
+{
+	return !is_addr(entry->addr) && entry->item;
+}
+
+static inline void swap_entries(struct rmap_list_entry *entry1,
+				unsigned long index1,
+				struct rmap_list_entry *entry2,
+				unsigned long index2)
+{
+	struct rmap_list_entry tmp;
+
+	/* swapping two new entries is meaningless */
+	BUG_ON(entry_is_new(entry1) && entry_is_new(entry2));
+
+	tmp = *entry1;
+	*entry1 = *entry2;
+	*entry2 = tmp;
+
+	if (entry_has_rmap(entry1))
+		entry1->item->entry_index = index1;
+
+	if (entry_has_rmap(entry2))
+		entry2->item->entry_index = index2;
+
+	if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) {
+		inc_rmap_list_pool_count(entry1->item->slot, index1);
+		dec_rmap_list_pool_count(entry1->item->slot, index2);
+	} else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) {
+		inc_rmap_list_pool_count(entry2->item->slot, index2);
+		dec_rmap_list_pool_count(entry2->item->slot, index1);
+	}
+}
+
+static inline void free_entry_item(struct rmap_list_entry *entry)
+{
+	unsigned long index;
+	struct rmap_item *item;
+
+	if (!is_addr(entry->addr)) {
+		BUG_ON(!entry->item);
+		item = entry->item;
+		entry->addr = get_rmap_addr(item);
+		set_is_addr(entry->addr);
+		index = item->entry_index;
+		remove_rmap_item_from_tree(item);
+		dec_rmap_list_pool_count(item->slot, index);
+		free_rmap_item(item);
+	}
+}
+
+static inline int pool_entry_boundary(unsigned long index)
+{
+	unsigned long linear_addr;
+
+	linear_addr = sizeof(struct rmap_list_entry *) * index;
+	return index && !offset_in_page(linear_addr);
+}
+
+static inline void try_free_last_pool(struct vma_slot *slot,
+				      unsigned long index)
+{
+	unsigned long pool_index;
+
+	pool_index = get_pool_index(slot, index);
+	if (slot->rmap_list_pool[pool_index] &&
+	    !slot->pool_counts[pool_index]) {
+		__free_page(slot->rmap_list_pool[pool_index]);
+		slot->rmap_list_pool[pool_index] = NULL;
+		slot->flags |= UKSM_SLOT_NEED_SORT;
+	}
+
+}
+
+static inline unsigned long vma_item_index(struct vm_area_struct *vma,
+					   struct rmap_item *item)
+{
+	return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT;
+}
+
+static int within_same_pool(struct vma_slot *slot,
+			    unsigned long i, unsigned long j)
+{
+	unsigned long pool_i, pool_j;
+
+	pool_i = get_pool_index(slot, i);
+	pool_j = get_pool_index(slot, j);
+
+	return (pool_i == pool_j);
+}
+
+static void sort_rmap_entry_list(struct vma_slot *slot)
+{
+	unsigned long i, j;
+	struct rmap_list_entry *entry, *swap_entry;
+
+	entry = get_rmap_list_entry(slot, 0, 0);
+	for (i = 0; i < slot->pages; ) {
+
+		if (!entry)
+			goto skip_whole_pool;
+
+		if (entry_is_new(entry))
+			goto next_entry;
+
+		if (is_addr(entry->addr)) {
+			entry->addr = 0;
+			goto next_entry;
+		}
+
+		j = vma_item_index(slot->vma, entry->item);
+		if (j == i)
+			goto next_entry;
+
+		if (within_same_pool(slot, i, j))
+			swap_entry = entry + j - i;
+		else
+			swap_entry = get_rmap_list_entry(slot, j, 1);
+
+		swap_entries(entry, i, swap_entry, j);
+		if (!within_same_pool(slot, i, j))
+			put_rmap_list_entry(slot, j);
+		continue;
+
+skip_whole_pool:
+		i += PAGE_SIZE / sizeof(*entry);
+		if (i < slot->pages)
+			entry = get_rmap_list_entry(slot, i, 0);
+		continue;
+
+next_entry:
+		if (i >= slot->pages - 1 ||
+		    !within_same_pool(slot, i, i + 1)) {
+			put_rmap_list_entry(slot, i);
+			if (i + 1 < slot->pages)
+				entry = get_rmap_list_entry(slot, i + 1, 0);
+		} else
+			entry++;
+		i++;
+		continue;
+	}
+
+	/* free empty pool entries which contain no rmap_item */
+	/* Can be simplified to rely only on pool_counts once proven bug free. */
+	for (i = 0; i < slot->pool_size; i++) {
+		unsigned char has_rmap;
+		void *addr;
+
+		if (!slot->rmap_list_pool[i])
+			continue;
+
+		has_rmap = 0;
+		addr = kmap(slot->rmap_list_pool[i]);
+		BUG_ON(!addr);
+		for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
+			entry = (struct rmap_list_entry *)addr + j;
+			if (is_addr(entry->addr))
+				continue;
+			if (!entry->item)
+				continue;
+			has_rmap = 1;
+		}
+		kunmap(slot->rmap_list_pool[i]);
+		if (!has_rmap) {
+			BUG_ON(slot->pool_counts[i]);
+			__free_page(slot->rmap_list_pool[i]);
+			slot->rmap_list_pool[i] = NULL;
+		}
+	}
+
+	slot->flags &= ~UKSM_SLOT_NEED_SORT;
+}
+
+/*
+ * vma_fully_scanned() - return whether all pages in this slot have been scanned.
+ */
+static inline int vma_fully_scanned(struct vma_slot *slot)
+{
+	return slot->pages_scanned == slot->pages;
+}
+
+/**
+ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according to
+ * its random permutation. This function is embedded with the random
+ * permutation index management code.
+ */
+static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash)
+{
+	unsigned long rand_range, addr, swap_index, scan_index;
+	struct rmap_item *item = NULL;
+	struct rmap_list_entry *scan_entry, *swap_entry = NULL;
+	struct page *page;
+
+	scan_index = swap_index = slot->pages_scanned % slot->pages;
+
+	if (pool_entry_boundary(scan_index))
+		try_free_last_pool(slot, scan_index - 1);
+
+	if (vma_fully_scanned(slot)) {
+		if (slot->flags & UKSM_SLOT_NEED_SORT)
+			slot->flags |= UKSM_SLOT_NEED_RERAND;
+		else
+			slot->flags &= ~UKSM_SLOT_NEED_RERAND;
+		if (slot->flags & UKSM_SLOT_NEED_SORT)
+			sort_rmap_entry_list(slot);
+	}
+
+	scan_entry = get_rmap_list_entry(slot, scan_index, 1);
+	if (!scan_entry)
+		return NULL;
+
+	if (entry_is_new(scan_entry)) {
+		scan_entry->addr = get_index_orig_addr(slot, scan_index);
+		set_is_addr(scan_entry->addr);
+	}
+
+	if (slot->flags & UKSM_SLOT_NEED_RERAND) {
+		rand_range = slot->pages - scan_index;
+		BUG_ON(!rand_range);
+		swap_index = scan_index + (prandom_u32() % rand_range);
+	}
+
+	if (swap_index != scan_index) {
+		swap_entry = get_rmap_list_entry(slot, swap_index, 1);
+
+		if (!swap_entry)
+			return NULL;
+
+		if (entry_is_new(swap_entry)) {
+			swap_entry->addr = get_index_orig_addr(slot,
+							       swap_index);
+			set_is_addr(swap_entry->addr);
+		}
+		swap_entries(scan_entry, scan_index, swap_entry, swap_index);
+	}
+
+	addr = get_entry_address(scan_entry);
+	item = get_entry_item(scan_entry);
+	BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start);
+
+	page = follow_page(slot->vma, addr, FOLL_GET);
+	if (IS_ERR_OR_NULL(page))
+		goto nopage;
+
+	if (!PageAnon(page))
+		goto putpage;
+
+	/* check whether this is the zero_page pfn or the uksm_zero_page */
+	if ((page_to_pfn(page) == zero_pfn)
+			|| (page_to_pfn(page) == uksm_zero_pfn))
+		goto putpage;
+
+	flush_anon_page(slot->vma, page, addr);
+	flush_dcache_page(page);
+
+
+	*hash = page_hash(page, hash_strength, 1);
+	inc_uksm_pages_scanned();
+	/* if the page content is all zero, re-map it to the zero page */
+	if (find_zero_page_hash(hash_strength, *hash)) {
+		if (!cmp_and_merge_zero_page(slot->vma, page)) {
+			slot->pages_merged++;
+
+			/* For full-zero pages, no need to create rmap item */
+			goto putpage;
+		} else {
+			inc_rshash_neg(memcmp_cost / 2);
+		}
+	}
+
+	if (!item) {
+		item = alloc_rmap_item();
+		if (item) {
+			/* It has already been zeroed */
+			item->slot = slot;
+			item->address = addr;
+			item->entry_index = scan_index;
+			scan_entry->item = item;
+			inc_rmap_list_pool_count(slot, scan_index);
+		} else
+			goto putpage;
+	}
+
+	BUG_ON(item->slot != slot);
+	/* the page may have changed */
+	item->page = page;
+	put_rmap_list_entry(slot, scan_index);
+	if (swap_entry)
+		put_rmap_list_entry(slot, swap_index);
+	return item;
+
+putpage:
+	put_page(page);
+	page = NULL;
+nopage:
+	/* no page, store addr back and free rmap_item if possible */
+	free_entry_item(scan_entry);
+	put_rmap_list_entry(slot, scan_index);
+	if (swap_entry)
+		put_rmap_list_entry(slot, swap_index);
+	return NULL;
+}
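+
+/*
+ * The swap of scan_index with a random later index above is an incremental
+ * Fisher-Yates shuffle, so one full pass over the slot visits every page
+ * exactly once, in a fresh random order.  The same idea in isolation
+ * (userspace sketch; shuffled_scan() is an illustrative name only):
+ *
+ *	#include <stdlib.h>
+ *
+ *	// visit idx[0..n-1] exactly once, in random order, shuffling in place
+ *	static void shuffled_scan(unsigned long *idx, unsigned long n,
+ *				  void (*visit)(unsigned long))
+ *	{
+ *		unsigned long i, j, tmp;
+ *
+ *		for (i = 0; i < n; i++) {
+ *			j = i + rand() % (n - i);
+ *			tmp = idx[i]; idx[i] = idx[j]; idx[j] = tmp;
+ *			visit(idx[i]);
+ *		}
+ *	}
+ */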
+
+static inline int in_stable_tree(struct rmap_item *rmap_item)
+{
+	return rmap_item->address & STABLE_FLAG;
+}
+
+/**
+ * scan_vma_one_page() - scan the next page in a vma_slot. Called with
+ * mmap_sem locked.
+ */
+static noinline void scan_vma_one_page(struct vma_slot *slot)
+{
+	u32 hash;
+	struct mm_struct *mm;
+	struct rmap_item *rmap_item = NULL;
+	struct vm_area_struct *vma = slot->vma;
+
+	mm = vma->vm_mm;
+	BUG_ON(!mm);
+	BUG_ON(!slot);
+
+	rmap_item = get_next_rmap_item(slot, &hash);
+	if (!rmap_item)
+		goto out1;
+
+	if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item))
+		goto out2;
+
+	cmp_and_merge_page(rmap_item, hash);
+out2:
+	put_page(rmap_item->page);
+out1:
+	slot->pages_scanned++;
+	slot->this_sampled++;
+	if (slot->fully_scanned_round != fully_scanned_round)
+		scanned_virtual_pages++;
+
+	if (vma_fully_scanned(slot))
+		slot->fully_scanned_round = fully_scanned_round;
+}
+
+static inline unsigned long rung_get_pages(struct scan_rung *rung)
+{
+	struct slot_tree_node *node;
+
+	if (!rung->vma_root.rnode)
+		return 0;
+
+	node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode);
+
+	return node->size;
+}
+
+#define RUNG_SAMPLED_MIN	3
+
+static inline
+void uksm_calc_rung_step(struct scan_rung *rung,
+			 unsigned long page_time, unsigned long ratio)
+{
+	unsigned long sampled, pages;
+
+	/* will be fully scanned ? */
+	if (!rung->cover_msecs) {
+		rung->step = 1;
+		return;
+	}
+
+	sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE)
+		  * ratio / page_time;
+
+	/*
+	 *  Before we finish a scan round and its expensive per-round jobs,
+	 *  we need to have a chance to estimate the per-page time. So
+	 *  the sampled number cannot be too small.
+	 */
+	if (sampled < RUNG_SAMPLED_MIN)
+		sampled = RUNG_SAMPLED_MIN;
+
+	pages = rung_get_pages(rung);
+	if (likely(pages > sampled))
+		rung->step = pages / sampled;
+	else
+		rung->step = 1;
+}
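+
+/*
+ * Worked example with purely illustrative numbers: for cover_msecs = 1000,
+ * a cpu ratio such that ratio/TIME_RATIO_SCALE = 1 and an estimated
+ * page_time of 2000ns, sampled = 1000 * 1000000 * 1 / 2000 = 500000 pages
+ * per round; a rung holding 5000000 pages then gets step = 10, i.e. one
+ * page in ten is sampled on each pass.
+ */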
+
+static inline int step_need_recalc(struct scan_rung *rung)
+{
+	unsigned long pages, stepmax;
+
+	pages = rung_get_pages(rung);
+	stepmax = pages / RUNG_SAMPLED_MIN;
+
+	return pages && (rung->step > pages ||
+			 (stepmax && rung->step > stepmax));
+}
+
+static inline
+void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc)
+{
+	struct vma_slot *slot;
+
+	if (finished)
+		rung->flags |= UKSM_RUNG_ROUND_FINISHED;
+
+	if (step_recalc || step_need_recalc(rung)) {
+		uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
+		BUG_ON(step_need_recalc(rung));
+	}
+
+	slot_iter_index = prandom_u32() % rung->step;
+	BUG_ON(!rung->vma_root.rnode);
+	slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter);
+	BUG_ON(!slot);
+
+	rung->current_scan = slot;
+	rung->current_offset = slot_iter_index;
+}
+
+static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot)
+{
+	return &slot->rung->vma_root;
+}
+
+/*
+ * Return 1 if the scan position was reset, 0 otherwise.
+ */
+static int advance_current_scan(struct scan_rung *rung)
+{
+	unsigned short n;
+	struct vma_slot *slot, *next = NULL;
+
+	BUG_ON(!rung->vma_root.num);
+
+	slot = rung->current_scan;
+	n = (slot->pages - rung->current_offset) % rung->step;
+	slot_iter_index = rung->step - n;
+	next = sradix_tree_next(&rung->vma_root, slot->snode,
+				slot->sindex, slot_iter);
+
+	if (next) {
+		rung->current_offset = slot_iter_index;
+		rung->current_scan = next;
+		return 0;
+	} else {
+		reset_current_scan(rung, 1, 0);
+		return 1;
+	}
+}
+
+static inline void rung_rm_slot(struct vma_slot *slot)
+{
+	struct scan_rung *rung = slot->rung;
+	struct sradix_tree_root *root;
+
+	if (rung->current_scan == slot)
+		advance_current_scan(rung);
+
+	root = slot_get_root(slot);
+	sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex);
+	slot->snode = NULL;
+	if (step_need_recalc(rung)) {
+		uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
+		BUG_ON(step_need_recalc(rung));
+	}
+
+	/* In case advance_current_scan looped back to this slot again */
+	if (rung->vma_root.num && rung->current_scan == slot)
+		reset_current_scan(slot->rung, 1, 0);
+}
+
+static inline void rung_add_new_slots(struct scan_rung *rung,
+			struct vma_slot **slots, unsigned long num)
+{
+	int err;
+	struct vma_slot *slot;
+	unsigned long i;
+	struct sradix_tree_root *root = &rung->vma_root;
+
+	err = sradix_tree_enter(root, (void **)slots, num);
+	BUG_ON(err);
+
+	for (i = 0; i < num; i++) {
+		slot = slots[i];
+		slot->rung = rung;
+		BUG_ON(vma_fully_scanned(slot));
+	}
+
+	if (rung->vma_root.num == num)
+		reset_current_scan(rung, 0, 1);
+}
+
+static inline int rung_add_one_slot(struct scan_rung *rung,
+				     struct vma_slot *slot)
+{
+	int err;
+
+	err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1);
+	if (err)
+		return err;
+
+	slot->rung = rung;
+	if (rung->vma_root.num == 1)
+		reset_current_scan(rung, 0, 1);
+
+	return 0;
+}
+
+/*
+ * Return true if the slot is deleted from its rung.
+ */
+static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung)
+{
+	struct scan_rung *old_rung = slot->rung;
+	int err;
+
+	if (old_rung == rung)
+		return 0;
+
+	rung_rm_slot(slot);
+	err = rung_add_one_slot(rung, slot);
+	if (err) {
+		err = rung_add_one_slot(old_rung, slot);
+		WARN_ON(err); /* OOPS, badly OOM, we lost this slot */
+	}
+
+	return 1;
+}
+
+static inline int vma_rung_up(struct vma_slot *slot)
+{
+	struct scan_rung *rung;
+
+	rung = slot->rung;
+	if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1])
+		rung++;
+
+	return vma_rung_enter(slot, rung);
+}
+
+static inline int vma_rung_down(struct vma_slot *slot)
+{
+	struct scan_rung *rung;
+
+	rung = slot->rung;
+	if (slot->rung != &uksm_scan_ladder[0])
+		rung--;
+
+	return vma_rung_enter(slot, rung);
+}
+
+/**
+ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot.
+ */
+static unsigned long cal_dedup_ratio(struct vma_slot *slot)
+{
+	unsigned long ret;
+	unsigned long pages;
+
+	pages = slot->this_sampled;
+	if (!pages)
+		return 0;
+
+	BUG_ON(slot->pages_scanned == slot->last_scanned);
+
+	ret = slot->pages_merged;
+
+	/* Thrashing area filtering */
+	if (ret && uksm_thrash_threshold) {
+		if (slot->pages_cowed * 100 / slot->pages_merged
+		    > uksm_thrash_threshold) {
+			ret = 0;
+		} else {
+			ret = slot->pages_merged - slot->pages_cowed;
+		}
+	}
+
+	return ret * 100 / pages;
+}
+
+/**
+ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this slot.
+ */
+static unsigned long cal_dedup_ratio_old(struct vma_slot *slot)
+{
+	unsigned long ret;
+	unsigned long pages;
+
+	pages = slot->pages;
+	if (!pages)
+		return 0;
+
+	ret = slot->pages_bemerged;
+
+	/* Thrashing area filtering */
+	if (ret && uksm_thrash_threshold) {
+		if (slot->pages_cowed * 100 / slot->pages_bemerged
+		    > uksm_thrash_threshold) {
+			ret = 0;
+		} else {
+			ret = slot->pages_bemerged - slot->pages_cowed;
+		}
+	}
+
+	return ret * 100 / pages;
+}
+
+/**
+ * stable_node_reinsert() - When the hash_strength has been adjusted, the
+ * stable tree needs to be restructured; this function re-inserts a single
+ * stable node into the new tree.
+ */
+static inline void stable_node_reinsert(struct stable_node *new_node,
+					struct page *page,
+					struct rb_root *root_treep,
+					struct list_head *tree_node_listp,
+					u32 hash)
+{
+	struct rb_node **new = &root_treep->rb_node;
+	struct rb_node *parent = NULL;
+	struct stable_node *stable_node;
+	struct tree_node *tree_node;
+	struct page *tree_page;
+	int cmp;
+
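+	/* First level: look up the tree_node keyed by the sampled (first-level) hash. */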
+	while (*new) {
+		int cmp;
+
+		tree_node = rb_entry(*new, struct tree_node, node);
+
+		cmp = hash_cmp(hash, tree_node->hash);
+
+		if (cmp < 0) {
+			parent = *new;
+			new = &parent->rb_left;
+		} else if (cmp > 0) {
+			parent = *new;
+			new = &parent->rb_right;
+		} else
+			break;
+	}
+
+	if (*new) {
+		/* found a tree node with the same first-level hash value */
+		stable_node_hash_max(new_node, page, hash);
+		if (tree_node->count == 1) {
+			stable_node = rb_entry(tree_node->sub_root.rb_node,
+					       struct stable_node, node);
+			tree_page = get_uksm_page(stable_node, 1, 0);
+			if (tree_page) {
+				stable_node_hash_max(stable_node,
+						      tree_page, hash);
+				put_page(tree_page);
+
+				/* prepare for stable node insertion */
+
+				cmp = hash_cmp(new_node->hash_max,
+						   stable_node->hash_max);
+				parent = &stable_node->node;
+				if (cmp < 0)
+					new = &parent->rb_left;
+				else if (cmp > 0)
+					new = &parent->rb_right;
+				else
+					goto failed;
+
+				goto add_node;
+			} else {
+				/* the only stable_node was deleted, but the
+				 * tree node was not.
+				 */
+				goto tree_node_reuse;
+			}
+		}
+
+		/* well, search the collision subtree */
+		new = &tree_node->sub_root.rb_node;
+		parent = NULL;
+		BUG_ON(!*new);
+		while (*new) {
+			int cmp;
+
+			stable_node = rb_entry(*new, struct stable_node, node);
+
+			cmp = hash_cmp(new_node->hash_max,
+					   stable_node->hash_max);
+
+			if (cmp < 0) {
+				parent = *new;
+				new = &parent->rb_left;
+			} else if (cmp > 0) {
+				parent = *new;
+				new = &parent->rb_right;
+			} else {
+				/* oh, no, still a collision */
+				goto failed;
+			}
+		}
+
+		goto add_node;
+	}
+
+	/* no tree node found */
+	tree_node = alloc_tree_node(tree_node_listp);
+	if (!tree_node) {
+		pr_err("UKSM: memory allocation error!\n");
+		goto failed;
+	} else {
+		tree_node->hash = hash;
+		rb_link_node(&tree_node->node, parent, new);
+		rb_insert_color(&tree_node->node, root_treep);
+
+tree_node_reuse:
+		/* prepare for stable node insertion */
+		parent = NULL;
+		new = &tree_node->sub_root.rb_node;
+	}
+
+add_node:
+	rb_link_node(&new_node->node, parent, new);
+	rb_insert_color(&new_node->node, &tree_node->sub_root);
+	new_node->tree_node = tree_node;
+	tree_node->count++;
+	return;
+
+failed:
+	/* This can only happen when two nodes have collided
+	 * on both hash levels.
+	 */
+	new_node->tree_node = NULL;
+	return;
+}
+
+static inline void free_all_tree_nodes(struct list_head *list)
+{
+	struct tree_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, list, all_list) {
+		free_tree_node(node);
+	}
+}
+
+/**
+ * stable_tree_delta_hash() - Delta-hash the stable tree from the previous hash
+ * strength to the current hash_strength. It restructures the whole tree.
+ */
+static inline void stable_tree_delta_hash(u32 prev_hash_strength)
+{
+	struct stable_node *node, *tmp;
+	struct rb_root *root_new_treep;
+	struct list_head *new_tree_node_listp;
+
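+	/* Flip to the spare stable-tree root and tree_node list; the old ones are freed after re-insertion. */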
+	stable_tree_index = (stable_tree_index + 1) % 2;
+	root_new_treep = &root_stable_tree[stable_tree_index];
+	new_tree_node_listp = &stable_tree_node_list[stable_tree_index];
+	*root_new_treep = RB_ROOT;
+	BUG_ON(!list_empty(new_tree_node_listp));
+
+	/*
+	 * We must iterate safely: the node could be removed by get_uksm_page().
+	 */
+	list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) {
+		void *addr;
+		struct page *node_page;
+		u32 hash;
+
+		/*
+		 * We are completely restructuring the stable nodes into a new
+		 * stable tree. We don't bother unlinking from the old tree or
+		 * touching the old tree_nodes; they will all be freed at once.
+		 */
+		node_page = get_uksm_page(node, 0, 0);
+		if (!node_page)
+			continue;
+
+		if (node->tree_node) {
+			hash = node->tree_node->hash;
+
+			addr = kmap_atomic(node_page);
+
+			hash = delta_hash(addr, prev_hash_strength,
+					  hash_strength, hash);
+			kunmap_atomic(addr);
+		} else {
+			/*
+			 * It was not inserted into the rbtree due to a
+			 * collision in the last scan round.
+			 */
+			hash = page_hash(node_page, hash_strength, 0);
+		}
+
+		stable_node_reinsert(node, node_page, root_new_treep,
+				     new_tree_node_listp, hash);
+		put_page(node_page);
+	}
+
+	root_stable_treep = root_new_treep;
+	free_all_tree_nodes(stable_tree_node_listp);
+	BUG_ON(!list_empty(stable_tree_node_listp));
+	stable_tree_node_listp = new_tree_node_listp;
+}
+
+static inline void inc_hash_strength(unsigned long delta)
+{
+	hash_strength += 1 << delta;
+	if (hash_strength > HASH_STRENGTH_MAX)
+		hash_strength = HASH_STRENGTH_MAX;
+}
+
+static inline void dec_hash_strength(unsigned long delta)
+{
+	unsigned long change = 1 << delta;
+
+	if (hash_strength <= change + 1)
+		hash_strength = 1;
+	else
+		hash_strength -= change;
+}
+
+static inline void inc_hash_strength_delta(void)
+{
+	hash_strength_delta++;
+	if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX)
+		hash_strength_delta = HASH_STRENGTH_DELTA_MAX;
+}
+
+static inline unsigned long get_current_neg_ratio(void)
+{
+	u64 pos = benefit.pos;
+	u64 neg = benefit.neg;
+
+	if (!neg)
+		return 0;
+
+	if (!pos || neg > pos)
+		return 100;
+
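+	/* Avoid 64-bit overflow: scale pos down when neg * 100 would overflow. */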
+	if (neg > div64_u64(U64_MAX, 100))
+		pos = div64_u64(pos, 100);
+	else
+		neg *= 100;
+
+	return div64_u64(neg, pos);
+}
+
+static inline unsigned long get_current_benefit(void)
+{
+	u64 pos = benefit.pos;
+	u64 neg = benefit.neg;
+	u64 scanned = benefit.scanned;
+
+	if (neg > pos)
+		return 0;
+
+	return div64_u64((pos - neg), scanned);
+}
+
+static inline int judge_rshash_direction(void)
+{
+	u64 current_neg_ratio, stable_benefit;
+	u64 current_benefit, delta = 0;
+	int ret = STILL;
+
+	/*
+	 * Try to probe a value shortly after boot, and re-probe periodically
+	 * in case the system stays unchanged for a long time.
+	 */
+	if ((fully_scanned_round & 0xFFULL) == 10) {
+		ret = OBSCURE;
+		goto out;
+	}
+
+	current_neg_ratio = get_current_neg_ratio();
+
+	if (current_neg_ratio == 0) {
+		rshash_neg_cont_zero++;
+		if (rshash_neg_cont_zero > 2)
+			return GO_DOWN;
+		else
+			return STILL;
+	}
+	rshash_neg_cont_zero = 0;
+
+	if (current_neg_ratio > 90) {
+		ret = GO_UP;
+		goto out;
+	}
+
+	current_benefit = get_current_benefit();
+	stable_benefit = rshash_state.stable_benefit;
+
+	if (!stable_benefit) {
+		ret = OBSCURE;
+		goto out;
+	}
+
+	if (current_benefit > stable_benefit)
+		delta = current_benefit - stable_benefit;
+	else if (current_benefit < stable_benefit)
+		delta = stable_benefit - current_benefit;
+
+	delta = div64_u64(100 * delta, stable_benefit);
+
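+	/* A benefit swing of more than 50% is only treated as OBSCURE after it repeats. */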
+	if (delta > 50) {
+		rshash_cont_obscure++;
+		if (rshash_cont_obscure > 2)
+			return OBSCURE;
+		else
+			return STILL;
+	}
+
+out:
+	rshash_cont_obscure = 0;
+	return ret;
+}
+
+/**
+ * rshash_adjust() - The main function controlling the random sampling state
+ * machine that adapts the hash strength.
+ *
+ * Return true if hash_strength has changed.
+ */
+static inline int rshash_adjust(void)
+{
+	unsigned long prev_hash_strength = hash_strength;
+
+	if (!encode_benefit())
+		return 0;
+
+	switch (rshash_state.state) {
+	case RSHASH_STILL:
+		switch (judge_rshash_direction()) {
+		case GO_UP:
+			if (rshash_state.pre_direct == GO_DOWN)
+				hash_strength_delta = 0;
+
+			inc_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+			rshash_state.stable_benefit = get_current_benefit();
+			rshash_state.pre_direct = GO_UP;
+			break;
+
+		case GO_DOWN:
+			if (rshash_state.pre_direct == GO_UP)
+				hash_strength_delta = 0;
+
+			dec_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+			rshash_state.stable_benefit = get_current_benefit();
+			rshash_state.pre_direct = GO_DOWN;
+			break;
+
+		case OBSCURE:
+			rshash_state.stable_point = hash_strength;
+			rshash_state.turn_point_down = hash_strength;
+			rshash_state.turn_point_up = hash_strength;
+			rshash_state.turn_benefit_down = get_current_benefit();
+			rshash_state.turn_benefit_up = get_current_benefit();
+			rshash_state.lookup_window_index = 0;
+			rshash_state.state = RSHASH_TRYDOWN;
+			dec_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+			break;
+
+		case STILL:
+			break;
+		default:
+			BUG();
+		}
+		break;
+
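+	/*
+	 * Probe downwards: track the best benefit seen; fall back to the
+	 * stable point and switch to probing upwards if benefit keeps dropping.
+	 */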
+	case RSHASH_TRYDOWN:
+		if (rshash_state.lookup_window_index++ % 5 == 0)
+			rshash_state.below_count = 0;
+
+		if (get_current_benefit() < rshash_state.stable_benefit)
+			rshash_state.below_count++;
+		else if (get_current_benefit() >
+			 rshash_state.turn_benefit_down) {
+			rshash_state.turn_point_down = hash_strength;
+			rshash_state.turn_benefit_down = get_current_benefit();
+		}
+
+		if (rshash_state.below_count >= 3 ||
+		    judge_rshash_direction() == GO_UP ||
+		    hash_strength == 1) {
+			hash_strength = rshash_state.stable_point;
+			hash_strength_delta = 0;
+			inc_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+			rshash_state.lookup_window_index = 0;
+			rshash_state.state = RSHASH_TRYUP;
+			hash_strength_delta = 0;
+		} else {
+			dec_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+		}
+		break;
+
+	case RSHASH_TRYUP:
+		if (rshash_state.lookup_window_index++ % 5 == 0)
+			rshash_state.below_count = 0;
+
+		if (get_current_benefit() < rshash_state.turn_benefit_down)
+			rshash_state.below_count++;
+		else if (get_current_benefit() > rshash_state.turn_benefit_up) {
+			rshash_state.turn_point_up = hash_strength;
+			rshash_state.turn_benefit_up = get_current_benefit();
+		}
+
+		if (rshash_state.below_count >= 3 ||
+		    judge_rshash_direction() == GO_DOWN ||
+		    hash_strength == HASH_STRENGTH_MAX) {
+			hash_strength = rshash_state.turn_benefit_up >
+				rshash_state.turn_benefit_down ?
+				rshash_state.turn_point_up :
+				rshash_state.turn_point_down;
+
+			rshash_state.state = RSHASH_PRE_STILL;
+		} else {
+			inc_hash_strength(hash_strength_delta);
+			inc_hash_strength_delta();
+		}
+
+		break;
+
+	case RSHASH_NEW:
+	case RSHASH_PRE_STILL:
+		rshash_state.stable_benefit = get_current_benefit();
+		rshash_state.state = RSHASH_STILL;
+		hash_strength_delta = 0;
+		break;
+	default:
+		BUG();
+	}
+
+	/* rshash_neg = rshash_pos = 0; */
+	reset_benefit();
+
+	if (prev_hash_strength != hash_strength)
+		stable_tree_delta_hash(prev_hash_strength);
+
+	return prev_hash_strength != hash_strength;
+}
+
+/**
+ * round_update_ladder() - The main function updating all the ladder
+ * adjustments whenever a scan round is finished.
+ */
+static noinline void round_update_ladder(void)
+{
+	int i;
+	unsigned long dedup;
+	struct vma_slot *slot, *tmp_slot;
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++)
+		uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED;
+
+	list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) {
+
+		/* the slot may have been removed from its rung when its mm exited */
+		if (slot->snode) {
+			dedup = cal_dedup_ratio_old(slot);
+			if (dedup && dedup >= uksm_abundant_threshold)
+				vma_rung_up(slot);
+		}
+
+		slot->pages_bemerged = 0;
+		slot->pages_cowed = 0;
+
+		list_del_init(&slot->dedup_list);
+	}
+}
+
+static void uksm_del_vma_slot(struct vma_slot *slot)
+{
+	int i, j;
+	struct rmap_list_entry *entry;
+
+	if (slot->snode) {
+		/*
+		 * If entering the rung failed, snode is NULL and removal is
+		 * unnecessary.
+		 */
+		rung_rm_slot(slot);
+	}
+
+	if (!list_empty(&slot->dedup_list))
+		list_del(&slot->dedup_list);
+
+	if (!slot->rmap_list_pool || !slot->pool_counts) {
+		/* In case it OOMed in uksm_vma_enter() */
+		goto out;
+	}
+
+	for (i = 0; i < slot->pool_size; i++) {
+		void *addr;
+
+		if (!slot->rmap_list_pool[i])
+			continue;
+
+		addr = kmap(slot->rmap_list_pool[i]);
+		for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
+			entry = (struct rmap_list_entry *)addr + j;
+			if (is_addr(entry->addr))
+				continue;
+			if (!entry->item)
+				continue;
+
+			remove_rmap_item_from_tree(entry->item);
+			free_rmap_item(entry->item);
+			slot->pool_counts[i]--;
+		}
+		BUG_ON(slot->pool_counts[i]);
+		kunmap(slot->rmap_list_pool[i]);
+		__free_page(slot->rmap_list_pool[i]);
+	}
+	kfree(slot->rmap_list_pool);
+	kfree(slot->pool_counts);
+
+out:
+	slot->rung = NULL;
+	if (slot->flags & UKSM_SLOT_IN_UKSM) {
+		BUG_ON(uksm_pages_total < slot->pages);
+		uksm_pages_total -= slot->pages;
+	}
+
+	if (slot->fully_scanned_round == fully_scanned_round)
+		scanned_virtual_pages -= slot->pages;
+	else
+		scanned_virtual_pages -= slot->pages_scanned;
+	free_vma_slot(slot);
+}
+
+
+#define SPIN_LOCK_PERIOD	32
+static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD];
+static inline void cleanup_vma_slots(void)
+{
+	struct vma_slot *slot;
+	int i;
+
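+	/*
+	 * Drain vma_slot_del in batches of SPIN_LOCK_PERIOD so the list lock
+	 * is not held while uksm_del_vma_slot() does the heavy teardown.
+	 */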
+	i = 0;
+	spin_lock(&vma_slot_list_lock);
+	while (!list_empty(&vma_slot_del)) {
+		slot = list_entry(vma_slot_del.next,
+				  struct vma_slot, slot_list);
+		list_del(&slot->slot_list);
+		cleanup_slots[i++] = slot;
+		if (i == SPIN_LOCK_PERIOD) {
+			spin_unlock(&vma_slot_list_lock);
+			while (--i >= 0)
+				uksm_del_vma_slot(cleanup_slots[i]);
+			i = 0;
+			spin_lock(&vma_slot_list_lock);
+		}
+	}
+	spin_unlock(&vma_slot_list_lock);
+
+	while (--i >= 0)
+		uksm_del_vma_slot(cleanup_slots[i]);
+}
+
+/*
+ * Exponential moving average formula
+ */
+static inline unsigned long ema(unsigned long curr, unsigned long last_ema)
+{
+	/*
+	 * For a very high burst, even the EMA cannot work well: a falsely
+	 * very high per-page time estimate feeds back as very high context
+	 * switch and rung update overhead, which in turn raises the per-page
+	 * time further, so it may never converge.
+	 *
+	 * Instead, we try to approach this value in a binary manner.
+	 */
+	if (curr > last_ema * 10)
+		return last_ema * 2;
+
+	return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100;
+}
+
+/*
+ * Convert a cpu ratio (in units of 1/TIME_RATIO_SCALE, as configured by the
+ * user) to nanoseconds of scan time, based on the current uksm_sleep_jiffies.
+ */
+static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio)
+{
+	return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) /
+		(TIME_RATIO_SCALE - ratio) * ratio;
+}
+
+
+static inline unsigned long rung_real_ratio(int cpu_time_ratio)
+{
+	unsigned long ret;
+
+	BUG_ON(!cpu_time_ratio);
+
+	if (cpu_time_ratio > 0)
+		ret = cpu_time_ratio;
+	else
+		ret = (unsigned long)(-cpu_time_ratio) *
+			uksm_max_cpu_percentage / 100UL;
+
+	return ret ? ret : 1;
+}
+
+static noinline void uksm_calc_scan_pages(void)
+{
+	struct scan_rung *ladder = uksm_scan_ladder;
+	unsigned long sleep_usecs, nsecs;
+	unsigned long ratio;
+	int i;
+	unsigned long per_page;
+
+	if (uksm_ema_page_time > 100000 ||
+	    (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL))
+		uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
+
+	per_page = uksm_ema_page_time;
+	BUG_ON(!per_page);
+
+	/*
+	 * Every 8 eval rounds, we re-probe the uksm_sleep_jiffies value
+	 * based on the saved user input.
+	 */
+	if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL)
+		uksm_sleep_jiffies = uksm_sleep_saved;
+
+	/* We require that a rung scans at least one page in a period. */
+	nsecs = per_page;
+	ratio = rung_real_ratio(ladder[0].cpu_ratio);
+	if (cpu_ratio_to_nsec(ratio) < nsecs) {
+		sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio
+				/ NSEC_PER_USEC;
+		uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1;
+	}
+
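+	/* Turn each rung's cpu ratio into a pages_to_scan quota for this period. */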
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		ratio = rung_real_ratio(ladder[i].cpu_ratio);
+		ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) /
+					per_page;
+		BUG_ON(!ladder[i].pages_to_scan);
+		uksm_calc_rung_step(&ladder[i], per_page, ratio);
+	}
+}
+
+/*
+ * Convert the scan time of this round (ns) to the next expected minimum sleep
+ * time (ms), being careful of possible overflows. ratio is taken from
+ * rung_real_ratio().
+ */
+static inline
+unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio)
+{
+	scan_time >>= 20; /* to msec level now */
+	BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE));
+
+	return (unsigned int) ((unsigned long) scan_time *
+			       (TIME_RATIO_SCALE - ratio) / ratio);
+}
+
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+static void uksm_vma_enter(struct vma_slot **slots, unsigned long num)
+{
+	struct scan_rung *rung;
+
+	rung = &uksm_scan_ladder[0];
+	rung_add_new_slots(rung, slots, num);
+}
+
+static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE];
+
+static void uksm_enter_all_slots(void)
+{
+	struct vma_slot *slot;
+	unsigned long index;
+	struct list_head empty_vma_list;
+	int i;
+
+	i = 0;
+	index = 0;
+	INIT_LIST_HEAD(&empty_vma_list);
+
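+	/*
+	 * VMAs without an anon_vma yet are parked on empty_vma_list and
+	 * spliced back to vma_slot_new below, to be retried later.
+	 */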
+	spin_lock(&vma_slot_list_lock);
+	while (!list_empty(&vma_slot_new)) {
+		slot = list_entry(vma_slot_new.next,
+				  struct vma_slot, slot_list);
+
+		if (!slot->vma->anon_vma) {
+			list_move(&slot->slot_list, &empty_vma_list);
+		} else if (vma_can_enter(slot->vma)) {
+			batch_slots[index++] = slot;
+			list_del_init(&slot->slot_list);
+		} else {
+			list_move(&slot->slot_list, &vma_slot_noadd);
+		}
+
+		if (++i == SPIN_LOCK_PERIOD ||
+		    (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) {
+			spin_unlock(&vma_slot_list_lock);
+
+			if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) {
+				uksm_vma_enter(batch_slots, index);
+				index = 0;
+			}
+			i = 0;
+			cond_resched();
+			spin_lock(&vma_slot_list_lock);
+		}
+	}
+
+	list_splice(&empty_vma_list, &vma_slot_new);
+
+	spin_unlock(&vma_slot_list_lock);
+
+	if (index)
+		uksm_vma_enter(batch_slots, index);
+
+}
+
+static inline int rung_round_finished(struct scan_rung *rung)
+{
+	return rung->flags & UKSM_RUNG_ROUND_FINISHED;
+}
+
+static inline void judge_slot(struct vma_slot *slot)
+{
+	struct scan_rung *rung = slot->rung;
+	unsigned long dedup;
+	int deleted;
+
+	dedup = cal_dedup_ratio(slot);
+	if (vma_fully_scanned(slot) && uksm_thrash_threshold)
+		deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]);
+	else if (dedup && dedup >= uksm_abundant_threshold)
+		deleted = vma_rung_up(slot);
+	else
+		deleted = vma_rung_down(slot);
+
+	slot->pages_merged = 0;
+	slot->pages_cowed = 0;
+	slot->this_sampled = 0;
+
+	if (vma_fully_scanned(slot))
+		slot->pages_scanned = 0;
+
+	slot->last_scanned = slot->pages_scanned;
+
+	/* If it was deleted above, the rung was already advanced. */
+	if (!deleted)
+		advance_current_scan(rung);
+}
+
+
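+/* A hash round completes once more than a quarter of all registered pages have been scanned. */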
+static inline int hash_round_finished(void)
+{
+	if (scanned_virtual_pages > (uksm_pages_total >> 2)) {
+		scanned_virtual_pages = 0;
+		if (uksm_pages_scanned)
+			fully_scanned_round++;
+
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
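+/*
+ * Scan up to UKSM_MMSEM_BATCH pages of one VMA without re-taking its mmap_sem,
+ * and skip at most BUSY_RETRY slots of a busy mm before giving up on the rung.
+ */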
+#define UKSM_MMSEM_BATCH	5
+#define BUSY_RETRY		100
+
+/**
+ * uksm_do_scan()  - the main worker function.
+ */
+static noinline void uksm_do_scan(void)
+{
+	struct vma_slot *slot, *iter;
+	struct mm_struct *busy_mm;
+	unsigned char round_finished, all_rungs_empty;
+	int i, err, mmsem_batch;
+	unsigned long pcost;
+	long long delta_exec;
+	unsigned long vpages, max_cpu_ratio;
+	unsigned long long start_time, end_time, scan_time;
+	unsigned int expected_jiffies;
+
+	might_sleep();
+
+	vpages = 0;
+
+	start_time = task_sched_runtime(current);
+	max_cpu_ratio = 0;
+	mmsem_batch = 0;
+
+	for (i = 0; i < SCAN_LADDER_SIZE;) {
+		struct scan_rung *rung = &uksm_scan_ladder[i];
+		unsigned long ratio;
+		int busy_retry;
+
+		if (!rung->pages_to_scan) {
+			i++;
+			continue;
+		}
+
+		if (!rung->vma_root.num) {
+			rung->pages_to_scan = 0;
+			i++;
+			continue;
+		}
+
+		ratio = rung_real_ratio(rung->cpu_ratio);
+		if (ratio > max_cpu_ratio)
+			max_cpu_ratio = ratio;
+
+		busy_retry = BUSY_RETRY;
+		/*
+		 * Do not consider rung_round_finished() here; just use up the
+		 * rung->pages_to_scan quota.
+		 */
+		while (rung->pages_to_scan && rung->vma_root.num &&
+		       likely(!freezing(current))) {
+			int reset = 0;
+
+			slot = rung->current_scan;
+
+			BUG_ON(vma_fully_scanned(slot));
+
+			if (mmsem_batch)
+				err = 0;
+			else
+				err = try_down_read_slot_mmap_sem(slot);
+
+			if (err == -ENOENT) {
+rm_slot:
+				rung_rm_slot(slot);
+				continue;
+			}
+
+			busy_mm = slot->mm;
+
+			if (err == -EBUSY) {
+				/* skip other vmas on the same mm */
+				do {
+					reset = advance_current_scan(rung);
+					iter = rung->current_scan;
+					busy_retry--;
+					if (iter->vma->vm_mm != busy_mm ||
+					    !busy_retry || reset)
+						break;
+				} while (1);
+
+				if (iter->vma->vm_mm != busy_mm) {
+					continue;
+				} else {
+					/* scan round finished */
+					break;
+				}
+			}
+
+			BUG_ON(!vma_can_enter(slot->vma));
+			if (uksm_test_exit(slot->vma->vm_mm)) {
+				mmsem_batch = 0;
+				up_read(&slot->vma->vm_mm->mmap_sem);
+				goto rm_slot;
+			}
+
+			if (mmsem_batch)
+				mmsem_batch--;
+			else
+				mmsem_batch = UKSM_MMSEM_BATCH;
+
+			/* OK, we have taken the mmap_sem, ready to scan */
+			scan_vma_one_page(slot);
+			rung->pages_to_scan--;
+			vpages++;
+
+			if (rung->current_offset + rung->step > slot->pages - 1
+			    || vma_fully_scanned(slot)) {
+				up_read(&slot->vma->vm_mm->mmap_sem);
+				judge_slot(slot);
+				mmsem_batch = 0;
+			} else {
+				rung->current_offset += rung->step;
+				if (!mmsem_batch)
+					up_read(&slot->vma->vm_mm->mmap_sem);
+			}
+
+			busy_retry = BUSY_RETRY;
+			cond_resched();
+		}
+
+		if (mmsem_batch) {
+			up_read(&slot->vma->vm_mm->mmap_sem);
+			mmsem_batch = 0;
+		}
+
+		if (freezing(current))
+			break;
+
+		cond_resched();
+	}
+	end_time = task_sched_runtime(current);
+	delta_exec = end_time - start_time;
+
+	if (freezing(current))
+		return;
+
+	cleanup_vma_slots();
+	uksm_enter_all_slots();
+
+	round_finished = 1;
+	all_rungs_empty = 1;
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		struct scan_rung *rung = &uksm_scan_ladder[i];
+
+		if (rung->vma_root.num) {
+			all_rungs_empty = 0;
+			if (!rung_round_finished(rung))
+				round_finished = 0;
+		}
+	}
+
+	if (all_rungs_empty)
+		round_finished = 0;
+
+	if (round_finished) {
+		round_update_ladder();
+		uksm_eval_round++;
+
+		if (hash_round_finished() && rshash_adjust()) {
+			/* Reset the unstable root iff hash strength changed */
+			uksm_hash_round++;
+			root_unstable_tree = RB_ROOT;
+			free_all_tree_nodes(&unstable_tree_node_list);
+		}
+
+		/*
+		 * A number of pages can hang around indefinitely on per-cpu
+		 * pagevecs, raised page count preventing write_protect_page
+		 * from merging them.  Though it doesn't really matter much,
+		 * it is puzzling to see some stuck in pages_volatile until
+		 * other activity jostles them out, and they also prevented
+		 * LTP's KSM test from succeeding deterministically; so drain
+		 * them here (here rather than on entry to uksm_do_scan(),
+		 * so we don't IPI too often when pages_to_scan is set low).
+		 */
+		lru_add_drain_all();
+	}
+
+
+	if (vpages && delta_exec > 0) {
+		pcost = (unsigned long) delta_exec / vpages;
+		if (likely(uksm_ema_page_time))
+			uksm_ema_page_time = ema(pcost, uksm_ema_page_time);
+		else
+			uksm_ema_page_time = pcost;
+	}
+
+	uksm_calc_scan_pages();
+	uksm_sleep_real = uksm_sleep_jiffies;
+	/* in case of radical cpu bursts, apply the upper bound */
+	end_time = task_sched_runtime(current);
+	if (max_cpu_ratio && end_time > start_time) {
+		scan_time = end_time - start_time;
+		expected_jiffies = msecs_to_jiffies(
+			scan_time_to_sleep(scan_time, max_cpu_ratio));
+
+		if (expected_jiffies > uksm_sleep_real)
+			uksm_sleep_real = expected_jiffies;
+
+		/* We have a 1 second upper bound for responsiveness. */
+		if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC)
+			uksm_sleep_real = msecs_to_jiffies(1000);
+	}
+
+	return;
+}
+
+static int ksmd_should_run(void)
+{
+	return uksm_run & UKSM_RUN_MERGE;
+}
+
+static int uksm_scan_thread(void *nothing)
+{
+	set_freezable();
+	set_user_nice(current, 5);
+
+	while (!kthread_should_stop()) {
+		mutex_lock(&uksm_thread_mutex);
+		if (ksmd_should_run())
+			uksm_do_scan();
+		mutex_unlock(&uksm_thread_mutex);
+
+		try_to_freeze();
+
+		if (ksmd_should_run()) {
+			schedule_timeout_interruptible(uksm_sleep_real);
+			uksm_sleep_times++;
+		} else {
+			wait_event_freezable(uksm_thread_wait,
+				ksmd_should_run() || kthread_should_stop());
+		}
+	}
+	return 0;
+}
+
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+{
+	struct stable_node *stable_node;
+	struct node_vma *node_vma;
+	struct rmap_item *rmap_item;
+	int search_new_forks = 0;
+	unsigned long address;
+
+	VM_BUG_ON_PAGE(!PageKsm(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+	stable_node = page_stable_node(page);
+	if (!stable_node)
+		return;
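+	/*
+	 * Two passes: first the VMAs each rmap_item was created in, then the
+	 * forked copies found through the anon_vma interval tree.
+	 */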
+again:
+	hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
+		hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
+			struct anon_vma *anon_vma = rmap_item->anon_vma;
+			struct anon_vma_chain *vmac;
+			struct vm_area_struct *vma;
+
+			cond_resched();
+			anon_vma_lock_read(anon_vma);
+			anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
+						       0, ULONG_MAX) {
+				cond_resched();
+				vma = vmac->vma;
+				address = get_rmap_addr(rmap_item);
+
+				if (address < vma->vm_start ||
+				    address >= vma->vm_end)
+					continue;
+
+				if ((rmap_item->slot->vma == vma) ==
+				    search_new_forks)
+					continue;
+
+				if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+					continue;
+
+				if (!rwc->rmap_one(page, vma, address, rwc->arg)) {
+					anon_vma_unlock_read(anon_vma);
+					return;
+				}
+
+				if (rwc->done && rwc->done(page)) {
+					anon_vma_unlock_read(anon_vma);
+					return;
+				}
+			}
+			anon_vma_unlock_read(anon_vma);
+		}
+	}
+	if (!search_new_forks++)
+		goto again;
+}
+
+#ifdef CONFIG_MIGRATION
+/* Common ksm interface, though the implementation may be uksm-specific */
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+	struct stable_node *stable_node;
+
+	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+	VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+	stable_node = page_stable_node(newpage);
+	if (stable_node) {
+		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+		stable_node->kpfn = page_to_pfn(newpage);
+		/*
+		 * newpage->mapping was set in advance; now we need smp_wmb()
+		 * to make sure that the new stable_node->kpfn is visible
+		 * to get_ksm_page() before it can see that oldpage->mapping
+		 * has gone stale (or that PageSwapCache has been cleared).
+		 */
+		smp_wmb();
+		set_page_stable_node(oldpage, NULL);
+	}
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn,
+						 unsigned long end_pfn)
+{
+	struct rb_node *node;
+
+	for (node = rb_first(root_stable_treep); node; node = rb_next(node)) {
+		struct stable_node *stable_node;
+
+		stable_node = rb_entry(node, struct stable_node, node);
+		if (stable_node->kpfn >= start_pfn &&
+		    stable_node->kpfn < end_pfn)
+			return stable_node;
+	}
+	return NULL;
+}
+
+static int uksm_memory_callback(struct notifier_block *self,
+			       unsigned long action, void *arg)
+{
+	struct memory_notify *mn = arg;
+	struct stable_node *stable_node;
+
+	switch (action) {
+	case MEM_GOING_OFFLINE:
+		/*
+		 * Keep it very simple for now: just lock out ksmd and
+		 * MADV_UNMERGEABLE while any memory is going offline.
+		 * mutex_lock_nested() is necessary because lockdep was alarmed
+		 * that here we take uksm_thread_mutex inside notifier chain
+		 * mutex, and later take notifier chain mutex inside
+		 * uksm_thread_mutex to unlock it.   But that's safe because both
+		 * are inside mem_hotplug_mutex.
+		 */
+		mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING);
+		break;
+
+	case MEM_OFFLINE:
+		/*
+		 * Most of the work is done by page migration; but there might
+		 * be a few stable_nodes left over, still pointing to struct
+		 * pages which have been offlined: prune those from the tree.
+		 */
+		while ((stable_node = uksm_check_stable_tree(mn->start_pfn,
+					mn->start_pfn + mn->nr_pages)) != NULL)
+			remove_node_from_stable_tree(stable_node, 1, 1);
+		/* fallthrough */
+
+	case MEM_CANCEL_OFFLINE:
+		mutex_unlock(&uksm_thread_mutex);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+#ifdef CONFIG_SYSFS
+/*
+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
+ */
+
+#define UKSM_ATTR_RO(_name) \
+	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+#define UKSM_ATTR(_name) \
+	static struct kobj_attribute _name##_attr = \
+		__ATTR(_name, 0644, _name##_show, _name##_store)
+
+static ssize_t max_cpu_percentage_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", uksm_max_cpu_percentage);
+}
+
+static ssize_t max_cpu_percentage_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long max_cpu_percentage;
+	int err;
+
+	err = kstrtoul(buf, 10, &max_cpu_percentage);
+	if (err || max_cpu_percentage > 100)
+		return -EINVAL;
+
+	if (max_cpu_percentage == 100)
+		max_cpu_percentage = 99;
+	else if (max_cpu_percentage < 10)
+		max_cpu_percentage = 10;
+
+	uksm_max_cpu_percentage = max_cpu_percentage;
+
+	return count;
+}
+UKSM_ATTR(max_cpu_percentage);
+
+static ssize_t sleep_millisecs_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies));
+}
+
+static ssize_t sleep_millisecs_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long msecs;
+	int err;
+
+	err = kstrtoul(buf, 10, &msecs);
+	if (err || msecs > MSEC_PER_SEC)
+		return -EINVAL;
+
+	uksm_sleep_jiffies = msecs_to_jiffies(msecs);
+	uksm_sleep_saved = uksm_sleep_jiffies;
+
+	return count;
+}
+UKSM_ATTR(sleep_millisecs);
+
+
+static ssize_t cpu_governor_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
+	int i;
+
+	buf[0] = '\0';
+	for (i = 0; i < n ; i++) {
+		if (uksm_cpu_governor == i)
+			strcat(buf, "[");
+
+		strcat(buf, uksm_cpu_governor_str[i]);
+
+		if (uksm_cpu_governor == i)
+			strcat(buf, "]");
+
+		strcat(buf, " ");
+	}
+	strcat(buf, "\n");
+
+	return strlen(buf);
+}
+
+static inline void init_performance_values(void)
+{
+	int i;
+	struct scan_rung *rung;
+	struct uksm_cpu_preset_s *preset = uksm_cpu_preset + uksm_cpu_governor;
+
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = uksm_scan_ladder + i;
+		rung->cpu_ratio = preset->cpu_ratio[i];
+		rung->cover_msecs = preset->cover_msecs[i];
+	}
+
+	uksm_max_cpu_percentage = preset->max_cpu;
+}
+
+static ssize_t cpu_governor_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
+
+	for (n--; n >= 0 ; n--) {
+		if (!strncmp(buf, uksm_cpu_governor_str[n],
+			     strlen(uksm_cpu_governor_str[n])))
+			break;
+	}
+
+	if (n < 0)
+		return -EINVAL;
+	else
+		uksm_cpu_governor = n;
+
+	init_performance_values();
+
+	return count;
+}
+UKSM_ATTR(cpu_governor);
+
+static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	return sprintf(buf, "%u\n", uksm_run);
+}
+
+static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int err;
+	unsigned long flags;
+
+	err = kstrtoul(buf, 10, &flags);
+	if (err || flags > UINT_MAX)
+		return -EINVAL;
+	if (flags > UKSM_RUN_MERGE)
+		return -EINVAL;
+
+	mutex_lock(&uksm_thread_mutex);
+	if (uksm_run != flags)
+		uksm_run = flags;
+	mutex_unlock(&uksm_thread_mutex);
+
+	if (flags & UKSM_RUN_MERGE)
+		wake_up_interruptible(&uksm_thread_wait);
+
+	return count;
+}
+UKSM_ATTR(run);
+
+static ssize_t abundant_threshold_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", uksm_abundant_threshold);
+}
+
+static ssize_t abundant_threshold_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t count)
+{
+	int err;
+	unsigned long flags;
+
+	err = kstrtoul(buf, 10, &flags);
+	if (err || flags > 99)
+		return -EINVAL;
+
+	uksm_abundant_threshold = flags;
+
+	return count;
+}
+UKSM_ATTR(abundant_threshold);
+
+static ssize_t thrash_threshold_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", uksm_thrash_threshold);
+}
+
+static ssize_t thrash_threshold_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t count)
+{
+	int err;
+	unsigned long flags;
+
+	err = kstrtoul(buf, 10, &flags);
+	if (err || flags > 99)
+		return -EINVAL;
+
+	uksm_thrash_threshold = flags;
+
+	return count;
+}
+UKSM_ATTR(thrash_threshold);
+
+static ssize_t cpu_ratios_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	int i, size;
+	struct scan_rung *rung;
+	char *p = buf;
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = &uksm_scan_ladder[i];
+
+		if (rung->cpu_ratio > 0)
+			size = sprintf(p, "%d ", rung->cpu_ratio);
+		else
+			size = sprintf(p, "MAX/%d ",
+					TIME_RATIO_SCALE / -rung->cpu_ratio);
+
+		p += size;
+	}
+
+	*p++ = '\n';
+	*p = '\0';
+
+	return p - buf;
+}
+
+static ssize_t cpu_ratios_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t count)
+{
+	int i, cpuratios[SCAN_LADDER_SIZE], err;
+	unsigned long value;
+	struct scan_rung *rung;
+	char *p, *mem, *end = NULL;
+	ssize_t ret = count;
+
+	p = mem = kzalloc(count + 2, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	memcpy(p, buf, count);
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		if (i != SCAN_LADDER_SIZE - 1) {
+			end = strchr(p, ' ');
+			if (!end) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			*end = '\0';
+		}
+
+		if (strstr(p, "MAX/")) {
+			p = strchr(p, '/') + 1;
+			err = kstrtoul(p, 10, &value);
+			if (err || value > TIME_RATIO_SCALE || !value) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			cpuratios[i] = -(int) (TIME_RATIO_SCALE / value);
+		} else {
+			err = kstrtoul(p, 10, &value);
+			if (err || value > TIME_RATIO_SCALE || !value) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			cpuratios[i] = value;
+		}
+
+		p = end + 1;
+	}
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = &uksm_scan_ladder[i];
+
+		rung->cpu_ratio = cpuratios[i];
+	}
+
+out:
+	kfree(mem);
+	return ret;
+}
+UKSM_ATTR(cpu_ratios);
+
+static ssize_t eval_intervals_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	int i, size;
+	struct scan_rung *rung;
+	char *p = buf;
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = &uksm_scan_ladder[i];
+		size = sprintf(p, "%u ", rung->cover_msecs);
+		p += size;
+	}
+
+	*p++ = '\n';
+	*p = '\0';
+
+	return p - buf;
+}
+
+static ssize_t eval_intervals_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t count)
+{
+	int i, err;
+	unsigned long values[SCAN_LADDER_SIZE];
+	struct scan_rung *rung;
+	char *p, *mem, *end = NULL;
+	ssize_t ret = count;
+
+	p = mem = kzalloc(count + 2, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	memcpy(p, buf, count);
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		if (i != SCAN_LADDER_SIZE - 1) {
+			end = strchr(p, ' ');
+			if (!end) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			*end = '\0';
+		}
+
+		err = kstrtoul(p, 10, &values[i]);
+		if (err) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		p = end + 1;
+	}
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = &uksm_scan_ladder[i];
+
+		rung->cover_msecs = values[i];
+	}
+
+out:
+	kfree(mem);
+	return ret;
+}
+UKSM_ATTR(eval_intervals);
+
+static ssize_t ema_per_page_time_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", uksm_ema_page_time);
+}
+UKSM_ATTR_RO(ema_per_page_time);
+
+static ssize_t pages_shared_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", uksm_pages_shared);
+}
+UKSM_ATTR_RO(pages_shared);
+
+static ssize_t pages_sharing_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", uksm_pages_sharing);
+}
+UKSM_ATTR_RO(pages_sharing);
+
+static ssize_t pages_unshared_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", uksm_pages_unshared);
+}
+UKSM_ATTR_RO(pages_unshared);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%llu\n", fully_scanned_round);
+}
+UKSM_ATTR_RO(full_scans);
+
+static ssize_t pages_scanned_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	unsigned long base = 0;
+	u64 delta, ret;
+
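+	/* The scanned-pages total is maintained as value * 2^base to avoid overflow. */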
+	if (pages_scanned_stored) {
+		base = pages_scanned_base;
+		ret = pages_scanned_stored;
+		delta = uksm_pages_scanned >> base;
+		if (CAN_OVERFLOW_U64(ret, delta)) {
+			ret >>= 1;
+			delta >>= 1;
+			base++;
+			ret += delta;
+		}
+	} else {
+		ret = uksm_pages_scanned;
+	}
+
+	while (ret > ULONG_MAX) {
+		ret >>= 1;
+		base++;
+	}
+
+	if (base)
+		return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base);
+	else
+		return sprintf(buf, "%lu\n", (unsigned long)ret);
+}
+UKSM_ATTR_RO(pages_scanned);
+
+static ssize_t hash_strength_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", hash_strength);
+}
+UKSM_ATTR_RO(hash_strength);
+
+static ssize_t sleep_times_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%llu\n", uksm_sleep_times);
+}
+UKSM_ATTR_RO(sleep_times);
+
+
+static struct attribute *uksm_attrs[] = {
+	&max_cpu_percentage_attr.attr,
+	&sleep_millisecs_attr.attr,
+	&cpu_governor_attr.attr,
+	&run_attr.attr,
+	&ema_per_page_time_attr.attr,
+	&pages_shared_attr.attr,
+	&pages_sharing_attr.attr,
+	&pages_unshared_attr.attr,
+	&full_scans_attr.attr,
+	&pages_scanned_attr.attr,
+	&hash_strength_attr.attr,
+	&sleep_times_attr.attr,
+	&thrash_threshold_attr.attr,
+	&abundant_threshold_attr.attr,
+	&cpu_ratios_attr.attr,
+	&eval_intervals_attr.attr,
+	NULL,
+};
+
+static struct attribute_group uksm_attr_group = {
+	.attrs = uksm_attrs,
+	.name = "uksm",
+};
+#endif /* CONFIG_SYSFS */
+
+static inline void init_scan_ladder(void)
+{
+	int i;
+	struct scan_rung *rung;
+
+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
+		rung = uksm_scan_ladder + i;
+		slot_tree_init_root(&rung->vma_root);
+	}
+
+	init_performance_values();
+	uksm_calc_scan_pages();
+}
+
+static inline int cal_positive_negative_costs(void)
+{
+	struct page *p1, *p2;
+	unsigned char *addr1, *addr2;
+	unsigned long i, time_start, hash_cost;
+	unsigned long loopnum = 0;
+
+	/* IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
+	volatile u32 hash;
+	volatile int ret;
+
+	p1 = alloc_page(GFP_KERNEL);
+	if (!p1)
+		return -ENOMEM;
+
+	p2 = alloc_page(GFP_KERNEL);
+	if (!p2) {
+		__free_page(p1);
+		return -ENOMEM;
+	}
+
+	addr1 = kmap_atomic(p1);
+	addr2 = kmap_atomic(p2);
+	memset(addr1, prandom_u32(), PAGE_SIZE);
+	memcpy(addr2, addr1, PAGE_SIZE);
+
+	/* make sure that the two pages differ in last byte */
+	addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
+
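+	/* Time page hashing against a full memcmp to derive the relative memcmp_cost. */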
+	time_start = jiffies;
+	while (jiffies - time_start < 100) {
+		for (i = 0; i < 100; i++)
+			hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
+		loopnum += 100;
+	}
+	hash_cost = (jiffies - time_start);
+
+	time_start = jiffies;
+	for (i = 0; i < loopnum; i++)
+		ret = pages_identical_with_cost(p1, p2);
+	memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
+	memcmp_cost /= hash_cost;
+	pr_info("UKSM: relative memcmp_cost = %lu "
+		"hash=%u cmp_ret=%d.\n",
+		memcmp_cost, hash, ret);
+
+	__free_page(p1);
+	__free_page(p2);
+	return 0;
+}
+
+static int init_zeropage_hash_table(void)
+{
+	struct page *page;
+	char *addr;
+	int i;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	addr = kmap_atomic(page);
+	memset(addr, 0, PAGE_SIZE);
+	kunmap_atomic(addr);
+
+	zero_hash_table = kmalloc_array(HASH_STRENGTH_MAX, sizeof(u32),
+		GFP_KERNEL);
+	if (!zero_hash_table) {
+		__free_page(page);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < HASH_STRENGTH_MAX; i++)
+		zero_hash_table[i] = page_hash(page, i, 0);
+
+	__free_page(page);
+
+	return 0;
+}
+
+static inline int init_random_sampling(void)
+{
+	unsigned long i;
+
+	random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!random_nums)
+		return -ENOMEM;
+
+	for (i = 0; i < HASH_STRENGTH_FULL; i++)
+		random_nums[i] = i;
+
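+	/* Fisher-Yates shuffle of the sampling offsets. */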
+	for (i = 0; i < HASH_STRENGTH_FULL; i++) {
+		unsigned long rand_range, swap_index, tmp;
+
+		rand_range = HASH_STRENGTH_FULL - i;
+		swap_index = i + prandom_u32() % rand_range;
+		tmp = random_nums[i];
+		random_nums[i] =  random_nums[swap_index];
+		random_nums[swap_index] = tmp;
+	}
+
+	rshash_state.state = RSHASH_NEW;
+	rshash_state.below_count = 0;
+	rshash_state.lookup_window_index = 0;
+
+	return cal_positive_negative_costs();
+}
+
+static int __init uksm_slab_init(void)
+{
+	rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
+	if (!rmap_item_cache)
+		goto out;
+
+	stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
+	if (!stable_node_cache)
+		goto out_free1;
+
+	node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
+	if (!node_vma_cache)
+		goto out_free2;
+
+	vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
+	if (!vma_slot_cache)
+		goto out_free3;
+
+	tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
+	if (!tree_node_cache)
+		goto out_free4;
+
+	return 0;
+
+out_free4:
+	kmem_cache_destroy(vma_slot_cache);
+out_free3:
+	kmem_cache_destroy(node_vma_cache);
+out_free2:
+	kmem_cache_destroy(stable_node_cache);
+out_free1:
+	kmem_cache_destroy(rmap_item_cache);
+out:
+	return -ENOMEM;
+}
+
+static void __init uksm_slab_free(void)
+{
+	kmem_cache_destroy(stable_node_cache);
+	kmem_cache_destroy(rmap_item_cache);
+	kmem_cache_destroy(node_vma_cache);
+	kmem_cache_destroy(vma_slot_cache);
+	kmem_cache_destroy(tree_node_cache);
+}
+
+/* Common interface with ksm, but the behaviour differs from ksm's. */
+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end, int advice, unsigned long *vm_flags)
+{
+	int err;
+
+	switch (advice) {
+	case MADV_MERGEABLE:
+		return 0;		/* just ignore the advice */
+
+	case MADV_UNMERGEABLE:
+		if (!(*vm_flags & VM_MERGEABLE) || !uksm_flags_can_scan(*vm_flags))
+			return 0;		/* just ignore the advice */
+
+		if (vma->anon_vma) {
+			err = unmerge_uksm_pages(vma, start, end);
+			if (err)
+				return err;
+		}
+
+		uksm_remove_vma(vma);
+		*vm_flags &= ~VM_MERGEABLE;
+		break;
+	}
+
+	return 0;
+}
+
+/* Common interface with ksm; effectively the same implementation. */
+struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = page_anon_vma(page);
+	struct page *new_page;
+
+	if (PageKsm(page)) {
+		if (page_stable_node(page))
+			return page;	/* no need to copy it */
+	} else if (!anon_vma) {
+		return page;		/* no need to copy it */
+	} else if (anon_vma->root == vma->anon_vma->root &&
+		 page->index == linear_page_index(vma, address)) {
+		return page;		/* still no need to copy it */
+	}
+	if (!PageUptodate(page))
+		return page;		/* let do_swap_page report the error */
+
+	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+	if (new_page) {
+		copy_user_highpage(new_page, page, address, vma);
+
+		SetPageDirty(new_page);
+		__SetPageUptodate(new_page);
+		__SetPageLocked(new_page);
+	}
+
+	return new_page;
+}
+
+/* Copied from mm/ksm.c; required since kernel 5.1 */
+bool reuse_ksm_page(struct page *page,
+		    struct vm_area_struct *vma,
+		    unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+			WARN_ON(!page_mapped(page)) ||
+			WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
+
+static int __init uksm_init(void)
+{
+	struct task_struct *uksm_thread;
+	int err;
+
+	uksm_sleep_jiffies = msecs_to_jiffies(100);
+	uksm_sleep_saved = uksm_sleep_jiffies;
+
+	slot_tree_init();
+	init_scan_ladder();
+
+
+	err = init_random_sampling();
+	if (err)
+		goto out_free2;
+
+	err = uksm_slab_init();
+	if (err)
+		goto out_free1;
+
+	err = init_zeropage_hash_table();
+	if (err)
+		goto out_free0;
+
+	uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd");
+	if (IS_ERR(uksm_thread)) {
+		pr_err("uksm: creating kthread failed\n");
+		err = PTR_ERR(uksm_thread);
+		goto out_free;
+	}
+
+#ifdef CONFIG_SYSFS
+	err = sysfs_create_group(mm_kobj, &uksm_attr_group);
+	if (err) {
+		pr_err("uksm: register sysfs failed\n");
+		kthread_stop(uksm_thread);
+		goto out_free;
+	}
+#else
+	uksm_run = UKSM_RUN_MERGE;	/* no way for user to start it */
+
+#endif /* CONFIG_SYSFS */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+	/*
+	 * Choose a high priority since the callback takes uksm_thread_mutex:
+	 * later callbacks could only be taking locks which nest within that.
+	 */
+	hotplug_memory_notifier(uksm_memory_callback, 100);
+#endif
+	return 0;
+
+out_free:
+	kfree(zero_hash_table);
+out_free0:
+	uksm_slab_free();
+out_free1:
+	kfree(random_nums);
+out_free2:
+	kfree(uksm_scan_ladder);
+	return err;
+}
+
+#ifdef MODULE
+subsys_initcall(ksm_init);
+#else
+late_initcall(uksm_init);
+#endif
+
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a37c87b5aee..03127e9c429 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -901,6 +901,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	} else {
 		void (*freepage)(struct page *);
 		void *shadow = NULL;
+		int empty;
 
 		freepage = mapping->a_ops->freepage;
 		/*
@@ -922,9 +923,12 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		if (reclaimed && page_is_file_lru(page) &&
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
 			shadow = workingset_eviction(page, target_memcg);
-		__delete_from_page_cache(page, shadow);
+		empty = __delete_from_page_cache(page, shadow);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 
+		if (empty)
+			inode_pages_clear(mapping->host);
+
 		if (freepage != NULL)
 			freepage(page);
 	}
@@ -3693,7 +3697,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		__fs_reclaim_release();
 		ret = try_to_freeze();
 		__fs_reclaim_acquire();
-		if (ret || kthread_should_stop())
+		if (ret || kthread_should_stop() ||
+		    !atomic_long_read(&kswapd_waiters))
 			break;
 
 		/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 96d21a792b5..fdc6a4cd76e 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1171,6 +1171,9 @@ const char * const vmstat_text[] = {
 	"nr_foll_pin_acquired",
 	"nr_foll_pin_released",
 
+#ifdef CONFIG_UKSM
+	"nr_uksm_zero_pages",
+#endif
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
@@ -1205,9 +1208,11 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_NUMA
 	"zone_reclaim_failed",
 #endif
-	"pginodesteal",
 	"slabs_scanned",
+	"pginodesteal",
 	"kswapd_inodesteal",
+	"pginoderescue",
+	"pginodedelayed",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",
 	"pageoutrun",
diff --git a/mm/workingset.c b/mm/workingset.c
index 474186b76ce..7ce9c74ebfd 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -491,6 +491,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	struct xa_node *node = container_of(item, struct xa_node, private_list);
 	XA_STATE(xas, node->array, 0);
 	struct address_space *mapping;
+	bool empty = false;
 	int ret;
 
 	/*
@@ -529,6 +530,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
 	mapping->nrexceptional -= node->nr_values;
+	empty = mapping_empty(mapping);
 	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
 	xas.xa_offset = node->offset;
 	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
@@ -542,6 +544,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 
 out_invalid:
 	xa_unlock_irq(&mapping->i_pages);
+	if (empty)
+		inode_pages_clear(mapping->host);
 	ret = LRU_REMOVED_RETRY;
 out:
 	cond_resched();
diff --git a/net/can/Kconfig b/net/can/Kconfig
index d7704275245..9e2f196f042 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -55,6 +55,16 @@ config CAN_GW
 
 source "net/can/j1939/Kconfig"
 
+config CAN_ISOTP
+	tristate "ISO 15765-2:2016 CAN transport protocol"
+	default y
+	---help---
+	  ISO 15765-2 CAN transport protocol for protocol family CAN
+
+	  This implementation is already widely used in automotive use cases, e.g.
+	  for UDS-based OBD diagnosis, although some small adaptations may still be
+	  needed to make it ready for Linux mainline. Feedback is welcome.
+
 source "drivers/net/can/Kconfig"
 
 endif
diff --git a/net/can/Makefile b/net/can/Makefile
index 08bd217fc05..cfa1024369c 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -16,4 +16,7 @@ can-bcm-y		:= bcm.o
 obj-$(CONFIG_CAN_GW)	+= can-gw.o
 can-gw-y		:= gw.o
 
+obj-$(CONFIG_CAN_ISOTP)	+= can-isotp.o
+can-isotp-y		:= isotp.o
+
 obj-$(CONFIG_CAN_J1939)	+= j1939/
diff --git a/net/can/isotp.c b/net/can/isotp.c
new file mode 100644
index 00000000000..a0d20d9443b
--- /dev/null
+++ b/net/can/isotp.c
@@ -0,0 +1,1537 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * isotp.c - ISO 15765-2 CAN transport protocol for protocol family CAN
+ *
+ * WARNING: This is ALPHA code for discussions and first tests that should
+ *          not be used in production environments.
+ *
+ * During the discussion, the socket API to userspace, the ISO-TP socket
+ * options, or the return values may still change! Current behaviour:
+ *
+ * - no ISO-TP specific return values are provided to the userspace
+ * - while a transfer (tx) is in progress, the next write() blocks until it is done
+ * - no support for sending wait frames to the data source in the rx path
+ *
+ * Copyright (c) 2017 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <linux-can@vger.kernel.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/wait.h>
+#include <linux/uio.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/core.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+#include <linux/can/skb.h>
+#define CAN_SKBRES sizeof(struct can_skb_priv)
+#else
+#define CAN_SKBRES 0
+#endif
+#include <linux/can/isotp.h>
+#include <net/sock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#include <net/net_namespace.h>
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+#include "compat.h"
+#endif
+
+#define CAN_ISOTP_VERSION "20181217"
+static __initdata const char banner[] =
+	KERN_INFO "can: isotp protocol (rev " CAN_ISOTP_VERSION " alpha)\n";
+
+MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_ALIAS("can-proto-6");
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+#error This module needs hrtimers (available since kernel 2.6.22)
+#endif
+
+#define DBG(fmt, args...) (printk( KERN_DEBUG "can-isotp: %s: " fmt, \
+				   __func__, ##args))
+#undef DBG
+#define DBG(fmt, args...)
+
+#define SINGLE_MASK(id) ((id & CAN_EFF_FLAG) ? \
+			 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+			 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
+
+/*
+  ISO 15765-2:2016 supports more than 4095 bytes per ISO PDU as the FF_DL can
+  take full 32 bit values (4 GByte). We would need a good concept to handle
+  this between user space and kernel space. For now, increase the static buffer
+  to about 8 KByte to be able to test this new functionality.
+*/
+#define MAX_MSG_LENGTH 8200
+
+/* N_PCI type values in bits 7-4 of N_PCI bytes */
+#define N_PCI_SF	0x00 /* single frame */
+#define N_PCI_FF	0x10 /* first frame */
+#define N_PCI_CF	0x20 /* consecutive frame */
+#define N_PCI_FC	0x30 /* flow control */
+
+#define N_PCI_SZ 1	/* size of the PCI byte #1 */
+#define SF_PCI_SZ4 1	/* size of SingleFrame PCI including 4 bit SF_DL */
+#define SF_PCI_SZ8 2	/* size of SingleFrame PCI including 8 bit SF_DL */
+#define FF_PCI_SZ12 2	/* size of FirstFrame PCI including 12 bit FF_DL */
+#define FF_PCI_SZ32 6	/* size of FirstFrame PCI including 32 bit FF_DL */
+#define FC_CONTENT_SZ 3	/* flow control content size in byte (FS/BS/STmin) */
+
+#define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA)
+
+/* Flow Status given in FC frame */
+#define ISOTP_FC_CTS	0	/* clear to send */
+#define ISOTP_FC_WT	1	/* wait */
+#define ISOTP_FC_OVFLW	2	/* overflow */
+
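+/* protocol machine states for the rx and tx path */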
+enum {
+	ISOTP_IDLE = 0,
+	ISOTP_WAIT_FIRST_FC,
+	ISOTP_WAIT_FC,
+	ISOTP_WAIT_DATA,
+	ISOTP_SENDING
+};
+
+struct tpcon {
+	int idx;
+	int len;
+	u8  state;
+	u8  bs;
+	u8  sn;
+	u8  ll_dl;
+	u8  buf[MAX_MSG_LENGTH+1];
+};
+
+struct isotp_sock {
+	struct sock sk;
+	int bound;
+	int ifindex;
+	canid_t txid;
+	canid_t rxid;
+	ktime_t tx_gap;
+	ktime_t lastrxcf_tstamp;
+	struct hrtimer rxtimer, txtimer;
+	struct tasklet_struct txtsklet;
+	struct can_isotp_options opt;
+	struct can_isotp_fc_options rxfc, txfc;
+	struct can_isotp_ll_options ll;
+	__u32 force_tx_stmin;
+	__u32 force_rx_stmin;
+	struct tpcon rx, tx;
+	struct notifier_block notifier;
+	wait_queue_head_t wait;
+};
+
+static inline struct isotp_sock *isotp_sk(const struct sock *sk)
+{
+	return (struct isotp_sock *)sk;
+}
+
+static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
+{
+	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+					     rxtimer);
+	if (so->rx.state == ISOTP_WAIT_DATA) {
+		DBG("we did not get new data frames in time.\n");
+
+		/* reset rx state */
+		so->rx.state = ISOTP_IDLE;
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void isotp_skb_reserve(struct sk_buff *skb, struct net_device *dev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	can_skb_reserve(skb);
+	can_skb_prv(skb)->ifindex = dev->ifindex;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,5)
+	can_skb_prv(skb)->skbcnt = 0;
+#endif
+}
+
+static void isotp_skb_destructor(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+
+static inline void isotp_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+	if (sk) {
+		sock_hold(sk);
+		skb->destructor = isotp_skb_destructor;
+		skb->sk = sk;
+	}
+}
+
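+/*
+ * Send a flow control (FC) frame back to the sender of a segmented PDU.
+ * data[ae] carries N_PCI_FC | flowstatus, followed by the announced
+ * blocksize (BS) and STmin taken from the local rx FC configuration.
+ */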
+static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
+{
+	struct net_device *dev;
+	struct sk_buff *nskb;
+	struct canfd_frame *ncf;
+	struct isotp_sock *so = isotp_sk(sk);
+
+	nskb = alloc_skb(so->ll.mtu + CAN_SKBRES, gfp_any());
+	if (!nskb)
+		return 1;
+
+	dev = dev_get_by_index(sock_net(sk), so->ifindex);
+	if (!dev) {
+		kfree_skb(nskb);
+		return 1;
+	}
+	isotp_skb_reserve(nskb, dev);
+	nskb->dev = dev;
+	isotp_skb_set_owner(nskb, sk);
+	ncf = (struct canfd_frame *) nskb->data;
+	skb_put(nskb, so->ll.mtu);
+
+	/* create & send flow control reply */
+	ncf->can_id = so->txid;
+
+	if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
+		memset(ncf->data, so->opt.txpad_content, CAN_MAX_DLEN);
+		ncf->len = CAN_MAX_DLEN;
+	} else
+		ncf->len = ae + FC_CONTENT_SZ;
+
+	ncf->data[ae] = N_PCI_FC | flowstatus;
+	ncf->data[ae + 1] = so->rxfc.bs;
+	ncf->data[ae + 2] = so->rxfc.stmin;
+
+	if (ae)
+		ncf->data[0] = so->opt.ext_address;
+
+	if (so->ll.mtu == CANFD_MTU)
+		ncf->flags = so->ll.tx_flags;
+
+	can_send(nskb, 1);
+	dev_put(dev);
+
+	/* reset blocksize counter */
+	so->rx.bs = 0;
+
+	/* reset last CF frame rx timestamp for rx stmin enforcement */
+	so->lastrxcf_tstamp = ktime_set(0,0);
+
+	/* start rx timeout watchdog */
+	hrtimer_start(&so->rxtimer, ktime_set(1,0), HRTIMER_MODE_REL);
+	return 0;
+}
+
+static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
+{
+	struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
+
+	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+
+	memset(addr, 0, sizeof(*addr));
+	addr->can_family  = AF_CAN;
+	addr->can_ifindex = skb->dev->ifindex;
+
+	if (sock_queue_rcv_skb(sk, skb) < 0)
+		kfree_skb(skb);
+}
+
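+/*
+ * Map a data length to the corresponding padded CAN frame length:
+ * Classical CAN lengths (0..8) are padded to 8 byte, CAN FD lengths
+ * to the next valid step (12, 16, 20, 24, 32, 48, 64).
+ */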
+static u8 padlen(u8 datalen)
+{
+	const u8 plen[] = {8,  8,  8,  8,  8,  8,  8,  8,  8,	/* 0 - 8 */
+			   12, 12, 12, 12,			/* 9 - 12 */
+			   16, 16, 16, 16,			/* 13 - 16 */
+			   20, 20, 20, 20,			/* 17 - 20 */
+			   24, 24, 24, 24,			/* 21 - 24 */
+			   32, 32, 32, 32, 32, 32, 32, 32,	/* 25 - 32 */
+			   48, 48, 48, 48, 48, 48, 48, 48,	/* 33 - 40 */
+			   48, 48, 48, 48, 48, 48, 48, 48};	/* 41 - 48 */
+
+	if (datalen > 48)
+		return 64;
+
+	return plen[datalen];
+}
+
+/* check for length optimization and return 1/true when the check fails */
+static int check_optimized(struct canfd_frame *cf, int start_index)
+{
+	/*
+	 * for CAN_DL <= 8 the start_index is equal to the CAN_DL as the
+	 * padding would start at this point. E.g. if the padding would
+	 * start at cf.data[7], cf->len has to be 7 to be optimal.
+	 * Note: The data[] index starts with zero.
+	 */
+	if (cf->len <= CAN_MAX_DLEN)
+		return (cf->len != start_index);
+
+	/*
+	 * This relation is also valid in the non-linear DLC range, where
+	 * we need to take care of the minimal next possible CAN_DL.
+	 * The correct check would be (padlen(cf->len) != padlen(start_index)).
+	 * But as cf->len can only take discrete values from 12, .., 64 at this
+	 * point the padlen(cf->len) is always equal to cf->len.
+	 */
+	return (cf->len != padlen(start_index));
+}
+
+/* check padding and return 1/true when the check fails */
+static int check_pad(struct isotp_sock *so, struct canfd_frame *cf,
+		     int start_index, __u8 content)
+{
+	int i;
+
+	/* no RX_PADDING value => check the length of the optimized frame */
+	if (!(so->opt.flags & CAN_ISOTP_RX_PADDING)) {
+
+		if (so->opt.flags & CAN_ISOTP_CHK_PAD_LEN)
+			return check_optimized(cf, start_index);
+
+		/* no valid test against empty value => ignore frame */
+		return 1;
+	}
+
+	/* check datalength of correctly padded CAN frame */
+	if ((so->opt.flags & CAN_ISOTP_CHK_PAD_LEN) &&
+	    cf->len != padlen(cf->len))
+			return 1;
+
+	/* check padding content */
+	if (so->opt.flags & CAN_ISOTP_CHK_PAD_DATA) {
+		for (i = start_index; i < cf->len; i++)
+			if (cf->data[i] != content)
+				return 1;
+	}
+	return 0;
+}
+
+static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
+{
+	if (so->tx.state != ISOTP_WAIT_FC &&
+	    so->tx.state != ISOTP_WAIT_FIRST_FC)
+		return 0;
+
+	hrtimer_cancel(&so->txtimer);
+
+	if ((cf->len < ae + FC_CONTENT_SZ) ||
+	    ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+	     check_pad(so, cf, ae + FC_CONTENT_SZ, so->opt.rxpad_content))) {
+		so->tx.state = ISOTP_IDLE;
+		wake_up_interruptible(&so->wait);
+		return 1;
+	}
+
+	/* get communication parameters only from the first FC frame */
+	if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+
+		so->txfc.bs = cf->data[ae + 1];
+		so->txfc.stmin = cf->data[ae + 2];
+
+		/* fix wrong STmin values according to the spec */
+		if ((so->txfc.stmin > 0x7F) &&
+		    ((so->txfc.stmin < 0xF1) || (so->txfc.stmin > 0xF9)))
+			so->txfc.stmin = 0x7F;
+
+		so->tx_gap = ktime_set(0,0);
+		/* add transmission time for CAN frame N_As */
+		so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime);
+		/* add waiting time for consecutive frames N_Cs */
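+		/*
+		 * STmin 0x00..0x7F is given in milliseconds,
+		 * 0xF1..0xF9 in 100 microsecond steps (see clamping above)
+		 */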
+		if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
+			so->tx_gap = ktime_add_ns(so->tx_gap,
+						  so->force_tx_stmin);
+		else if (so->txfc.stmin < 0x80)
+			so->tx_gap = ktime_add_ns(so->tx_gap,
+						  so->txfc.stmin * 1000000);
+		else
+			so->tx_gap = ktime_add_ns(so->tx_gap,
+						  (so->txfc.stmin - 0xF0)
+						  * 100000);
+		so->tx.state = ISOTP_WAIT_FC;
+	}
+
+	DBG("FC frame: FS %d, BS %d, STmin 0x%02X, tx_gap %lld\n",
+	    cf->data[ae] & 0x0F, so->txfc.bs, so->txfc.stmin,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
+	    (long long)so->tx_gap);
+#else
+	    (long long)so->tx_gap.tv64);
+#endif
+
+	switch (cf->data[ae] & 0x0F) {
+
+	case ISOTP_FC_CTS:
+		so->tx.bs = 0;
+		so->tx.state = ISOTP_SENDING;
+		DBG("starting txtimer for sending\n");
+		/* start cyclic timer for sending CF frame */
+		hrtimer_start(&so->txtimer, so->tx_gap,
+			      HRTIMER_MODE_REL);
+		break;
+
+	case ISOTP_FC_WT:
+		DBG("starting waiting for next FC\n");
+		/* start timer to wait for next FC frame */
+		hrtimer_start(&so->txtimer, ktime_set(1,0),
+			      HRTIMER_MODE_REL);
+		break;
+
+	case ISOTP_FC_OVFLW:
+		DBG("overflow in receiver side\n");
+
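+		/* fall through: treat an overflow like the default case and abort the tx */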
+	default:
+		/* stop this tx job. TODO: error reporting? */
+		so->tx.state = ISOTP_IDLE;
+		wake_up_interruptible(&so->wait);
+	}
+	return 0;
+}
+
+static int isotp_rcv_sf(struct sock *sk, struct canfd_frame *cf, int pcilen,
+			struct sk_buff *skb, int len)
+{
+	struct isotp_sock *so = isotp_sk(sk);
+	struct sk_buff *nskb;
+
+	hrtimer_cancel(&so->rxtimer);
+	so->rx.state = ISOTP_IDLE;
+
+	if (!len || len > cf->len - pcilen)
+		return 1;
+
+	if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+	    check_pad(so, cf, pcilen + len, so->opt.rxpad_content))
+		return 1;
+
+	nskb = alloc_skb(len, gfp_any());
+	if (!nskb)
+		return 1;
+
+	memcpy(skb_put(nskb, len), &cf->data[pcilen], len);
+
+	nskb->tstamp = skb->tstamp;
+	nskb->dev = skb->dev;
+	isotp_rcv_skb(nskb, sk);
+	return 0;
+}
+
+static int isotp_rcv_ff(struct sock *sk, struct canfd_frame *cf, int ae)
+{
+	struct isotp_sock *so = isotp_sk(sk);
+	int i;
+	int off;
+	int ff_pci_sz;
+
+	hrtimer_cancel(&so->rxtimer);
+	so->rx.state = ISOTP_IDLE;
+
+	/* get the used sender LL_DL from the (first) CAN frame data length */
+	so->rx.ll_dl = padlen(cf->len);
+
+	/* the first frame has to use the entire frame up to LL_DL length */
+	if (cf->len != so->rx.ll_dl)
+		return 1;
+
+	/* get the FF_DL */
+	so->rx.len = (cf->data[ae] & 0x0F) << 8;
+	so->rx.len += cf->data[ae + 1];
+
+	/* Check for FF_DL escape sequence supporting 32 bit PDU length */
+	if (so->rx.len)
+		ff_pci_sz = FF_PCI_SZ12;
+	else {
+		/* FF_DL = 0 => get real length from next 4 bytes */
+		so->rx.len = cf->data[ae + 2] << 24;
+		so->rx.len += cf->data[ae + 3] << 16;
+		so->rx.len += cf->data[ae + 4] << 8;
+		so->rx.len += cf->data[ae + 5];
+		ff_pci_sz = FF_PCI_SZ32;
+	}
+
+	/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
+	off = (so->rx.ll_dl > CAN_MAX_DLEN)? 1:0;
+
+	if (so->rx.len + ae + off + ff_pci_sz < so->rx.ll_dl)
+		return 1;
+
+	if (so->rx.len > MAX_MSG_LENGTH) {
+		/* send FC frame with overflow status */
+		isotp_send_fc(sk, ae, ISOTP_FC_OVFLW);
+		return 1;
+	}
+
+	/* copy the first received data bytes */
+	so->rx.idx = 0;
+	for (i = ae + ff_pci_sz; i < so->rx.ll_dl; i++)
+		so->rx.buf[so->rx.idx++] = cf->data[i];
+
+	/* initial setup for this pdu reception */
+	so->rx.sn = 1;
+	so->rx.state = ISOTP_WAIT_DATA;
+
+	/* no creation of flow control frames */
+	if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
+		return 0;
+
+	/* send our first FC frame */
+	isotp_send_fc(sk, ae, ISOTP_FC_CTS);
+	return 0;
+}
+
+static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
+			struct sk_buff *skb)
+{
+	struct isotp_sock *so = isotp_sk(sk);
+	struct sk_buff *nskb;
+	int i;
+
+	if (so->rx.state != ISOTP_WAIT_DATA)
+		return 0;
+
+	/* drop if the timestamp gap is less than force_rx_stmin nanoseconds */
+	if (so->opt.flags & CAN_ISOTP_FORCE_RXSTMIN) {
+
+		if (ktime_to_ns(ktime_sub(skb->tstamp, so->lastrxcf_tstamp)) <
+		    so->force_rx_stmin)
+			return 0;
+
+		so->lastrxcf_tstamp = skb->tstamp;
+	}
+
+	hrtimer_cancel(&so->rxtimer);
+
+	/* CFs are never longer than the FF */
+	if (cf->len > so->rx.ll_dl)
+		return 1;
+
+	/* CFs usually have the LL_DL length */
+	if (cf->len < so->rx.ll_dl) {
+		/* this is only allowed for the last CF */
+		if (so->rx.len - so->rx.idx > so->rx.ll_dl - ae - N_PCI_SZ)
+			return 1;
+	}
+
+	if ((cf->data[ae] & 0x0F) != so->rx.sn) {
+		DBG("wrong sn %d. expected %d.\n",
+		    cf->data[ae] & 0x0F, so->rx.sn);
+		/* some error reporting? */
+		so->rx.state = ISOTP_IDLE;
+		return 1;
+	}
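+	/* the sequence number (SN) is a 4 bit counter that wraps from 15 to 0 */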
+	so->rx.sn++;
+	so->rx.sn %= 16;
+
+	for (i = ae + N_PCI_SZ; i < cf->len; i++) {
+		so->rx.buf[so->rx.idx++] = cf->data[i];
+		if (so->rx.idx >= so->rx.len)
+			break;
+	}
+
+	if (so->rx.idx >= so->rx.len) {
+
+		/* we are done */
+		so->rx.state = ISOTP_IDLE;
+
+		if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+		    check_pad(so, cf, i+1, so->opt.rxpad_content))
+			return 1;
+
+		nskb = alloc_skb(so->rx.len, gfp_any());
+		if (!nskb)
+			return 1;
+
+		memcpy(skb_put(nskb, so->rx.len), so->rx.buf,
+		       so->rx.len);
+
+		nskb->tstamp = skb->tstamp;
+		nskb->dev = skb->dev;
+		isotp_rcv_skb(nskb, sk);
+		return 0;
+	}
+
+	/* no creation of flow control frames */
+	if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
+		return 0;
+
+	/* perform blocksize handling, if enabled */
+	if (!so->rxfc.bs || ++so->rx.bs < so->rxfc.bs) {
+
+		/* start rx timeout watchdog */
+		hrtimer_start(&so->rxtimer, ktime_set(1,0),
+			      HRTIMER_MODE_REL);
+		return 0;
+	}
+
+	/* we reached the specified blocksize so->rxfc.bs */
+	isotp_send_fc(sk, ae, ISOTP_FC_CTS);
+	return 0;
+}
+
+static void isotp_rcv(struct sk_buff *skb, void *data)
+{
+	struct sock *sk = (struct sock *)data;
+	struct isotp_sock *so = isotp_sk(sk);
+	struct canfd_frame *cf;
+	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR)? 1:0;
+	u8 n_pci_type, sf_dl;
+
+	BUG_ON(skb->len != CAN_MTU && skb->len != CANFD_MTU);
+
+	/*
+	 * Strictly receive only frames with the configured MTU size
+	 * => clear separation of CAN2.0 / CAN FD transport channels
+	 */
+	if (skb->len != so->ll.mtu)
+		return;
+
+	cf = (struct canfd_frame *) skb->data;
+
+	/* if enabled: check reception of my configured extended address */
+	if (ae && cf->data[0] != so->opt.rx_ext_address)
+		return;
+
+	n_pci_type = cf->data[ae] & 0xF0;
+
+	if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
+		/* check rx/tx path half duplex expectations */
+		if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
+		    (so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
+			return;
+	}
+
+	switch (n_pci_type) {
+	case N_PCI_FC:
+		/* tx path: flow control frame containing the FC parameters */
+		isotp_rcv_fc(so, cf, ae);
+		break;
+
+	case N_PCI_SF:
+		/*
+		 * rx path: single frame
+		 *
+		 * As we do not have a rx.ll_dl configuration, we can only test
+		 * if the CAN frame's payload length matches the LL_DL == 8
+		 * requirements - no matter if it's CAN 2.0 or CAN FD
+		 */
+
+		/* get the SF_DL from the N_PCI byte */
+		sf_dl = cf->data[ae] & 0x0F;
+
+		if (cf->len <= CAN_MAX_DLEN)
+			isotp_rcv_sf(sk, cf, SF_PCI_SZ4 + ae, skb, sf_dl);
+		else if (skb->len == CANFD_MTU) {
+			/*
+			 * We have a CAN FD frame and CAN_DL is greater than 8:
+			 * Only frames with the SF_DL == 0 ESC value are valid.
+			 *
+			 * If so take care of the increased SF PCI size
+			 * (SF_PCI_SZ8) to point to the message content behind
+			 * the extended SF PCI info and get the real SF_DL
+			 * length value from the formerly first data byte.
+			 */
+			if (sf_dl == 0)
+				isotp_rcv_sf(sk, cf, SF_PCI_SZ8 + ae, skb,
+					     cf->data[SF_PCI_SZ4 + ae]);
+		}
+		break;
+
+	case N_PCI_FF:
+		/* rx path: first frame */
+		isotp_rcv_ff(sk, cf, ae);
+		break;
+
+	case N_PCI_CF:
+		/* rx path: consecutive frame */
+		isotp_rcv_cf(sk, cf, ae, skb);
+		break;
+
+	}
+}
+
+static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
+				 int ae, int off)
+{
+	int pcilen = N_PCI_SZ + ae + off;
+	int space = so->tx.ll_dl - pcilen;
+	int num = min_t(int, so->tx.len - so->tx.idx, space);
+	int i;
+
+	cf->can_id = so->txid;
+	cf->len = num + pcilen;
+
+	if (num < space) {
+		if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
+			/* user requested padding */
+			cf->len = padlen(cf->len);
+			memset(cf->data, so->opt.txpad_content, cf->len);
+		} else if (cf->len > CAN_MAX_DLEN) {
+			/* mandatory padding for CAN FD frames */
+			cf->len = padlen(cf->len);
+			memset(cf->data, CAN_ISOTP_DEFAULT_PAD_CONTENT,
+			       cf->len);
+		}
+	}
+
+	for (i = 0; i < num; i++)
+		cf->data[pcilen + i] = so->tx.buf[so->tx.idx++];
+
+	if (ae)
+		cf->data[0] = so->opt.ext_address;
+}
+
+static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
+				int ae)
+{
+	int i;
+	int ff_pci_sz;
+
+	cf->can_id = so->txid;
+	cf->len = so->tx.ll_dl;
+	if (ae)
+		cf->data[0] = so->opt.ext_address;
+
+	/* create N_PCI bytes with 12/32 bit FF_DL data length */
+	if (so->tx.len > 4095) {
+		/* use 32 bit FF_DL notation */
+		cf->data[ae] = N_PCI_FF;
+		cf->data[ae + 1] = 0;
+		cf->data[ae + 2] = (u8) (so->tx.len >> 24) & 0xFFU;
+		cf->data[ae + 3] = (u8) (so->tx.len >> 16) & 0xFFU;
+		cf->data[ae + 4] = (u8) (so->tx.len >> 8) & 0xFFU;
+		cf->data[ae + 5] = (u8) so->tx.len & 0xFFU;
+		ff_pci_sz = FF_PCI_SZ32;
+	} else {
+		/* use 12 bit FF_DL notation */
+		cf->data[ae] = (u8) (so->tx.len>>8) | N_PCI_FF;
+		cf->data[ae + 1] = (u8) so->tx.len & 0xFFU;
+		ff_pci_sz = FF_PCI_SZ12;
+	}
+
+	/* add first data bytes depending on ae */
+	for (i = ae + ff_pci_sz; i < so->tx.ll_dl; i++)
+		cf->data[i] = so->tx.buf[so->tx.idx++];
+
+	so->tx.sn = 1;
+	so->tx.state = ISOTP_WAIT_FIRST_FC;
+}
+
+static void isotp_tx_timer_tsklet(unsigned long data)
+{
+	struct isotp_sock *so = (struct isotp_sock *)data;
+	struct sock *sk = &so->sk;
+	struct sk_buff *skb;
+	struct net_device *dev;
+	struct canfd_frame *cf;
+	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR)? 1:0;
+
+	switch (so->tx.state) {
+
+	case ISOTP_WAIT_FC:
+	case ISOTP_WAIT_FIRST_FC:
+
+		/* we did not get any flow control frame in time */
+
+		DBG("we did not get FC frame in time.\n");
+
+		/* reset tx state */
+		so->tx.state = ISOTP_IDLE;
+		wake_up_interruptible(&so->wait);
+		break;
+
+	case ISOTP_SENDING:
+
+		/* push out the next segmented pdu */
+
+		DBG("next pdu to send.\n");
+
+		dev = dev_get_by_index(sock_net(sk), so->ifindex);
+		if (!dev)
+			break;
+
+isotp_tx_burst:
+		skb = alloc_skb(so->ll.mtu + CAN_SKBRES, gfp_any());
+		if (!skb) {
+			dev_put(dev);
+			break;
+		}
+
+		isotp_skb_reserve(skb, dev);
+		cf = (struct canfd_frame *)skb->data;
+		skb_put(skb, so->ll.mtu);
+
+		/* create consecutive frame */
+		isotp_fill_dataframe(cf, so, ae, 0);
+
+		/* place consecutive frame N_PCI in appropriate index */
+		cf->data[ae] = N_PCI_CF | so->tx.sn++;
+		so->tx.sn %= 16;
+		so->tx.bs++;
+
+		if (so->ll.mtu == CANFD_MTU)
+			cf->flags = so->ll.tx_flags;
+
+		skb->dev = dev;
+		isotp_skb_set_owner(skb, sk);
+		can_send(skb, 1);
+
+		if (so->tx.idx >= so->tx.len) {
+			/* we are done */
+			DBG("we are done\n");
+			so->tx.state = ISOTP_IDLE;
+			dev_put(dev);
+			wake_up_interruptible(&so->wait);
+			break;
+		}
+
+		if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
+			/* stop and wait for FC */
+			DBG("BS stop and wait for FC\n");
+			so->tx.state = ISOTP_WAIT_FC;
+			dev_put(dev);
+			hrtimer_start(&so->txtimer,
+				      ktime_add(ktime_get(), ktime_set(1,0)),
+				      HRTIMER_MODE_ABS);
+			break;
+		}
+
+		/* no gap between data frames needed => use burst mode */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
+		if (!so->tx_gap)
+			goto isotp_tx_burst;
+#else
+		if (!so->tx_gap.tv64)
+			goto isotp_tx_burst;
+#endif
+
+		/* start timer to send next data frame with correct delay */
+		dev_put(dev);
+		hrtimer_start(&so->txtimer,
+			      ktime_add(ktime_get(), so->tx_gap),
+			      HRTIMER_MODE_ABS);
+		break;
+
+	default:
+		BUG_ON(1);
+	}
+}
+
+static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+{
+	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+					     txtimer);
+	tasklet_schedule(&so->txtsklet);
+
+	return HRTIMER_NORESTART;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+#else
+static int isotp_sendmsg(struct kiocb *iocb, struct socket *sock,
+		       struct msghdr *msg, size_t size)
+#endif
+{
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so = isotp_sk(sk);
+	struct sk_buff *skb;
+	struct net_device *dev;
+	struct canfd_frame *cf;
+	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR)? 1:0;
+	int off;
+	int err;
+
+	if (!so->bound)
+		return -EADDRNOTAVAIL;
+
+	/* we do not support multiple buffers - for now */
+	if (so->tx.state != ISOTP_IDLE) {
+		if (msg->msg_flags & MSG_DONTWAIT)
+			return -EAGAIN;
+
+		/* wait for complete transmission of current pdu */
+		wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+	}
+
+	if (!size || size > MAX_MSG_LENGTH)
+		return -EINVAL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+	err = memcpy_from_msg(so->tx.buf, msg, size);
+#else
+	err = memcpy_fromiovec(so->tx.buf, msg->msg_iov, size);
+#endif
+	if (err < 0)
+		return err;
+
+	dev = dev_get_by_index(sock_net(sk), so->ifindex);
+	if (!dev)
+		return -ENXIO;
+
+	skb = sock_alloc_send_skb(sk, so->ll.mtu + CAN_SKBRES,
+				  msg->msg_flags & MSG_DONTWAIT, &err);
+	if (!skb) {
+		dev_put(dev);
+		return err;
+	}
+
+	isotp_skb_reserve(skb, dev);
+
+	so->tx.state = ISOTP_SENDING;
+	so->tx.len = size;
+	so->tx.idx = 0;
+
+	cf = (struct canfd_frame *)skb->data;
+	skb_put(skb, so->ll.mtu);
+
+	/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
+	off = (so->tx.ll_dl > CAN_MAX_DLEN)? 1:0;
+
+	/* check for single frame transmission depending on TX_DL */
+	if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
+
+		/*
+		 * The message size generally fits into a SingleFrame - good.
+		 *
+		 * SF_DL ESC offset optimization:
+		 *
+		 * When TX_DL is greater than 8 but the message would still fit
+		 * into an 8 byte CAN frame, we can omit the offset.
+		 * This prevents a protocol-caused length extension from
+		 * CAN_DL = 8 to CAN_DL = 12 due to the SF_DL ESC handling.
+		 */
+		if (size <= CAN_MAX_DLEN - SF_PCI_SZ4 - ae)
+			off = 0;
+
+		isotp_fill_dataframe(cf, so, ae, off);
+
+		/* place single frame N_PCI w/o length in appropriate index */
+		cf->data[ae] = N_PCI_SF;
+
+		/* place SF_DL size value depending on the SF_DL ESC offset */
+		if (off)
+			cf->data[SF_PCI_SZ4 + ae] = size;
+		else
+			cf->data[ae] |= size;
+
+		so->tx.state = ISOTP_IDLE;
+		wake_up_interruptible(&so->wait);
+	} else {
+		/* send first frame and wait for FC */
+
+		isotp_create_fframe(cf, so, ae);
+
+		DBG("starting txtimer for fc\n");
+		/* start timeout for FC */
+		hrtimer_start(&so->txtimer, ktime_set(1,0), HRTIMER_MODE_REL);
+	}
+
+	/* send the first or only CAN frame */
+	if (so->ll.mtu == CANFD_MTU)
+		cf->flags = so->ll.tx_flags;
+
+	skb->dev = dev;
+	skb->sk  = sk;
+	err = can_send(skb, 1);
+	dev_put(dev);
+	if (err)
+		return err;
+
+	return size;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+			 int flags)
+#else
+static int isotp_recvmsg(struct kiocb *iocb, struct socket *sock,
+			 struct msghdr *msg, size_t size, int flags)
+#endif
+{
+	struct sock *sk = sock->sk;
+	struct sk_buff *skb;
+	int err = 0;
+	int noblock;
+
+	noblock =  flags & MSG_DONTWAIT;
+	flags   &= ~MSG_DONTWAIT;
+
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	if (!skb)
+		return err;
+
+	if (size < skb->len)
+		msg->msg_flags |= MSG_TRUNC;
+	else
+		size = skb->len;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+	err = memcpy_to_msg(msg, skb->data, size);
+#else
+	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
+#endif
+	if (err < 0) {
+		skb_free_datagram(sk, skb);
+		return err;
+	}
+
+	sock_recv_timestamp(msg, sk, skb);
+
+	if (msg->msg_name) {
+		msg->msg_namelen = sizeof(struct sockaddr_can);
+		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+	}
+
+	skb_free_datagram(sk, skb);
+
+	return size;
+}
+
+static int isotp_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so;
+	struct net *net;
+
+	if (!sk)
+		return 0;
+
+	so = isotp_sk(sk);
+	net = sock_net(sk);
+
+	/* wait for complete transmission of current pdu */
+	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+
+	unregister_netdevice_notifier(&so->notifier);
+
+	lock_sock(sk);
+
+	hrtimer_cancel(&so->txtimer);
+	hrtimer_cancel(&so->rxtimer);
+	tasklet_kill(&so->txtsklet);
+
+	/* remove current filters & unregister */
+	if (so->bound) {
+		if (so->ifindex) {
+			struct net_device *dev;
+
+			dev = dev_get_by_index(net, so->ifindex);
+			if (dev) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+				can_rx_unregister(net, dev, so->rxid,
+#else
+				can_rx_unregister(dev, so->rxid,
+#endif
+						  SINGLE_MASK(so->rxid),
+						  isotp_rcv, sk);
+				dev_put(dev);
+			}
+		}
+	}
+
+	so->ifindex = 0;
+	so->bound   = 0;
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+
+	release_sock(sk);
+	sock_put(sk);
+
+	return 0;
+}
+
+static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+{
+	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so = isotp_sk(sk);
+	struct net *net = sock_net(sk);
+	int ifindex;
+	struct net_device *dev;
+	int err = 0;
+	int notify_enetdown = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
+	if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+#else
+	if (len < sizeof(*addr))
+#endif
+		return -EINVAL;
+
+	if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
+		return -EADDRNOTAVAIL;
+
+	if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) &
+	    (CAN_ERR_FLAG | CAN_RTR_FLAG))
+		return -EADDRNOTAVAIL;
+
+	if (!addr->can_ifindex)
+		return -ENODEV;
+
+	lock_sock(sk);
+
+	if (so->bound && addr->can_ifindex == so->ifindex &&
+	    addr->can_addr.tp.rx_id == so->rxid &&
+	    addr->can_addr.tp.tx_id == so->txid)
+		goto out;
+
+	dev = dev_get_by_index(net, addr->can_ifindex);
+	if (!dev) {
+		err = -ENODEV;
+		goto out;
+	}
+	if (dev->type != ARPHRD_CAN) {
+		dev_put(dev);
+		err = -ENODEV;
+		goto out;
+	}
+	if (dev->mtu < so->ll.mtu) {
+		dev_put(dev);
+		err = -EINVAL;
+		goto out;
+	}
+	if (!(dev->flags & IFF_UP))
+		notify_enetdown = 1;
+
+	ifindex = dev->ifindex;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+	can_rx_register(net, dev, addr->can_addr.tp.rx_id,
+#else
+	can_rx_register(dev, addr->can_addr.tp.rx_id,
+#endif
+			SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,11)) || \
+	((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,50)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))) || \
+	((LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,49)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))) || \
+	((LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,42)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)))
+			"isotp", sk);
+#else
+			"isotp");
+#endif
+	dev_put(dev);
+
+	if (so->bound) {
+		/* unregister old filter */
+		if (so->ifindex) {
+			dev = dev_get_by_index(net, so->ifindex);
+			if (dev) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+				can_rx_unregister(net, dev, so->rxid,
+#else
+				can_rx_unregister(dev, so->rxid,
+#endif
+						  SINGLE_MASK(so->rxid),
+						  isotp_rcv, sk);
+				dev_put(dev);
+			}
+		}
+	}
+
+	/* switch to new settings */
+	so->ifindex = ifindex;
+	so->rxid = addr->can_addr.tp.rx_id;
+	so->txid = addr->can_addr.tp.tx_id;
+	so->bound = 1;
+
+out:
+	release_sock(sk);
+
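+	/* the bind succeeded, but tell the socket that the interface is currently down */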
+	if (notify_enetdown) {
+		sk->sk_err = ENETDOWN;
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_error_report(sk);
+	}
+
+	return err;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)
+static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
+#else
+static int isotp_getname(struct socket *sock, struct sockaddr *uaddr,
+		       int *len, int peer)
+#endif
+{
+	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so = isotp_sk(sk);
+
+	if (peer)
+		return -EOPNOTSUPP;
+
+	addr->can_family  = AF_CAN;
+	addr->can_ifindex = so->ifindex;
+	addr->can_addr.tp.rx_id = so->rxid;
+	addr->can_addr.tp.tx_id = so->txid;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)
+	return sizeof(*addr);
+#else
+	*len = sizeof(*addr);
+
+	return 0;
+#endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+			    char __user *optval, unsigned int optlen)
+#else
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+			    char __user *optval, int optlen)
+#endif
+{
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so = isotp_sk(sk);
+	int ret = 0;
+
+	if (level != SOL_CAN_ISOTP)
+		return -EINVAL;
+	if (optlen < 0)
+		return -EINVAL;
+
+	switch (optname) {
+
+	case CAN_ISOTP_OPTS:
+		if (optlen != sizeof(struct can_isotp_options))
+			return -EINVAL;
+
+		if (copy_from_user(&so->opt, optval, optlen))
+			return -EFAULT;
+
+		/* no separate rx_ext_address is given => use ext_address */
+		if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
+			so->opt.rx_ext_address = so->opt.ext_address;
+		break;
+
+	case CAN_ISOTP_RECV_FC:
+		if (optlen != sizeof(struct can_isotp_fc_options))
+			return -EINVAL;
+
+		if (copy_from_user(&so->rxfc, optval, optlen))
+			return -EFAULT;
+		break;
+
+	case CAN_ISOTP_TX_STMIN:
+		if (optlen != sizeof(__u32))
+			return -EINVAL;
+
+		if (copy_from_user(&so->force_tx_stmin, optval, optlen))
+			return -EFAULT;
+		break;
+
+	case CAN_ISOTP_RX_STMIN:
+		if (optlen != sizeof(__u32))
+			return -EINVAL;
+
+		if (copy_from_user(&so->force_rx_stmin, optval, optlen))
+			return -EFAULT;
+		break;
+
+	case CAN_ISOTP_LL_OPTS:
+		if (optlen != sizeof(struct can_isotp_ll_options))
+			return -EINVAL;
+		else {
+			struct can_isotp_ll_options ll;
+
+			if (copy_from_user(&ll, optval, optlen))
+				return -EFAULT;
+
+			/* check for correct ISO 11898-1 DLC data length */
+			if (ll.tx_dl != padlen(ll.tx_dl))
+				return -EINVAL;
+
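+			/* ll.mtu selects Classical CAN (CAN_MTU) or CAN FD (CANFD_MTU) frames */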
+			if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
+				return -EINVAL;
+
+			if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
+				return -EINVAL;
+
+			memcpy(&so->ll, &ll, sizeof(ll));
+
+			/* set ll_dl for the tx path like it is done for the rx path */
+			so->tx.ll_dl = ll.tx_dl;
+		}
+		break;
+
+	default:
+		ret = -ENOPROTOOPT;
+	}
+
+	return ret;
+}
+
+static int isotp_getsockopt(struct socket *sock, int level, int optname,
+			  char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+	struct isotp_sock *so = isotp_sk(sk);
+	int len;
+	void *val;
+
+	if (level != SOL_CAN_ISOTP)
+		return -EINVAL;
+	if (get_user(len, optlen))
+		return -EFAULT;
+	if (len < 0)
+		return -EINVAL;
+
+	switch (optname) {
+
+	case CAN_ISOTP_OPTS:
+		len = min_t(int, len, sizeof(struct can_isotp_options));
+		val = &so->opt;
+		break;
+
+	case CAN_ISOTP_RECV_FC:
+		len = min_t(int, len, sizeof(struct can_isotp_fc_options));
+		val = &so->rxfc;
+		break;
+
+	case CAN_ISOTP_TX_STMIN:
+		len = min_t(int, len, sizeof(__u32));
+		val = &so->force_tx_stmin;
+		break;
+
+	case CAN_ISOTP_RX_STMIN:
+		len = min_t(int, len, sizeof(__u32));
+		val = &so->force_rx_stmin;
+		break;
+
+	case CAN_ISOTP_LL_OPTS:
+		len = min_t(int, len, sizeof(struct can_isotp_ll_options));
+		val = &so->ll;
+		break;
+
+	default:
+		return -ENOPROTOOPT;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, val, len))
+		return -EFAULT;
+	return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+			  void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+#else
+static int isotp_notifier(struct notifier_block *nb,
+			unsigned long msg, void *data)
+{
+	struct net_device *dev = (struct net_device *)data;
+#endif
+	struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
+	struct sock *sk = &so->sk;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+	if (!net_eq(dev_net(dev), sock_net(sk)))
+		return NOTIFY_DONE;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	if (dev_net(dev) != &init_net)
+		return NOTIFY_DONE;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if (dev->nd_net != &init_net)
+		return NOTIFY_DONE;
+#endif
+
+	if (dev->type != ARPHRD_CAN)
+		return NOTIFY_DONE;
+
+	if (so->ifindex != dev->ifindex)
+		return NOTIFY_DONE;
+
+	switch (msg) {
+
+	case NETDEV_UNREGISTER:
+		lock_sock(sk);
+		/* remove current filters & unregister */
+		if (so->bound)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+			can_rx_unregister(dev_net(dev), dev, so->rxid,
+#else
+			can_rx_unregister(dev, so->rxid,
+#endif
+					  SINGLE_MASK(so->rxid),
+					  isotp_rcv, sk);
+
+		so->ifindex = 0;
+		so->bound   = 0;
+		release_sock(sk);
+
+		sk->sk_err = ENODEV;
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_error_report(sk);
+		break;
+
+	case NETDEV_DOWN:
+		sk->sk_err = ENETDOWN;
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_error_report(sk);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int isotp_init(struct sock *sk)
+{
+	struct isotp_sock *so = isotp_sk(sk);
+
+	so->ifindex = 0;
+	so->bound   = 0;
+
+	so->opt.flags		= CAN_ISOTP_DEFAULT_FLAGS;
+	so->opt.ext_address	= CAN_ISOTP_DEFAULT_EXT_ADDRESS;
+	so->opt.rx_ext_address	= CAN_ISOTP_DEFAULT_EXT_ADDRESS;
+	so->opt.rxpad_content	= CAN_ISOTP_DEFAULT_PAD_CONTENT;
+	so->opt.txpad_content	= CAN_ISOTP_DEFAULT_PAD_CONTENT;
+	so->opt.frame_txtime	= CAN_ISOTP_DEFAULT_FRAME_TXTIME;
+	so->rxfc.bs		= CAN_ISOTP_DEFAULT_RECV_BS;
+	so->rxfc.stmin		= CAN_ISOTP_DEFAULT_RECV_STMIN;
+	so->rxfc.wftmax		= CAN_ISOTP_DEFAULT_RECV_WFTMAX;
+	so->ll.mtu		= CAN_ISOTP_DEFAULT_LL_MTU;
+	so->ll.tx_dl		= CAN_ISOTP_DEFAULT_LL_TX_DL;
+	so->ll.tx_flags		= CAN_ISOTP_DEFAULT_LL_TX_FLAGS;
+
+	/* set ll_dl for the tx path like it is done for the rx path */
+	so->tx.ll_dl		= so->ll.tx_dl;
+
+	so->rx.state = ISOTP_IDLE;
+	so->tx.state = ISOTP_IDLE;
+
+	hrtimer_init(&so->rxtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	so->rxtimer.function = isotp_rx_timer_handler;
+	hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	so->txtimer.function = isotp_tx_timer_handler;
+
+	tasklet_init(&so->txtsklet, isotp_tx_timer_tsklet, (unsigned long)so);
+
+	init_waitqueue_head(&so->wait);
+
+	so->notifier.notifier_call = isotp_notifier;
+	register_netdevice_notifier(&so->notifier);
+
+	return 0;
+}
+
+int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+			 unsigned long arg)
+{
+	/* no ioctls for socket layer -> hand it down to NIC layer */
+	return -ENOIOCTLCMD;
+}
+
+static const struct proto_ops isotp_ops = {
+	.family		= PF_CAN,
+	.release	= isotp_release,
+	.bind		= isotp_bind,
+	.connect	= sock_no_connect,
+	.socketpair	= sock_no_socketpair,
+	.accept		= sock_no_accept,
+	.getname	= isotp_getname,
+	.poll		= datagram_poll,
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	.ioctl		= isotp_sock_no_ioctlcmd,
+	.gettstamp	= sock_gettstamp,
+#else
+	.ioctl		= can_ioctl,	/* use can_ioctl() from af_can.c */
+#endif
+	.listen		= sock_no_listen,
+	.shutdown	= sock_no_shutdown,
+	.setsockopt	= isotp_setsockopt,
+	.getsockopt	= isotp_getsockopt,
+	.sendmsg	= isotp_sendmsg,
+	.recvmsg	= isotp_recvmsg,
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage,
+};
+
+static struct proto isotp_proto __read_mostly = {
+	.name		= "CAN_ISOTP",
+	.owner		= THIS_MODULE,
+	.obj_size	= sizeof(struct isotp_sock),
+	.init		= isotp_init,
+};
+
+static const struct can_proto isotp_can_proto = {
+	.type		= SOCK_DGRAM,
+	.protocol	= CAN_ISOTP,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
+	.capability	= -1,
+#endif
+	.ops		= &isotp_ops,
+	.prot		= &isotp_proto,
+};
+
+static __init int isotp_module_init(void)
+{
+	int err;
+
+	printk(banner);
+
+	err = can_proto_register(&isotp_can_proto);
+	if (err < 0)
+		printk(KERN_ERR "can: registration of isotp protocol failed\n");
+
+	return err;
+}
+
+static __exit void isotp_module_exit(void)
+{
+	can_proto_unregister(&isotp_can_proto);
+}
+
+module_init(isotp_module_init);
+module_exit(isotp_module_exit);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 08e2811b527..955fcfdd3c3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1894,7 +1894,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
 		mutex_unlock(&pktgen_thread_lock);
 		pr_debug("%s: waiting for %s to disappear....\n",
 			 __func__, ifname);
-		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
+		schedule_msec_hrtimeout_interruptible((msec_per_try));
 		mutex_lock(&pktgen_thread_lock);
 
 		if (++i >= max_tries) {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index bfbefb7bff9..d37a8d94455 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -468,6 +468,9 @@ choice
 	config DEFAULT_FQ_CODEL
 		bool "Fair Queue Controlled Delay" if NET_SCH_FQ_CODEL
 
+	config DEFAULT_FQ_PIE
+		bool "Flow Queue Proportional Integral controller Enhanced" if NET_SCH_FQ_PIE
+
 	config DEFAULT_SFQ
 		bool "Stochastic Fair Queue" if NET_SCH_SFQ
 
@@ -480,6 +483,7 @@ config DEFAULT_NET_SCH
 	default "pfifo_fast" if DEFAULT_PFIFO_FAST
 	default "fq" if DEFAULT_FQ
 	default "fq_codel" if DEFAULT_FQ_CODEL
+	default "fq_pie" if DEFAULT_FQ_PIE
 	default "sfq" if DEFAULT_SFQ
 	default "pfifo_fast"
 endif
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ae6b90e0d2..a8424eab8ae 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4821,8 +4821,9 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
 			return false;
 		return true;
 	case NL80211_CMD_START_AP:
-		/* SAE not supported yet */
-		if (auth_type == NL80211_AUTHTYPE_SAE)
+		if (!wiphy_ext_feature_isset(&rdev->wiphy,
+					     NL80211_EXT_FEATURE_SAE_OFFLOAD) &&
+		    auth_type == NL80211_AUTHTYPE_SAE)
 			return false;
 		/* FILS not supported yet */
 		if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
@@ -9394,7 +9395,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 		if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN)
 			return -EINVAL;
 		if (!wiphy_ext_feature_isset(&rdev->wiphy,
-					     NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK))
+					     NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK) &&
+		    !wiphy_ext_feature_isset(&rdev->wiphy,
+					     NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK))
 			return -EINVAL;
 		settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]);
 	}
@@ -15634,6 +15637,8 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
 	     (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
 	      nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
 			  cr->timeout_reason))) ||
+	    (cr->authorized &&
+	     nla_put_flag(msg, NL80211_ATTR_PORT_AUTHORIZED)) ||
 	    (cr->req_ie &&
 	     nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
 	    (cr->resp_ie &&
@@ -15700,7 +15705,9 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
 	    (info->fils.pmk &&
 	     nla_put(msg, NL80211_ATTR_PMK, info->fils.pmk_len, info->fils.pmk)) ||
 	    (info->fils.pmkid &&
-	     nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid)))
+	     nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid)) ||
+	    (info->authorized &&
+	     nla_put_flag(msg, NL80211_ATTR_PORT_AUTHORIZED)))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index ac3e60aa1fc..4c8bb73cdc8 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -887,6 +887,7 @@ void cfg80211_connect_done(struct net_device *dev,
 	ev->cr.bss = params->bss;
 	ev->cr.status = params->status;
 	ev->cr.timeout_reason = params->timeout_reason;
+	ev->cr.authorized = params->authorized;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
@@ -1020,6 +1021,7 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
 	if (info->fils.update_erp_next_seq_num)
 		ev->rm.fils.erp_next_seq_num = info->fils.erp_next_seq_num;
 	ev->rm.bss = info->bss;
+	ev->rm.authorized = info->authorized;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 94a833597a8..28d5b0f9320 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -94,6 +94,7 @@ include/uapi/linux/eventpoll.h:CONFIG_PM_SLEEP
 include/uapi/linux/hw_breakpoint.h:CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
 include/uapi/linux/pktcdvd.h:CONFIG_CDROM_PKTCDVD_WCACHE
 include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS
+include/uapi/linux/sched.h:CONFIG_SCHED_MUQSS
 "
 
 for c in $configs
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 5b80a469974..b9d86f90316 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -41,8 +41,8 @@ else
 fi
 
 UTS_VERSION="#$VERSION"
-CONFIG_FLAGS=""
-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+CONFIG_FLAGS="PCK"
+if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
 if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
 
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 64e80fc17f6..5a4918ee55e 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -146,6 +146,7 @@ if is_enabled CONFIG_OF_EARLY_FLATTREE; then
 	# Only some architectures with OF support have this target
 	if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
 		$MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
+		$MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/boot/dtbs/$version" dtbs_install
 	fi
 fi
 
diff --git a/security/security.c b/security/security.c
index 8b4d342ade5..0dcc1061824 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1087,6 +1087,7 @@ int security_path_rmdir(const struct path *dir, struct dentry *dentry)
 		return 0;
 	return call_int_hook(path_rmdir, 0, dir, dentry);
 }
+EXPORT_SYMBOL_GPL(security_path_rmdir);
 
 int security_path_unlink(const struct path *dir, struct dentry *dentry)
 {
@@ -1103,6 +1104,7 @@ int security_path_symlink(const struct path *dir, struct dentry *dentry,
 		return 0;
 	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
 }
+EXPORT_SYMBOL_GPL(security_path_symlink);
 
 int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
 		       struct dentry *new_dentry)
@@ -1111,6 +1113,7 @@ int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
 		return 0;
 	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
 }
+EXPORT_SYMBOL_GPL(security_path_link);
 
 int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
 			 const struct path *new_dir, struct dentry *new_dentry,
@@ -1138,6 +1141,7 @@ int security_path_truncate(const struct path *path)
 		return 0;
 	return call_int_hook(path_truncate, 0, path);
 }
+EXPORT_SYMBOL_GPL(security_path_truncate);
 
 int security_path_chmod(const struct path *path, umode_t mode)
 {
@@ -1145,6 +1149,7 @@ int security_path_chmod(const struct path *path, umode_t mode)
 		return 0;
 	return call_int_hook(path_chmod, 0, path, mode);
 }
+EXPORT_SYMBOL_GPL(security_path_chmod);
 
 int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 {
@@ -1152,6 +1157,7 @@ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 		return 0;
 	return call_int_hook(path_chown, 0, path, uid, gid);
 }
+EXPORT_SYMBOL_GPL(security_path_chown);
 
 int security_path_chroot(const struct path *path)
 {
@@ -1252,6 +1258,7 @@ int security_inode_permission(struct inode *inode, int mask)
 		return 0;
 	return call_int_hook(inode_permission, 0, inode, mask);
 }
+EXPORT_SYMBOL_GPL(security_inode_permission);
 
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 {
@@ -1444,6 +1451,7 @@ int security_file_permission(struct file *file, int mask)
 
 	return fsnotify_perm(file, mask);
 }
+EXPORT_SYMBOL_GPL(security_file_permission);
 
 int security_file_alloc(struct file *file)
 {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index fbb4435d0aa..48903ca4dda 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -230,6 +230,20 @@ config SND_HDA_POWER_SAVE_DEFAULT
 	  The default time-out value in seconds for HD-audio automatic
 	  power-save mode.  0 means to disable the power-save mode.
 
+config SND_HDA_INTEL_HDMI_SILENT_STREAM
+	bool "Enable Silent Stream always for HDMI"
+	depends on SND_HDA_INTEL
+	help
+	  Intel hardware has a feature called 'silent stream' that keeps
+	  the external HDMI receiver's analog circuitry powered on, avoiding
+	  a 2-3 sec silence at playback start. This mechanism relies on
+	  setting channel_id to 0xf, sending an info packet and preventing
+	  codec D3 entry (increasing platform static power consumption when
+	  an HDMI receiver is plugged in). A 2-3 sec silence at playback
+	  start is still expected whenever the stream format changes
+	  (the default is a 2 channel format).
+	  Say Y to enable the Silent Stream feature.
+
 endif
 
 endmenu
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 137d655fed8..602c7adedc5 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -42,6 +42,11 @@ static bool enable_acomp = true;
 module_param(enable_acomp, bool, 0444);
 MODULE_PARM_DESC(enable_acomp, "Enable audio component binding (default=yes)");
 
+static bool enable_silent_stream =
+IS_ENABLED(CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM);
+module_param(enable_silent_stream, bool, 0644);
+MODULE_PARM_DESC(enable_silent_stream, "Enable Silent Stream for HDMI devices");
+
 struct hdmi_spec_per_cvt {
 	hda_nid_t cvt_nid;
 	int assigned;
@@ -167,6 +172,7 @@ struct hdmi_spec {
 	hda_nid_t vendor_nid;
 	const int *port_map;
 	int port_num;
+	bool send_silent_stream; /* Flag to enable silent stream feature */
 };
 
 #ifdef CONFIG_SND_HDA_COMPONENT
@@ -1634,21 +1640,72 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
 	snd_hda_power_down_pm(codec);
 }
 
+static void silent_stream_enable(struct hda_codec *codec,
+				struct hdmi_spec_per_pin *per_pin)
+{
+	unsigned int newval, oldval;
+
+	codec_dbg(codec, "hdmi: enabling silent stream for NID %d\n",
+			per_pin->pin_nid);
+
+	mutex_lock(&per_pin->lock);
+
+	if (!per_pin->channels)
+		per_pin->channels = 2;
+
+	oldval = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+			AC_VERB_GET_CONV, 0);
+	newval = (oldval & 0xF0) | 0xF;
+	snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+			AC_VERB_SET_CHANNEL_STREAMID, newval);
+
+	hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
+
+	mutex_unlock(&per_pin->lock);
+}
+
 /* update ELD and jack state via audio component */
 static void sync_eld_via_acomp(struct hda_codec *codec,
 			       struct hdmi_spec_per_pin *per_pin)
 {
 	struct hdmi_spec *spec = codec->spec;
 	struct hdmi_eld *eld = &spec->temp_eld;
+	bool monitor_prev, monitor_next;
 
 	mutex_lock(&per_pin->lock);
 	eld->monitor_present = false;
+	monitor_prev = per_pin->sink_eld.monitor_present;
 	eld->eld_size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
 				      per_pin->dev_id, &eld->monitor_present,
 				      eld->eld_buffer, ELD_MAX_SIZE);
 	eld->eld_valid = (eld->eld_size > 0);
 	update_eld(codec, per_pin, eld, 0);
+	monitor_next = per_pin->sink_eld.monitor_present;
 	mutex_unlock(&per_pin->lock);
+
+	/*
+	 * Power-up will call hdmi_present_sense, so the PM calls
+	 * have to be done without mutex held.
+	 */
+
+	if (spec->send_silent_stream) {
+		int pm_ret;
+
+		if (!monitor_prev && monitor_next) {
+			pm_ret = snd_hda_power_up_pm(codec);
+			if (pm_ret < 0)
+				codec_err(codec,
+				"Monitor plugged-in, Failed to power up codec ret=[%d]\n",
+				pm_ret);
+			silent_stream_enable(codec, per_pin);
+		} else if (monitor_prev && !monitor_next) {
+			pm_ret = snd_hda_power_down_pm(codec);
+			if (pm_ret < 0)
+				codec_err(codec,
+				"Monitor plugged-out, Failed to power down codec ret=[%d]\n",
+				pm_ret);
+		}
+	}
 }
 
 static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
@@ -2791,6 +2848,13 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
 	spec->ops.setup_stream = i915_hsw_setup_stream;
 	spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup;
 
+	/*
+	 * Enable silent stream feature, if it is enabled via
+	 * module param or Kconfig option
+	 */
+	if (enable_silent_stream)
+		spec->send_silent_stream = true;
+
 	return parse_intel_hdmi(codec);
 }
 
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index d310b0d8b22..cc38af11919 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -1994,7 +1994,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
 		outw(0, io + GPIO_DATA);
 		outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
 
-		schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
+		schedule_msec_hrtimeout_uninterruptible((delay1));
 
 		outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
 		udelay(5);
@@ -2002,7 +2002,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
 		outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
 		outw(~0, io + GPIO_MASK);
 
-		schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
+		schedule_msec_hrtimeout_uninterruptible((delay2));
 
 		if (! snd_m3_try_read_vendor(chip))
 			break;
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index f70b9f7e68b..77b65398ca0 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -415,7 +415,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena
 	hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2);
 	snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
 	if (enable) {
-		schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+		schedule_msec_hrtimeout_uninterruptible((10));
 		/* config one-bit depop parameter */
 		rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f);
 		snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL,
@@ -525,7 +525,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable
 	hp_zc = snd_soc_component_read32(component, RT5631_INT_ST_IRQ_CTRL_2);
 	snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
 	if (enable) {
-		schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+		schedule_msec_hrtimeout_uninterruptible((10));
 
 		/* config depop sequence parameter */
 		rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f);
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index fe99584c917..f1344d532a1 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -233,10 +233,10 @@ static void wm8350_pga_work(struct work_struct *work)
 		    out2->ramp == WM8350_RAMP_UP) {
 			/* delay is longer over 0dB as increases are larger */
 			if (i >= WM8350_OUTn_0dB)
-				schedule_timeout_interruptible(msecs_to_jiffies
+				schedule_msec_hrtimeout_interruptible(
 							       (2));
 			else
-				schedule_timeout_interruptible(msecs_to_jiffies
+				schedule_msec_hrtimeout_interruptible(
 							       (1));
 		} else
 			udelay(50);	/* doesn't matter if we delay longer */
@@ -1120,7 +1120,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
 					 (platform->dis_out4 << 6));
 
 			/* wait for discharge */
-			schedule_timeout_interruptible(msecs_to_jiffies
+			schedule_msec_hrtimeout_interruptible(
 						       (platform->
 							cap_discharge_msecs));
 
@@ -1136,7 +1136,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
 					 WM8350_VBUFEN);
 
 			/* wait for vmid */
-			schedule_timeout_interruptible(msecs_to_jiffies
+			schedule_msec_hrtimeout_interruptible(
 						       (platform->
 							vmid_charge_msecs));
 
@@ -1187,7 +1187,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
 		wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
 
 		/* wait */
-		schedule_timeout_interruptible(msecs_to_jiffies
+		schedule_msec_hrtimeout_interruptible(
 					       (platform->
 						vmid_discharge_msecs));
 
@@ -1205,7 +1205,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
 				 pm1 | WM8350_OUTPUT_DRAIN_EN);
 
 		/* wait */
-		schedule_timeout_interruptible(msecs_to_jiffies
+		schedule_msec_hrtimeout_interruptible(
 					       (platform->drain_msecs));
 
 		pm1 &= ~WM8350_BIASEN;
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 271235a69c0..3ec90e1b1eb 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1109,7 +1109,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component,
 		/* Need to let things settle before stopping the clock
 		 * to ensure that restart works, see "Stopping the
 		 * master clock" in the datasheet. */
-		schedule_timeout_interruptible(msecs_to_jiffies(1));
+		schedule_msec_hrtimeout_interruptible(1);
 		snd_soc_component_write(component, WM8900_REG_POWER2,
 			     WM8900_REG_POWER2_SYSCLK_ENA);
 		break;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 6497c1ea622..08fefeca9d8 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
 
 	/* Gracefully shut down the voice interface. */
 	snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200);
-	schedule_timeout_interruptible(msecs_to_jiffies(1));
+	schedule_msec_hrtimeout_interruptible(1);
 	snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
 	snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000);
 
@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_component *component,
 	wm9713->pll_in = freq_in;
 
 	/* wait 10ms AC97 link frames for the link to stabilise */
-	schedule_timeout_interruptible(msecs_to_jiffies(10));
+	schedule_msec_hrtimeout_interruptible((10));
 	return 0;
 }
 
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c0aa64ff8e3..34a0771f187 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
 static void pop_wait(u32 pop_time)
 {
 	if (pop_time)
-		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
+		schedule_msec_hrtimeout_uninterruptible((pop_time));
 }
 
 __printf(3, 4)
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index fdbdfb7bce9..fa8e8faf3eb 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
 		if (!alive)
 			break;
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_min_hrtimeout();
 	} while (--timeout > 0);
 	if (alive)
 		dev_err(line6pcm->line6->ifcdev,
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index 0efcd494daa..03a4bedcebc 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -6,3 +6,4 @@ futex_wait_private_mapped_file
 futex_wait_timeout
 futex_wait_uninitialized_heap
 futex_wait_wouldblock
+futex_wait_multiple
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 23207829ec7..26562f2d792 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -14,7 +14,8 @@ TEST_GEN_FILES := \
 	futex_requeue_pi_signal_restart \
 	futex_requeue_pi_mismatched_ops \
 	futex_wait_uninitialized_heap \
-	futex_wait_private_mapped_file
+	futex_wait_private_mapped_file \
+	futex_wait_multiple
 
 TEST_PROGS := run.sh
 
diff --git a/tools/testing/selftests/futex/functional/futex_wait_multiple.c b/tools/testing/selftests/futex/functional/futex_wait_multiple.c
new file mode 100644
index 00000000000..b48422e79f4
--- /dev/null
+++ b/tools/testing/selftests/futex/functional/futex_wait_multiple.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/******************************************************************************
+ *
+ *   Copyright © Collabora, Ltd., 2019
+ *
+ * DESCRIPTION
+ *      Test basic semantics of FUTEX_WAIT_MULTIPLE
+ *
+ * AUTHOR
+ *      Gabriel Krisman Bertazi <krisman@collabora.com>
+ *
+ * HISTORY
+ *      2019-Dec-13: Initial version by Krisman <krisman@collabora.com>
+ *
+ *****************************************************************************/
+
+#include <errno.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <pthread.h>
+#include "futextest.h"
+#include "logging.h"
+
+#define TEST_NAME "futex-wait-multiple"
+#define timeout_ns 100000
+#define MAX_COUNT 128
+#define WAKE_WAIT_US 3000000
+
+int ret = RET_PASS;
+char *progname;
+futex_t f[MAX_COUNT] = {0};
+struct futex_wait_block fwb[MAX_COUNT];
+
+void usage(char *prog)
+{
+	printf("Usage: %s\n", prog);
+	printf("  -c	Use color\n");
+	printf("  -h	Display this help message\n");
+	printf("  -v L	Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
+	       VQUIET, VCRITICAL, VINFO);
+}
+
+void test_count_overflow(void)
+{
+	futex_t f = FUTEX_INITIALIZER;
+	struct futex_wait_block fwb[MAX_COUNT+1];
+	int res, i;
+
+	ksft_print_msg("%s: Test passing too many futexes\n", progname);
+
+	for (i = 0; i < MAX_COUNT+1; i++) {
+		fwb[i].uaddr = &f;
+		fwb[i].val = f;
+		fwb[i].bitset = 0;
+	}
+
+	res = futex_wait_multiple(fwb, MAX_COUNT+1, NULL, FUTEX_PRIVATE_FLAG);
+
+#ifdef __ILP32__
+	if (res != -1 || errno != ENOSYS) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
+	} else {
+		ksft_test_result_skip("futex_wait_multiple not supported on x32\n");
+	}
+#else
+	if (res != -1 || errno != EINVAL) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
+	} else {
+		ksft_test_result_pass("futex_wait_multiple count overflow succeeds\n");
+	}
+
+#endif /* __ILP32__ */
+}
+
+void *waiterfn(void *arg)
+{
+	int res;
+
+	res = futex_wait_multiple(fwb, MAX_COUNT, NULL, FUTEX_PRIVATE_FLAG);
+
+#ifdef __ILP32__
+	if (res != -1 || errno != ENOSYS) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
+	} else {
+		ksft_test_result_skip("futex_wait_multiple not supported on x32\n");
+	}
+#else
+	if (res < 0)
+		ksft_print_msg("waiter failed %d\n", res);
+
+	info("futex_wait_multiple: Got hint that futex %d was woken\n", res);
+#endif /* __ILP32__ */
+
+	return NULL;
+}
+
+void test_fwb_wakeup(void)
+{
+	int res, i;
+	pthread_t waiter;
+
+	ksft_print_msg("%s: Test waking a waiter on a list of futexes\n", progname);
+
+	for (i = 0; i < MAX_COUNT; i++) {
+		fwb[i].uaddr = &f[i];
+		fwb[i].val = f[i];
+		fwb[i].bitset = 0xffffffff;
+	}
+
+	res = pthread_create(&waiter, NULL, waiterfn, NULL);
+	if (res) {
+		ksft_test_result_fail("Creating waiting thread failed\n");
+		ksft_exit_fail();
+	}
+
+	usleep(WAKE_WAIT_US);
+	res = futex_wake(&(f[MAX_COUNT-1]), 1, FUTEX_PRIVATE_FLAG);
+	if (res != 1) {
+		ksft_test_result_fail("Failed to wake thread res=%d\n", res);
+		ksft_exit_fail();
+	}
+
+	pthread_join(waiter, NULL);
+	ksft_test_result_pass("%s succeeds\n", __func__);
+}
+
+int main(int argc, char *argv[])
+{
+	int c;
+
+	while ((c = getopt(argc, argv, "chv:")) != -1) {
+		switch (c) {
+		case 'c':
+			log_color(1);
+			break;
+		case 'h':
+			usage(basename(argv[0]));
+			exit(0);
+		case 'v':
+			log_verbosity(atoi(optarg));
+			break;
+		default:
+			usage(basename(argv[0]));
+			exit(1);
+		}
+	}
+
+	progname = basename(argv[0]);
+
+	ksft_print_header();
+	ksft_set_plan(2);
+
+	test_count_overflow();
+
+#ifdef __ILP32__
+	/* On x32 futex_wait_multiple() is unsupported, so skip the wakeup test */
+	ksft_test_result_skip("futex_wait_multiple not supported on x32\n");
+#else
+	test_fwb_wakeup();
+#endif /* __ILP32__ */
+
+	ksft_print_cnts();
+	return ret;
+}
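
The semantics exercised by test_fwb_wakeup() above can also be reproduced outside the kselftest harness with a raw futex() syscall. The following sketch is illustrative only: it assumes the FUTEX_WAIT_MULTIPLE opcode value (13) and the struct futex_wait_block layout introduced by this series, and on an unpatched kernel the wait simply fails with ENOSYS.

/*
 * Illustrative sketch: wait on two futexes at once and wake one of them
 * from a helper thread.  Build with: gcc demo.c -lpthread
 */
#include <errno.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_MULTIPLE
#define FUTEX_WAIT_MULTIPLE 13		/* opcode value used by this series */
struct futex_wait_block {
	uint32_t *uaddr;
	uint32_t val;
	uint32_t bitset;
};
#endif

static uint32_t fa, fb;			/* two futex words, both 0 */

static void *waker(void *arg)
{
	(void)arg;
	usleep(100000);			/* give the main thread time to queue */
	/* Wake a single waiter queued on fb. */
	syscall(SYS_futex, &fb, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1,
		NULL, NULL, 0);
	return NULL;
}

int main(void)
{
	struct futex_wait_block blocks[2] = {
		{ &fa, 0, ~0u },	/* wait while fa == 0 */
		{ &fb, 0, ~0u },	/* wait while fb == 0 */
	};
	pthread_t thr;
	long ret;

	if (pthread_create(&thr, NULL, waker, NULL))
		return 1;

	/* Block until any entry is woken; the return value hints which one. */
	ret = syscall(SYS_futex, blocks,
		      FUTEX_WAIT_MULTIPLE | FUTEX_PRIVATE_FLAG,
		      2 /* count */, NULL /* no timeout */, NULL, 0);
	if (ret < 0)
		perror("FUTEX_WAIT_MULTIPLE");
	else
		printf("woken entry hint: %ld\n", ret);

	pthread_join(thr, NULL);
	return 0;
}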
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index ee55e6d389a..2a63e1c2cfb 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -11,6 +11,7 @@
  *
  * HISTORY
  *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
+ *      2019-Dec-13: Add WAIT_MULTIPLE test by Krisman <krisman@collabora.com>
  *
  *****************************************************************************/
 
@@ -41,6 +42,8 @@ int main(int argc, char *argv[])
 {
 	futex_t f1 = FUTEX_INITIALIZER;
 	struct timespec to;
+	time_t secs;
+	struct futex_wait_block fwb = {&f1, f1, 0};
 	int res, ret = RET_PASS;
 	int c;
 
@@ -65,7 +68,7 @@ int main(int argc, char *argv[])
 	}
 
 	ksft_print_header();
-	ksft_set_plan(1);
+	ksft_set_plan(2);
 	ksft_print_msg("%s: Block on a futex and wait for timeout\n",
 	       basename(argv[0]));
 	ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
@@ -79,8 +82,39 @@ int main(int argc, char *argv[])
 	if (!res || errno != ETIMEDOUT) {
 		fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
 		ret = RET_FAIL;
+	} else
+		ksft_test_result_pass("futex_wait timeout succeeds\n");
+
+	info("Calling futex_wait_multiple on f1: %u @ %p\n", f1, &f1);
+
+	/* Setup absolute time */
+	res = clock_gettime(CLOCK_REALTIME, &to);
+	secs = (to.tv_nsec + timeout_ns) / 1000000000;
+	to.tv_nsec = ((int64_t)to.tv_nsec + timeout_ns) % 1000000000;
+	to.tv_sec += secs;
+	info("to.tv_sec  = %ld\n", to.tv_sec);
+	info("to.tv_nsec = %ld\n", to.tv_nsec);
+
+	res = futex_wait_multiple(&fwb, 1, &to,
+				  FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME);
+
+#ifdef __ILP32__
+	if (res == -1 && errno == ENOSYS) {
+		ksft_test_result_skip("futex_wait_multiple not supported on x32\n");
+	} else {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
 	}
+#else
+	if (!res || errno != ETIMEDOUT) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
+	} else
+		ksft_test_result_pass("futex_wait_multiple timeout succeeds\n");
+#endif /* __ILP32__ */
 
-	print_result(TEST_NAME, ret);
+	ksft_print_cnts();
 	return ret;
 }
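
The timeout hunk above builds an absolute CLOCK_REALTIME deadline by hand before calling futex_wait_multiple() with FUTEX_CLOCK_REALTIME. A minimal standalone sketch of that carry arithmetic (the helper name is illustrative, not part of the patch):

#include <stdint.h>
#include <time.h>

/* Turn "now + delta_ns" into an absolute timespec, carrying ns overflow. */
static int abs_deadline(struct timespec *ts, int64_t delta_ns)
{
	if (clock_gettime(CLOCK_REALTIME, ts))
		return -1;
	ts->tv_sec += (ts->tv_nsec + delta_ns) / 1000000000;
	ts->tv_nsec = (ts->tv_nsec + delta_ns) % 1000000000;
	return 0;
}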
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index 0ae390ff816..bcbac042992 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -12,6 +12,7 @@
  *
  * HISTORY
  *      2009-Nov-14: Initial version by Gowrishankar <gowrishankar.m@in.ibm.com>
+ *      2019-Dec-13: Add WAIT_MULTIPLE test by Krisman <krisman@collabora.com>
  *
  *****************************************************************************/
 
@@ -40,6 +41,7 @@ int main(int argc, char *argv[])
 {
 	struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
 	futex_t f1 = FUTEX_INITIALIZER;
+	struct futex_wait_block fwb = {&f1, f1+1, 0};
 	int res, ret = RET_PASS;
 	int c;
 
@@ -61,7 +63,7 @@ int main(int argc, char *argv[])
 	}
 
 	ksft_print_header();
-	ksft_set_plan(1);
+	ksft_set_plan(2);
 	ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
 	       basename(argv[0]));
 
@@ -71,8 +73,30 @@ int main(int argc, char *argv[])
 		fail("futex_wait returned: %d %s\n",
 		     res ? errno : res, res ? strerror(errno) : "");
 		ret = RET_FAIL;
+	} else
+		ksft_test_result_pass("futex_wait wouldblock succeeds\n");
+
+	info("Calling futex_wait_multiple on f1: %u @ %p with val=%u\n",
+	     f1, &f1, f1+1);
+	res = futex_wait_multiple(&fwb, 1, NULL, FUTEX_PRIVATE_FLAG);
+
+#ifdef __ILP32__
+	if (res != -1 || errno != ENOSYS) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
+	} else {
+		ksft_test_result_skip("futex_wait_multiple not supported on x32\n");
+	}
+#else
+	if (!res || errno != EWOULDBLOCK) {
+		ksft_test_result_fail("futex_wait_multiple returned %d\n",
+				      res < 0 ? errno : res);
+		ret = RET_FAIL;
 	}
+	else
+		ksft_test_result_pass("futex_wait_multiple wouldblock succeeds\n");
+#endif /* __ILP32__ */
 
-	print_result(TEST_NAME, ret);
+	ksft_print_cnts();
 	return ret;
 }
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 1acb6ace168..a8be94f28ff 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -73,3 +73,6 @@ echo
 echo
 ./futex_wait_uninitialized_heap $COLOR
 ./futex_wait_private_mapped_file $COLOR
+
+echo
+./futex_wait_multiple $COLOR
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index ddbcfc9b7ba..bb103bef455 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -38,6 +38,14 @@ typedef volatile u_int32_t futex_t;
 #ifndef FUTEX_CMP_REQUEUE_PI
 #define FUTEX_CMP_REQUEUE_PI		12
 #endif
+#ifndef FUTEX_WAIT_MULTIPLE
+#define FUTEX_WAIT_MULTIPLE		13
+struct futex_wait_block {
+	futex_t *uaddr;
+	futex_t val;
+	__u32 bitset;
+};
+#endif
 #ifndef FUTEX_WAIT_REQUEUE_PI_PRIVATE
 #define FUTEX_WAIT_REQUEUE_PI_PRIVATE	(FUTEX_WAIT_REQUEUE_PI | \
 					 FUTEX_PRIVATE_FLAG)
@@ -80,6 +88,20 @@ futex_wait(futex_t *uaddr, futex_t val, struct timespec *timeout, int opflags)
 	return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
 }
 
+/**
+ * futex_wait_multiple() - block on several futexes with optional timeout
+ * @fwb:	userspace address of an array of struct futex_wait_block
+ * @count:	number of entries in @fwb
+ * @timeout:	optional absolute timeout
+ * @opflags:	futex operation flags (e.g. FUTEX_PRIVATE_FLAG)
+ */
+static inline int
+futex_wait_multiple(struct futex_wait_block *fwb, int count,
+		    struct timespec *timeout, int opflags)
+{
+	return futex(fwb, FUTEX_WAIT_MULTIPLE, count, timeout, NULL, 0,
+		     opflags);
+}
+
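As a usage note, the helper above simply forwards the wait-block array through the regular futex() wrapper, with the array pointer in the uaddr slot and the entry count in the val slot. A hedged fragment showing how a test might call it (identifiers other than those defined in this header are illustrative):

/* Block on two futexes until either is woken; NULL means no timeout. */
static inline int wait_on_two(futex_t *a, futex_t *b)
{
	struct futex_wait_block blocks[] = {
		{ .uaddr = a, .val = *a, .bitset = ~0U },
		{ .uaddr = b, .val = *b, .bitset = ~0U },
	};

	/* Returns a woken-entry hint, or -1 with errno set (EAGAIN, ENOSYS, ...). */
	return futex_wait_multiple(blocks, 2, NULL, FUTEX_PRIVATE_FLAG);
}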
 /**
  * futex_wake() - wake one or more tasks blocked on uaddr
  * @nr_wake:	wake up to this many tasks