Linux v4.19.13 snapshot.
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
new file mode 100644
index 0000000..1a4ea98
--- /dev/null
+++ b/drivers/usb/host/Kconfig
@@ -0,0 +1,800 @@
+#
+# USB Host Controller Drivers
+#
+comment "USB Host Controller Drivers"
+
+config USB_C67X00_HCD
+	tristate "Cypress C67x00 HCD support"
+	depends on HAS_IOMEM
+	help
+	  The Cypress C67x00 (EZ-Host/EZ-OTG) chips are dual-role
+	  host/peripheral/OTG USB controllers.
+
+	  Enable this option to support this chip in host controller mode.
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called c67x00.
+
+config USB_XHCI_HCD
+	tristate "xHCI HCD (USB 3.0) support"
+	depends on HAS_DMA && HAS_IOMEM
+	---help---
+	  The eXtensible Host Controller Interface (xHCI) is the standard
+	  for USB 3.0 "SuperSpeed" host controller hardware.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xhci-hcd.
+
+if USB_XHCI_HCD
+config USB_XHCI_DBGCAP
+	bool "xHCI support for debug capability"
+	depends on TTY
+	---help---
+	  Say 'Y' to enable the support for the xHCI debug capability. Make
+	  sure that your xHCI host supports the extended debug capability and
+	  you want a TTY serial device based on the xHCI debug capability
+	  before enabling this option. If unsure, say 'N'.
+
+config USB_XHCI_PCI
+	tristate
+	depends on USB_PCI
+	default y
+
+config USB_XHCI_PLATFORM
+	tristate "Generic xHCI driver for a platform device"
+	select USB_XHCI_RCAR if ARCH_RENESAS
+	---help---
+	  Adds an xHCI host driver for a generic platform device, which
+	  provides a memory space and an irq.
+	  It is also a prerequisite for platform specific drivers that
+	  implement some extra quirks.
+
+	  If unsure, say N.
+
+config USB_XHCI_HISTB
+	tristate "xHCI support for HiSilicon STB SoCs"
+	depends on USB_XHCI_PLATFORM && (ARCH_HISI || COMPILE_TEST)
+	help
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in HiSilicon STB SoCs.
+
+config USB_XHCI_MTK
+	tristate "xHCI support for MediaTek SoCs"
+	select MFD_SYSCON
+	depends on (MIPS && SOC_MT7621) || ARCH_MEDIATEK || COMPILE_TEST
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in MediaTek SoCs.
+	  If unsure, say N.
+
+config USB_XHCI_MVEBU
+	tristate "xHCI support for Marvell Armada 375/38x"
+	select USB_XHCI_PLATFORM
+	depends on HAS_IOMEM
+	depends on ARCH_MVEBU || COMPILE_TEST
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in Marvell Armada 375/38x ARM SoCs.
+
+config USB_XHCI_RCAR
+	tristate "xHCI support for Renesas R-Car SoCs"
+	depends on USB_XHCI_PLATFORM
+	depends on ARCH_RENESAS || COMPILE_TEST
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in Renesas R-Car ARM SoCs.
+
+config USB_XHCI_TEGRA
+	tristate "xHCI support for NVIDIA Tegra SoCs"
+	depends on PHY_TEGRA_XUSB
+	depends on RESET_CONTROLLER
+	select FW_LOADER
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in NVIDIA Tegra124 and later SoCs.
+
+endif # USB_XHCI_HCD
+
+config USB_EHCI_HCD
+	tristate "EHCI HCD (USB 2.0) support"
+	depends on HAS_DMA && HAS_IOMEM
+	---help---
+	  The Enhanced Host Controller Interface (EHCI) is the standard for
+	  USB 2.0 "high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller
+	  hardware.  If your USB host controller supports USB 2.0, you will
+	  likely want to configure this Host Controller Driver.
+
+	  EHCI controllers are packaged with "companion" host controllers (OHCI
+	  or UHCI) to handle USB 1.1 devices connected to root hub ports.  Ports
+	  will connect to EHCI if the device is high speed, otherwise they
+	  connect to a companion controller.  If you configure EHCI, you should
+	  probably configure the OHCI (for NEC and some other vendors) USB Host
+	  Controller Driver or UHCI (for Via motherboards) Host Controller
+	  Driver too.
+
+	  You may want to read <file:Documentation/usb/ehci.txt>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ehci-hcd.
+
+config USB_EHCI_ROOT_HUB_TT
+	bool "Root Hub Transaction Translators"
+	depends on USB_EHCI_HCD
+	---help---
+	  Some EHCI chips have vendor-specific extensions to integrate
+	  transaction translators, so that no OHCI or UHCI companion
+	  controller is needed.  It's safe to say "y" even if your
+	  controller doesn't support this feature.
+
+	  This supports the EHCI implementation that's originally
+	  from ARC, and has since changed hands a few times.
+
+config USB_EHCI_TT_NEWSCHED
+	bool "Improved Transaction Translator scheduling"
+	depends on USB_EHCI_HCD
+	default y
+	---help---
+	  This changes the periodic scheduling code to fill more of the low
+	  and full speed bandwidth available from the Transaction Translator
+	  (TT) in USB 2.0 hubs.  Without this, only one transfer will be
+	  issued in each microframe, significantly reducing the number of
+	  periodic low/fullspeed transfers possible.
+
+	  If you have multiple periodic low/fullspeed devices connected to a
+	  highspeed USB hub which is connected to a highspeed USB Host
+	  Controller, and some of those devices will not work correctly
+	  (possibly due to "ENOSPC" or "-28" errors), say Y.  Conversely, if
+	  you have only one such device and it doesn't work, you could try
+	  saying N.
+
+	  If unsure, say Y.
+
+if USB_EHCI_HCD
+
+config USB_EHCI_PCI
+	tristate
+	depends on USB_PCI
+	default y
+
+config USB_EHCI_HCD_PMC_MSP
+	tristate "EHCI support for on-chip PMC MSP71xx USB controller"
+	depends on MSP_HAS_USB
+	default n
+	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO
+	---help---
+	  Enables support for the on-chip USB controller on the PMC_MSP7100 family SoCs.
+	  If unsure, say N.
+
+config XPS_USB_HCD_XILINX
+	bool "Use Xilinx usb host EHCI controller core"
+	depends on (PPC32 || MICROBLAZE)
+	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO
+	---help---
+	  The Xilinx XPS USB host controller core is EHCI compliant and has
+	  a transaction translator built in. It can be configured to support
+	  either both high speed and full speed devices, or high speed
+	  devices only.
+
+config USB_EHCI_FSL
+	tristate "Support for Freescale PPC on-chip EHCI USB controller"
+	depends on FSL_SOC
+	select USB_EHCI_ROOT_HUB_TT
+	---help---
+	  Variation of ARC USB block used in some Freescale chips.
+
+config USB_EHCI_MXC
+	tristate "Support for Freescale i.MX on-chip EHCI USB controller"
+	depends on ARCH_MXC
+	select USB_EHCI_ROOT_HUB_TT
+	---help---
+	  Variation of ARC USB block used in some Freescale chips.
+
+config USB_EHCI_HCD_NPCM7XX
+	tristate "Support for Nuvoton NPCM7XX on-chip EHCI USB controller"
+	depends on (USB_EHCI_HCD && ARCH_NPCM7XX) || COMPILE_TEST
+	default y if (USB_EHCI_HCD && ARCH_NPCM7XX)
+	help
+	  Enables support for the on-chip EHCI controller on
+	  Nuvoton NPCM7XX chips.
+
+config USB_EHCI_HCD_OMAP
+	tristate "EHCI support for OMAP3 and later chips"
+	depends on ARCH_OMAP
+	depends on NOP_USB_XCEIV
+	default y
+	---help---
+	  Enables support for the on-chip EHCI controller on
+	  OMAP3 and later chips.
+
+config USB_EHCI_HCD_ORION
+	tristate  "Support for Marvell EBU on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
+	default y
+	---help---
+	  Enables support for the on-chip EHCI controller on Marvell's
+	  embedded ARM SoCs, including Orion, Kirkwood, Dove, Armada XP,
+	  Armada 370.  This is different from the EHCI implementation
+	  on Marvell's mobile PXA and MMP SoCs; see "EHCI support for
+	  Marvell PXA/MMP USB controller" for those.
+
+config USB_EHCI_HCD_SPEAR
+	tristate "Support for ST SPEAr on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && PLAT_SPEAR
+	default y
+	---help---
+	  Enables support for the on-chip EHCI controller on
+	  ST SPEAr chips.
+
+config USB_EHCI_HCD_STI
+	tristate "Support for ST STiHxxx on-chip EHCI USB controller"
+	depends on ARCH_STI && OF
+	select GENERIC_PHY
+	select USB_EHCI_HCD_PLATFORM
+	help
+	  Enable support for the on-chip EHCI controller found on
+	  STMicroelectronics consumer electronics SoCs.
+
+config USB_EHCI_HCD_AT91
+	tristate "Support for Atmel on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && ARCH_AT91
+	default y
+	---help---
+	  Enables support for the on-chip EHCI controller on
+	  Atmel chips.
+
+config USB_EHCI_TEGRA
+	tristate "NVIDIA Tegra HCD support"
+	depends on ARCH_TEGRA
+	select USB_EHCI_ROOT_HUB_TT
+	select USB_TEGRA_PHY
+	help
+	  This driver enables support for the internal USB Host Controllers
+	  found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+
+config USB_EHCI_HCD_PPC_OF
+	bool "EHCI support for PPC USB controller on OF platform bus"
+	depends on PPC
+	default y
+	---help---
+	  Enables support for the USB controller present on the PowerPC
+	  OpenFirmware platform bus.
+
+config USB_EHCI_SH
+	bool "EHCI support for SuperH USB controller"
+	depends on SUPERH
+	---help---
+	  Enables support for the on-chip EHCI controller on the SuperH.
+	  If you use the PCI EHCI controller, this option is not necessary.
+
+config USB_EHCI_EXYNOS
+	tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	help
+	  Enable support for the on-chip EHCI controller on Samsung Exynos SoCs.
+
+config USB_EHCI_MV
+	bool "EHCI support for Marvell PXA/MMP USB controller"
+	depends on (ARCH_PXA || ARCH_MMP)
+	select USB_EHCI_ROOT_HUB_TT
+	---help---
+	  Enables support for the Marvell (including PXA and MMP series)
+	  on-chip USB SPH and OTG controllers. SPH is a single-port host and
+	  can only act as an EHCI host; OTG is a controller that can switch
+	  to host mode. Note that this driver will not work on Marvell's
+	  other EHCI controller used by the EBU-type SoCs including Orion,
+	  Kirkwood, Dove, Armada 370 and Armada XP. See "Support for Marvell
+	  EBU on-chip EHCI USB controller" for those.
+
+config USB_W90X900_EHCI
+	tristate "W90X900(W90P910) EHCI support"
+	depends on ARCH_W90X900
+	---help---
+	  Enables support for the W90X900 USB controller.
+
+config USB_CNS3XXX_EHCI
+	bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
+	depends on ARCH_CNS3XXX
+	select USB_EHCI_HCD_PLATFORM
+	---help---
+	  This option is deprecated now and the driver was removed, use
+	  USB_EHCI_HCD_PLATFORM instead.
+
+	  Enable support for the CNS3XXX SOC's on-chip EHCI controller.
+	  It is needed for high-speed (480Mbit/sec) USB 2.0 device
+	  support.
+
+config USB_EHCI_HCD_PLATFORM
+	tristate "Generic EHCI driver for a platform device"
+	default n
+	---help---
+	  Adds an EHCI host driver for a generic platform device, which
+	  provides a memory space and an irq.
+
+	  If unsure, say N.
+
+config USB_OCTEON_EHCI
+	bool "Octeon on-chip EHCI support (DEPRECATED)"
+	depends on CAVIUM_OCTEON_SOC
+	default n
+	select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+	select USB_EHCI_HCD_PLATFORM
+	help
+	  This option is deprecated now and the driver was removed, use
+	  USB_EHCI_HCD_PLATFORM instead.
+
+	  Enable support for the Octeon II SOC's on-chip EHCI
+	  controller.  It is needed for high-speed (480Mbit/sec)
+	  USB 2.0 device support.  All CN6XXX based chips with USB are
+	  supported.
+
+endif # USB_EHCI_HCD
+
+config USB_OXU210HP_HCD
+	tristate "OXU210HP HCD support"
+	depends on HAS_IOMEM
+	---help---
+	  The OXU210HP is a USB host/OTG/device controller. Enable this
+	  option if your board has this chip. If unsure, say N.
+
+	  This driver does not support isochronous transfers and doesn't
+	  implement OTG nor USB device controllers.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called oxu210hp-hcd.
+
+config USB_ISP116X_HCD
+	tristate "ISP116X HCD support"
+	depends on HAS_IOMEM
+	---help---
+	  The ISP1160 and ISP1161 chips are USB host controllers. Enable this
+	  option if your board has this chip. If unsure, say N.
+
+	  This driver does not support isochronous transfers.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called isp116x-hcd.
+
+config USB_ISP1362_HCD
+	tristate "ISP1362 HCD support"
+	depends on HAS_IOMEM
+	depends on COMPILE_TEST # nothing uses this
+	---help---
+	  Supports the Philips ISP1362 chip as a host controller
+
+	  This driver does not support isochronous transfers.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called isp1362-hcd.
+
+config USB_FOTG210_HCD
+	tristate "FOTG210 HCD support"
+	depends on USB && HAS_DMA && HAS_IOMEM
+	---help---
+	  Faraday FOTG210 is an OTG controller which can be configured as
+	  a USB 2.0 host. It is designed to meet the USB 2.0 EHCI
+	  specification with minor modifications.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fotg210-hcd.
+
+config USB_MAX3421_HCD
+	tristate "MAX3421 HCD (USB-over-SPI) support"
+	depends on USB && SPI
+	---help---
+	  The Maxim MAX3421E chip supports standard USB 2.0-compliant
+	  full-speed devices either in host or peripheral mode.  This
+	  driver supports the host-mode of the MAX3421E only.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called max3421-hcd.
+
+config USB_OHCI_HCD
+	tristate "OHCI HCD (USB 1.1) support"
+	depends on HAS_DMA && HAS_IOMEM
+	---help---
+	  The Open Host Controller Interface (OHCI) is a standard for accessing
+	  USB 1.1 host controller hardware.  It does more in hardware than Intel's
+	  UHCI specification.  If your USB host controller follows the OHCI spec,
+	  say Y.  On most non-x86 systems, and on x86 hardware that's not using a
+	  USB controller from Intel or VIA, this is appropriate.  If your host
+	  controller doesn't use PCI, this is probably appropriate.  For a PCI
+	  based system where you're not sure, the "lspci -v" entry will list the
+	  right "prog-if" for your USB controller(s):  EHCI, OHCI, or UHCI.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ohci-hcd.
+
+if USB_OHCI_HCD
+
+config USB_OHCI_HCD_OMAP1
+	tristate "OHCI support for OMAP1/2 chips"
+	depends on ARCH_OMAP1
+	depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
+	default y
+	---help---
+	  Enables support for the OHCI controller on OMAP1/2 chips.
+
+config USB_OHCI_HCD_SPEAR
+	tristate "Support for ST SPEAr on-chip OHCI USB controller"
+	depends on USB_OHCI_HCD && PLAT_SPEAR
+	default y
+	---help---
+	  Enables support for the on-chip OHCI controller on
+	  ST SPEAr chips.
+
+config USB_OHCI_HCD_STI
+	tristate "Support for ST STiHxxx on-chip OHCI USB controller"
+	depends on ARCH_STI && OF
+	select GENERIC_PHY
+	select USB_OHCI_HCD_PLATFORM
+	help
+	  Enable support for the on-chip OHCI controller found on
+	  STMicroelectronics consumer electronics SoCs.
+
+config USB_OHCI_HCD_S3C2410
+	tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+	depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+	default y
+	---help---
+	  Enables support for the on-chip OHCI controller on
+	  S3C24xx/S3C64xx chips.
+
+config USB_OHCI_HCD_LPC32XX
+	tristate "Support for LPC on-chip OHCI USB controller"
+	depends on USB_OHCI_HCD && ARCH_LPC32XX
+	depends on USB_ISP1301
+	default y
+	---help---
+	  Enables support for the on-chip OHCI controller on
+	  NXP chips.
+
+config USB_OHCI_HCD_PXA27X
+	tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
+	depends on USB_OHCI_HCD && (PXA27x || PXA3xx)
+	default y
+	---help---
+	  Enables support for the on-chip OHCI controller on
+	  PXA27x/PXA3xx chips.
+
+config USB_OHCI_HCD_AT91
+	tristate "Support for Atmel on-chip OHCI USB controller"
+	depends on USB_OHCI_HCD && ARCH_AT91 && OF
+	default y
+	---help---
+	  Enables support for the on-chip OHCI controller on
+	  Atmel chips.
+
+config USB_OHCI_HCD_OMAP3
+	tristate "OHCI support for OMAP3 and later chips"
+	depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+	select USB_OHCI_HCD_PLATFORM
+	default y
+	help
+	  This option is deprecated now and the driver was removed, use
+	  USB_OHCI_HCD_PLATFORM instead.
+
+	  Enables support for the on-chip OHCI controller on
+	  OMAP3 and later chips.
+
+config USB_OHCI_HCD_DAVINCI
+	tristate "OHCI support for TI DaVinci DA8xx"
+	depends on ARCH_DAVINCI_DA8XX
+	depends on USB_OHCI_HCD
+	select PHY_DA8XX_USB
+	default y
+	help
+	  Enables support for the DaVinci DA8xx integrated OHCI
+	  controller. This driver cannot currently be a loadable
+	  module because it lacks a proper PHY abstraction.
+
+config USB_OHCI_HCD_PPC_OF_BE
+	bool "OHCI support for OF platform bus (big endian)"
+	depends on PPC
+	select USB_OHCI_BIG_ENDIAN_DESC
+	select USB_OHCI_BIG_ENDIAN_MMIO
+	---help---
+	  Enables support for big-endian USB controllers present on the
+	  OpenFirmware platform bus.
+
+config USB_OHCI_HCD_PPC_OF_LE
+	bool "OHCI support for OF platform bus (little endian)"
+	depends on PPC
+	select USB_OHCI_LITTLE_ENDIAN
+	---help---
+	  Enables support for little-endian USB controllers present on the
+	  OpenFirmware platform bus.
+
+config USB_OHCI_HCD_PPC_OF
+	bool
+	depends on PPC
+	default USB_OHCI_HCD_PPC_OF_BE || USB_OHCI_HCD_PPC_OF_LE
+
+config USB_OHCI_HCD_PCI
+	tristate "OHCI support for PCI-bus USB controllers"
+	depends on USB_PCI
+	default y
+	select USB_OHCI_LITTLE_ENDIAN
+	---help---
+	  Enables support for PCI-bus plug-in USB controller cards.
+	  If unsure, say Y.
+
+config USB_OHCI_HCD_SSB
+	bool "OHCI support for Broadcom SSB OHCI core (DEPRECATED)"
+	depends on (SSB = y || SSB = USB_OHCI_HCD)
+	select USB_HCD_SSB
+	select USB_OHCI_HCD_PLATFORM
+	default n
+	---help---
+	  This option is deprecated now and the driver was removed, use
+	  USB_HCD_SSB and USB_OHCI_HCD_PLATFORM instead.
+
+	  Support for the Sonics Silicon Backplane (SSB) attached
+	  Broadcom USB OHCI core.
+
+	  This device is present in some embedded devices with
+	  Broadcom based SSB bus.
+
+	  If unsure, say N.
+
+config USB_OHCI_SH
+	bool "OHCI support for SuperH USB controller (DEPRECATED)"
+	depends on SUPERH
+	select USB_OHCI_HCD_PLATFORM
+	---help---
+	  This option is deprecated now and the driver was removed, use
+	  USB_OHCI_HCD_PLATFORM instead.
+
+	  Enables support for the on-chip OHCI controller on the SuperH.
+	  If you use the PCI OHCI controller, this option is not necessary.
+
+config USB_OHCI_EXYNOS
+	tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	help
+	  Enable support for the on-chip OHCI controller on Samsung Exynos SoCs.
+
+config USB_CNS3XXX_OHCI
+	bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
+	depends on ARCH_CNS3XXX
+	select USB_OHCI_HCD_PLATFORM
+	---help---
+	  This option is deprecated now and the driver was removed, use
+	  USB_OHCI_HCD_PLATFORM instead.
+
+	  Enable support for the CNS3XXX SOC's on-chip OHCI controller.
+	  It is needed for low-speed USB 1.0 device support.
+
+config USB_OHCI_HCD_PLATFORM
+	tristate "Generic OHCI driver for a platform device"
+	default n
+	---help---
+	  Adds an OHCI host driver for a generic platform device, which
+	  provides a memory space and an irq.
+
+	  If unsure, say N.
+
+config USB_OCTEON_OHCI
+	bool "Octeon on-chip OHCI support (DEPRECATED)"
+	depends on CAVIUM_OCTEON_SOC
+	default USB_OCTEON_EHCI
+	select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+	select USB_OHCI_LITTLE_ENDIAN
+	select USB_OHCI_HCD_PLATFORM
+	help
+	  This option is deprecated now and the driver was removed, use
+	  USB_OHCI_HCD_PLATFORM instead.
+
+	  Enable support for the Octeon II SOC's on-chip OHCI
+	  controller.  It is needed for low-speed USB 1.0 device
+	  support.  All CN6XXX based chips with USB are supported.
+
+endif # USB_OHCI_HCD
+
+config USB_UHCI_HCD
+	tristate "UHCI HCD (most Intel and VIA) support"
+	depends on USB_PCI || USB_UHCI_SUPPORT_NON_PCI_HC
+	---help---
+	  The Universal Host Controller Interface is a standard by Intel for
+	  accessing the USB hardware in the PC (which is also called the USB
+	  host controller). If your USB host controller conforms to this
+	  standard, you may want to say Y, but see below. All recent boards
+	  with Intel PCI chipsets (like Intel 430TX, 440FX, 440LX, 440BX,
+	  i810, i820) conform to this standard, as do all VIA PCI chipsets
+	  (like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro
+	  133) and LEON/GRLIB SoCs with the GRUSBHC controller.
+	  If unsure, say Y.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called uhci-hcd.
+
+config USB_UHCI_SUPPORT_NON_PCI_HC
+	bool
+	default y if (SPARC_LEON || USB_UHCI_PLATFORM)
+
+config USB_UHCI_PLATFORM
+	bool
+	default y if (ARCH_VT8500 || ARCH_ASPEED)
+
+config USB_UHCI_ASPEED
+	bool
+	default y if ARCH_ASPEED
+
+config USB_FHCI_HCD
+	tristate "Freescale QE USB Host Controller support"
+	depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
+	select FSL_GTM
+	select QE_USB
+	help
+	  This driver enables support for the Freescale QE USB Host
+	  Controller (as found on MPC8360 and MPC8323 processors). The
+	  driver supports full-speed and low-speed USB.
+
+config FHCI_DEBUG
+	bool "Freescale QE USB Host Controller debug support"
+	depends on USB_FHCI_HCD && DEBUG_FS
+	help
+	  Say "y" to see some FHCI debug information and statistics
+	  through debugfs.
+
+config USB_U132_HCD
+	tristate "Elan U132 Adapter Host Controller"
+	depends on USB_FTDI_ELAN
+	help
+	  The U132 adapter is a USB to CardBus adapter specifically designed
+	  for PC cards that contain an OHCI host controller. A typical such
+	  PC card is the Orange Mobile 3G Option GlobeTrotter Fusion card.
+	  The U132 adapter will *NOT* work with PC cards that do not contain
+	  an OHCI controller.
+
+	  For those PC cards that contain multiple OHCI controllers only the
+	  first one is used.
+
+	  The driver consists of two modules, the "ftdi-elan" module is a
+	  USB client driver that interfaces to the FTDI chip within ELAN's
+	  USB-to-PCMCIA adapter, and this "u132-hcd" module is a USB host
+	  controller driver that talks to the OHCI controller within the
+	  CardBus cards that are inserted in the U132 adapter.
+
+	  This driver has been tested with a CardBus OHCI USB adapter, and
+	  worked with a USB PEN Drive inserted into the first USB port of
+	  the PCCARD. A rather pointless thing to do, but useful for testing.
+
+	  It is safe to say M here.
+
+	  See also <http://www.elandigitalsystems.com/support/ufaq/u132linux.php>
+
+config USB_SL811_HCD
+	tristate "SL811HS HCD support"
+	depends on HAS_IOMEM
+	help
+	  The SL811HS is a single-port USB controller that supports either
+	  host side or peripheral side roles.  Enable this option if your
+	  board has this chip, and you want to use it as a host controller. 
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sl811-hcd.
+
+config USB_SL811_HCD_ISO
+	bool "partial ISO support"
+	depends on USB_SL811_HCD
+	help
+	  The driver doesn't support iso_frame_desc (yet), but for simple
+	  devices that queue just one ISO frame per URB, ISO transfers
+	  "should" work using the normal URB status fields.
+
+	  If unsure, say N.
+
+config USB_SL811_CS
+	tristate "CF/PCMCIA support for SL811HS HCD"
+	depends on USB_SL811_HCD && PCMCIA
+	help
+	  Wraps a PCMCIA driver around the SL811HS HCD, supporting the RATOC
+	  REX-CFU1U CF card (often used with PDAs).  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called "sl811_cs".
+
+config USB_R8A66597_HCD
+	tristate "R8A66597 HCD support"
+	depends on HAS_IOMEM
+	help
+	  The R8A66597 is a USB 2.0 host and peripheral controller.
+
+	  Enable this option if your board has this chip, and you want
+	  to use it as a host controller.  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called r8a66597-hcd.
+
+config USB_RENESAS_USBHS_HCD
+	tristate "Renesas USBHS HCD support"
+	depends on USB_RENESAS_USBHS
+	help
+	  The Renesas USBHS is a USB 2.0 host and peripheral controller.
+
+	  Enable this option if your board has this chip, and you want
+	  to use it as a host controller.  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called renesas-usbhs.
+
+config USB_WHCI_HCD
+	tristate "Wireless USB Host Controller Interface (WHCI) driver"
+	depends on USB_PCI && USB && UWB
+	select USB_WUSB
+	select UWB_WHCI
+	help
+	  A driver for PCI-based Wireless USB Host Controllers that are
+	  compliant with the WHCI specification.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "whci-hcd".
+
+config USB_HWA_HCD
+	tristate "Host Wire Adapter (HWA) driver"
+	depends on USB && UWB
+	select USB_WUSB
+	select UWB_HWA
+	help
+	  This driver enables you to connect Wireless USB devices to
+	  your system using a Host Wire Adapter USB dongle. This is a
+	  UWB Radio Controller and WUSB Host Controller connected to
+	  your machine via USB (specified in WUSB 1.0).
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called "hwa-hc".
+
+config USB_IMX21_HCD
+	tristate "i.MX21 HCD support"
+	depends on ARM && ARCH_MXC
+	help
+	  This driver enables support for the on-chip USB host in the
+	  i.MX21 processor.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called "imx21-hcd".
+
+config USB_HCD_BCMA
+	tristate "BCMA usb host driver"
+	depends on BCMA
+	select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+	select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+	help
+	  Enable support for the EHCI and OHCI host controllers on a bcma
+	  bus. It converts the bcma driver into two platform device
+	  drivers, one for EHCI and one for OHCI.
+
+	  If unsure, say N.
+
+config USB_HCD_SSB
+	tristate "SSB usb host driver"
+	depends on SSB
+	select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+	select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
+	help
+	  Enable support for the EHCI and OHCI host controllers on an SSB
+	  bus. It converts the ssb driver into two platform device
+	  drivers, one for EHCI and one for OHCI.
+
+	  If unsure, say N.
+
+config USB_HCD_TEST_MODE
+	bool "HCD test mode support"
+	---help---
+	  Say 'Y' to enable additional software test modes that may be
+	  supported by the host controller drivers.
+
+	  One such test mode is the Embedded High-speed Host Electrical Test
+	  (EHSET) for EHCI host controller hardware, specifically the "Single
+	  Step Set Feature" test.  Typically this will be enabled for On-the-Go
+	  or embedded hosts that need to undergo USB-IF compliance testing with
+	  the aid of special testing hardware.  In the future, this may expand
+	  to include other tests that require support from an HCD driver.
+
+	  This option is of interest only to developers who need to validate
+	  their USB hardware designs.  It is not needed for normal use.  If
+	  unsure, say N.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
new file mode 100644
index 0000000..e623526
--- /dev/null
+++ b/drivers/usb/host/Makefile
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for USB Host Controller Drivers
+#
+
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
+fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
+fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
+
+fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
+
+xhci-hcd-y := xhci.o xhci-mem.o xhci-ext-caps.o
+xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
+
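+# A note on the pattern below: CONFIG_USB_XHCI_DBGCAP is a bool, so any
+# non-empty value means "y", and the debug-capability objects are linked
+# straight into xhci-hcd rather than being built as a separate module.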
+ifneq ($(CONFIG_USB_XHCI_DBGCAP), )
+	xhci-hcd-y += xhci-dbgcap.o xhci-dbgtty.o
+endif
+
+ifneq ($(CONFIG_USB_XHCI_MTK), )
+	xhci-hcd-y += xhci-mtk-sch.o
+endif
+
+xhci-plat-hcd-y := xhci-plat.o
+ifneq ($(CONFIG_USB_XHCI_MVEBU), )
+	xhci-plat-hcd-y		+= xhci-mvebu.o
+endif
+ifneq ($(CONFIG_USB_XHCI_RCAR), )
+	xhci-plat-hcd-y		+= xhci-rcar.o
+endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+	xhci-hcd-y		+= xhci-debugfs.o
+endif
+
+obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+
+obj-$(CONFIG_USB_PCI)	+= pci-quirks.o
+
+obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o
+obj-$(CONFIG_USB_EHCI_PCI)	+= ehci-pci.o
+obj-$(CONFIG_USB_EHCI_HCD_PLATFORM)	+= ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC)	+= ehci-mxc.o
+obj-$(CONFIG_USB_EHCI_HCD_NPCM7XX)	+= ehci-npcm7xx.o
+obj-$(CONFIG_USB_EHCI_HCD_OMAP)	+= ehci-omap.o
+obj-$(CONFIG_USB_EHCI_HCD_ORION)	+= ehci-orion.o
+obj-$(CONFIG_USB_EHCI_HCD_SPEAR)	+= ehci-spear.o
+obj-$(CONFIG_USB_EHCI_HCD_STI)	+= ehci-st.o
+obj-$(CONFIG_USB_EHCI_EXYNOS)	+= ehci-exynos.o
+obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
+obj-$(CONFIG_USB_EHCI_TEGRA)	+= ehci-tegra.o
+obj-$(CONFIG_USB_W90X900_EHCI)	+= ehci-w90x900.o
+
+obj-$(CONFIG_USB_OXU210HP_HCD)	+= oxu210hp-hcd.o
+obj-$(CONFIG_USB_ISP116X_HCD)	+= isp116x-hcd.o
+obj-$(CONFIG_USB_ISP1362_HCD)	+= isp1362-hcd.o
+
+obj-$(CONFIG_USB_OHCI_HCD)	+= ohci-hcd.o
+obj-$(CONFIG_USB_OHCI_HCD_PCI)	+= ohci-pci.o
+obj-$(CONFIG_USB_OHCI_HCD_PLATFORM)	+= ohci-platform.o
+obj-$(CONFIG_USB_OHCI_EXYNOS)	+= ohci-exynos.o
+obj-$(CONFIG_USB_OHCI_HCD_OMAP1)	+= ohci-omap.o
+obj-$(CONFIG_USB_OHCI_HCD_SPEAR)	+= ohci-spear.o
+obj-$(CONFIG_USB_OHCI_HCD_STI)	+= ohci-st.o
+obj-$(CONFIG_USB_OHCI_HCD_AT91)	+= ohci-at91.o
+obj-$(CONFIG_USB_OHCI_HCD_S3C2410)	+= ohci-s3c2410.o
+obj-$(CONFIG_USB_OHCI_HCD_LPC32XX)	+= ohci-nxp.o
+obj-$(CONFIG_USB_OHCI_HCD_PXA27X)	+= ohci-pxa27x.o
+obj-$(CONFIG_USB_OHCI_HCD_DAVINCI)	+= ohci-da8xx.o
+
+obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
+obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
+obj-$(CONFIG_USB_XHCI_HCD)	+= xhci-hcd.o
+obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
+obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
+obj-$(CONFIG_USB_XHCI_HISTB)	+= xhci-histb.o
+obj-$(CONFIG_USB_XHCI_MTK)	+= xhci-mtk.o
+obj-$(CONFIG_USB_XHCI_TEGRA)	+= xhci-tegra.o
+obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
+obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
+obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
+obj-$(CONFIG_USB_R8A66597_HCD)	+= r8a66597-hcd.o
+obj-$(CONFIG_USB_HWA_HCD)	+= hwa-hc.o
+obj-$(CONFIG_USB_IMX21_HCD)	+= imx21-hcd.o
+obj-$(CONFIG_USB_FSL_USB2)	+= fsl-mph-dr-of.o
+obj-$(CONFIG_USB_EHCI_FSL)	+= fsl-mph-dr-of.o
+obj-$(CONFIG_USB_EHCI_FSL)	+= ehci-fsl.o
+obj-$(CONFIG_USB_HCD_BCMA)	+= bcma-hcd.o
+obj-$(CONFIG_USB_HCD_SSB)	+= ssb-hcd.o
+obj-$(CONFIG_USB_FOTG210_HCD)	+= fotg210-hcd.o
+obj-$(CONFIG_USB_MAX3421_HCD)	+= max3421-hcd.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
new file mode 100644
index 0000000..2400a82
--- /dev/null
+++ b/drivers/usb/host/bcma-hcd.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom specific Advanced Microcontroller Bus
+ * Broadcom USB-core driver (BCMA bus glue)
+ *
+ * Copyright 2011-2015 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2015 Felix Fietkau <nbd@openwrt.org>
+ *
+ * Based on ssb-ohci driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Derived from the USBcore related parts of Broadcom-SB
+ * Copyright 2005-2011 Broadcom Corporation
+ */
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
+MODULE_LICENSE("GPL");
+
+/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
+#define USB_BCMA_CLKCTLST_USB_CLK_REQ			0x00000100
+
+struct bcma_hcd_device {
+	struct bcma_device *core;
+	struct platform_device *ehci_dev;
+	struct platform_device *ohci_dev;
+	struct gpio_desc *gpio_desc;
+};
+
+/* Wait for a bitmask in a register to get set or cleared.
+ * timeout is in units of ten microseconds.
+ */
+static int bcma_wait_bits(struct bcma_device *dev, u16 reg, u32 bitmask,
+			  int timeout)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < timeout; i++) {
+		val = bcma_read32(dev, reg);
+		if ((val & bitmask) == bitmask)
+			return 0;
+		udelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
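+
+/*
+ * Illustrative use only (mirrors the misc-PLL poll further down): the
+ * timeout argument counts steps of 10 us, so 100 gives a ~1 ms budget.
+ *
+ *	if (bcma_wait_bits(dev, 0x1e0, 1 << 24, 100))
+ *		printk(KERN_EMERG "Failed to enable misc PLL!\n");
+ */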
+
+static void bcma_hcd_4716wa(struct bcma_device *dev)
+{
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+	/* Workaround for BCM4716 failures. */
+	if (dev->bus->chipinfo.id == 0x4716) {
+		u32 tmp;
+
+		tmp = bcma_cpu_clock(&dev->bus->drv_mips);
+		if (tmp >= 480000000)
+			tmp = 0x1846b; /* set CDR to 0x11(fast) */
+		else if (tmp == 453000000)
+			tmp = 0x1046b; /* set CDR to 0x10(slow) */
+		else
+			tmp = 0;
+
+		/* Change Shim mdio control reg to fix host not acking at
+		 * high frequencies
+		 */
+		if (tmp) {
+			bcma_write32(dev, 0x524, 0x1); /* write sel to enable */
+			udelay(500);
+
+			bcma_write32(dev, 0x524, tmp);
+			udelay(500);
+			bcma_write32(dev, 0x524, 0x4ab);
+			udelay(500);
+			bcma_read32(dev, 0x528);
+			bcma_write32(dev, 0x528, 0x80000000);
+		}
+	}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+}
+
+/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
+static void bcma_hcd_init_chip_mips(struct bcma_device *dev)
+{
+	u32 tmp;
+
+	/*
+	 * USB 2.0 special considerations:
+	 *
+	 * 1. Since the core supports both OHCI and EHCI functions, it must
+	 *    only be reset once.
+	 *
+	 * 2. In addition to the standard SI reset sequence, the Host Control
+	 *    Register must be programmed to bring the USB core and various
+	 *    phy components out of reset.
+	 */
+	if (!bcma_core_is_enabled(dev)) {
+		bcma_core_enable(dev, 0);
+		mdelay(10);
+		if (dev->id.rev >= 5) {
+			/* Enable Misc PLL */
+			tmp = bcma_read32(dev, 0x1e0);
+			tmp |= 0x100;
+			bcma_write32(dev, 0x1e0, tmp);
+			if (bcma_wait_bits(dev, 0x1e0, 1 << 24, 100))
+				printk(KERN_EMERG "Failed to enable misc PLL!\n");
+
+			/* Take out of resets */
+			bcma_write32(dev, 0x200, 0x4ff);
+			udelay(25);
+			bcma_write32(dev, 0x200, 0x6ff);
+			udelay(25);
+
+			/* Make sure digital and AFE are locked in USB PHY */
+			bcma_write32(dev, 0x524, 0x6b);
+			udelay(50);
+			tmp = bcma_read32(dev, 0x524);
+			udelay(50);
+			bcma_write32(dev, 0x524, 0xab);
+			udelay(50);
+			tmp = bcma_read32(dev, 0x524);
+			udelay(50);
+			bcma_write32(dev, 0x524, 0x2b);
+			udelay(50);
+			tmp = bcma_read32(dev, 0x524);
+			udelay(50);
+			bcma_write32(dev, 0x524, 0x10ab);
+			udelay(50);
+			tmp = bcma_read32(dev, 0x524);
+
+			if (bcma_wait_bits(dev, 0x528, 0xc000, 10000)) {
+				tmp = bcma_read32(dev, 0x528);
+				printk(KERN_EMERG
+				       "USB20H mdio_rddata 0x%08x\n", tmp);
+			}
+			bcma_write32(dev, 0x528, 0x80000000);
+			tmp = bcma_read32(dev, 0x314);
+			udelay(265);
+			bcma_write32(dev, 0x200, 0x7ff);
+			udelay(10);
+
+			/* Take USB and HSIC out of non-driving modes */
+			bcma_write32(dev, 0x510, 0);
+		} else {
+			bcma_write32(dev, 0x200, 0x7ff);
+
+			udelay(1);
+		}
+
+		bcma_hcd_4716wa(dev);
+	}
+}
+
+/**
+ * bcma_hcd_usb20_old_arm_init - Initialize old USB 2.0 controller on ARM
+ *
+ * The old USB 2.0 core is identified as BCMA_CORE_USB20_HOST and was
+ * introduced long before Northstar devices. It seems some cheaper chipsets
+ * like BCM53573 still use it.
+ * Its initialization differs between MIPS and ARM.
+ */
+static int bcma_hcd_usb20_old_arm_init(struct bcma_hcd_device *usb_dev)
+{
+	struct bcma_device *core = usb_dev->core;
+	struct device *dev = &core->dev;
+	struct bcma_device *pmu_core;
+
+	usleep_range(10000, 20000);
+	if (core->id.rev < 5)
+		return 0;
+
+	pmu_core = bcma_find_core(core->bus, BCMA_CORE_PMU);
+	if (!pmu_core) {
+		dev_err(dev, "Could not find PMU core\n");
+		return -ENOENT;
+	}
+
+	/* Take USB core out of reset */
+	bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK | BCMA_IOCTL_FGC);
+	usleep_range(100, 200);
+	bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+	usleep_range(100, 200);
+	bcma_awrite32(core, BCMA_RESET_CTL, 0);
+	usleep_range(100, 200);
+	bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK);
+	usleep_range(100, 200);
+
+	/* Enable Misc PLL */
+	bcma_write32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT |
+					  BCMA_CLKCTLST_HQCLKREQ |
+					  USB_BCMA_CLKCTLST_USB_CLK_REQ);
+	usleep_range(100, 200);
+
+	bcma_write32(core, 0x510, 0xc7f85000);
+	bcma_write32(core, 0x510, 0xc7f85003);
+	usleep_range(300, 600);
+
+	/* Program USB PHY PLL parameters */
+	bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x6);
+	bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x005360c1);
+	usleep_range(100, 200);
+	bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x7);
+	bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x0);
+	usleep_range(100, 200);
+	bcma_set32(pmu_core, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+	usleep_range(100, 200);
+
+	bcma_write32(core, 0x510, 0x7f8d007);
+	udelay(1000);
+
+	/* Take controller out of reset */
+	bcma_write32(core, 0x200, 0x4ff);
+	usleep_range(25, 50);
+	bcma_write32(core, 0x200, 0x6ff);
+	usleep_range(25, 50);
+	bcma_write32(core, 0x200, 0x7ff);
+	usleep_range(25, 50);
+
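+	/* Register the EHCI/OHCI children described under this core's DT node */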
+	of_platform_default_populate(dev->of_node, NULL, dev);
+
+	return 0;
+}
+
+static void bcma_hcd_usb20_ns_init_hc(struct bcma_device *dev)
+{
+	u32 val;
+
+	/* Set packet buffer OUT threshold */
+	val = bcma_read32(dev, 0x94);
+	val &= 0xffff;
+	val |= 0x80 << 16;
+	bcma_write32(dev, 0x94, val);
+
+	/* Enable break memory transfer */
+	val = bcma_read32(dev, 0x9c);
+	val |= 1;
+	bcma_write32(dev, 0x9c, val);
+
+	/*
+	 * Broadcom initializes PHY and then waits to ensure HC is ready to be
+	 * configured. In our case the order is reversed. We just initialized
+	 * controller and we let HCD initialize PHY, so let's wait (sleep) now.
+	 */
+	usleep_range(1000, 2000);
+}
+
+/**
+ * bcma_hcd_usb20_ns_init - Initialize Northstar USB 2.0 controller
+ */
+static int bcma_hcd_usb20_ns_init(struct bcma_hcd_device *bcma_hcd)
+{
+	struct bcma_device *core = bcma_hcd->core;
+	struct bcma_chipinfo *ci = &core->bus->chipinfo;
+	struct device *dev = &core->dev;
+
+	bcma_core_enable(core, 0);
+
+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018)
+		bcma_hcd_usb20_ns_init_hc(core);
+
+	of_platform_default_populate(dev->of_node, NULL, dev);
+
+	return 0;
+}
+
+static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
+{
+	struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
+
+	if (IS_ERR_OR_NULL(usb_dev->gpio_desc))
+		return;
+
+	gpiod_set_value(usb_dev->gpio_desc, val);
+}
+
+static const struct usb_ehci_pdata ehci_pdata = {
+};
+
+static const struct usb_ohci_pdata ohci_pdata = {
+};
+
+static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev,
+						    const char *name, u32 addr,
+						    const void *data,
+						    size_t size)
+{
+	struct platform_device *hci_dev;
+	struct resource hci_res[2];
+	int ret;
+
+	memset(hci_res, 0, sizeof(hci_res));
+
+	hci_res[0].start = addr;
+	hci_res[0].end = hci_res[0].start + 0x1000 - 1;
+	hci_res[0].flags = IORESOURCE_MEM;
+
+	hci_res[1].start = dev->irq;
+	hci_res[1].flags = IORESOURCE_IRQ;
+
+	hci_dev = platform_device_alloc(name, 0);
+	if (!hci_dev)
+		return ERR_PTR(-ENOMEM);
+
+	hci_dev->dev.parent = &dev->dev;
+	hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
+
+	ret = platform_device_add_resources(hci_dev, hci_res,
+					    ARRAY_SIZE(hci_res));
+	if (ret)
+		goto err_alloc;
+	if (data)
+		ret = platform_device_add_data(hci_dev, data, size);
+	if (ret)
+		goto err_alloc;
+	ret = platform_device_add(hci_dev);
+	if (ret)
+		goto err_alloc;
+
+	return hci_dev;
+
+err_alloc:
+	platform_device_put(hci_dev);
+	return ERR_PTR(ret);
+}
+
+static int bcma_hcd_usb20_init(struct bcma_hcd_device *usb_dev)
+{
+	struct bcma_device *dev = usb_dev->core;
+	struct bcma_chipinfo *chipinfo = &dev->bus->chipinfo;
+	u32 ohci_addr;
+	int err;
+
+	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
+		return -EOPNOTSUPP;
+
+	bcma_hcd_init_chip_mips(dev);
+
+	/* In AI chips EHCI is addrspace 0, OHCI is 1 */
+	ohci_addr = dev->addr_s[0];
+	if ((chipinfo->id == BCMA_CHIP_ID_BCM5357 ||
+	     chipinfo->id == BCMA_CHIP_ID_BCM4749)
+	    && chipinfo->rev == 0)
+		ohci_addr = 0x18009000;
+
+	usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, "ohci-platform",
+						 ohci_addr, &ohci_pdata,
+						 sizeof(ohci_pdata));
+	if (IS_ERR(usb_dev->ohci_dev))
+		return PTR_ERR(usb_dev->ohci_dev);
+
+	usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, "ehci-platform",
+						 dev->addr, &ehci_pdata,
+						 sizeof(ehci_pdata));
+	if (IS_ERR(usb_dev->ehci_dev)) {
+		err = PTR_ERR(usb_dev->ehci_dev);
+		goto err_unregister_ohci_dev;
+	}
+
+	return 0;
+
+err_unregister_ohci_dev:
+	platform_device_unregister(usb_dev->ohci_dev);
+	return err;
+}
+
+static int bcma_hcd_usb30_init(struct bcma_hcd_device *bcma_hcd)
+{
+	struct bcma_device *core = bcma_hcd->core;
+	struct device *dev = &core->dev;
+
+	bcma_core_enable(core, 0);
+
+	of_platform_default_populate(dev->of_node, NULL, dev);
+
+	return 0;
+}
+
+static int bcma_hcd_probe(struct bcma_device *core)
+{
+	int err;
+	struct bcma_hcd_device *usb_dev;
+
+	/* TODO: Probably need checks here; is the core connected? */
+
+	usb_dev = devm_kzalloc(&core->dev, sizeof(struct bcma_hcd_device),
+			       GFP_KERNEL);
+	if (!usb_dev)
+		return -ENOMEM;
+	usb_dev->core = core;
+
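+	/*
+	 * The "vcc" power GPIO is optional: on lookup failure the ERR_PTR is
+	 * kept, and bcma_hci_platform_power_gpio() skips it via
+	 * IS_ERR_OR_NULL(), so the probe continues without GPIO power control.
+	 */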
+	if (core->dev.of_node)
+		usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
+						    GPIOD_OUT_HIGH);
+
+	switch (core->id.id) {
+	case BCMA_CORE_USB20_HOST:
+		if (IS_ENABLED(CONFIG_ARM))
+			err = bcma_hcd_usb20_old_arm_init(usb_dev);
+		else if (IS_ENABLED(CONFIG_MIPS))
+			err = bcma_hcd_usb20_init(usb_dev);
+		else
+			err = -ENOTSUPP;
+		break;
+	case BCMA_CORE_NS_USB20:
+		err = bcma_hcd_usb20_ns_init(usb_dev);
+		break;
+	case BCMA_CORE_NS_USB30:
+		err = bcma_hcd_usb30_init(usb_dev);
+		break;
+	default:
+		return -ENODEV;
+	}
+	if (err)
+		return err;
+
+	bcma_set_drvdata(core, usb_dev);
+	return 0;
+}
+
+static void bcma_hcd_remove(struct bcma_device *dev)
+{
+	struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
+	struct platform_device *ohci_dev = usb_dev->ohci_dev;
+	struct platform_device *ehci_dev = usb_dev->ehci_dev;
+
+	if (ohci_dev)
+		platform_device_unregister(ohci_dev);
+	if (ehci_dev)
+		platform_device_unregister(ehci_dev);
+
+	bcma_core_disable(dev, 0);
+}
+
+static void bcma_hcd_shutdown(struct bcma_device *dev)
+{
+	bcma_hci_platform_power_gpio(dev, false);
+	bcma_core_disable(dev, 0);
+}
+
+#ifdef CONFIG_PM
+
+static int bcma_hcd_suspend(struct bcma_device *dev)
+{
+	bcma_hci_platform_power_gpio(dev, false);
+	bcma_core_disable(dev, 0);
+
+	return 0;
+}
+
+static int bcma_hcd_resume(struct bcma_device *dev)
+{
+	bcma_hci_platform_power_gpio(dev, true);
+	bcma_core_enable(dev, 0);
+
+	return 0;
+}
+
+#else /* !CONFIG_PM */
+#define bcma_hcd_suspend	NULL
+#define bcma_hcd_resume	NULL
+#endif /* CONFIG_PM */
+
+static const struct bcma_device_id bcma_hcd_table[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB20, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB30, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	{},
+};
+MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
+
+static struct bcma_driver bcma_hcd_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= bcma_hcd_table,
+	.probe		= bcma_hcd_probe,
+	.remove		= bcma_hcd_remove,
+	.shutdown	= bcma_hcd_shutdown,
+	.suspend	= bcma_hcd_suspend,
+	.resume		= bcma_hcd_resume,
+};
+
+static int __init bcma_hcd_init(void)
+{
+	return bcma_driver_register(&bcma_hcd_driver);
+}
+module_init(bcma_hcd_init);
+
+static void __exit bcma_hcd_exit(void)
+{
+	bcma_driver_unregister(&bcma_hcd_driver);
+}
+module_exit(bcma_hcd_exit);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
new file mode 100644
index 0000000..3ba140c
--- /dev/null
+++ b/drivers/usb/host/ehci-atmel.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for EHCI UHP on Atmel chips
+ *
+ *  Copyright (C) 2009 Atmel Corporation,
+ *                     Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ *  Based on various ehci-*.c drivers
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI Atmel driver"
+
+static const char hcd_name[] = "ehci-atmel";
+
+/* interface and function clocks */
+#define hcd_to_atmel_ehci_priv(h) \
+	((struct atmel_ehci_priv *)hcd_to_ehci(h)->priv)
+
+struct atmel_ehci_priv {
+	struct clk *iclk;
+	struct clk *uclk;
+	bool clocked;
+};
+
+static struct hc_driver __read_mostly ehci_atmel_hc_driver;
+
+static const struct ehci_driver_overrides ehci_atmel_drv_overrides __initconst = {
+	.extra_priv_size = sizeof(struct atmel_ehci_priv),
+};
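+
+/*
+ * A sketch of the private-data plumbing, assuming the generic EHCI core's
+ * handling of overrides: .extra_priv_size above reserves room for a
+ * struct atmel_ehci_priv at the end of struct ehci_hcd, which
+ * hcd_to_atmel_ehci_priv() then recovers, e.g.:
+ *
+ *	struct atmel_ehci_priv *priv = hcd_to_atmel_ehci_priv(hcd);
+ *	priv->clocked = false;
+ */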
+
+/*-------------------------------------------------------------------------*/
+
+static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
+{
+	if (atmel_ehci->clocked)
+		return;
+
+	clk_prepare_enable(atmel_ehci->uclk);
+	clk_prepare_enable(atmel_ehci->iclk);
+	atmel_ehci->clocked = true;
+}
+
+static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
+{
+	if (!atmel_ehci->clocked)
+		return;
+
+	clk_disable_unprepare(atmel_ehci->iclk);
+	clk_disable_unprepare(atmel_ehci->uclk);
+	atmel_ehci->clocked = false;
+}
+
+static void atmel_start_ehci(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
+	dev_dbg(&pdev->dev, "start\n");
+	atmel_start_clock(atmel_ehci);
+}
+
+static void atmel_stop_ehci(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
+	dev_dbg(&pdev->dev, "stop\n");
+	atmel_stop_clock(atmel_ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int ehci_atmel_drv_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	const struct hc_driver *driver = &ehci_atmel_hc_driver;
+	struct resource *res;
+	struct ehci_hcd *ehci;
+	struct atmel_ehci_priv *atmel_ehci;
+	int irq;
+	int retval;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_debug("Initializing Atmel-SoC USB Host Controller\n");
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		retval = -ENODEV;
+		goto fail_create_hcd;
+	}
+
+	/* Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail_create_hcd;
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail_create_hcd;
+	}
+	atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto fail_request_resource;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	atmel_ehci->iclk = devm_clk_get(&pdev->dev, "ehci_clk");
+	if (IS_ERR(atmel_ehci->iclk)) {
+		dev_err(&pdev->dev, "Error getting interface clock\n");
+		retval = -ENOENT;
+		goto fail_request_resource;
+	}
+
+	atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
+	if (IS_ERR(atmel_ehci->uclk)) {
+		dev_err(&pdev->dev, "failed to get uclk\n");
+		retval = PTR_ERR(atmel_ehci->uclk);
+		goto fail_request_resource;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+	/* registers start at offset 0x0 */
+	ehci->caps = hcd->regs;
+
+	atmel_start_ehci(pdev);
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval)
+		goto fail_add_hcd;
+	device_wakeup_enable(hcd->self.controller);
+
+	return retval;
+
+fail_add_hcd:
+	atmel_stop_ehci(pdev);
+fail_request_resource:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	dev_err(&pdev->dev, "init %s failed, %d\n",
+		dev_name(&pdev->dev), retval);
+
+	return retval;
+}
+
+static int ehci_atmel_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	atmel_stop_ehci(pdev);
+
+	return 0;
+}
+
+static int __maybe_unused ehci_atmel_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+	int ret;
+
+	ret = ehci_suspend(hcd, false);
+	if (ret)
+		return ret;
+
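+	/* The controller is now quiesced; gate its clocks until resume. */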
+	atmel_stop_clock(atmel_ehci);
+	return 0;
+}
+
+static int __maybe_unused ehci_atmel_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
+
+	atmel_start_clock(atmel_ehci);
+	ehci_resume(hcd, false);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ehci_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g45-ehci" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
+#endif
+
+static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend,
+					ehci_atmel_drv_resume);
+
+static struct platform_driver ehci_atmel_driver = {
+	.probe		= ehci_atmel_drv_probe,
+	.remove		= ehci_atmel_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "atmel-ehci",
+		.pm	= &ehci_atmel_pm_ops,
+		.of_match_table	= of_match_ptr(atmel_ehci_dt_ids),
+	},
+};
+
+static int __init ehci_atmel_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	ehci_init_driver(&ehci_atmel_hc_driver, &ehci_atmel_drv_overrides);
+	return platform_driver_register(&ehci_atmel_driver);
+}
+module_init(ehci_atmel_init);
+
+static void __exit ehci_atmel_cleanup(void)
+{
+	platform_driver_unregister(&ehci_atmel_driver);
+}
+module_exit(ehci_atmel_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:atmel-ehci");
+MODULE_AUTHOR("Nicolas Ferre");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
new file mode 100644
index 0000000..7619cfb
--- /dev/null
+++ b/drivers/usb/host/ehci-dbg.c
@@ -0,0 +1,1084 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2001-2002 by David Brownell
+ */
+
+/* this file is part of ehci-hcd.c */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+
+/*
+ * check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params(struct ehci_hcd *ehci, char *label)
+{
+	u32	params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	ehci_dbg(ehci,
+		"%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
+		label, params,
+		HCS_DEBUG_PORT(params),
+		HCS_INDICATOR(params) ? " ind" : "",
+		HCS_N_CC(params),
+		HCS_N_PCC(params),
+		HCS_PORTROUTED(params) ? "" : " ordered",
+		HCS_PPC(params) ? "" : " !ppc",
+		HCS_N_PORTS(params));
+	/* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */
+	if (HCS_PORTROUTED(params)) {
+		int i;
+		char buf[46], tmp[7], byte;
+
+		buf[0] = 0;
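+		/* Two 4-bit routes per portroute byte: even-numbered ports
+		 * in the high nibble, odd-numbered ports in the low nibble.
+		 */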
+		for (i = 0; i < HCS_N_PORTS(params); i++) {
+			/* FIXME MIPS won't readb() ... */
+			byte = readb(&ehci->caps->portroute[(i >> 1)]);
+			sprintf(tmp, "%d ",
+				(i & 0x1) ? byte & 0xf : (byte >> 4) & 0xf);
+			strcat(buf, tmp);
+		}
+		ehci_dbg(ehci, "%s portroute %s\n", label, buf);
+	}
+}
+
+/*
+ * check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params(struct ehci_hcd *ehci, char *label)
+{
+	u32	params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
+	if (HCC_ISOC_CACHE(params)) {
+		ehci_dbg(ehci,
+			"%s hcc_params %04x caching frame %s%s%s\n",
+			label, params,
+			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+			HCC_CANPARK(params) ? " park" : "",
+			HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+	} else {
+		ehci_dbg(ehci,
+			"%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
+			label,
+			params,
+			HCC_ISOC_THRES(params),
+			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+			HCC_CANPARK(params) ? " park" : "",
+			HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
+			HCC_LPM(params) ? " LPM" : "",
+			HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
+			HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
+			HCC_32FRAME_PERIODIC_LIST(params) ?
+				" 32 periodic list" : "");
+	}
+}
+
+static void __maybe_unused
+dbg_qtd(const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
+{
+	ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+		hc32_to_cpup(ehci, &qtd->hw_next),
+		hc32_to_cpup(ehci, &qtd->hw_alt_next),
+		hc32_to_cpup(ehci, &qtd->hw_token),
+		hc32_to_cpup(ehci, &qtd->hw_buf[0]));
+	if (qtd->hw_buf[1])
+		ehci_dbg(ehci, "  p1=%08x p2=%08x p3=%08x p4=%08x\n",
+			hc32_to_cpup(ehci, &qtd->hw_buf[1]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[2]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[3]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh(const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qh_hw *hw = qh->hw;
+
+	ehci_dbg(ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
+		qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+	dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd(const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
+{
+	ehci_dbg(ehci, "%s [%d] itd %p, next %08x, urb %p\n",
+		label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
+		itd->urb);
+	ehci_dbg(ehci,
+		"  trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		hc32_to_cpu(ehci, itd->hw_transaction[0]),
+		hc32_to_cpu(ehci, itd->hw_transaction[1]),
+		hc32_to_cpu(ehci, itd->hw_transaction[2]),
+		hc32_to_cpu(ehci, itd->hw_transaction[3]),
+		hc32_to_cpu(ehci, itd->hw_transaction[4]),
+		hc32_to_cpu(ehci, itd->hw_transaction[5]),
+		hc32_to_cpu(ehci, itd->hw_transaction[6]),
+		hc32_to_cpu(ehci, itd->hw_transaction[7]));
+	ehci_dbg(ehci,
+		"  buf:   %08x %08x %08x %08x %08x %08x %08x\n",
+		hc32_to_cpu(ehci, itd->hw_bufp[0]),
+		hc32_to_cpu(ehci, itd->hw_bufp[1]),
+		hc32_to_cpu(ehci, itd->hw_bufp[2]),
+		hc32_to_cpu(ehci, itd->hw_bufp[3]),
+		hc32_to_cpu(ehci, itd->hw_bufp[4]),
+		hc32_to_cpu(ehci, itd->hw_bufp[5]),
+		hc32_to_cpu(ehci, itd->hw_bufp[6]));
+	ehci_dbg(ehci, "  index: %d %d %d %d %d %d %d %d\n",
+		itd->index[0], itd->index[1], itd->index[2],
+		itd->index[3], itd->index[4], itd->index[5],
+		itd->index[6], itd->index[7]);
+}
+
+static void __maybe_unused
+dbg_sitd(const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+{
+	ehci_dbg(ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
+		label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
+		sitd->urb);
+	ehci_dbg(ehci,
+		"  addr %08x sched %04x result %08x buf %08x %08x\n",
+		hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
+		hc32_to_cpu(ehci, sitd->hw_uframe),
+		hc32_to_cpu(ehci, sitd->hw_results),
+		hc32_to_cpu(ehci, sitd->hw_buf[0]),
+		hc32_to_cpu(ehci, sitd->hw_buf[1]));
+}
+
+static int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+	return scnprintf(buf, len,
+		"%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", status,
+		(status & STS_PPCE_MASK) ? " PPCE" : "",
+		(status & STS_ASS) ? " Async" : "",
+		(status & STS_PSS) ? " Periodic" : "",
+		(status & STS_RECL) ? " Recl" : "",
+		(status & STS_HALT) ? " Halt" : "",
+		(status & STS_IAA) ? " IAA" : "",
+		(status & STS_FATAL) ? " FATAL" : "",
+		(status & STS_FLR) ? " FLR" : "",
+		(status & STS_PCD) ? " PCD" : "",
+		(status & STS_ERR) ? " ERR" : "",
+		(status & STS_INT) ? " INT" : "");
+}
+
+static int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+	return scnprintf(buf, len,
+		"%s%sintrenable %02x%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", enable,
+		(enable & STS_PPCE_MASK) ? " PPCE" : "",
+		(enable & STS_IAA) ? " IAA" : "",
+		(enable & STS_FATAL) ? " FATAL" : "",
+		(enable & STS_FLR) ? " FLR" : "",
+		(enable & STS_PCD) ? " PCD" : "",
+		(enable & STS_ERR) ? " ERR" : "",
+		(enable & STS_INT) ? " INT" : "");
+}
+
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+	return scnprintf(buf, len,
+		"%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
+		"period=%s%s %s",
+		label, label[0] ? " " : "", command,
+		(command & CMD_HIRD) ? " HIRD" : "",
+		(command & CMD_PPCEE) ? " PPCEE" : "",
+		(command & CMD_FSP) ? " FSP" : "",
+		(command & CMD_ASPE) ? " ASPE" : "",
+		(command & CMD_PSPE) ? " PSPE" : "",
+		(command & CMD_PARK) ? " park" : "(park)",
+		CMD_PARK_CNT(command),
+		(command >> 16) & 0x3f,
+		(command & CMD_LRESET) ? " LReset" : "",
+		(command & CMD_IAAD) ? " IAAD" : "",
+		(command & CMD_ASE) ? " Async" : "",
+		(command & CMD_PSE) ? " Periodic" : "",
+		fls_strings[(command >> 2) & 0x3],
+		(command & CMD_RESET) ? " Reset" : "",
+		(command & CMD_RUN) ? "RUN" : "HALT");
+}
+
+static int
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+	char	*sig;
+
+	/* signaling state */
+	switch (status & (3 << 10)) {
+	case 0 << 10:
+		sig = "se0";
+		break;
+	case 1 << 10: /* low speed */
+		sig = "k";
+		break;
+	case 2 << 10:
+		sig = "j";
+		break;
+	default:
+		sig = "?";
+		break;
+	}
+
+	return scnprintf(buf, len,
+		"%s%sport:%d status %06x %d %s%s%s%s%s%s "
+		"sig=%s%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", port, status,
+		status >> 25, /*device address */
+		(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ACK ?
+						" ACK" : "",
+		(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_NYET ?
+						" NYET" : "",
+		(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_STALL ?
+						" STALL" : "",
+		(status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ERR ?
+						" ERR" : "",
+		(status & PORT_POWER) ? " POWER" : "",
+		(status & PORT_OWNER) ? " OWNER" : "",
+		sig,
+		(status & PORT_LPM) ? " LPM" : "",
+		(status & PORT_RESET) ? " RESET" : "",
+		(status & PORT_SUSPEND) ? " SUSPEND" : "",
+		(status & PORT_RESUME) ? " RESUME" : "",
+		(status & PORT_OCC) ? " OCC" : "",
+		(status & PORT_OC) ? " OC" : "",
+		(status & PORT_PEC) ? " PEC" : "",
+		(status & PORT_PE) ? " PE" : "",
+		(status & PORT_CSC) ? " CSC" : "",
+		(status & PORT_CONNECT) ? " CONNECT" : "");
+}
+
+static inline void
+dbg_status(struct ehci_hcd *ehci, const char *label, u32 status)
+{
+	char buf[80];
+
+	dbg_status_buf(buf, sizeof(buf), label, status);
+	ehci_dbg(ehci, "%s\n", buf);
+}
+
+static inline void
+dbg_cmd(struct ehci_hcd *ehci, const char *label, u32 command)
+{
+	char buf[80];
+
+	dbg_command_buf(buf, sizeof(buf), label, command);
+	ehci_dbg(ehci, "%s\n", buf);
+}
+
+static inline void
+dbg_port(struct ehci_hcd *ehci, const char *label, int port, u32 status)
+{
+	char buf[80];
+
+	dbg_port_buf(buf, sizeof(buf), label, port, status);
+	ehci_dbg(ehci, "%s\n", buf);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_bandwidth_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_async_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations debug_bandwidth_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_bandwidth_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations debug_periodic_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_periodic_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations debug_registers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_registers_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static struct dentry *ehci_debug_root;
+
+struct debug_buffer {
+	ssize_t (*fill_func)(struct debug_buffer *);	/* fill method */
+	struct usb_bus *bus;
+	struct mutex mutex;	/* protect filling of buffer */
+	size_t count;		/* number of characters filled into buffer */
+	char *output_buf;
+	size_t alloc_size;
+};
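+
+/*
+ * Lifecycle: open() allocates a struct debug_buffer, the first read()
+ * runs fill_func once to snapshot the controller state into output_buf,
+ * later reads serve that cached snapshot, and release() frees it all.
+ * Re-open the file to take a fresh snapshot.
+ */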
+
+static inline char speed_char(u32 info1)
+{
+	switch (info1 & (3 << 12)) {
+	case QH_FULL_SPEED:
+		return 'f';
+	case QH_LOW_SPEED:
+		return 'l';
+	case QH_HIGH_SPEED:
+		return 'h';
+	default:
+		return '?';
+	}
+}
+
+static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
+{
+	__u32 v = hc32_to_cpu(ehci, token);
+
+	if (v & QTD_STS_ACTIVE)
+		return '*';
+	if (v & QTD_STS_HALT)
+		return '-';
+	if (!IS_SHORT_READ(v))
+		return ' ';
+	/* tries to advance through hw_alt_next */
+	return '/';
+}
+
+static void qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh,
+		char **nextp, unsigned *sizep)
+{
+	u32			scratch;
+	u32			hw_curr;
+	struct list_head	*entry;
+	struct ehci_qtd		*td;
+	unsigned		temp;
+	unsigned		size = *sizep;
+	char			*next = *nextp;
+	char			mark;
+	__hc32			list_end = EHCI_LIST_END(ehci);
+	struct ehci_qh_hw	*hw = qh->hw;
+
+	if (hw->hw_qtd_next == list_end)	/* NEC does this */
+		mark = '@';
+	else
+		mark = token_mark(ehci, hw->hw_token);
+	if (mark == '/') {	/* qh_alt_next controls qh advance? */
+		if ((hw->hw_alt_next & QTD_MASK(ehci))
+				== ehci->async->hw->hw_alt_next)
+			mark = '#';	/* blocked */
+		else if (hw->hw_alt_next == list_end)
+			mark = '.';	/* use hw_qtd_next */
+		/* else alt_next points to some other qtd */
+	}
+	scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+	hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
+	temp = scnprintf(next, size,
+			"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)"
+			" [cur %08x next %08x buf[0] %08x]",
+			qh, scratch & 0x007f,
+			speed_char(scratch),
+			(scratch >> 8) & 0x000f,
+			scratch, hc32_to_cpup(ehci, &hw->hw_info2),
+			hc32_to_cpup(ehci, &hw->hw_token), mark,
+			(cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token)
+				? "data1" : "data0",
+			(hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f,
+			hc32_to_cpup(ehci, &hw->hw_current),
+			hc32_to_cpup(ehci, &hw->hw_qtd_next),
+			hc32_to_cpup(ehci, &hw->hw_buf[0]));
+	size -= temp;
+	next += temp;
+
+	/* hc may be modifying the list as we read it ... */
+	list_for_each(entry, &qh->qtd_list) {
+		char *type;
+
+		td = list_entry(entry, struct ehci_qtd, qtd_list);
+		scratch = hc32_to_cpup(ehci, &td->hw_token);
+		mark = ' ';
+		if (hw_curr == td->qtd_dma) {
+			mark = '*';
+		} else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) {
+			mark = '+';
+		} else if (QTD_LENGTH(scratch)) {
+			if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
+				mark = '#';
+			else if (td->hw_alt_next != list_end)
+				mark = '/';
+		}
+		switch ((scratch >> 8) & 0x03) {
+		case 0:
+			type = "out";
+			break;
+		case 1:
+			type = "in";
+			break;
+		case 2:
+			type = "setup";
+			break;
+		default:
+			type = "?";
+			break;
+		}
+		temp = scnprintf(next, size,
+				"\n\t%p%c%s len=%d %08x urb %p"
+				" [td %08x buf[0] %08x]",
+				td, mark, type,
+				(scratch >> 16) & 0x7fff,
+				scratch,
+				td->urb,
+				(u32) td->qtd_dma,
+				hc32_to_cpup(ehci, &td->hw_buf[0]));
+		size -= temp;
+		next += temp;
+		if (temp == size)
+			goto done;
+	}
+
+	temp = scnprintf(next, size, "\n");
+	size -= temp;
+	next += temp;
+
+done:
+	*sizep = size;
+	*nextp = next;
+}
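+
+/*
+ * Illustrative only (all values below are made up): a QH line emitted by
+ * qh_lines() looks like, on one line,
+ *
+ *	qh/c30f8000 dev2 hs ep1 00082102 40002c00 (00d08c80. data0 nak4)
+ *		[cur 00000000 next c30f80a0 buf[0] c2f00000]
+ *
+ * The mark after the token ('*' active, '-' halted, '.' advances via
+ * hw_qtd_next, '#' blocked on the async head's dummy, '/' advances via
+ * hw_alt_next, '@' empty next pointer) comes from token_mark() and the
+ * checks above.
+ */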
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd		*hcd;
+	struct ehci_hcd		*ehci;
+	unsigned long		flags;
+	unsigned		temp, size;
+	char			*next;
+	struct ehci_qh		*qh;
+
+	hcd = bus_to_hcd(buf->bus);
+	ehci = hcd_to_ehci(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	*next = 0;
+
+	/*
+	 * dumps a snapshot of the async schedule.
+	 * usually empty except for long-term bulk reads, or head.
+	 * one QH per line, and TDs we know about
+	 */
+	spin_lock_irqsave(&ehci->lock, flags);
+	for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
+		qh_lines(ehci, qh, &next, &size);
+	if (!list_empty(&ehci->async_unlink) && size > 0) {
+		temp = scnprintf(next, size, "\nunlink =\n");
+		size -= temp;
+		next += temp;
+
+		list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
+			if (size <= 0)
+				break;
+			qh_lines(ehci, qh, &next, &size);
+		}
+	}
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	return strlen(buf->output_buf);
+}
+
+static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
+{
+	struct ehci_hcd		*ehci;
+	struct ehci_tt		*tt;
+	struct ehci_per_sched	*ps;
+	unsigned		temp, size;
+	char			*next;
+	unsigned		i;
+	u8			*bw;
+	u16			*bf;
+	u8			budget[EHCI_BANDWIDTH_SIZE];
+
+	ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	*next = 0;
+
+	spin_lock_irq(&ehci->lock);
+
+	/* Dump the HS bandwidth table */
+	temp = scnprintf(next, size,
+			"HS bandwidth allocation (us per microframe)\n");
+	size -= temp;
+	next += temp;
+	for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+		bw = &ehci->bandwidth[i];
+		temp = scnprintf(next, size,
+				"%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+				i, bw[0], bw[1], bw[2], bw[3],
+					bw[4], bw[5], bw[6], bw[7]);
+		size -= temp;
+		next += temp;
+	}
+
+	/* Dump all the FS/LS tables */
+	list_for_each_entry(tt, &ehci->tt_list, tt_list) {
+		temp = scnprintf(next, size,
+				"\nTT %s port %d  FS/LS bandwidth allocation (us per frame)\n",
+				dev_name(&tt->usb_tt->hub->dev),
+				tt->tt_port + !!tt->usb_tt->multi);
+		size -= temp;
+		next += temp;
+
+		bf = tt->bandwidth;
+		temp = scnprintf(next, size,
+				"  %5u%5u%5u%5u%5u%5u%5u%5u\n",
+				bf[0], bf[1], bf[2], bf[3],
+					bf[4], bf[5], bf[6], bf[7]);
+		size -= temp;
+		next += temp;
+
+		temp = scnprintf(next, size,
+				"FS/LS budget (us per microframe)\n");
+		size -= temp;
+		next += temp;
+		compute_tt_budget(budget, tt);
+		for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+			bw = &budget[i];
+			temp = scnprintf(next, size,
+					"%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+					i, bw[0], bw[1], bw[2], bw[3],
+						bw[4], bw[5], bw[6], bw[7]);
+			size -= temp;
+			next += temp;
+		}
+		list_for_each_entry(ps, &tt->ps_list, ps_list) {
+			temp = scnprintf(next, size,
+					"%s ep %02x:  %4u @ %2u.%u+%u mask %04x\n",
+					dev_name(&ps->udev->dev),
+					ps->ep->desc.bEndpointAddress,
+					ps->tt_usecs,
+					ps->bw_phase, ps->phase_uf,
+					ps->bw_period, ps->cs_mask);
+			size -= temp;
+			next += temp;
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+
+	return next - buf->output_buf;
+}
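+
+/*
+ * Illustrative only (the numbers are made up): the bandwidth dump starts
+ * with the HS table, eight microframes per row,
+ *
+ *	HS bandwidth allocation (us per microframe)
+ *	 0:   30  30   0   0  30  30   0   0
+ *	 8:    0   0   0   0   0   0   0   0
+ *	...
+ *
+ * followed by one FS/LS allocation and budget table per transaction
+ * translator on the bus.
+ */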
+
+static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci,
+		struct ehci_qh_hw *hw, struct ehci_qh *qh, unsigned size)
+{
+	u32			scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+	struct ehci_qtd		*qtd;
+	char			*type = "";
+	unsigned		temp = 0;
+
+	/* count tds, get ep direction */
+	list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+		temp++;
+		switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8)	& 0x03) {
+		case 0:
+			type = "out";
+			continue;
+		case 1:
+			type = "in";
+			continue;
+		}
+	}
+
+	return scnprintf(buf, size, " (%c%d ep%d%s [%d/%d] q%d p%d)",
+			speed_char(scratch), scratch & 0x007f,
+			(scratch >> 8) & 0x000f, type, qh->ps.usecs,
+			qh->ps.c_usecs, temp, 0x7ff & (scratch >> 16));
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd		*hcd;
+	struct ehci_hcd		*ehci;
+	unsigned long		flags;
+	union ehci_shadow	p, *seen;
+	unsigned		temp, size, seen_count;
+	char			*next;
+	unsigned		i;
+	__hc32			tag;
+
+	seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
+	if (!seen)
+		return 0;
+	seen_count = 0;
+
+	hcd = bus_to_hcd(buf->bus);
+	ehci = hcd_to_ehci(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	temp = scnprintf(next, size, "size = %d\n", ehci->periodic_size);
+	size -= temp;
+	next += temp;
+
+	/*
+	 * dump a snapshot of the periodic schedule.
+	 * iso changes, interrupt usually doesn't.
+	 */
+	spin_lock_irqsave(&ehci->lock, flags);
+	for (i = 0; i < ehci->periodic_size; i++) {
+		p = ehci->pshadow[i];
+		if (likely(!p.ptr))
+			continue;
+		tag = Q_NEXT_TYPE(ehci, ehci->periodic[i]);
+
+		temp = scnprintf(next, size, "%4d: ", i);
+		size -= temp;
+		next += temp;
+
+		do {
+			struct ehci_qh_hw *hw;
+
+			switch (hc32_to_cpu(ehci, tag)) {
+			case Q_TYPE_QH:
+				hw = p.qh->hw;
+				temp = scnprintf(next, size, " qh%d-%04x/%p",
+						p.qh->ps.period,
+						hc32_to_cpup(ehci,
+							&hw->hw_info2)
+							/* uframe masks */
+							& (QH_CMASK | QH_SMASK),
+						p.qh);
+				size -= temp;
+				next += temp;
+				/* don't repeat what follows this qh */
+				for (temp = 0; temp < seen_count; temp++) {
+					if (seen[temp].ptr != p.ptr)
+						continue;
+					if (p.qh->qh_next.ptr) {
+						temp = scnprintf(next, size,
+							" ...");
+						size -= temp;
+						next += temp;
+					}
+					break;
+				}
+				/* show more info the first time around */
+				if (temp == seen_count) {
+					temp = output_buf_tds_dir(next, ehci,
+						hw, p.qh, size);
+
+					if (seen_count < DBG_SCHED_LIMIT)
+						seen[seen_count++].qh = p.qh;
+				} else {
+					temp = 0;
+				}
+				tag = Q_NEXT_TYPE(ehci, hw->hw_next);
+				p = p.qh->qh_next;
+				break;
+			case Q_TYPE_FSTN:
+				temp = scnprintf(next, size,
+					" fstn-%8x/%p", p.fstn->hw_prev,
+					p.fstn);
+				tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
+				p = p.fstn->fstn_next;
+				break;
+			case Q_TYPE_ITD:
+				temp = scnprintf(next, size,
+					" itd/%p", p.itd);
+				tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
+				p = p.itd->itd_next;
+				break;
+			case Q_TYPE_SITD:
+				temp = scnprintf(next, size,
+					" sitd%d-%04x/%p",
+					p.sitd->stream->ps.period,
+					hc32_to_cpup(ehci, &p.sitd->hw_uframe)
+						& 0x0000ffff,
+					p.sitd);
+				tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next);
+				p = p.sitd->sitd_next;
+				break;
+			}
+			size -= temp;
+			next += temp;
+		} while (p.ptr);
+
+		temp = scnprintf(next, size, "\n");
+		size -= temp;
+		next += temp;
+	}
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	kfree(seen);
+
+	return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
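+
+/*
+ * Illustrative only (pointer and mask values are made up): one frame-list
+ * slot in the periodic dump might read
+ *
+ *	  12:  qh8-0001/c30f8000 (h2 ep1in [1/0] q8 p64)
+ *
+ * i.e. an interrupt QH with period 8 and S-mask 0001, followed by the
+ * per-QH summary built by output_buf_tds_dir() above.
+ */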
+
+static const char *rh_state_string(struct ehci_hcd *ehci)
+{
+	switch (ehci->rh_state) {
+	case EHCI_RH_HALTED:
+		return "halted";
+	case EHCI_RH_SUSPENDED:
+		return "suspended";
+	case EHCI_RH_RUNNING:
+		return "running";
+	case EHCI_RH_STOPPING:
+		return "stopping";
+	}
+	return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd		*hcd;
+	struct ehci_hcd		*ehci;
+	unsigned long		flags;
+	unsigned		temp, size, i;
+	char			*next, scratch[80];
+	static char		fmt[] = "%*s\n";
+	static char		label[] = "";
+
+	hcd = bus_to_hcd(buf->bus);
+	ehci = hcd_to_ehci(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		temp = scnprintf(next, size,
+			"bus %s, device %s\n"
+			"%s\n"
+			"SUSPENDED (no register access)\n",
+			hcd->self.controller->bus->name,
+			dev_name(hcd->self.controller),
+			hcd->product_desc);
+		size -= temp;
+		goto done;
+	}
+
+	/* Capability Registers */
+	i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+	temp = scnprintf(next, size,
+		"bus %s, device %s\n"
+		"%s\n"
+		"EHCI %x.%02x, rh state %s\n",
+		hcd->self.controller->bus->name,
+		dev_name(hcd->self.controller),
+		hcd->product_desc,
+		i >> 8, i & 0x0ff, rh_state_string(ehci));
+	size -= temp;
+	next += temp;
+
+#ifdef	CONFIG_USB_PCI
+	/* EHCI 0.96 and later may have "extended capabilities" */
+	if (dev_is_pci(hcd->self.controller)) {
+		struct pci_dev	*pdev;
+		u32		offset, cap, cap2;
+		unsigned	count = 256 / 4;
+
+		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
+		offset = HCC_EXT_CAPS(ehci_readl(ehci,
+				&ehci->caps->hcc_params));
+		while (offset && count--) {
+			pci_read_config_dword(pdev, offset, &cap);
+			switch (cap & 0xff) {
+			case 1:
+				temp = scnprintf(next, size,
+					"ownership %08x%s%s\n", cap,
+					(cap & (1 << 24)) ? " linux" : "",
+					(cap & (1 << 16)) ? " firmware" : "");
+				size -= temp;
+				next += temp;
+
+				offset += 4;
+				pci_read_config_dword(pdev, offset, &cap2);
+				temp = scnprintf(next, size,
+					"SMI sts/enable 0x%08x\n", cap2);
+				size -= temp;
+				next += temp;
+				break;
+			case 0:		/* illegal reserved capability */
+				cap = 0;
+				/* fall through */
+			default:		/* unknown */
+				break;
+			}
+			offset = (cap >> 8) & 0xff;
+		}
+	}
+#endif
+
+	/* FIXME interpret both types of params */
+	i = ehci_readl(ehci, &ehci->caps->hcs_params);
+	temp = scnprintf(next, size, "structural params 0x%08x\n", i);
+	size -= temp;
+	next += temp;
+
+	i = ehci_readl(ehci, &ehci->caps->hcc_params);
+	temp = scnprintf(next, size, "capability params 0x%08x\n", i);
+	size -= temp;
+	next += temp;
+
+	/* Operational Registers */
+	temp = dbg_status_buf(scratch, sizeof(scratch), label,
+			ehci_readl(ehci, &ehci->regs->status));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = dbg_command_buf(scratch, sizeof(scratch), label,
+			ehci_readl(ehci, &ehci->regs->command));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = dbg_intr_buf(scratch, sizeof(scratch), label,
+			ehci_readl(ehci, &ehci->regs->intr_enable));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = scnprintf(next, size, "uframe %04x\n",
+			ehci_read_frame_index(ehci));
+	size -= temp;
+	next += temp;
+
+	for (i = 1; i <= HCS_N_PORTS(ehci->hcs_params); i++) {
+		temp = dbg_port_buf(scratch, sizeof(scratch), label, i,
+				ehci_readl(ehci,
+					&ehci->regs->port_status[i - 1]));
+		temp = scnprintf(next, size, fmt, temp, scratch);
+		size -= temp;
+		next += temp;
+		if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
+			temp = scnprintf(next, size,
+					"    debug control %08x\n",
+					ehci_readl(ehci,
+						&ehci->debug->control));
+			size -= temp;
+			next += temp;
+		}
+	}
+
+	if (!list_empty(&ehci->async_unlink)) {
+		temp = scnprintf(next, size, "async unlink qh %p\n",
+				list_first_entry(&ehci->async_unlink,
+						struct ehci_qh, unlink_node));
+		size -= temp;
+		next += temp;
+	}
+
+#ifdef EHCI_STATS
+	temp = scnprintf(next, size,
+		"irq normal %ld err %ld iaa %ld (lost %ld)\n",
+		ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
+		ehci->stats.lost_iaa);
+	size -= temp;
+	next += temp;
+
+	temp = scnprintf(next, size, "complete %ld unlink %ld\n",
+		ehci->stats.complete, ehci->stats.unlink);
+	size -= temp;
+	next += temp;
+#endif
+
+done:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+		ssize_t (*fill_func)(struct debug_buffer *))
+{
+	struct debug_buffer *buf;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+
+	if (buf) {
+		buf->bus = bus;
+		buf->fill_func = fill_func;
+		mutex_init(&buf->mutex);
+		buf->alloc_size = PAGE_SIZE;
+	}
+
+	return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+	int ret = 0;
+
+	if (!buf->output_buf)
+		buf->output_buf = vmalloc(buf->alloc_size);
+
+	if (!buf->output_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = buf->fill_func(buf);
+
+	if (ret >= 0) {
+		buf->count = ret;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+		size_t len, loff_t *offset)
+{
+	struct debug_buffer *buf = file->private_data;
+	int ret = 0;
+
+	mutex_lock(&buf->mutex);
+	if (buf->count == 0) {
+		ret = fill_buffer(buf);
+		if (ret != 0) {
+			mutex_unlock(&buf->mutex);
+			goto out;
+		}
+	}
+	mutex_unlock(&buf->mutex);
+
+	ret = simple_read_from_buffer(user_buf, len, offset,
+				      buf->output_buf, buf->count);
+
+out:
+	return ret;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *buf = file->private_data;
+
+	if (buf) {
+		vfree(buf->output_buf);
+		kfree(buf);
+	}
+
+	return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_bandwidth_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private,
+			fill_bandwidth_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *buf;
+
+	buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+	if (!buf)
+		return -ENOMEM;
+
+	buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
+	file->private_data = buf;
+	return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private,
+					  fill_registers_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct ehci_hcd *ehci)
+{
+	struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
+
+	ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root);
+
+	debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus,
+			    &debug_async_fops);
+	debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
+			    &debug_bandwidth_fops);
+	debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
+			    &debug_periodic_fops);
+	debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus,
+			    &debug_registers_fops);
+}
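+
+/*
+ * With debugfs mounted (usually at /sys/kernel/debug), the files created
+ * above appear in a per-bus directory below ehci_debug_root, e.g. (bus
+ * name illustrative):
+ *
+ *	# cat /sys/kernel/debug/usb/ehci/usb1/registers
+ */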
+
+static inline void remove_debug_files(struct ehci_hcd *ehci)
+{
+	debugfs_remove_recursive(ehci->debug_dir);
+}
+
+#else /* CONFIG_DYNAMIC_DEBUG */
+
+static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) { }
+static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) { }
+
+static inline void __maybe_unused dbg_qh(const char *label,
+		struct ehci_hcd *ehci, struct ehci_qh *qh) { }
+
+static inline int __maybe_unused dbg_status_buf(const char *buf,
+		unsigned int len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __maybe_unused dbg_command_buf(const char *buf,
+		unsigned int len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __maybe_unused dbg_intr_buf(const char *buf,
+		unsigned int len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __maybe_unused dbg_port_buf(char *buf,
+		unsigned int len, const char *label, int port, u32 status)
+{ return 0; }
+
+static inline void dbg_status(struct ehci_hcd *ehci, const char *label,
+		u32 status) { }
+static inline void dbg_cmd(struct ehci_hcd *ehci, const char *label,
+		u32 command) { }
+static inline void dbg_port(struct ehci_hcd *ehci, const char *label,
+		int port, u32 status) { }
+
+static inline void create_debug_files(struct ehci_hcd *bus) { }
+static inline void remove_debug_files(struct ehci_hcd *bus) { }
+
+#endif /* CONFIG_DYNAMIC_DEBUG */
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
new file mode 100644
index 0000000..8e3bab1
--- /dev/null
+++ b/drivers/usb/host/ehci-exynos.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SAMSUNG EXYNOS USB HOST EHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI EXYNOS driver"
+
+#define EHCI_INSNREG00(base)			(base + 0x90)
+#define EHCI_INSNREG00_ENA_INCR16		(0x1 << 25)
+#define EHCI_INSNREG00_ENA_INCR8		(0x1 << 24)
+#define EHCI_INSNREG00_ENA_INCR4		(0x1 << 23)
+#define EHCI_INSNREG00_ENA_INCRX_ALIGN		(0x1 << 22)
+#define EHCI_INSNREG00_ENABLE_DMA_BURST	\
+	(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 |	\
+	 EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
+
+static const char hcd_name[] = "ehci-exynos";
+static struct hc_driver __read_mostly exynos_ehci_hc_driver;
+
+#define PHY_NUMBER 3
+
+struct exynos_ehci_hcd {
+	struct clk *clk;
+	struct phy *phy[PHY_NUMBER];
+};
+
+#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
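+
+/*
+ * The private data lives in the HCD's extra space; see the
+ * .extra_priv_size = sizeof(struct exynos_ehci_hcd) override at the
+ * bottom of this file.
+ */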
+
+static int exynos_ehci_get_phy(struct device *dev,
+				struct exynos_ehci_hcd *exynos_ehci)
+{
+	struct device_node *child;
+	struct phy *phy;
+	u32 phy_number;
+	int ret;
+
+	/* Get PHYs for the controller */
+	for_each_available_child_of_node(dev->of_node, child) {
+		ret = of_property_read_u32(child, "reg", &phy_number);
+		if (ret) {
+			dev_err(dev, "Failed to parse device tree\n");
+			of_node_put(child);
+			return ret;
+		}
+
+		if (phy_number >= PHY_NUMBER) {
+			dev_err(dev, "Invalid number of PHYs\n");
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		phy = devm_of_phy_get(dev, child, NULL);
+		exynos_ehci->phy[phy_number] = phy;
+		if (IS_ERR(phy)) {
+			ret = PTR_ERR(phy);
+			if (ret == -EPROBE_DEFER) {
+				of_node_put(child);
+				return ret;
+			} else if (ret != -ENOSYS && ret != -ENODEV) {
+				dev_err(dev,
+					"Error retrieving usb2 phy: %d\n", ret);
+				of_node_put(child);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
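+
+/*
+ * Sketch of the device-tree layout this parser expects, one child node
+ * per PHY (see the samsung,exynos4210-ehci binding for the authoritative
+ * form; the node names and phandles here are illustrative):
+ *
+ *	ehci@12110000 {
+ *		compatible = "samsung,exynos4210-ehci";
+ *		samsung,vbus-gpio = <&gpx3 5 1>;
+ *		port@0 {
+ *			reg = <0>;
+ *			phys = <&usb2phy 1>;
+ *		};
+ *	};
+ */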
+
+static int exynos_ehci_phy_enable(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+	int i;
+	int ret = 0;
+
+	for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+		if (!IS_ERR(exynos_ehci->phy[i]))
+			ret = phy_power_on(exynos_ehci->phy[i]);
+	if (ret)
+		for (i--; i >= 0; i--)
+			if (!IS_ERR(exynos_ehci->phy[i]))
+				phy_power_off(exynos_ehci->phy[i]);
+
+	return ret;
+}
+
+static void exynos_ehci_phy_disable(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+	int i;
+
+	for (i = 0; i < PHY_NUMBER; i++)
+		if (!IS_ERR(exynos_ehci->phy[i]))
+			phy_power_off(exynos_ehci->phy[i]);
+}
+
+static void exynos_setup_vbus_gpio(struct device *dev)
+{
+	int err;
+	int gpio;
+
+	if (!dev->of_node)
+		return;
+
+	gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
+	if (!gpio_is_valid(gpio))
+		return;
+
+	err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
+				    "ehci_vbus_gpio");
+	if (err)
+		dev_err(dev, "can't request ehci vbus gpio %d", gpio);
+}
+
+static int exynos_ehci_probe(struct platform_device *pdev)
+{
+	struct exynos_ehci_hcd *exynos_ehci;
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	struct resource *res;
+	int irq;
+	int err;
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we move to full device-tree support this can be removed.
+	 */
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	exynos_setup_vbus_gpio(&pdev->dev);
+
+	hcd = usb_create_hcd(&exynos_ehci_hc_driver,
+			     &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_err(&pdev->dev, "Unable to create HCD\n");
+		return -ENOMEM;
+	}
+	exynos_ehci = to_exynos_ehci(hcd);
+
+	err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
+	if (err)
+		goto fail_clk;
+
+	exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+	if (IS_ERR(exynos_ehci->clk)) {
+		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+		err = PTR_ERR(exynos_ehci->clk);
+		goto fail_clk;
+	}
+
+	err = clk_prepare_enable(exynos_ehci->clk);
+	if (err)
+		goto fail_clk;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto fail_io;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ\n");
+		err = irq;
+		goto fail_io;
+	}
+
+	err = exynos_ehci_phy_enable(&pdev->dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to enable USB phy\n");
+		goto fail_io;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = hcd->regs;
+
+	/* DMA burst Enable */
+	writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to add USB HCD\n");
+		goto fail_add_hcd;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	platform_set_drvdata(pdev, hcd);
+
+	return 0;
+
+fail_add_hcd:
+	exynos_ehci_phy_disable(&pdev->dev);
+fail_io:
+	clk_disable_unprepare(exynos_ehci->clk);
+fail_clk:
+	usb_put_hcd(hcd);
+	return err;
+}
+
+static int exynos_ehci_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+
+	usb_remove_hcd(hcd);
+
+	exynos_ehci_phy_disable(&pdev->dev);
+
+	clk_disable_unprepare(exynos_ehci->clk);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos_ehci_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+	bool do_wakeup = device_may_wakeup(dev);
+	int rc;
+
+	rc = ehci_suspend(hcd, do_wakeup);
+	if (rc)
+		return rc;
+
+	exynos_ehci_phy_disable(dev);
+
+	clk_disable_unprepare(exynos_ehci->clk);
+
+	return rc;
+}
+
+static int exynos_ehci_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
+	int ret;
+
+	ret = clk_prepare_enable(exynos_ehci->clk);
+	if (ret)
+		return ret;
+
+	ret = exynos_ehci_phy_enable(dev);
+	if (ret) {
+		dev_err(dev, "Failed to enable USB phy\n");
+		clk_disable_unprepare(exynos_ehci->clk);
+		return ret;
+	}
+
+	/* DMA burst Enable */
+	writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
+
+	ehci_resume(hcd, false);
+	return 0;
+}
+#else
+#define exynos_ehci_suspend	NULL
+#define exynos_ehci_resume	NULL
+#endif
+
+static const struct dev_pm_ops exynos_ehci_pm_ops = {
+	.suspend	= exynos_ehci_suspend,
+	.resume		= exynos_ehci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ehci_match[] = {
+	{ .compatible = "samsung,exynos4210-ehci" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_ehci_match);
+#endif
+
+static struct platform_driver exynos_ehci_driver = {
+	.probe		= exynos_ehci_probe,
+	.remove		= exynos_ehci_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name	= "exynos-ehci",
+		.pm	= &exynos_ehci_pm_ops,
+		.of_match_table = of_match_ptr(exynos_ehci_match),
+	}
+};
+
+static const struct ehci_driver_overrides exynos_overrides __initconst = {
+	.extra_priv_size = sizeof(struct exynos_ehci_hcd),
+};
+
+static int __init ehci_exynos_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
+	return platform_driver_register(&exynos_ehci_driver);
+}
+module_init(ehci_exynos_init);
+
+static void __exit ehci_exynos_cleanup(void)
+{
+	platform_driver_unregister(&exynos_ehci_driver);
+}
+module_exit(ehci_exynos_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:exynos-ehci");
+MODULE_AUTHOR("Jingoo Han");
+MODULE_AUTHOR("Joonyoung Shim");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
new file mode 100644
index 0000000..0a9fd20
--- /dev/null
+++ b/drivers/usb/host/ehci-fsl.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2005-2009 MontaVista Software, Inc.
+ * Copyright 2008,2012,2015      Freescale Semiconductor, Inc.
+ *
+ * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
+ * by Hunter Wu.
+ * Power Management support by Dave Liu <daveliu@freescale.com>,
+ * Jerry Huang <Chang-Ming.Huang@freescale.com> and
+ * Anton Vorontsov <avorontsov@ru.mvista.com>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/err.h>
+#include <linux/usb.h>
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/of_platform.h>
+
+#include "ehci.h"
+#include "ehci-fsl.h"
+
+#define DRIVER_DESC "Freescale EHCI Host controller driver"
+#define DRV_NAME "ehci-fsl"
+
+static struct hc_driver __read_mostly fsl_ehci_hc_driver;
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+/*
+ * fsl_ehci_drv_probe - initialize FSL-based HCDs
+ * @pdev: USB Host Controller being probed
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller.
+ *
+ */
+static int fsl_ehci_drv_probe(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int irq;
+	int retval;
+
+	pr_debug("initializing FSL-SOC USB Controller\n");
+
+	/* Need platform data for setup */
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev,
+			"No platform data for %s.\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	/*
+	 * This is a host mode driver, verify that we're supposed to be
+	 * in host mode.
+	 */
+	if (!((pdata->operating_mode == FSL_USB2_DR_HOST) ||
+	      (pdata->operating_mode == FSL_USB2_MPH_HOST) ||
+	      (pdata->operating_mode == FSL_USB2_DR_OTG))) {
+		dev_err(&pdev->dev,
+			"Non Host Mode configured for %s. Wrong driver linked.\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	irq = res->start;
+
+	hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
+			       &pdev->dev, dev_name(&pdev->dev), NULL);
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err2;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	pdata->regs = hcd->regs;
+
+	if (pdata->power_budget)
+		hcd->power_budget = pdata->power_budget;
+
+	/*
+	 * do platform specific init: check the clock, grab/config pins, etc.
+	 */
+	if (pdata->init && pdata->init(pdev)) {
+		retval = -ENODEV;
+		goto err2;
+	}
+
+	/* Enable USB controller, 83xx or 8536 */
+	if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
+		clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, 0x4);
+
+	/*
+	 * Enable UTMI phy and program PTS field in UTMI mode before asserting
+	 * controller reset for USB Controller version 2.5
+	 */
+	if (pdata->has_fsl_erratum_a007792) {
+		clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, CTRL_UTMI_PHY_EN);
+		writel(PORT_PTS_UTMI, hcd->regs + FSL_SOC_USB_PORTSC1);
+	}
+
+	/* Don't need to set host mode here. It will be done by tdi_reset() */
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval != 0)
+		goto err2;
+	device_wakeup_enable(hcd->self.controller);
+
+#ifdef CONFIG_USB_OTG
+	if (pdata->operating_mode == FSL_USB2_DR_OTG) {
+		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+		hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		dev_dbg(&pdev->dev, "hcd=0x%p  ehci=0x%p, phy=0x%p\n",
+			hcd, ehci, hcd->usb_phy);
+
+		if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
+			retval = otg_set_host(hcd->usb_phy->otg,
+					      &ehci_to_hcd(ehci)->self);
+			if (retval) {
+				usb_put_phy(hcd->usb_phy);
+				goto err2;
+			}
+		} else {
+			dev_err(&pdev->dev, "can't find phy\n");
+			retval = -ENODEV;
+			goto err2;
+		}
+
+		hcd->skip_phy_initialization = 1;
+	}
+#endif
+	return retval;
+
+err2:
+	usb_put_hcd(hcd);
+err1:
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
+	if (pdata->exit)
+		pdata->exit(pdev);
+	return retval;
+}
+
+static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
+			       enum fsl_usb2_phy_modes phy_mode,
+			       unsigned int port_offset)
+{
+	u32 portsc;
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	void __iomem *non_ehci = hcd->regs;
+	struct device *dev = hcd->self.controller;
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+
+	if (pdata->controller_ver < 0) {
+		dev_warn(hcd->self.controller, "Could not get controller version\n");
+		return -ENODEV;
+	}
+
+	portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
+	portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
+
+	switch (phy_mode) {
+	case FSL_USB2_PHY_ULPI:
+		if (pdata->have_sysif_regs && pdata->controller_ver) {
+			/* controller version 1.6 or above */
+			clrbits32(non_ehci + FSL_SOC_USB_CTRL,
+				  CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK,
+					ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+		}
+		portsc |= PORT_PTS_ULPI;
+		break;
+	case FSL_USB2_PHY_SERIAL:
+		portsc |= PORT_PTS_SERIAL;
+		break;
+	case FSL_USB2_PHY_UTMI_WIDE:
+		portsc |= PORT_PTS_PTW;
+		/* fall through */
+	case FSL_USB2_PHY_UTMI:
+	case FSL_USB2_PHY_UTMI_DUAL:
+		if (pdata->have_sysif_regs && pdata->controller_ver) {
+			/* controller version 1.6 or above */
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK, UTMI_PHY_EN);
+			/* Wait for UTMI PHY CLK to become stable (10 ms) */
+			mdelay(FSL_UTMI_PHY_DLY);
+		}
+		/* enable UTMI PHY */
+		if (pdata->have_sysif_regs)
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK,
+					CTRL_UTMI_PHY_EN);
+		portsc |= PORT_PTS_UTMI;
+		break;
+	case FSL_USB2_PHY_NONE:
+		break;
+	}
+
+	/*
+	 * check PHY_CLK_VALID to determine phy clock presence before writing
+	 * to portsc
+	 */
+	if (pdata->check_phy_clk_valid) {
+		if (!(ioread32be(non_ehci + FSL_SOC_USB_CTRL) &
+		    PHY_CLK_VALID)) {
+			dev_warn(hcd->self.controller,
+				 "USB PHY clock invalid\n");
+			return -EINVAL;
+		}
+	}
+
+	ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
+
+	if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
+		clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, USB_CTRL_USB_EN);
+
+	return 0;
+}
+
+static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
+{
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	struct fsl_usb2_platform_data *pdata;
+	void __iomem *non_ehci = hcd->regs;
+
+	pdata = dev_get_platdata(hcd->self.controller);
+
+	if (pdata->have_sysif_regs) {
+		/*
+		 * Turn on cache snooping hardware, since some PowerPC
+		 * platforms rely wholly on hardware to deal with cache
+		 * coherency.
+		 */
+
+		/* Setup Snooping for all the 4GB space */
+		/* SNOOP1 starts from 0x0, size 2G */
+		iowrite32be(0x0 | SNOOP_SIZE_2GB,
+			    non_ehci + FSL_SOC_USB_SNOOP1);
+		/* SNOOP2 starts from 0x80000000, size 2G */
+		iowrite32be(0x80000000 | SNOOP_SIZE_2GB,
+			    non_ehci + FSL_SOC_USB_SNOOP2);
+	}
+
+	/* Deal with USB erratum A-005275 */
+	if (pdata->has_fsl_erratum_a005275 == 1)
+		ehci->has_fsl_hs_errata = 1;
+
+	if (pdata->has_fsl_erratum_a005697 == 1)
+		ehci->has_fsl_susp_errata = 1;
+
+	if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
+			(pdata->operating_mode == FSL_USB2_DR_OTG))
+		if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+			return -EINVAL;
+
+	if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
+		unsigned int chip, rev, svr;
+
+		svr = mfspr(SPRN_SVR);
+		chip = svr >> 16;
+		rev = (svr >> 4) & 0xf;
+
+		/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
+		if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
+			ehci->has_fsl_port_bug = 1;
+
+		if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
+			if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
+				return -EINVAL;
+
+		if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
+			if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 1))
+				return -EINVAL;
+	}
+
+	if (pdata->have_sysif_regs) {
+#ifdef CONFIG_FSL_SOC_BOOKE
+		iowrite32be(0x00000008, non_ehci + FSL_SOC_USB_PRICTRL);
+		iowrite32be(0x00000080, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
+#else
+		iowrite32be(0x0000000c, non_ehci + FSL_SOC_USB_PRICTRL);
+		iowrite32be(0x00000040, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
+#endif
+		iowrite32be(0x00000001, non_ehci + FSL_SOC_USB_SICTRL);
+	}
+
+	return 0;
+}
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int ehci_fsl_reinit(struct ehci_hcd *ehci)
+{
+	if (ehci_fsl_usb_setup(ehci))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_fsl_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+	struct fsl_usb2_platform_data *pdata;
+	struct device *dev;
+
+	dev = hcd->self.controller;
+	pdata = dev_get_platdata(hcd->self.controller);
+	ehci->big_endian_desc = pdata->big_endian_desc;
+	ehci->big_endian_mmio = pdata->big_endian_mmio;
+
+	/* EHCI registers start at offset 0x100 */
+	ehci->caps = hcd->regs + 0x100;
+
+#ifdef CONFIG_PPC_83xx
+	/*
+	 * Deal with MPC834X that need port power to be cycled after the power
+	 * fault condition is removed. Otherwise the state machine does not
+	 * reflect PORTSC[CSC] correctly.
+	 */
+	ehci->need_oc_pp_cycle = 1;
+#endif
+
+	hcd->has_tt = 1;
+
+	retval = ehci_setup(hcd);
+	if (retval)
+		return retval;
+
+	if (of_device_is_compatible(dev->parent->of_node,
+				    "fsl,mpc5121-usb2-dr")) {
+		/*
+		 * set SBUSCFG:AHBBRST so that control msgs don't
+		 * fail when doing heavy PATA writes.
+		 */
+		ehci_writel(ehci, SBUSCFG_INCR8,
+			    hcd->regs + FSL_SOC_USB_SBUSCFG);
+	}
+
+	retval = ehci_fsl_reinit(ehci);
+	return retval;
+}
+
+struct ehci_fsl {
+	struct ehci_hcd	ehci;
+
+#ifdef CONFIG_PM
+	/* Saved USB PHY settings, need to restore after deep sleep. */
+	u32 usb_ctrl;
+#endif
+};
+
+#ifdef CONFIG_PM
+
+#ifdef CONFIG_PPC_MPC512x
+static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+	u32 tmp;
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+	u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
+	mode &= USBMODE_CM_MASK;
+	tmp = ehci_readl(ehci, hcd->regs + 0x140);	/* usbcmd */
+
+	dev_dbg(dev, "suspend=%d already_suspended=%d "
+		"mode=%d  usbcmd %08x\n", pdata->suspended,
+		pdata->already_suspended, mode, tmp);
+#endif
+
+	/*
+	 * If the controller is already suspended, then this must be a
+	 * PM suspend.  Remember this fact, so that we will leave the
+	 * controller suspended at PM resume time.
+	 */
+	if (pdata->suspended) {
+		dev_dbg(dev, "already suspended, leaving early\n");
+		pdata->already_suspended = 1;
+		return 0;
+	}
+
+	dev_dbg(dev, "suspending...\n");
+
+	ehci->rh_state = EHCI_RH_SUSPENDED;
+	dev->power.power_state = PMSG_SUSPEND;
+
+	/* ignore non-host interrupts */
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	/* stop the controller */
+	tmp = ehci_readl(ehci, &ehci->regs->command);
+	tmp &= ~CMD_RUN;
+	ehci_writel(ehci, tmp, &ehci->regs->command);
+
+	/* save EHCI registers */
+	pdata->pm_command = ehci_readl(ehci, &ehci->regs->command);
+	pdata->pm_command &= ~CMD_RUN;
+	pdata->pm_status  = ehci_readl(ehci, &ehci->regs->status);
+	pdata->pm_intr_enable  = ehci_readl(ehci, &ehci->regs->intr_enable);
+	pdata->pm_frame_index  = ehci_readl(ehci, &ehci->regs->frame_index);
+	pdata->pm_segment  = ehci_readl(ehci, &ehci->regs->segment);
+	pdata->pm_frame_list  = ehci_readl(ehci, &ehci->regs->frame_list);
+	pdata->pm_async_next  = ehci_readl(ehci, &ehci->regs->async_next);
+	pdata->pm_configured_flag  =
+		ehci_readl(ehci, &ehci->regs->configured_flag);
+	pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]);
+	pdata->pm_usbgenctrl = ehci_readl(ehci,
+					  hcd->regs + FSL_SOC_USB_USBGENCTRL);
+
+	/* clear the W1C bits */
+	pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS);
+
+	pdata->suspended = 1;
+
+	/* clear PP to cut power to the port */
+	tmp = ehci_readl(ehci, &ehci->regs->port_status[0]);
+	tmp &= ~PORT_POWER;
+	ehci_writel(ehci, tmp, &ehci->regs->port_status[0]);
+
+	return 0;
+}
+
+static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
+	u32 tmp;
+
+	dev_dbg(dev, "suspend=%d already_suspended=%d\n",
+		pdata->suspended, pdata->already_suspended);
+
+	/*
+	 * If the controller was already suspended at suspend time,
+	 * then don't resume it now.
+	 */
+	if (pdata->already_suspended) {
+		dev_dbg(dev, "already suspended, leaving early\n");
+		pdata->already_suspended = 0;
+		return 0;
+	}
+
+	if (!pdata->suspended) {
+		dev_dbg(dev, "not suspended, leaving early\n");
+		return 0;
+	}
+
+	pdata->suspended = 0;
+
+	dev_dbg(dev, "resuming...\n");
+
+	/* set host mode */
+	tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0);
+	ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE);
+
+	ehci_writel(ehci, pdata->pm_usbgenctrl,
+		    hcd->regs + FSL_SOC_USB_USBGENCTRL);
+	ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
+		    hcd->regs + FSL_SOC_USB_ISIPHYCTRL);
+
+	ehci_writel(ehci, SBUSCFG_INCR8, hcd->regs + FSL_SOC_USB_SBUSCFG);
+
+	/* restore EHCI registers */
+	ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
+	ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable);
+	ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index);
+	ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment);
+	ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list);
+	ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next);
+	ehci_writel(ehci, pdata->pm_configured_flag,
+		    &ehci->regs->configured_flag);
+	ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]);
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	ehci->rh_state = EHCI_RH_RUNNING;
+	dev->power.power_state = PMSG_ON;
+
+	tmp = ehci_readl(ehci, &ehci->regs->command);
+	tmp |= CMD_RUN;
+	ehci_writel(ehci, tmp, &ehci->regs->command);
+
+	usb_hcd_resume_root_hub(hcd);
+
+	return 0;
+}
+#else
+static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
+
+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+	return container_of(ehci, struct ehci_fsl, ehci);
+}
+
+static int ehci_fsl_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	void __iomem *non_ehci = hcd->regs;
+
+	if (of_device_is_compatible(dev->parent->of_node,
+				    "fsl,mpc5121-usb2-dr")) {
+		return ehci_fsl_mpc512x_drv_suspend(dev);
+	}
+
+	ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+			device_may_wakeup(dev));
+	if (!fsl_deep_sleep())
+		return 0;
+
+	ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+	return 0;
+}
+
+static int ehci_fsl_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	void __iomem *non_ehci = hcd->regs;
+
+	if (of_device_is_compatible(dev->parent->of_node,
+				    "fsl,mpc5121-usb2-dr")) {
+		return ehci_fsl_mpc512x_drv_resume(dev);
+	}
+
+	ehci_prepare_ports_for_controller_resume(ehci);
+	if (!fsl_deep_sleep())
+		return 0;
+
+	usb_root_hub_lost_power(hcd->self.root_hub);
+
+	/* Restore USB PHY settings and enable the controller. */
+	iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
+
+	ehci_reset(ehci);
+	ehci_fsl_reinit(ehci);
+
+	return 0;
+}
+
+static int ehci_fsl_drv_restore(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	usb_root_hub_lost_power(hcd->self.root_hub);
+	return 0;
+}
+
+static const struct dev_pm_ops ehci_fsl_pm_ops = {
+	.suspend = ehci_fsl_drv_suspend,
+	.resume = ehci_fsl_drv_resume,
+	.restore = ehci_fsl_drv_restore,
+};
+
+#define EHCI_FSL_PM_OPS		(&ehci_fsl_pm_ops)
+#else
+#define EHCI_FSL_PM_OPS		NULL
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_OTG
+static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	u32 status;
+
+	if (!port)
+		return -EINVAL;
+
+	port--;
+
+	/* start port reset before HNP protocol time out */
+	status = readl(&ehci->regs->port_status[port]);
+	if (!(status & PORT_CONNECT))
+		return -ENODEV;
+
+	/* hub_wq will finish the reset later */
+	if (ehci_is_TDI(ehci)) {
+		writel(PORT_RESET |
+		       (status & ~(PORT_CSC | PORT_PEC | PORT_OCC)),
+		       &ehci->regs->port_status[port]);
+	} else {
+		writel(PORT_RESET, &ehci->regs->port_status[port]);
+	}
+
+	return 0;
+}
+#else
+#define ehci_start_port_reset	NULL
+#endif /* CONFIG_USB_OTG */
+
+static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = {
+	.extra_priv_size = sizeof(struct ehci_fsl),
+	.reset = ehci_fsl_setup,
+};
+
+/**
+ * fsl_ehci_drv_remove - shutdown processing for FSL-based HCDs
+ * @pdev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of fsl_ehci_drv_probe().
+ *
+ */
+
+static int fsl_ehci_drv_remove(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
+		otg_set_host(hcd->usb_phy->otg, NULL);
+		usb_put_phy(hcd->usb_phy);
+	}
+
+	usb_remove_hcd(hcd);
+
+	/*
+	 * do platform specific un-initialization:
+	 * release iomux pins, disable clock, etc.
+	 */
+	if (pdata->exit)
+		pdata->exit(pdev);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static struct platform_driver ehci_fsl_driver = {
+	.probe = fsl_ehci_drv_probe,
+	.remove = fsl_ehci_drv_remove,
+	.shutdown = usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "fsl-ehci",
+		.pm = EHCI_FSL_PM_OPS,
+	},
+};
+
+static int __init ehci_fsl_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info(DRV_NAME ": " DRIVER_DESC "\n");
+
+	ehci_init_driver(&fsl_ehci_hc_driver, &ehci_fsl_overrides);
+
+	fsl_ehci_hc_driver.product_desc =
+			"Freescale On-Chip EHCI Host Controller";
+	fsl_ehci_hc_driver.start_port_reset = ehci_start_port_reset;
+
+	return platform_driver_register(&ehci_fsl_driver);
+}
+module_init(ehci_fsl_init);
+
+static void __exit ehci_fsl_cleanup(void)
+{
+	platform_driver_unregister(&ehci_fsl_driver);
+}
+module_exit(ehci_fsl_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
new file mode 100644
index 0000000..cbc4220
--- /dev/null
+++ b/drivers/usb/host/ehci-fsl.h
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
+ * Copyright (c) 2005 MontaVista Software
+ */
+#ifndef _EHCI_FSL_H
+#define _EHCI_FSL_H
+
+/* offsets for the non-ehci registers in the FSL SOC USB controller */
+#define FSL_SOC_USB_SBUSCFG	0x90
+#define SBUSCFG_INCR8		0x02	/* INCR8, specified */
+#define FSL_SOC_USB_ULPIVP	0x170
+#define FSL_SOC_USB_PORTSC1	0x184
+#define PORT_PTS_MSK		(3<<30)
+#define PORT_PTS_UTMI		(0<<30)
+#define PORT_PTS_ULPI		(2<<30)
+#define	PORT_PTS_SERIAL		(3<<30)
+#define PORT_PTS_PTW		(1<<28)
+#define FSL_SOC_USB_PORTSC2	0x188
+#define FSL_SOC_USB_USBMODE	0x1a8
+#define USBMODE_CM_MASK		(3 << 0)	/* controller mode mask */
+#define USBMODE_CM_HOST		(3 << 0)	/* controller mode: host */
+#define USBMODE_ES		(1 << 2)	/* (Big) Endian Select */
+
+#define FSL_SOC_USB_USBGENCTRL	0x200
+#define USBGENCTRL_PPP		(1 << 3)
+#define USBGENCTRL_PFP		(1 << 2)
+#define FSL_SOC_USB_ISIPHYCTRL	0x204
+#define ISIPHYCTRL_PXE		(1)
+#define ISIPHYCTRL_PHYE		(1 << 4)
+
+#define FSL_SOC_USB_SNOOP1	0x400	/* NOTE: big-endian */
+#define FSL_SOC_USB_SNOOP2	0x404	/* NOTE: big-endian */
+#define FSL_SOC_USB_AGECNTTHRSH	0x408	/* NOTE: big-endian */
+#define FSL_SOC_USB_PRICTRL	0x40c	/* NOTE: big-endian */
+#define FSL_SOC_USB_SICTRL	0x410	/* NOTE: big-endian */
+#define FSL_SOC_USB_CTRL	0x500	/* NOTE: big-endian */
+#define CTRL_UTMI_PHY_EN	(1<<9)
+#define CTRL_PHY_CLK_VALID	(1 << 17)
+#define SNOOP_SIZE_2GB		0x1e
+
+/* control Register Bit Masks */
+#define CONTROL_REGISTER_W1C_MASK       0x00020000  /* W1C: PHY_CLK_VALID */
+#define ULPI_INT_EN             (1<<0)
+#define WU_INT_EN               (1<<1)
+#define USB_CTRL_USB_EN         (1<<2)
+#define LINE_STATE_FILTER__EN   (1<<3)
+#define KEEP_OTG_ON             (1<<4)
+#define OTG_PORT                (1<<5)
+#define PLL_RESET               (1<<8)
+#define UTMI_PHY_EN             (1<<9)
+#define ULPI_PHY_CLK_SEL        (1<<10)
+#define PHY_CLK_VALID		(1<<17)
+#endif				/* _EHCI_FSL_H */
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
new file mode 100644
index 0000000..656b8c0
--- /dev/null
+++ b/drivers/usb/host/ehci-grlib.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
+ *
+ * GRUSBHC is typically found on LEON/GRLIB SoCs
+ *
+ * (c) Jan Andersson <jan@gaisler.com>
+ *
+ * Based on ehci-ppc-of.c which is:
+ * (c) Valentine Barshak <vbarshak@ru.mvista.com>
+ * and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#define GRUSBHC_HCIVERSION 0x0100 /* Known value of cap. reg. HCIVERSION */
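+
+/*
+ * The probe below reads HCIVERSION through the little-endian accessors;
+ * if the value doesn't match, the core is assumed to be a big-endian
+ * GRUSBHC and the big_endian_* flags are set accordingly.
+ */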
+
+static const struct hc_driver ehci_grlib_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "GRLIB GRUSBHC EHCI",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_setup,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+#endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+
+static int ehci_hcd_grlib_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct usb_hcd *hcd;
+	struct ehci_hcd	*ehci = NULL;
+	struct resource res;
+	u32 hc_capbase;
+	int irq;
+	int rv;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	dev_dbg(&op->dev, "initializing GRUSBHC EHCI USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	/* usb_create_hcd requires dma_mask != NULL */
+	op->dev.dma_mask = &op->dev.coherent_dma_mask;
+	hcd = usb_create_hcd(&ehci_grlib_hc_driver, &op->dev,
+			"GRUSBHC EHCI USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = resource_size(&res);
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+			__FILE__);
+		rv = -EBUSY;
+		goto err_irq;
+	}
+
+	hcd->regs = devm_ioremap_resource(&op->dev, &res);
+	if (IS_ERR(hcd->regs)) {
+		rv = PTR_ERR(hcd->regs);
+		goto err_ioremap;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+
+	ehci->caps = hcd->regs;
+
+	/* determine endianness of this implementation */
+	hc_capbase = ehci_readl(ehci, &ehci->caps->hc_capbase);
+	if (HC_VERSION(ehci, hc_capbase) != GRUSBHC_HCIVERSION) {
+		ehci->big_endian_mmio = 1;
+		ehci->big_endian_desc = 1;
+		ehci->big_endian_capbase = 1;
+	}
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv)
+		goto err_ioremap;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_ioremap:
+	irq_dispose_mapping(irq);
+err_irq:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+
+static int ehci_hcd_grlib_remove(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	irq_dispose_mapping(hcd->irq);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+
+static const struct of_device_id ehci_hcd_grlib_of_match[] = {
+	{
+		.name = "GAISLER_EHCI",
+	 },
+	{
+		.name = "01_026",
+	 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
+
+
+static struct platform_driver ehci_grlib_driver = {
+	.probe		= ehci_hcd_grlib_probe,
+	.remove		= ehci_hcd_grlib_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "grlib-ehci",
+		.of_match_table = ehci_hcd_grlib_of_match,
+	},
+};
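+
+/*
+ * Note: this file is #included by ehci-hcd.c under CONFIG_SPARC_LEON,
+ * which registers ehci_grlib_driver; there is no module_init() here.
+ */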
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
new file mode 100644
index 0000000..8608ac5
--- /dev/null
+++ b/drivers/usb/host/ehci-hcd.c
@@ -0,0 +1,1386 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * Copyright (c) 2000-2004 by David Brownell
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI hc_driver implementation ... experimental, incomplete.
+ * Based on the final 1.0 register interface specification.
+ *
+ * USB 2.0 shows up in upcoming www.pcmcia.org technology.
+ * First was PCMCIA, like ISA; then CardBus, which is PCI.
+ * Next comes "CardBay", using USB 2.0 signals.
+ *
+ * Contains additional contributions by Brad Hards, Rory Bolt, and others.
+ * Special thanks to Intel and VIA for providing host controllers to
+ * test this driver on, and Cypress (including In-System Design) for
+ * providing early devices for those host controllers to talk to!
+ */
+
+#define DRIVER_AUTHOR "David Brownell"
+#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
+
+static const char	hcd_name [] = "ehci_hcd";
+
+
+#undef EHCI_URB_TRACE
+
+/* magic numbers that can affect system performance */
+#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
+#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
+#define	EHCI_TUNE_RL_TT		0
+#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
+#define	EHCI_TUNE_MULT_TT	1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code).  In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define	EHCI_TUNE_FLS		1	/* (medium) 512-frame schedule */
+
+/* Initial IRQ latency:  faster than hw default */
+static int log2_irq_thresh = 0;		// 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting:  slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for flakey hardware, ignore overcurrent indicators */
+static bool ignore_oc;
+module_param (ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
+#define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+/*-------------------------------------------------------------------------*/
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+		struct ehci_tt *tt);
+
+/*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * the invalid intermediate value.  Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
+ */
+static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
+{
+	unsigned uf;
+
+	uf = ehci_readl(ehci, &ehci->regs->frame_index);
+	if (unlikely((uf & 7) == 0))
+		uf = ehci_readl(ehci, &ehci->regs->frame_index);
+	return uf;
+}
+
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+	if (ehci->frame_index_bug)
+		return ehci_moschip_read_frame_index(ehci);
+	return ehci_readl(ehci, &ehci->regs->frame_index);
+}
+
+#include "ehci-dbg.c"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ehci_handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes:  "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown:  shutting down the bridge before the devices using it.
+ */
+int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+		   u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = ehci_readl(ehci, ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay (1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(ehci_handshake);
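+
+/*
+ * Typical use, as in ehci_halt() below: wait up to 2 ms (16 microframes)
+ * for the controller to report the halted state:
+ *
+ *	ehci_handshake(ehci, &ehci->regs->status,
+ *			STS_HALT, STS_HALT, 16 * 125);
+ */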
+
+/* check TDI/ARC silicon is in host mode */
+static int tdi_in_host_mode (struct ehci_hcd *ehci)
+{
+	u32		tmp;
+
+	tmp = ehci_readl(ehci, &ehci->regs->usbmode);
+	return (tmp & 3) == USBMODE_CM_HC;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int ehci_halt (struct ehci_hcd *ehci)
+{
+	u32	temp;
+
+	spin_lock_irq(&ehci->lock);
+
+	/* disable any irqs left enabled by previous code */
+	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+
+	if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
+		spin_unlock_irq(&ehci->lock);
+		return 0;
+	}
+
+	/*
+	 * This routine gets called during probe before ehci->command
+	 * has been initialized, so we can't rely on its value.
+	 */
+	ehci->command &= ~CMD_RUN;
+	temp = ehci_readl(ehci, &ehci->regs->command);
+	temp &= ~(CMD_RUN | CMD_IAAD);
+	ehci_writel(ehci, temp, &ehci->regs->command);
+
+	spin_unlock_irq(&ehci->lock);
+	synchronize_irq(ehci_to_hcd(ehci)->irq);
+
+	return ehci_handshake(ehci, &ehci->regs->status,
+			  STS_HALT, STS_HALT, 16 * 125);
+}
+
+/* put TDI/ARC silicon into EHCI mode */
+static void tdi_reset (struct ehci_hcd *ehci)
+{
+	u32		tmp;
+
+	tmp = ehci_readl(ehci, &ehci->regs->usbmode);
+	tmp |= USBMODE_CM_HC;
+	/* The default byte access to MMR space is LE after
+	 * controller reset. Set the required endian mode
+	 * for transfer buffers to match the host microprocessor.
+	 */
+	if (ehci_big_endian_mmio(ehci))
+		tmp |= USBMODE_BE;
+	ehci_writel(ehci, tmp, &ehci->regs->usbmode);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+int ehci_reset(struct ehci_hcd *ehci)
+{
+	int	retval;
+	u32	command = ehci_readl(ehci, &ehci->regs->command);
+
+	/* If the EHCI debug controller is active, special care must be
+	 * taken before and after a host controller reset */
+	if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci)))
+		ehci->debug = NULL;
+
+	command |= CMD_RESET;
+	dbg_cmd (ehci, "reset", command);
+	ehci_writel(ehci, command, &ehci->regs->command);
+	ehci->rh_state = EHCI_RH_HALTED;
+	ehci->next_statechange = jiffies;
+	retval = ehci_handshake(ehci, &ehci->regs->command,
+			    CMD_RESET, 0, 250 * 1000);
+
+	if (ehci->has_hostpc) {
+		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
+				&ehci->regs->usbmode_ex);
+		ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning);
+	}
+	if (retval)
+		return retval;
+
+	if (ehci_is_TDI(ehci))
+		tdi_reset (ehci);
+
+	if (ehci->debug)
+		dbgp_external_startup(ehci_to_hcd(ehci));
+
+	ehci->port_c_suspend = ehci->suspended_ports =
+			ehci->resuming_ports = 0;
+	return retval;
+}
+EXPORT_SYMBOL_GPL(ehci_reset);
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void ehci_quiesce (struct ehci_hcd *ehci)
+{
+	u32	temp;
+
+	if (ehci->rh_state != EHCI_RH_RUNNING)
+		return;
+
+	/* wait for any schedule enables/disables to take effect */
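+	/*
+	 * CMD_ASE/CMD_PSE are bits 5/4 of USBCMD while STS_ASS/STS_PSS are
+	 * bits 15/14 of USBSTS, so shifting the command left by 10 lines
+	 * the schedule-enable bits up with the corresponding status bits.
+	 */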
+	temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
+	ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp,
+			16 * 125);
+
+	/* then disable anything that's still active */
+	spin_lock_irq(&ehci->lock);
+	ehci->command &= ~(CMD_ASE | CMD_PSE);
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	spin_unlock_irq(&ehci->lock);
+
+	/* hardware can take 16 microframes to turn off ... */
+	ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0,
+			16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_iaa_cycle(struct ehci_hcd *ehci);
+static void end_unlink_async(struct ehci_hcd *ehci);
+static void unlink_empty_async(struct ehci_hcd *ehci);
+static void ehci_work(struct ehci_hcd *ehci);
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable);
+
+#include "ehci-timer.c"
+#include "ehci-hub.c"
+#include "ehci-mem.c"
+#include "ehci-q.c"
+#include "ehci-sched.c"
+#include "ehci-sysfs.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
+{
+	int	port = HCS_N_PORTS(ehci->hcs_params);
+
+	while (port--) {
+		spin_unlock_irq(&ehci->lock);
+		ehci_port_power(ehci, port, false);
+		spin_lock_irq(&ehci->lock);
+		ehci_writel(ehci, PORT_RWC_BITS,
+				&ehci->regs->port_status[port]);
+	}
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void ehci_silence_controller(struct ehci_hcd *ehci)
+{
+	ehci_halt(ehci);
+
+	spin_lock_irq(&ehci->lock);
+	ehci->rh_state = EHCI_RH_HALTED;
+	ehci_turn_off_all_ports(ehci);
+
+	/* make BIOS/etc use companion controller during reboot */
+	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
+
+	/* unblock posted writes */
+	ehci_readl(ehci, &ehci->regs->configured_flag);
+	spin_unlock_irq(&ehci->lock);
+}
+
+/* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void ehci_shutdown(struct usb_hcd *hcd)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+
+	/*
+	 * Protect the system from crashing at system shutdown in cases where
+	 * the USB host has not yet been added by the OTG controller driver.
+	 * Since ehci_setup() has not run yet, avoid accessing registers or
+	 * variables initialized there.
+	 */
+	if (!ehci->sbrn)
+		return;
+
+	spin_lock_irq(&ehci->lock);
+	ehci->shutdown = true;
+	ehci->rh_state = EHCI_RH_STOPPING;
+	ehci->enabled_hrtimer_events = 0;
+	spin_unlock_irq(&ehci->lock);
+
+	ehci_silence_controller(ehci);
+
+	hrtimer_cancel(&ehci->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ehci_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping ehci->lock.
+ */
+static void ehci_work (struct ehci_hcd *ehci)
+{
+	/* another CPU may drop ehci->lock during a schedule scan while
+	 * it reports urb completions.  this flag guards against bogus
+	 * attempts at re-entrant schedule scanning.
+	 */
+	if (ehci->scanning) {
+		ehci->need_rescan = true;
+		return;
+	}
+	ehci->scanning = true;
+
+ rescan:
+	ehci->need_rescan = false;
+	if (ehci->async_count)
+		scan_async(ehci);
+	if (ehci->intr_count > 0)
+		scan_intr(ehci);
+	if (ehci->isoc_count > 0)
+		scan_isoc(ehci);
+	if (ehci->need_rescan)
+		goto rescan;
+	ehci->scanning = false;
+
+	/* the IO watchdog guards against hardware or driver bugs that
+	 * misplace IRQs, and should let us run completely without IRQs.
+	 * such lossage has been observed on both VT6202 and VT8235.
+	 */
+	turn_on_io_watchdog(ehci);
+}
+
+/*
+ * Called when the ehci_hcd module is removed.
+ */
+static void ehci_stop (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+
+	ehci_dbg (ehci, "stop\n");
+
+	/* no more interrupts ... */
+
+	spin_lock_irq(&ehci->lock);
+	ehci->enabled_hrtimer_events = 0;
+	spin_unlock_irq(&ehci->lock);
+
+	ehci_quiesce(ehci);
+	ehci_silence_controller(ehci);
+	ehci_reset (ehci);
+
+	hrtimer_cancel(&ehci->hrtimer);
+	remove_sysfs_files(ehci);
+	remove_debug_files (ehci);
+
+	/* root hub is shut down separately (first, when possible) */
+	spin_lock_irq (&ehci->lock);
+	end_free_itds(ehci);
+	spin_unlock_irq (&ehci->lock);
+	ehci_mem_cleanup (ehci);
+
+	if (ehci->amd_pll_fix == 1)
+		usb_amd_dev_put();
+
+	dbg_status (ehci, "ehci_stop completed",
+		    ehci_readl(ehci, &ehci->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int ehci_init(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	u32			temp;
+	int			retval;
+	u32			hcc_params;
+	struct ehci_qh_hw	*hw;
+
+	spin_lock_init(&ehci->lock);
+
+	/*
+	 * keep the io watchdog by default; well-behaved HCDs can turn it off later
+	 */
+	ehci->need_io_watchdog = 1;
+
+	hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ehci->hrtimer.function = ehci_hrtimer_func;
+	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
+	/*
+	 * by default set standard 80% (== 100 usec/uframe) max periodic
+	 * bandwidth as required by USB 2.0
+	 */
+	ehci->uframe_periodic_max = 100;
+
+	/*
+	 * hw default: 1K periodic list heads, one per frame.
+	 * periodic_size can shrink by USBCMD update if hcc_params allows.
+	 */
+	ehci->periodic_size = DEFAULT_I_TDPS;
+	INIT_LIST_HEAD(&ehci->async_unlink);
+	INIT_LIST_HEAD(&ehci->async_idle);
+	INIT_LIST_HEAD(&ehci->intr_unlink_wait);
+	INIT_LIST_HEAD(&ehci->intr_unlink);
+	INIT_LIST_HEAD(&ehci->intr_qh_list);
+	INIT_LIST_HEAD(&ehci->cached_itd_list);
+	INIT_LIST_HEAD(&ehci->cached_sitd_list);
+	INIT_LIST_HEAD(&ehci->tt_list);
+
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		switch (EHCI_TUNE_FLS) {
+		case 0: ehci->periodic_size = 1024; break;
+		case 1: ehci->periodic_size = 512; break;
+		case 2: ehci->periodic_size = 256; break;
+		default:	BUG();
+		}
+	}
+	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
+		return retval;
+
+	/* controllers may cache some of the periodic schedule ... */
+	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
+		ehci->i_thresh = 0;
+	else					// N microframes cached
+		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
+
+	/*
+	 * dedicate a qh for the async ring head, since we couldn't unlink
+	 * a 'real' qh without stopping the async schedule [4.8].  use it
+	 * as the 'reclamation list head' too.
+	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
+	 * from automatically advancing to the next td after short reads.
+	 */
+	ehci->async->qh_next.qh = NULL;
+	hw = ehci->async->hw;
+	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
+	hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
+#endif
+	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+	hw->hw_qtd_next = EHCI_LIST_END(ehci);
+	ehci->async->qh_state = QH_STATE_LINKED;
+	hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
+
+	/* clear interrupt enables, set irq latency */
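+	/*
+	 * The interrupt threshold (ITC) field lives in bits 23:16 of USBCMD
+	 * and holds a count of microframes, so 1 << (16 + N) requests an
+	 * interrupt latency of 2^N microframes (1-64).
+	 */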
+	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+		log2_irq_thresh = 0;
+	temp = 1 << (16 + log2_irq_thresh);
+	if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
+		ehci->has_ppcd = 1;
+		ehci_dbg(ehci, "enable per-port change event\n");
+		temp |= CMD_PPCEE;
+	}
+	if (HCC_CANPARK(hcc_params)) {
+		/* HW default park == 3, on hardware that supports it (like
+		 * NVidia and ALI silicon), maximizes throughput on the async
+		 * schedule by avoiding QH fetches between transfers.
+		 *
+		 * With fast usb storage devices and NForce2, "park" seems to
+		 * cause problems:  throughput reduction (!), data errors...
+		 */
+		if (park) {
+			park = min(park, (unsigned) 3);
+			temp |= CMD_PARK;
+			temp |= park << 8;
+		}
+		ehci_dbg(ehci, "park %d\n", park);
+	}
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		temp &= ~(3 << 2);
+		temp |= (EHCI_TUNE_FLS << 2);
+	}
+	ehci->command = temp;
+
+	/* Accept arbitrarily long scatter-gather lists */
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
+
+	/* Prepare for unlinking active QHs */
+	ehci->old_current = ~0;
+	return 0;
+}
+
+/* start HC running; it's halted, ehci_init() has been run (once) */
+static int ehci_run (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	u32			temp;
+	u32			hcc_params;
+
+	hcd->uses_new_polling = 1;
+
+	/* EHCI spec section 4.1 */
+
+	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+
+	/*
+	 * hcc_params controls whether ehci->regs->segment must (!!!)
+	 * be used; it constrains QH/ITD/SITD and QTD locations.
+	 * dma_pool consistent memory always uses segment zero.
+	 * streaming mappings for I/O buffers, like pci_map_single(),
+	 * can return segments above 4GB, if the device allows.
+	 *
+	 * NOTE:  the dma mask is visible through dev->dma_mask, so
+	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+	 * host side drivers though.
+	 */
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params)) {
+		ehci_writel(ehci, 0, &ehci->regs->segment);
+#if 0
+// this is deeply broken on almost all architectures
+		if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
+			ehci_info(ehci, "enabled 64bit DMA\n");
+#endif
+	}
+
+
+	// Philips, Intel, and maybe others need CMD_RUN before the
+	// root hub will detect new devices (why?); NEC doesn't
+	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+	ehci->command |= CMD_RUN;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	dbg_cmd (ehci, "init", ehci->command);
+
+	/*
+	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+	 * are explicitly handed to companion controller(s), so no TT is
+	 * involved with the root hub.  (Except where one is integrated,
+	 * and there's no companion controller unless maybe for USB OTG.)
+	 *
+	 * Turning on the CF flag will transfer ownership of all ports
+	 * from the companions to the EHCI controller.  If any of the
+	 * companions are in the middle of a port reset at the time, it
+	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
+	 * guarantees that no resets are in progress.  After we set CF,
+	 * a short delay lets the hardware catch up; new resets shouldn't
+	 * be started before the port switching actions could complete.
+	 */
+	down_write(&ehci_cf_port_reset_rwsem);
+	ehci->rh_state = EHCI_RH_RUNNING;
+	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
+	msleep(5);
+	up_write(&ehci_cf_port_reset_rwsem);
+	ehci->last_periodic_enable = ktime_get_real();
+
+	temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+	ehci_info (ehci,
+		"USB %x.%x started, EHCI %x.%02x%s\n",
+		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+		temp >> 8, temp & 0xff,
+		ignore_oc ? ", overcurrent ignored" : "");
+
+	ehci_writel(ehci, INTR_MASK,
+		    &ehci->regs->intr_enable); /* Turn On Interrupts */
+
+	/* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it init()
+	 * since the class device isn't created that early.
+	 */
+	create_debug_files(ehci);
+	create_sysfs_files(ehci);
+
+	return 0;
+}
+
+int ehci_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	ehci->regs = (void __iomem *)ehci->caps +
+	    HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+	dbg_hcs_params(ehci, "reset");
+	dbg_hcc_params(ehci, "reset");
+
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	ehci->sbrn = HCD_USB2;
+
+	/* data structure init */
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	retval = ehci_halt(ehci);
+	if (retval) {
+		ehci_mem_cleanup(ehci);
+		return retval;
+	}
+
+	ehci_reset(ehci);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ehci_setup);
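+
+/*
+ * Bus-glue drivers normally point their hc_driver's .reset hook at
+ * ehci_setup() (as ehci-grlib.c does), or wrap it when the controller
+ * needs extra setup first.  A hedged sketch -- the "foo" names and the
+ * capability-register offset are hypothetical:
+ */
+#if 0
+static int foo_ehci_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+	/* locate the capability registers before the generic setup */
+	ehci->caps = hcd->regs + 0x100;		/* hypothetical offset */
+	return ehci_setup(hcd);
+}
+#endif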
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	u32			status, masked_status, pcd_status = 0, cmd;
+	int			bh;
+	unsigned long		flags;
+
+	/*
+	 * With the threadirqs option we use the spin_lock_irqsave() variant
+	 * to prevent a deadlock with the ehci hrtimer callback, because
+	 * hrtimer callbacks run in interrupt context even when threadirqs is
+	 * specified. We can go back to the spin_lock() variant once hrtimer
+	 * callbacks become threaded.
+	 */
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	status = ehci_readl(ehci, &ehci->regs->status);
+
+	/* e.g. cardbus physical eject */
+	if (status == ~(u32) 0) {
+		ehci_dbg (ehci, "device removed\n");
+		goto dead;
+	}
+
+	/*
+	 * We don't use STS_FLR, but some controllers don't like it to
+	 * remain on, so mask it out along with the other status bits.
+	 */
+	masked_status = status & (INTR_MASK | STS_FLR);
+
+	/* Shared IRQ? */
+	if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+		spin_unlock_irqrestore(&ehci->lock, flags);
+		return IRQ_NONE;
+	}
+
+	/* clear (just) interrupts */
+	ehci_writel(ehci, masked_status, &ehci->regs->status);
+	cmd = ehci_readl(ehci, &ehci->regs->command);
+	bh = 0;
+
+	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
+		if (likely ((status & STS_ERR) == 0))
+			COUNT (ehci->stats.normal);
+		else
+			COUNT (ehci->stats.error);
+		bh = 1;
+	}
+
+	/* complete the unlinking of some qh [4.15.2.3] */
+	if (status & STS_IAA) {
+
+		/* Turn off the IAA watchdog */
+		ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
+
+		/*
+		 * Mild optimization: Allow another IAAD to reset the
+		 * hrtimer, if one occurs before the next expiration.
+		 * In theory we could always cancel the hrtimer, but
+		 * tests show that about half the time it will be reset
+		 * for some other event anyway.
+		 */
+		if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
+			++ehci->next_hrtimer_event;
+
+		/* guard against (alleged) silicon errata */
+		if (cmd & CMD_IAAD)
+			ehci_dbg(ehci, "IAA with IAAD still set?\n");
+		if (ehci->iaa_in_progress)
+			COUNT(ehci->stats.iaa);
+		end_iaa_cycle(ehci);
+	}
+
+	/* remote wakeup [4.3.1] */
+	if (status & STS_PCD) {
+		unsigned	i = HCS_N_PORTS (ehci->hcs_params);
+		u32		ppcd = ~0;
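+		/* all-ones default: scan every port unless PPCD narrows it */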
+
+		/* kick root hub later */
+		pcd_status = status;
+
+		/* resume root hub? */
+		if (ehci->rh_state == EHCI_RH_SUSPENDED)
+			usb_hcd_resume_root_hub(hcd);
+
+		/* get per-port change detect bits */
+		if (ehci->has_ppcd)
+			ppcd = status >> 16;
+
+		while (i--) {
+			int pstatus;
+
+			/* leverage per-port change bits feature */
+			if (!(ppcd & (1 << i)))
+				continue;
+			pstatus = ehci_readl(ehci,
+					 &ehci->regs->port_status[i]);
+
+			if (pstatus & PORT_OWNER)
+				continue;
+			if (!(test_bit(i, &ehci->suspended_ports) &&
+					((pstatus & PORT_RESUME) ||
+						!(pstatus & PORT_SUSPEND)) &&
+					(pstatus & PORT_PE) &&
+					ehci->reset_done[i] == 0))
+				continue;
+
+			/* start USB_RESUME_TIMEOUT msec resume signaling from
+			 * this port, and make hub_wq collect
+			 * PORT_STAT_C_SUSPEND to stop that signaling.
+			 */
+			ehci->reset_done[i] = jiffies +
+				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			set_bit(i, &ehci->resuming_ports);
+			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+			usb_hcd_start_port_resume(&hcd->self, i);
+			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
+		}
+	}
+
+	/* PCI errors [4.15.2.4] */
+	if (unlikely ((status & STS_FATAL) != 0)) {
+		ehci_err(ehci, "fatal error\n");
+		dbg_cmd(ehci, "fatal", cmd);
+		dbg_status(ehci, "fatal", status);
+dead:
+		usb_hc_died(hcd);
+
+		/* Don't let the controller do anything more */
+		ehci->shutdown = true;
+		ehci->rh_state = EHCI_RH_STOPPING;
+		ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+		ehci_writel(ehci, ehci->command, &ehci->regs->command);
+		ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+		ehci_handle_controller_death(ehci);
+
+		/* Handle completions when the controller stops */
+		bh = 0;
+	}
+
+	if (bh)
+		ehci_work (ehci);
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	if (pcd_status)
+		usb_hcd_poll_rh_status(hcd);
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int ehci_urb_enqueue (
+	struct usb_hcd	*hcd,
+	struct urb	*urb,
+	gfp_t		mem_flags
+) {
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	struct list_head	qtd_list;
+
+	INIT_LIST_HEAD (&qtd_list);
+
+	switch (usb_pipetype (urb->pipe)) {
+	case PIPE_CONTROL:
+		/* qh_completions() code doesn't handle all the fault cases
+		 * in multi-TD control transfers.  Even 1KB is rare anyway.
+		 */
+		if (urb->transfer_buffer_length > (16 * 1024))
+			return -EMSGSIZE;
+		/* FALLTHROUGH */
+	/* case PIPE_BULK: */
+	default:
+		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return submit_async(ehci, urb, &qtd_list, mem_flags);
+
+	case PIPE_INTERRUPT:
+		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return intr_submit(ehci, urb, &qtd_list, mem_flags);
+
+	case PIPE_ISOCHRONOUS:
+		if (urb->dev->speed == USB_SPEED_HIGH)
+			return itd_submit (ehci, urb, mem_flags);
+		else
+			return sitd_submit (ehci, urb, mem_flags);
+	}
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	struct ehci_qh		*qh;
+	unsigned long		flags;
+	int			rc;
+
+	spin_lock_irqsave (&ehci->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto done;
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		/*
+		 * We don't expedite dequeue for isochronous URBs.
+		 * Just wait until they complete normally or their
+		 * time slot expires.
+		 */
+	} else {
+		qh = (struct ehci_qh *) urb->hcpriv;
+		qh->unlink_reason |= QH_UNLINK_REQUESTED;
+		switch (qh->qh_state) {
+		case QH_STATE_LINKED:
+			if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
+				start_unlink_intr(ehci, qh);
+			else
+				start_unlink_async(ehci, qh);
+			break;
+		case QH_STATE_COMPLETING:
+			qh->dequeue_during_giveback = 1;
+			break;
+		case QH_STATE_UNLINK:
+		case QH_STATE_UNLINK_WAIT:
+			/* already started */
+			break;
+		case QH_STATE_IDLE:
+			/* QH might be waiting for a Clear-TT-Buffer */
+			qh_completions(ehci, qh);
+			break;
+		}
+	}
+done:
+	spin_unlock_irqrestore (&ehci->lock, flags);
+	return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+// bulk qh holds the data toggle
+
+static void
+ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	unsigned long		flags;
+	struct ehci_qh		*qh;
+
+	/* ASSERT:  any requests/urbs are being unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+
+rescan:
+	spin_lock_irqsave (&ehci->lock, flags);
+	qh = ep->hcpriv;
+	if (!qh)
+		goto done;
+
+	/* endpoints can be iso streams.  for now, we don't
+	 * accelerate iso completions ... so spin a while.
+	 */
+	if (qh->hw == NULL) {
+		struct ehci_iso_stream	*stream = ep->hcpriv;
+
+		if (!list_empty(&stream->td_list))
+			goto idle_timeout;
+
+		/* BUG_ON(!list_empty(&stream->free_list)); */
+		reserve_release_iso_bandwidth(ehci, stream, -1);
+		kfree(stream);
+		goto done;
+	}
+
+	qh->unlink_reason |= QH_UNLINK_REQUESTED;
+	switch (qh->qh_state) {
+	case QH_STATE_LINKED:
+		if (list_empty(&qh->qtd_list))
+			qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
+		else
+			WARN_ON(1);
+		if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
+			start_unlink_async(ehci, qh);
+		else
+			start_unlink_intr(ehci, qh);
+		/* FALL THROUGH */
+	case QH_STATE_COMPLETING:	/* already in unlinking */
+	case QH_STATE_UNLINK:		/* wait for hw to finish? */
+	case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+		spin_unlock_irqrestore (&ehci->lock, flags);
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case QH_STATE_IDLE:		/* fully unlinked */
+		if (qh->clearing_tt)
+			goto idle_timeout;
+		if (list_empty (&qh->qtd_list)) {
+			if (qh->ps.bw_uperiod)
+				reserve_release_intr_bandwidth(ehci, qh, -1);
+			qh_destroy(ehci, qh);
+			break;
+		}
+		/* fall through */
+	default:
+		/* caller was supposed to have unlinked any requests;
+		 * that's not our job.  just leak this memory.
+		 */
+		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
+			qh, ep->desc.bEndpointAddress, qh->qh_state,
+			list_empty (&qh->qtd_list) ? "" : "(has tds)");
+		break;
+	}
+ done:
+	ep->hcpriv = NULL;
+	spin_unlock_irqrestore (&ehci->lock, flags);
+}
+
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct ehci_qh		*qh;
+	int			eptype = usb_endpoint_type(&ep->desc);
+	int			epnum = usb_endpoint_num(&ep->desc);
+	int			is_out = usb_endpoint_dir_out(&ep->desc);
+	unsigned long		flags;
+
+	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+		return;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+	qh = ep->hcpriv;
+
+	/* For Bulk and Interrupt endpoints we maintain the toggle state
+	 * in the hardware; the toggle bits in udev aren't used at all.
+	 * When an endpoint is reset by usb_clear_halt() we must reset
+	 * the toggle bit in the QH.
+	 */
+	if (qh) {
+		if (!list_empty(&qh->qtd_list)) {
+			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+		} else {
+			/* The toggle value in the QH can't be updated
+			 * while the QH is active.  Unlink it now;
+			 * re-linking will call qh_refresh().
+			 */
+			usb_settoggle(qh->ps.udev, epnum, is_out, 0);
+			qh->unlink_reason |= QH_UNLINK_REQUESTED;
+			if (eptype == USB_ENDPOINT_XFER_BULK)
+				start_unlink_async(ehci, qh);
+			else
+				start_unlink_intr(ehci, qh);
+		}
+	}
+	spin_unlock_irqrestore(&ehci->lock, flags);
+}
+
+static int ehci_get_frame (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
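+
+	/* FRINDEX counts microframes; 8 per 1-ms frame, hence the >> 3 */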
+	return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Device addition and removal */
+
+static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+
+	spin_lock_irq(&ehci->lock);
+	drop_tt(udev);
+	spin_unlock_irq(&ehci->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+
+/* suspend/resume, section 4.3 */
+
+/* These routines handle the generic parts of controller suspend/resume */
+
+int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+
+	if (time_before(jiffies, ehci->next_statechange))
+		msleep(10);
+
+	/*
+	 * Root hub was already suspended.  Disable IRQ emission and
+	 * mark HW unaccessible.  The PM and USB cores make sure that
+	 * the root hub is either suspended or stopped.
+	 */
+	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+
+	spin_lock_irq(&ehci->lock);
+	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+	(void) ehci_readl(ehci, &ehci->regs->intr_enable);
+
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	spin_unlock_irq(&ehci->lock);
+
+	synchronize_irq(hcd->irq);
+
+	/* Check for race with a wakeup request */
+	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+		ehci_resume(hcd, false);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ehci_suspend);
+
+/* Returns 0 if power was preserved, 1 if power was lost */
+int ehci_resume(struct usb_hcd *hcd, bool force_reset)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+
+	if (time_before(jiffies, ehci->next_statechange))
+		msleep(100);
+
+	/* Mark hardware accessible again as we are back to full power by now */
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	if (ehci->shutdown)
+		return 0;		/* Controller is dead */
+
+	/*
+	 * If CF is still set and reset isn't forced
+	 * then we maintained suspend power.
+	 * Just undo the effect of ehci_suspend().
+	 */
+	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+			!force_reset) {
+		int	mask = INTR_MASK;
+
+		ehci_prepare_ports_for_controller_resume(ehci);
+
+		spin_lock_irq(&ehci->lock);
+		if (ehci->shutdown)
+			goto skip;
+
+		if (!hcd->self.root_hub->do_remote_wakeup)
+			mask &= ~STS_PCD;
+		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+		ehci_readl(ehci, &ehci->regs->intr_enable);
+ skip:
+		spin_unlock_irq(&ehci->lock);
+		return 0;
+	}
+
+	/*
+	 * Else reset, to cope with power loss or resume from hibernation
+	 * having let the firmware kick in during reboot.
+	 */
+	usb_root_hub_lost_power(hcd->self.root_hub);
+	(void) ehci_halt(ehci);
+	(void) ehci_reset(ehci);
+
+	spin_lock_irq(&ehci->lock);
+	if (ehci->shutdown)
+		goto skip;
+
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
+
+	ehci->rh_state = EHCI_RH_SUSPENDED;
+	spin_unlock_irq(&ehci->lock);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(ehci_resume);
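+
+/*
+ * A hedged sketch of how bus-glue drivers typically wire these helpers
+ * into their dev_pm_ops ("foo" names are hypothetical):
+ */
+#if 0
+static int foo_ehci_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	return ehci_suspend(hcd, device_may_wakeup(dev));
+}
+
+static int foo_ehci_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	/* false: assume suspend power was kept; true forces a reset */
+	return ehci_resume(hcd, false);
+}
+
+static SIMPLE_DEV_PM_OPS(foo_ehci_pm_ops, foo_ehci_suspend,
+			 foo_ehci_resume);
+#endif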
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ehci_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"EHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ehci_irq,
+	.flags =		HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		ehci_setup,
+	.start =		ehci_run,
+	.stop =			ehci_stop,
+	.shutdown =		ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		ehci_urb_enqueue,
+	.urb_dequeue =		ehci_urb_dequeue,
+	.endpoint_disable =	ehci_endpoint_disable,
+	.endpoint_reset =	ehci_endpoint_reset,
+	.clear_tt_buffer_complete =	ehci_clear_tt_buffer_complete,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	ehci_hub_status_data,
+	.hub_control =		ehci_hub_control,
+	.bus_suspend =		ehci_bus_suspend,
+	.bus_resume =		ehci_bus_resume,
+	.relinquish_port =	ehci_relinquish_port,
+	.port_handed_over =	ehci_port_handed_over,
+	.get_resuming_ports =	ehci_get_resuming_ports,
+
+	/*
+	 * device support
+	 */
+	.free_dev =		ehci_remove_device,
+};
+
+void ehci_init_driver(struct hc_driver *drv,
+		const struct ehci_driver_overrides *over)
+{
+	/* Copy the generic table to drv and then apply the overrides */
+	*drv = ehci_hc_driver;
+
+	if (over) {
+		drv->hcd_priv_size += over->extra_priv_size;
+		if (over->reset)
+			drv->reset = over->reset;
+		if (over->port_power)
+			drv->port_power = over->port_power;
+	}
+}
+EXPORT_SYMBOL_GPL(ehci_init_driver);
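+
+/*
+ * A hedged sketch of typical ehci_init_driver() use by a platform glue
+ * driver: build the hc_driver once at module init, overriding only what
+ * the hardware needs ("foo" names are hypothetical):
+ */
+#if 0
+static struct hc_driver __read_mostly foo_ehci_hc_driver;
+
+static const struct ehci_driver_overrides foo_overrides __initconst = {
+	.reset = foo_ehci_reset,	/* hypothetical, see sketch above */
+};
+
+static int __init foo_ehci_init(void)
+{
+	ehci_init_driver(&foo_ehci_hc_driver, &foo_overrides);
+	return platform_driver_register(&foo_ehci_platform_driver);
+}
+module_init(foo_ehci_init);
+#endif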
+
+/*-------------------------------------------------------------------------*/
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_LICENSE ("GPL");
+
+#ifdef CONFIG_USB_EHCI_SH
+#include "ehci-sh.c"
+#define PLATFORM_DRIVER		ehci_hcd_sh_driver
+#endif
+
+#ifdef CONFIG_PPC_PS3
+#include "ehci-ps3.c"
+#define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
+#include "ehci-ppc-of.c"
+#define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
+#endif
+
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+#include "ehci-xilinx-of.c"
+#define XILINX_OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
+#include "ehci-pmcmsp.c"
+#define	PLATFORM_DRIVER		ehci_hcd_msp_driver
+#endif
+
+#ifdef CONFIG_SPARC_LEON
+#include "ehci-grlib.c"
+#define PLATFORM_DRIVER		ehci_grlib_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define        PLATFORM_DRIVER         ehci_mv_driver
+#endif
+
+static int __init ehci_hcd_init(void)
+{
+	int retval = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
+				" before uhci_hcd and ohci_hcd, not after\n");
+
+	pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd sitd %zd\n",
+		 hcd_name,
+		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
+		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
+#endif
+
+#ifdef PLATFORM_DRIVER
+	retval = platform_driver_register(&PLATFORM_DRIVER);
+	if (retval < 0)
+		goto clean0;
+#endif
+
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
+	if (retval < 0)
+		goto clean2;
+#endif
+
+#ifdef OF_PLATFORM_DRIVER
+	retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+	if (retval < 0)
+		goto clean3;
+#endif
+
+#ifdef XILINX_OF_PLATFORM_DRIVER
+	retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
+	if (retval < 0)
+		goto clean4;
+#endif
+	return retval;
+
+#ifdef XILINX_OF_PLATFORM_DRIVER
+	/* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
+clean4:
+#endif
+#ifdef OF_PLATFORM_DRIVER
+	platform_driver_unregister(&OF_PLATFORM_DRIVER);
+clean3:
+#endif
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+clean2:
+#endif
+#ifdef PLATFORM_DRIVER
+	platform_driver_unregister(&PLATFORM_DRIVER);
+clean0:
+#endif
+#ifdef CONFIG_DYNAMIC_DEBUG
+	debugfs_remove(ehci_debug_root);
+	ehci_debug_root = NULL;
+#endif
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+	return retval;
+}
+module_init(ehci_hcd_init);
+
+static void __exit ehci_hcd_cleanup(void)
+{
+#ifdef XILINX_OF_PLATFORM_DRIVER
+	platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
+#endif
+#ifdef OF_PLATFORM_DRIVER
+	platform_driver_unregister(&OF_PLATFORM_DRIVER);
+#endif
+#ifdef PLATFORM_DRIVER
+	platform_driver_unregister(&PLATFORM_DRIVER);
+#endif
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#endif
+#ifdef CONFIG_DYNAMIC_DEBUG
+	debugfs_remove(ehci_debug_root);
+#endif
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(ehci_hcd_cleanup);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
new file mode 100644
index 0000000..ce0eaf7
--- /dev/null
+++ b/drivers/usb/host/ehci-hub.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2001-2004 by David Brownell
+ */
+
+/* this file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Root Hub ... the nonsharable stuff
+ *
+ * Registers don't need cpu_to_le32, that happens transparently
+ */
+
+/*-------------------------------------------------------------------------*/
+#include <linux/usb/otg.h>
+
+#define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#ifdef	CONFIG_PM
+
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
+
+static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
+{
+	return !udev->maxchild && udev->persist_enabled &&
+		udev->bus->root_hub->speed < USB_SPEED_HIGH;
+}
+
+/* After a power loss, ports that were owned by the companion must be
+ * reset so that the companion can still own them.
+ */
+static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
+{
+	u32 __iomem	*reg;
+	u32		status;
+	int		port;
+	__le32		buf;
+	struct usb_hcd	*hcd = ehci_to_hcd(ehci);
+
+	if (!ehci->owned_ports)
+		return;
+
+	/*
+	 * USB 1.1 devices are mostly HIDs, which don't need to persist across
+	 * suspends. If we ensure that none of our companion's devices have
+	 * persist_enabled (by looking through all USB 1.1 buses in the system),
+	 * we can skip this and avoid slowing resume down. Devices without
+	 * persist will just get reenumerated shortly after resume anyway.
+	 */
+	if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
+		return;
+
+	/* Make sure the ports are powered */
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			reg = &ehci->regs->port_status[port];
+			status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+			if (!(status & PORT_POWER))
+				ehci_port_power(ehci, port, true);
+		}
+	}
+
+	/* Give the connections some time to appear */
+	msleep(20);
+
+	spin_lock_irq(&ehci->lock);
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			reg = &ehci->regs->port_status[port];
+			status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+
+			/* Port already owned by companion? */
+			if (status & PORT_OWNER)
+				clear_bit(port, &ehci->owned_ports);
+			else if (test_bit(port, &ehci->companion_ports))
+				ehci_writel(ehci, status & ~PORT_PE, reg);
+			else {
+				spin_unlock_irq(&ehci->lock);
+				ehci_hub_control(hcd, SetPortFeature,
+						USB_PORT_FEAT_RESET, port + 1,
+						NULL, 0);
+				spin_lock_irq(&ehci->lock);
+			}
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+
+	if (!ehci->owned_ports)
+		return;
+	msleep(90);		/* Wait for resets to complete */
+
+	spin_lock_irq(&ehci->lock);
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			spin_unlock_irq(&ehci->lock);
+			ehci_hub_control(hcd, GetPortStatus,
+					0, port + 1,
+					(char *) &buf, sizeof(buf));
+			spin_lock_irq(&ehci->lock);
+
+			/* The companion should now own the port,
+			 * but if something went wrong the port must not
+			 * remain enabled.
+			 */
+			reg = &ehci->regs->port_status[port];
+			status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+			if (status & PORT_OWNER)
+				ehci_writel(ehci, status | PORT_CSC, reg);
+			else {
+				ehci_dbg(ehci, "failed handover port %d: %x\n",
+						port + 1, status);
+				ehci_writel(ehci, status & ~PORT_PE, reg);
+			}
+		}
+	}
+
+	ehci->owned_ports = 0;
+	spin_unlock_irq(&ehci->lock);
+}
+
+static int ehci_port_change(struct ehci_hcd *ehci)
+{
+	int i = HCS_N_PORTS(ehci->hcs_params);
+
+	/* First check if the controller indicates a change event */
+
+	if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
+		return 1;
+
+	/*
+	 * Not all controllers appear to update this while going from D3 to D0,
+	 * so check the individual port status registers as well
+	 */
+
+	while (i--)
+		if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
+			return 1;
+
+	return 0;
+}
+
+void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+		bool suspending, bool do_wakeup)
+{
+	int		port;
+	u32		temp;
+
+	/* If remote wakeup is enabled for the root hub but disabled
+	 * for the controller, we must adjust all the port wakeup flags
+	 * when the controller is suspended or resumed.  In all other
+	 * cases they don't need to be changed.
+	 */
+	if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
+		return;
+
+	spin_lock_irq(&ehci->lock);
+
+	/* clear phy low-power mode before changing wakeup flags */
+	if (ehci->has_tdi_phy_lpm) {
+		port = HCS_N_PORTS(ehci->hcs_params);
+		while (port--) {
+			u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[port];
+
+			temp = ehci_readl(ehci, hostpc_reg);
+			ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+		}
+		spin_unlock_irq(&ehci->lock);
+		msleep(5);
+		spin_lock_irq(&ehci->lock);
+	}
+
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		u32 __iomem	*reg = &ehci->regs->port_status[port];
+		u32		t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+		u32		t2 = t1 & ~PORT_WAKE_BITS;
+
+		/* If we are suspending the controller, clear the flags.
+		 * If we are resuming the controller, set the wakeup flags.
+		 */
+		if (!suspending) {
+			if (t1 & PORT_CONNECT)
+				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+			else
+				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+		}
+		ehci_writel(ehci, t2, reg);
+	}
+
+	/* enter phy low-power mode again */
+	if (ehci->has_tdi_phy_lpm) {
+		port = HCS_N_PORTS(ehci->hcs_params);
+		while (port--) {
+			u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[port];
+
+			temp = ehci_readl(ehci, hostpc_reg);
+			ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+		}
+	}
+
+	/* Does the root hub have a port wakeup pending? */
+	if (!suspending && ehci_port_change(ehci))
+		usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+	spin_unlock_irq(&ehci->lock);
+}
+EXPORT_SYMBOL_GPL(ehci_adjust_port_wakeup_flags);
+
+static int ehci_bus_suspend (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	int			port;
+	int			mask;
+	int			changed;
+	bool			fs_idle_delay;
+
+	ehci_dbg(ehci, "suspend root hub\n");
+
+	if (time_before (jiffies, ehci->next_statechange))
+		msleep(5);
+
+	/* stop the schedules */
+	ehci_quiesce(ehci);
+
+	spin_lock_irq (&ehci->lock);
+	if (ehci->rh_state < EHCI_RH_RUNNING)
+		goto done;
+
+	/* Once the controller is stopped, port resumes that are already
+	 * in progress won't complete.  Hence if remote wakeup is enabled
+	 * for the root hub and any ports are in the middle of a resume or
+	 * remote wakeup, we must fail the suspend.
+	 */
+	if (hcd->self.root_hub->do_remote_wakeup) {
+		if (ehci->resuming_ports) {
+			spin_unlock_irq(&ehci->lock);
+			ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+			return -EBUSY;
+		}
+	}
+
+	/* Unlike other USB host controller types, EHCI doesn't have
+	 * any notion of "global" or bus-wide suspend.  The driver has
+	 * to manually suspend all the active unsuspended ports, and
+	 * then manually resume them in the bus_resume() routine.
+	 */
+	ehci->bus_suspended = 0;
+	ehci->owned_ports = 0;
+	changed = 0;
+	fs_idle_delay = false;
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		u32 __iomem	*reg = &ehci->regs->port_status [port];
+		u32		t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+		u32		t2 = t1 & ~PORT_WAKE_BITS;
+
+		/* keep track of which ports we suspend */
+		if (t1 & PORT_OWNER)
+			set_bit(port, &ehci->owned_ports);
+		else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
+			t2 |= PORT_SUSPEND;
+			set_bit(port, &ehci->bus_suspended);
+		}
+
+		/* enable remote wakeup on all ports, if told to do so */
+		if (hcd->self.root_hub->do_remote_wakeup) {
+			/* only enable appropriate wake bits, otherwise the
+			 * hardware cannot enter phy low-power mode. If a race
+			 * condition happens here (connection change while the
+			 * bits are set), port change detection will fix it.
+			 */
+			if (t1 & PORT_CONNECT)
+				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+			else
+				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+		}
+
+		if (t1 != t2) {
+			/*
+			 * On some controllers, Wake-On-Disconnect will
+			 * generate false wakeup signals until the bus
+			 * switches over to full-speed idle.  For their
+			 * sake, add a delay if we need one.
+			 */
+			if ((t2 & PORT_WKDISC_E) &&
+					ehci_port_speed(ehci, t2) ==
+						USB_PORT_STAT_HIGH_SPEED)
+				fs_idle_delay = true;
+			ehci_writel(ehci, t2, reg);
+			changed = 1;
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+
+	if (changed && ehci_has_fsl_susp_errata(ehci))
+		/*
+		 * Wait for at least 10 milliseconds to ensure the controller
+		 * enters the suspend state before initiating a port resume
+		 * using the Force Port Resume bit (not EHCI-compatible).
+		 */
+		usleep_range(10000, 20000);
+
+	if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+		/*
+		 * Wait for HCD to enter low-power mode or for the bus
+		 * to switch to full-speed idle.
+		 */
+		usleep_range(5000, 5500);
+	}
+
+	if (changed && ehci->has_tdi_phy_lpm) {
+		spin_lock_irq(&ehci->lock);
+		port = HCS_N_PORTS(ehci->hcs_params);
+		while (port--) {
+			u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[port];
+			u32		t3;
+
+			t3 = ehci_readl(ehci, hostpc_reg);
+			ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+			t3 = ehci_readl(ehci, hostpc_reg);
+			ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
+					port, (t3 & HOSTPC_PHCD) ?
+					"succeeded" : "failed");
+		}
+		spin_unlock_irq(&ehci->lock);
+	}
+
+	/* Apparently some devices need a >= 1-uframe delay here */
+	if (ehci->bus_suspended)
+		udelay(150);
+
+	/* turn off now-idle HC */
+	ehci_halt (ehci);
+
+	spin_lock_irq(&ehci->lock);
+	if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
+		ehci_handle_controller_death(ehci);
+	if (ehci->rh_state != EHCI_RH_RUNNING)
+		goto done;
+	ehci->rh_state = EHCI_RH_SUSPENDED;
+
+	unlink_empty_async_suspended(ehci);
+
+	/* Any IAA cycle that started before the suspend is now invalid */
+	end_iaa_cycle(ehci);
+	ehci_handle_start_intr_unlinks(ehci);
+	ehci_handle_intr_unlinks(ehci);
+	end_free_itds(ehci);
+
+	/* allow remote wakeup */
+	mask = INTR_MASK;
+	if (!hcd->self.root_hub->do_remote_wakeup)
+		mask &= ~STS_PCD;
+	ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+	ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ done:
+	ehci->next_statechange = jiffies + msecs_to_jiffies(10);
+	ehci->enabled_hrtimer_events = 0;
+	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+	spin_unlock_irq (&ehci->lock);
+
+	hrtimer_cancel(&ehci->hrtimer);
+	return 0;
+}
+
+
+/* caller has locked the root hub, and should reset/reinit on error */
+static int ehci_bus_resume (struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
+	u32			temp;
+	u32			power_okay;
+	int			i;
+	unsigned long		resume_needed = 0;
+
+	if (time_before (jiffies, ehci->next_statechange))
+		msleep(5);
+	spin_lock_irq (&ehci->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
+		goto shutdown;
+
+	if (unlikely(ehci->debug)) {
+		if (!dbgp_reset_prep(hcd))
+			ehci->debug = NULL;
+		else
+			dbgp_external_startup(hcd);
+	}
+
+	/* Ideally we've got a real resume here, and no port's power
+	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
+	 * could instead be restoring a swsusp snapshot -- so that BIOS was
+	 * the last user of the controller, not reset/pm hardware keeping
+	 * state we gave to it.
+	 */
+	power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
+	ehci_dbg(ehci, "resume root hub%s\n",
+			power_okay ? "" : " after power loss");
+
+	/* at least some APM implementations will try to deliver
+	 * IRQs right away, so delay them until we're ready.
+	 */
+	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+
+	/* re-init operational registers */
+	ehci_writel(ehci, 0, &ehci->regs->segment);
+	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+	ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
+
+	/* restore CMD_RUN, framelist size, and irq threshold */
+	ehci->command |= CMD_RUN;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	ehci->rh_state = EHCI_RH_RUNNING;
+
+	/*
+	 * According to Bugzilla #8190, the port status for some controllers
+	 * will be wrong without a delay. With this wrong status, the port
+	 * appears enabled but is neither suspended nor resumed.
+	 */
+	i = HCS_N_PORTS(ehci->hcs_params);
+	while (i--) {
+		temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+		if ((temp & PORT_PE) &&
+				!(temp & (PORT_SUSPEND | PORT_RESUME))) {
+			ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp);
+			spin_unlock_irq(&ehci->lock);
+			msleep(8);
+			spin_lock_irq(&ehci->lock);
+			break;
+		}
+	}
+
+	if (ehci->shutdown)
+		goto shutdown;
+
+	/* clear phy low-power mode before resume */
+	if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
+		i = HCS_N_PORTS(ehci->hcs_params);
+		while (i--) {
+			if (test_bit(i, &ehci->bus_suspended)) {
+				u32 __iomem	*hostpc_reg =
+							&ehci->regs->hostpc[i];
+
+				temp = ehci_readl(ehci, hostpc_reg);
+				ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+						hostpc_reg);
+			}
+		}
+		spin_unlock_irq(&ehci->lock);
+		msleep(5);
+		spin_lock_irq(&ehci->lock);
+		if (ehci->shutdown)
+			goto shutdown;
+	}
+
+	/* manually resume the ports we suspended during bus_suspend() */
+	i = HCS_N_PORTS (ehci->hcs_params);
+	while (i--) {
+		temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+		if (test_bit(i, &ehci->bus_suspended) &&
+				(temp & PORT_SUSPEND)) {
+			temp |= PORT_RESUME;
+			set_bit(i, &resume_needed);
+		}
+		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+	}
+
+	/*
+	 * msleep for USB_RESUME_TIMEOUT ms only if a port is actually being
+	 * resumed
+	 */
+	if (resume_needed) {
+		spin_unlock_irq(&ehci->lock);
+		msleep(USB_RESUME_TIMEOUT);
+		spin_lock_irq(&ehci->lock);
+		if (ehci->shutdown)
+			goto shutdown;
+	}
+
+	i = HCS_N_PORTS (ehci->hcs_params);
+	while (i--) {
+		temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+		if (test_bit(i, &resume_needed)) {
+			temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
+			ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+		}
+	}
+
+	ehci->next_statechange = jiffies + msecs_to_jiffies(5);
+	spin_unlock_irq(&ehci->lock);
+
+	ehci_handover_companion_ports(ehci);
+
+	/* Now we can safely re-enable irqs */
+	spin_lock_irq(&ehci->lock);
+	if (ehci->shutdown)
+		goto shutdown;
+	ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+	(void) ehci_readl(ehci, &ehci->regs->intr_enable);
+	spin_unlock_irq(&ehci->lock);
+
+	return 0;
+
+ shutdown:
+	spin_unlock_irq(&ehci->lock);
+	return -ESHUTDOWN;
+}
+
+static unsigned long ehci_get_resuming_ports(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+
+	return ehci->resuming_ports;
+}
+
+#else
+
+#define ehci_bus_suspend	NULL
+#define ehci_bus_resume		NULL
+#define ehci_get_resuming_ports	NULL
+
+#endif	/* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Sets the owner of a port
+ */
+static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
+{
+	u32 __iomem		*status_reg;
+	u32			port_status;
+	int 			try;
+
+	status_reg = &ehci->regs->port_status[portnum];
+
+	/*
+	 * The controller won't set the OWNER bit if the port is
+	 * enabled, so this loop will sometimes require at least two
+	 * iterations: one to disable the port and one to set OWNER.
+	 */
+	for (try = 4; try > 0; --try) {
+		spin_lock_irq(&ehci->lock);
+		port_status = ehci_readl(ehci, status_reg);
+		if ((port_status & PORT_OWNER) == new_owner
+				|| (port_status & (PORT_OWNER | PORT_CONNECT))
+					== 0)
+			try = 0;
+		else {
+			port_status ^= PORT_OWNER;
+			port_status &= ~(PORT_PE | PORT_RWC_BITS);
+			ehci_writel(ehci, port_status, status_reg);
+		}
+		spin_unlock_irq(&ehci->lock);
+		if (try > 1)
+			msleep(5);
+	}
+}
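+
+/*
+ * set_owner() is how ehci_relinquish_port() below hands a root-hub port
+ * over to the companion controller.  The hardware ignores writes to
+ * PORT_OWNER while the port is still enabled, which is why the loop
+ * above clears PORT_PE first and then retries the OWNER write.
+ */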
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete (
+	struct ehci_hcd	*ehci,
+	int		index,
+	u32 __iomem	*status_reg,
+	int		port_status
+) {
+	if (!(port_status & PORT_CONNECT))
+		return port_status;
+
+	/* if reset finished and it's still not enabled -- handoff */
+	if (!(port_status & PORT_PE)) {
+
+		/* with integrated TT, there's nobody to hand it to! */
+		if (ehci_is_TDI(ehci)) {
+			ehci_dbg (ehci,
+				"Failed to enable port %d on root hub TT\n",
+				index+1);
+			return port_status;
+		}
+
+		ehci_dbg (ehci, "port %d full speed --> companion\n",
+			index + 1);
+
+		// what happens if HCS_N_CC(params) == 0 ?
+		port_status |= PORT_OWNER;
+		port_status &= ~PORT_RWC_BITS;
+		ehci_writel(ehci, port_status, status_reg);
+
+		/* ensure 440EPX ohci controller state is operational */
+		if (ehci->has_amcc_usb23)
+			set_ohci_hcfs(ehci, 1);
+	} else {
+		ehci_dbg(ehci, "port %d reset complete, port enabled\n",
+			index + 1);
+		/* ensure 440EPx ohci controller state is suspended */
+		if (ehci->has_amcc_usb23)
+			set_ohci_hcfs(ehci, 0);
+	}
+
+	return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
+	u32		temp, status;
+	u32		mask;
+	int		ports, i, retval = 1;
+	unsigned long	flags;
+	u32		ppcd = ~0;
+
+	/* init status to no-changes */
+	buf [0] = 0;
+	ports = HCS_N_PORTS (ehci->hcs_params);
+	if (ports > 7) {
+		buf [1] = 0;
+		retval++;
+	}
+
+	/* Inform the core about resumes-in-progress by returning
+	 * a non-zero value even if there are no status changes.
+	 */
+	status = ehci->resuming_ports;
+
+	/* Some boards (mostly VIA?) report bogus overcurrent indications,
+	 * causing massive log spam unless we completely ignore them.  It
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
+	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+	 * PORT_POWER; that's surprising, but maybe within-spec.
+	 */
+	if (!ignore_oc)
+		mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	else
+		mask = PORT_CSC | PORT_PEC;
+	// PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
+
+	/* no hub change reports (bit 0) for now (power, ...) */
+
+	/* port N changes (bit N)? */
+	spin_lock_irqsave (&ehci->lock, flags);
+
+	/* get per-port change detect bits */
+	if (ehci->has_ppcd)
+		ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16;
+
+	for (i = 0; i < ports; i++) {
+		/* leverage per-port change bits feature */
+		if (ppcd & (1 << i))
+			temp = ehci_readl(ehci, &ehci->regs->port_status[i]);
+		else
+			temp = 0;
+
+		/*
+		 * Return status information even for ports with OWNER set.
+		 * Otherwise hub_wq wouldn't see the disconnect event when a
+		 * high-speed device is switched over to the companion
+		 * controller by the user.
+		 */
+
+		if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
+				|| (ehci->reset_done[i] && time_after_eq(
+					jiffies, ehci->reset_done[i]))) {
+			if (i < 7)
+			    buf [0] |= 1 << (i + 1);
+			else
+			    buf [1] |= 1 << (i - 7);
+			status = STS_PCD;
+		}
+	}
+
+	/* If a resume is in progress, make sure it can finish */
+	if (ehci->resuming_ports)
+		mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
+	spin_unlock_irqrestore (&ehci->lock, flags);
+	return status ? retval : 0;
+}
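+
+/*
+ * The change bitmap built above follows the hub class layout: bit 0 of
+ * buf[0] is the hub-wide change bit (unused here) and bit N reports
+ * port N.  For example, a pending change on port 3 alone comes back as
+ * buf[0] == 0x08 in a one-byte (retval == 1) report.
+ */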
+
+/*-------------------------------------------------------------------------*/
+
+static void
+ehci_hub_descriptor (
+	struct ehci_hcd			*ehci,
+	struct usb_hub_descriptor	*desc
+) {
+	int		ports = HCS_N_PORTS (ehci->hcs_params);
+	u16		temp;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bPwrOn2PwrGood = 10;	/* ehci 1.0, 2.3.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+	temp = HUB_CHAR_INDV_PORT_OCPM;	/* per-port overcurrent reporting */
+	if (HCS_PPC (ehci->hcs_params))
+		temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
+	else
+		temp |= HUB_CHAR_NO_LPSM; /* no power switching */
+#if 0
+// re-enable when we support USB_PORT_FEAT_INDICATOR below.
+	if (HCS_INDICATOR (ehci->hcs_params))
+		temp |= HUB_CHAR_PORTIND; /* per-port indicators (LEDs) */
+#endif
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+}
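+
+/*
+ * Worked example of the sizing above: a 6-port root hub gives
+ * temp = 1 + 6/8 = 1, so bDescLength = 7 + 2 * 1 = 9 bytes: seven
+ * fixed bytes plus one byte each for the DeviceRemovable and
+ * PortPwrCtrlMask bitmaps.
+ */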
+
+/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+
+static void usb_ehset_completion(struct urb *urb)
+{
+	struct completion  *done = urb->context;
+
+	complete(done);
+}
+static int submit_single_step_set_feature(
+	struct usb_hcd	*hcd,
+	struct urb	*urb,
+	int		is_setup
+);
+
+/*
+ * Allocate and initialize a control URB. This request will be used by the
+ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
+ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
+ * Returns NULL on failure.
+ */
+static struct urb *request_single_step_set_feature_urb(
+	struct usb_device	*udev,
+	void			*dr,
+	void			*buf,
+	struct completion	*done
+) {
+	struct urb *urb;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+	struct usb_host_endpoint *ep;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return NULL;
+
+	urb->pipe = usb_rcvctrlpipe(udev, 0);
+	ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+				[usb_pipeendpoint(urb->pipe)];
+	if (!ep) {
+		usb_free_urb(urb);
+		return NULL;
+	}
+
+	urb->ep = ep;
+	urb->dev = udev;
+	urb->setup_packet = (void *)dr;
+	urb->transfer_buffer = buf;
+	urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+	urb->complete = usb_ehset_completion;
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->transfer_flags = URB_DIR_IN;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	urb->setup_dma = dma_map_single(
+			hcd->self.sysdev,
+			urb->setup_packet,
+			sizeof(struct usb_ctrlrequest),
+			DMA_TO_DEVICE);
+	urb->transfer_dma = dma_map_single(
+			hcd->self.sysdev,
+			urb->transfer_buffer,
+			urb->transfer_buffer_length,
+			DMA_FROM_DEVICE);
+	urb->context = done;
+	return urb;
+}
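+
+/*
+ * Note that because this URB is never passed through usb_submit_urb(),
+ * the bookkeeping that path would do (urb refcount, use_count, urbnum)
+ * and the DMA mapping of the setup packet and transfer buffer are all
+ * done by hand above.
+ */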
+
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+	int retval = -ENOMEM;
+	struct usb_ctrlrequest *dr;
+	struct urb *urb;
+	struct usb_device *udev;
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct usb_device_descriptor *buf;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	/* Obtain udev of the rhub's child port */
+	udev = usb_hub_find_child(hcd->self.root_hub, port);
+	if (!udev) {
+		ehci_err(ehci, "No device attached to the RootHub\n");
+		return -ENODEV;
+	}
+	buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	if (!dr) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* Fill Setup packet for GetDescriptor */
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+	dr->wIndex = 0;
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+	if (!urb)
+		goto cleanup;
+
+	/* Submit just the SETUP stage */
+	retval = submit_single_step_set_feature(hcd, urb, 1);
+	if (retval)
+		goto out1;
+	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+		goto out1;
+	}
+	msleep(15 * 1000);
+
+	/* Complete remaining DATA and STATUS stages using the same URB */
+	urb->status = -EINPROGRESS;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	retval = submit_single_step_set_feature(hcd, urb, 0);
+	if (!retval && !wait_for_completion_timeout(&done,
+						msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+	}
+out1:
+	usb_free_urb(urb);
+cleanup:
+	kfree(dr);
+	kfree(buf);
+	return retval;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
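+
+/*
+ * Summary of the EHSET test above: the GetDescriptor(Device) control
+ * transfer is deliberately split in two.  The SETUP stage goes out
+ * first; after the mandated 15 second pause, the DATA and STATUS
+ * stages complete on the same URB, as the SINGLE_STEP_SET_FEATURE
+ * procedure requires.
+ */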
+/*-------------------------------------------------------------------------*/
+
+int ehci_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+) {
+	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
+	int		ports = HCS_N_PORTS (ehci->hcs_params);
+	u32 __iomem	*status_reg, *hostpc_reg;
+	u32		temp, temp1, status;
+	unsigned long	flags;
+	int		retval = 0;
+	unsigned	selector;
+
+	/*
+	 * Avoid underflow while calculating (wIndex & 0xff) - 1.
+	 * The compiler might deduce that wIndex can never be 0 and then
+	 * optimize away the tests for !wIndex below.  "temp -= (temp > 0)"
+	 * maps 0 to 0 and any n > 0 to n - 1 without computing 0 - 1.
+	 */
+	temp = wIndex & 0xff;
+	temp -= (temp > 0);
+	status_reg = &ehci->regs->port_status[temp];
+	hostpc_reg = &ehci->regs->hostpc[temp];
+
+	/*
+	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+	 * (track current state ourselves) ... blink for diagnostics,
+	 * power, "this is the one", etc.  EHCI spec supports this.
+	 */
+
+	spin_lock_irqsave (&ehci->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = ehci_readl(ehci, status_reg);
+		temp &= ~PORT_RWC_BITS;
+
+		/*
+		 * Even if OWNER is set, so the port is owned by the
+		 * companion controller, hub_wq needs to be able to clear
+		 * the port-change status bits (especially
+		 * USB_PORT_STAT_C_CONNECTION).
+		 */
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			ehci_writel(ehci, temp & ~PORT_PE, status_reg);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			ehci_writel(ehci, temp | PORT_PEC, status_reg);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (temp & PORT_RESET)
+				goto error;
+			if (ehci->no_selective_suspend)
+				break;
+#ifdef CONFIG_USB_OTG
+			if ((hcd->self.otg_port == (wIndex + 1))
+			    && hcd->self.b_hnp_enable) {
+				otg_start_hnp(hcd->usb_phy->otg);
+				break;
+			}
+#endif
+			if (!(temp & PORT_SUSPEND))
+				break;
+			if ((temp & PORT_PE) == 0)
+				goto error;
+
+			/* clear phy low-power mode before resume */
+			if (ehci->has_tdi_phy_lpm) {
+				temp1 = ehci_readl(ehci, hostpc_reg);
+				ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+						hostpc_reg);
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				msleep(5);/* wait to leave low-power mode */
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+			/* resume signaling for 20 msec */
+			temp &= ~PORT_WAKE_BITS;
+			ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+			ehci->reset_done[wIndex] = jiffies
+					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			set_bit(wIndex, &ehci->resuming_ports);
+			usb_hcd_start_port_resume(&hcd->self, wIndex);
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			clear_bit(wIndex, &ehci->port_c_suspend);
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(ehci->hcs_params)) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				ehci_port_power(ehci, wIndex, false);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			ehci_writel(ehci, temp | PORT_CSC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			ehci_writel(ehci, temp | PORT_OCC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* GetPortStatus clears reset */
+			break;
+		default:
+			goto error;
+		}
+		ehci_readl(ehci, &ehci->regs->command);	/* unblock posted write */
+		break;
+	case GetHubDescriptor:
+		ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
+			buf);
+		break;
+	case GetHubStatus:
+		/* no hub-wide feature/status flags */
+		memset (buf, 0, 4);
+		//cpu_to_le32s ((u32 *) buf);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		status = 0;
+		temp = ehci_readl(ehci, status_reg);
+
+		// wPortChange bits
+		if (temp & PORT_CSC)
+			status |= USB_PORT_STAT_C_CONNECTION << 16;
+		if (temp & PORT_PEC)
+			status |= USB_PORT_STAT_C_ENABLE << 16;
+
+		if ((temp & PORT_OCC) && !ignore_oc) {
+			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+			/*
+			 * Hubs should disable port power on over-current.
+			 * However, not all EHCI implementations do this
+			 * automatically, even if they _do_ support per-port
+			 * power switching; they're allowed to just limit the
+			 * current.  hub_wq will turn the power back on.
+			 */
+			if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle))
+					&& HCS_PPC(ehci->hcs_params)) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				ehci_port_power(ehci, wIndex, false);
+				spin_lock_irqsave(&ehci->lock, flags);
+				temp = ehci_readl(ehci, status_reg);
+			}
+		}
+
+		/* no reset or resume pending */
+		if (!ehci->reset_done[wIndex]) {
+
+			/* Remote Wakeup received? */
+			if (temp & PORT_RESUME) {
+				/* resume signaling for 20 msec */
+				ehci->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+				usb_hcd_start_port_resume(&hcd->self, wIndex);
+				set_bit(wIndex, &ehci->resuming_ports);
+				/* check the port again */
+				mod_timer(&ehci_to_hcd(ehci)->rh_timer,
+						ehci->reset_done[wIndex]);
+			}
+
+		/* reset or resume not yet complete */
+		} else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
+			;	/* wait until it is complete */
+
+		/* resume completed */
+		} else if (test_bit(wIndex, &ehci->resuming_ports)) {
+			clear_bit(wIndex, &ehci->suspended_ports);
+			set_bit(wIndex, &ehci->port_c_suspend);
+			ehci->reset_done[wIndex] = 0;
+			usb_hcd_end_port_resume(&hcd->self, wIndex);
+
+			/* stop resume signaling */
+			temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
+			ehci_writel(ehci, temp, status_reg);
+			clear_bit(wIndex, &ehci->resuming_ports);
+			retval = ehci_handshake(ehci, status_reg,
+					PORT_RESUME, 0, 2000 /* 2msec */);
+			if (retval != 0) {
+				ehci_err(ehci, "port %d resume error %d\n",
+						wIndex + 1, retval);
+				goto error;
+			}
+			temp = ehci_readl(ehci, status_reg);
+
+		/* whoever resets must GetPortStatus to complete it!! */
+		} else {
+			status |= USB_PORT_STAT_C_RESET << 16;
+			ehci->reset_done [wIndex] = 0;
+
+			/* force reset to complete */
+			ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
+					status_reg);
+			/* REVISIT:  some hardware needs 550+ usec to clear
+			 * this bit; seems too long to spin routinely...
+			 */
+			retval = ehci_handshake(ehci, status_reg,
+					PORT_RESET, 0, 1000);
+			if (retval != 0) {
+				ehci_err (ehci, "port %d reset error %d\n",
+					wIndex + 1, retval);
+				goto error;
+			}
+
+			/* see what we found out */
+			temp = check_reset_complete (ehci, wIndex, status_reg,
+					ehci_readl(ehci, status_reg));
+		}
+
+		/* transfer dedicated ports to the companion hc */
+		if ((temp & PORT_CONNECT) &&
+				test_bit(wIndex, &ehci->companion_ports)) {
+			temp &= ~PORT_RWC_BITS;
+			temp |= PORT_OWNER;
+			ehci_writel(ehci, temp, status_reg);
+			ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
+			temp = ehci_readl(ehci, status_reg);
+		}
+
+		/*
+		 * Even if OWNER is set, there's no harm letting hub_wq
+		 * see the wPortStatus values (they should all be 0 except
+		 * for PORT_POWER anyway).
+		 */
+
+		if (temp & PORT_CONNECT) {
+			status |= USB_PORT_STAT_CONNECTION;
+			// status may be from integrated TT
+			if (ehci->has_hostpc) {
+				temp1 = ehci_readl(ehci, hostpc_reg);
+				status |= ehci_port_speed(ehci, temp1);
+			} else
+				status |= ehci_port_speed(ehci, temp);
+		}
+		if (temp & PORT_PE)
+			status |= USB_PORT_STAT_ENABLE;
+
+		/* maybe the port was unsuspended without our knowledge */
+		if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+			status |= USB_PORT_STAT_SUSPEND;
+		} else if (test_bit(wIndex, &ehci->suspended_ports)) {
+			clear_bit(wIndex, &ehci->suspended_ports);
+			clear_bit(wIndex, &ehci->resuming_ports);
+			ehci->reset_done[wIndex] = 0;
+			if (temp & PORT_PE)
+				set_bit(wIndex, &ehci->port_c_suspend);
+			usb_hcd_end_port_resume(&hcd->self, wIndex);
+		}
+
+		if (temp & PORT_OC)
+			status |= USB_PORT_STAT_OVERCURRENT;
+		if (temp & PORT_RESET)
+			status |= USB_PORT_STAT_RESET;
+		if (temp & PORT_POWER)
+			status |= USB_PORT_STAT_POWER;
+		if (test_bit(wIndex, &ehci->port_c_suspend))
+			status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+		if (status & ~0xffff)	/* only if wPortChange is interesting */
+			dbg_port(ehci, "GetStatus", wIndex + 1, temp);
+		put_unaligned_le32(status, buf);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		selector = wIndex >> 8;
+		wIndex &= 0xff;
+		if (unlikely(ehci->debug)) {
+			/* While the debug port is in use, any
+			 * feature request for it must be denied */
+			if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
+			    (readl(&ehci->debug->control) & DBGP_ENABLED)) {
+				retval = -ENODEV;
+				goto error_exit;
+			}
+		}
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = ehci_readl(ehci, status_reg);
+		if (temp & PORT_OWNER)
+			break;
+
+		temp &= ~PORT_RWC_BITS;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if (ehci->no_selective_suspend)
+				break;
+			if ((temp & PORT_PE) == 0
+					|| (temp & PORT_RESET) != 0)
+				goto error;
+
+			/* After the above check the port must be connected.
+			 * Set the appropriate bits so the PHY can enter low-
+			 * power mode if the tdi_phy_lpm feature is present.
+			 */
+			temp &= ~PORT_WKCONN_E;
+			temp |= PORT_WKDISC_E | PORT_WKOC_E;
+			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+			if (ehci->has_tdi_phy_lpm) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				msleep(5);/* 5ms for HCD enter low pwr mode */
+				spin_lock_irqsave(&ehci->lock, flags);
+				temp1 = ehci_readl(ehci, hostpc_reg);
+				ehci_writel(ehci, temp1 | HOSTPC_PHCD,
+					hostpc_reg);
+				temp1 = ehci_readl(ehci, hostpc_reg);
+				ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+					wIndex, (temp1 & HOSTPC_PHCD) ?
+					"succeeded" : "failed");
+			}
+			if (ehci_has_fsl_susp_errata(ehci)) {
+				/* 10ms for HCD enter suspend */
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				usleep_range(10000, 20000);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+			set_bit(wIndex, &ehci->suspended_ports);
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(ehci->hcs_params)) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				ehci_port_power(ehci, wIndex, true);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+			break;
+		case USB_PORT_FEAT_RESET:
+			if (temp & (PORT_SUSPEND|PORT_RESUME))
+				goto error;
+			/* line status bits may report this as low speed,
+			 * which can be fine if this root hub has a
+			 * transaction translator built in.
+			 */
+			if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
+					&& !ehci_is_TDI(ehci)
+					&& PORT_USB11 (temp)) {
+				ehci_dbg (ehci,
+					"port %d low speed --> companion\n",
+					wIndex + 1);
+				temp |= PORT_OWNER;
+			} else {
+				temp |= PORT_RESET;
+				temp &= ~PORT_PE;
+
+				/*
+				 * caller must wait, then call GetPortStatus
+				 * usb 2.0 spec says 50 ms resets on root
+				 */
+				ehci->reset_done [wIndex] = jiffies
+						+ msecs_to_jiffies (50);
+
+				/*
+				 * Force full-speed connect for FSL high-speed
+				 * erratum; disable HS Chirp by setting PFSC bit
+				 */
+				if (ehci_has_fsl_hs_errata(ehci))
+					temp |= (1 << PORTSC_FSL_PFSC);
+			}
+			ehci_writel(ehci, temp, status_reg);
+			break;
+
+		/* For downstream facing ports (these):  one hub port is put
+		 * into test mode according to USB2 11.24.2.13, then the hub
+		 * must be reset (which for root hub now means rmmod+modprobe,
+		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+		 * about the EHCI-specific stuff.
+		 */
+		case USB_PORT_FEAT_TEST:
+#ifdef CONFIG_USB_HCD_TEST_MODE
+			if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				retval = ehset_single_step_set_feature(hcd,
+								wIndex + 1);
+				spin_lock_irqsave(&ehci->lock, flags);
+				break;
+			}
+#endif
+			if (!selector || selector > 5)
+				goto error;
+			spin_unlock_irqrestore(&ehci->lock, flags);
+			ehci_quiesce(ehci);
+			spin_lock_irqsave(&ehci->lock, flags);
+
+			/* Put all enabled ports into suspend */
+			while (ports--) {
+				u32 __iomem *sreg =
+						&ehci->regs->port_status[ports];
+
+				temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+				if (temp & PORT_PE)
+					ehci_writel(ehci, temp | PORT_SUSPEND,
+							sreg);
+			}
+
+			spin_unlock_irqrestore(&ehci->lock, flags);
+			ehci_halt(ehci);
+			spin_lock_irqsave(&ehci->lock, flags);
+
+			temp = ehci_readl(ehci, status_reg);
+			temp |= selector << 16;
+			ehci_writel(ehci, temp, status_reg);
+			break;
+
+		default:
+			goto error;
+		}
+		ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
+		break;
+
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+error_exit:
+	spin_unlock_irqrestore (&ehci->lock, flags);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(ehci_hub_control);
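+
+/*
+ * Conventions used by ehci_hub_control(): the low byte of wIndex
+ * carries a 1-based port number, and for SetPortFeature with
+ * USB_PORT_FEAT_TEST the high byte carries the test selector (1..5),
+ * which lands in the PORTSC test-control field (bits 19:16) via
+ * "selector << 16" above.
+ */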
+
+static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+
+	if (ehci_is_TDI(ehci))
+		return;
+	set_owner(ehci, --portnum, PORT_OWNER);
+}
+
+static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	u32 __iomem		*reg;
+
+	if (ehci_is_TDI(ehci))
+		return 0;
+	reg = &ehci->regs->port_status[portnum - 1];
+	return ehci_readl(ehci, reg) & PORT_OWNER;
+}
+
+static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable)
+{
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	u32 __iomem *status_reg = &ehci->regs->port_status[portnum];
+	u32 temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS;
+
+	if (enable)
+		ehci_writel(ehci, temp | PORT_POWER, status_reg);
+	else
+		ehci_writel(ehci, temp & ~PORT_POWER, status_reg);
+
+	if (hcd->driver->port_power)
+		hcd->driver->port_power(hcd, portnum, enable);
+
+	return 0;
+}
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
new file mode 100644
index 0000000..21307d8
--- /dev/null
+++ b/drivers/usb/host/ehci-mem.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2001 by David Brownell
+ */
+
+/* this file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * There are basically three types of memory:
+ *	- data used only by the HCD ... kmalloc is fine
+ *	- async and periodic schedules, shared by HC and HCD ... these
+ *	  need to use dma_pool or dma_alloc_coherent
+ *	- driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
+				  dma_addr_t dma)
+{
+	memset (qtd, 0, sizeof *qtd);
+	qtd->qtd_dma = dma;
+	qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+	qtd->hw_next = EHCI_LIST_END(ehci);
+	qtd->hw_alt_next = EHCI_LIST_END(ehci);
+	INIT_LIST_HEAD (&qtd->qtd_list);
+}
+
+static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
+{
+	struct ehci_qtd		*qtd;
+	dma_addr_t		dma;
+
+	qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
+	if (qtd != NULL) {
+		ehci_qtd_init(ehci, qtd, dma);
+	}
+	return qtd;
+}
+
+static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
+{
+	dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	/* clean qtds first, and know this is not linked */
+	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
+		ehci_dbg (ehci, "unused qh not empty!\n");
+		BUG ();
+	}
+	if (qh->dummy)
+		ehci_qtd_free (ehci, qh->dummy);
+	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+	kfree(qh);
+}
+
+static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
+{
+	struct ehci_qh		*qh;
+	dma_addr_t		dma;
+
+	qh = kzalloc(sizeof *qh, GFP_ATOMIC);
+	if (!qh)
+		goto done;
+	qh->hw = (struct ehci_qh_hw *)
+		dma_pool_alloc(ehci->qh_pool, flags, &dma);
+	if (!qh->hw)
+		goto fail;
+	memset(qh->hw, 0, sizeof *qh->hw);
+	qh->qh_dma = dma;
+	// INIT_LIST_HEAD (&qh->qh_list);
+	INIT_LIST_HEAD (&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->unlink_node);
+
+	/* dummy td enables safe urb queuing */
+	qh->dummy = ehci_qtd_alloc (ehci, flags);
+	if (qh->dummy == NULL) {
+		ehci_dbg (ehci, "no dummy td\n");
+		goto fail1;
+	}
+done:
+	return qh;
+fail1:
+	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+fail:
+	kfree(qh);
+	return NULL;
+}
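+
+/*
+ * The dummy qtd allocated above stays at the tail of the QH's qtd list,
+ * so qtds for a new URB can be linked in behind it while the controller
+ * may still be walking the list; that is what the "dummy td enables
+ * safe urb queuing" note refers to.
+ */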
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void ehci_mem_cleanup (struct ehci_hcd *ehci)
+{
+	if (ehci->async)
+		qh_destroy(ehci, ehci->async);
+	ehci->async = NULL;
+
+	if (ehci->dummy)
+		qh_destroy(ehci, ehci->dummy);
+	ehci->dummy = NULL;
+
+	/* DMA consistent memory and pools */
+	dma_pool_destroy(ehci->qtd_pool);
+	ehci->qtd_pool = NULL;
+	dma_pool_destroy(ehci->qh_pool);
+	ehci->qh_pool = NULL;
+	dma_pool_destroy(ehci->itd_pool);
+	ehci->itd_pool = NULL;
+	dma_pool_destroy(ehci->sitd_pool);
+	ehci->sitd_pool = NULL;
+
+	if (ehci->periodic)
+		dma_free_coherent(ehci_to_hcd(ehci)->self.sysdev,
+			ehci->periodic_size * sizeof (u32),
+			ehci->periodic, ehci->periodic_dma);
+	ehci->periodic = NULL;
+
+	/* shadow periodic table */
+	kfree(ehci->pshadow);
+	ehci->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
+{
+	int i;
+
+	/* QTDs for control/bulk/intr transfers */
+	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
+			ehci_to_hcd(ehci)->self.sysdev,
+			sizeof (struct ehci_qtd),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!ehci->qtd_pool) {
+		goto fail;
+	}
+
+	/* QHs for control/bulk/intr transfers */
+	ehci->qh_pool = dma_pool_create ("ehci_qh",
+			ehci_to_hcd(ehci)->self.sysdev,
+			sizeof(struct ehci_qh_hw),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!ehci->qh_pool) {
+		goto fail;
+	}
+	ehci->async = ehci_qh_alloc (ehci, flags);
+	if (!ehci->async) {
+		goto fail;
+	}
+
+	/* ITD for high speed ISO transfers */
+	ehci->itd_pool = dma_pool_create ("ehci_itd",
+			ehci_to_hcd(ehci)->self.sysdev,
+			sizeof (struct ehci_itd),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!ehci->itd_pool) {
+		goto fail;
+	}
+
+	/* SITD for full/low speed split ISO transfers */
+	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
+			ehci_to_hcd(ehci)->self.sysdev,
+			sizeof (struct ehci_sitd),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!ehci->sitd_pool) {
+		goto fail;
+	}
+
+	/* Hardware periodic table */
+	ehci->periodic = (__le32 *)
+		dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
+			ehci->periodic_size * sizeof(__le32),
+			&ehci->periodic_dma, flags);
+	if (ehci->periodic == NULL) {
+		goto fail;
+	}
+
+	if (ehci->use_dummy_qh) {
+		struct ehci_qh_hw	*hw;
+		ehci->dummy = ehci_qh_alloc(ehci, flags);
+		if (!ehci->dummy)
+			goto fail;
+
+		hw = ehci->dummy->hw;
+		hw->hw_next = EHCI_LIST_END(ehci);
+		hw->hw_qtd_next = EHCI_LIST_END(ehci);
+		hw->hw_alt_next = EHCI_LIST_END(ehci);
+		ehci->dummy->hw = hw;
+
+		for (i = 0; i < ehci->periodic_size; i++)
+			ehci->periodic[i] = cpu_to_hc32(ehci,
+					ehci->dummy->qh_dma);
+	} else {
+		for (i = 0; i < ehci->periodic_size; i++)
+			ehci->periodic[i] = EHCI_LIST_END(ehci);
+	}
+
+	/* software shadow of hardware table */
+	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
+	if (ehci->pshadow != NULL)
+		return 0;
+
+fail:
+	ehci_dbg (ehci, "couldn't init memory\n");
+	ehci_mem_cleanup (ehci);
+	return -ENOMEM;
+}
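+
+/*
+ * All four dma_pools above use 32-byte alignment with a 4096-byte
+ * boundary: EHCI links its hardware structures with 32-byte-aligned
+ * physical addresses, and the "can't cross 4K" notes rely on
+ * dma_pool_create()'s boundary argument to keep every allocation
+ * within a single 4 KiB region.
+ */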
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 0000000..de76445
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ *        Neil Zhang <zhangwm@marvell.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK         (0xff)
+
+struct ehci_hcd_mv {
+	struct usb_hcd *hcd;
+
+	/* Which mode is this EHCI running in: OTG or Host? */
+	int mode;
+
+	void __iomem *phy_regs;
+	void __iomem *cap_regs;
+	void __iomem *op_regs;
+
+	struct usb_phy *otg;
+
+	struct mv_usb_platform_data *pdata;
+
+	struct clk *clk;
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+	clk_prepare_enable(ehci_mv->clk);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+	clk_disable_unprepare(ehci_mv->clk);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+	int retval;
+
+	ehci_clock_enable(ehci_mv);
+	if (ehci_mv->pdata->phy_init) {
+		retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+		if (retval)
+			return retval;
+	}
+
+	return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+	if (ehci_mv->pdata->phy_deinit)
+		ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+	ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+	int retval;
+
+	if (ehci_mv == NULL) {
+		dev_err(dev, "Cannot find private ehci data\n");
+		return -ENODEV;
+	}
+
+	hcd->has_tt = 1;
+
+	retval = ehci_setup(hcd);
+	if (retval)
+		dev_err(dev, "ehci_setup failed %d\n", retval);
+
+	return retval;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "Marvell EHCI",
+	.hcd_priv_size = sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq = ehci_irq,
+	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = mv_ehci_reset,
+	.start = ehci_run,
+	.stop = ehci_stop,
+	.shutdown = ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue = ehci_urb_enqueue,
+	.urb_dequeue = ehci_urb_dequeue,
+	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data = ehci_hub_status_data,
+	.hub_control = ehci_hub_control,
+	.bus_suspend = ehci_bus_suspend,
+	.bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	struct ehci_hcd_mv *ehci_mv;
+	struct resource *r;
+	int retval = -ENODEV;
+	u32 offset;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "missing platform_data\n");
+		return -ENODEV;
+	}
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+	if (!hcd)
+		return -ENOMEM;
+
+	ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
+	if (ehci_mv == NULL) {
+		retval = -ENOMEM;
+		goto err_put_hcd;
+	}
+
+	platform_set_drvdata(pdev, ehci_mv);
+	ehci_mv->pdata = pdata;
+	ehci_mv->hcd = hcd;
+
+	ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(ehci_mv->clk)) {
+		dev_err(&pdev->dev, "error getting clock\n");
+		retval = PTR_ERR(ehci_mv->clk);
+		goto err_put_hcd;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+	ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(ehci_mv->phy_regs)) {
+		retval = PTR_ERR(ehci_mv->phy_regs);
+		goto err_put_hcd;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+	ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(ehci_mv->cap_regs)) {
+		retval = PTR_ERR(ehci_mv->cap_regs);
+		goto err_put_hcd;
+	}
+
+	retval = mv_ehci_enable(ehci_mv);
+	if (retval) {
+		dev_err(&pdev->dev, "init phy error %d\n", retval);
+		goto err_put_hcd;
+	}
+
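+	/*
+	 * Per the EHCI spec, the low byte of the first capability register
+	 * is CAPLENGTH, the offset from the capability base to the
+	 * operational registers; derive op_regs from it.
+	 */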
+	offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+	ehci_mv->op_regs =
+		(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+	hcd->rsrc_start = r->start;
+	hcd->rsrc_len = resource_size(r);
+	hcd->regs = ehci_mv->op_regs;
+
+	hcd->irq = platform_get_irq(pdev, 0);
+	if (hcd->irq <= 0) {
+		dev_err(&pdev->dev, "Cannot get irq\n");
+		retval = -ENODEV;
+		goto err_disable_clk;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+
+	ehci_mv->mode = pdata->mode;
+	if (ehci_mv->mode == MV_USB_MODE_OTG) {
+		ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+		if (IS_ERR(ehci_mv->otg)) {
+			retval = PTR_ERR(ehci_mv->otg);
+
+			if (retval == -ENXIO)
+				dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+						"must have CONFIG_USB_PHY enabled\n");
+			else
+				dev_err(&pdev->dev,
+						"unable to find transceiver\n");
+			goto err_disable_clk;
+		}
+
+		retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+				"unable to register with transceiver\n");
+			retval = -ENODEV;
+			goto err_disable_clk;
+		}
+		/* otg will enable clock before use as host */
+		mv_ehci_disable(ehci_mv);
+	} else {
+		if (pdata->set_vbus)
+			pdata->set_vbus(1);
+
+		retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+		if (retval) {
+			dev_err(&pdev->dev,
+				"failed to add hcd with err %d\n", retval);
+			goto err_set_vbus;
+		}
+		device_wakeup_enable(hcd->self.controller);
+	}
+
+	if (pdata->private_init)
+		pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+	dev_info(&pdev->dev,
+		 "successfully found EHCI device, regs 0x%p irq %d,"
+		 " working in %s mode\n", hcd->regs, hcd->irq,
+		 ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+	return 0;
+
+err_set_vbus:
+	if (pdata->set_vbus)
+		pdata->set_vbus(0);
+err_disable_clk:
+	mv_ehci_disable(ehci_mv);
+err_put_hcd:
+	usb_put_hcd(hcd);
+
+	return retval;
+}
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+	struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = ehci_mv->hcd;
+
+	if (hcd->rh_registered)
+		usb_remove_hcd(hcd);
+
+	if (!IS_ERR_OR_NULL(ehci_mv->otg))
+		otg_set_host(ehci_mv->otg->otg, NULL);
+
+	if (ehci_mv->mode == MV_USB_MODE_HOST) {
+		if (ehci_mv->pdata->set_vbus)
+			ehci_mv->pdata->set_vbus(0);
+
+		mv_ehci_disable(ehci_mv);
+	}
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+	{"pxa-u2oehci", PXA_U2OEHCI},
+	{"pxa-sph", PXA_SPH},
+	{"mmp3-hsic", MMP3_HSIC},
+	{"mmp3-fsic", MMP3_FSIC},
+	{},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+	struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = ehci_mv->hcd;
+
+	if (!hcd->rh_registered)
+		return;
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+	.probe = mv_ehci_probe,
+	.remove = mv_ehci_remove,
+	.shutdown = mv_ehci_shutdown,
+	.driver = {
+		   .name = "mv-ehci",
+		   .bus = &platform_bus_type,
+		   },
+	.id_table = ehci_id_table,
+};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 0000000..c9f91e6
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/usb-ehci-mxc.h>
+#include "ehci.h"
+
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
+
+#define ULPI_VIEWPORT_OFFSET	0x170
+
+struct ehci_mxc_priv {
+	struct clk *usbclk, *ahbclk, *phyclk;
+};
+
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
+
+static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
+	.extra_priv_size =	sizeof(struct ehci_mxc_priv),
+};
+
+static int ehci_mxc_drv_probe(struct platform_device *pdev)
+{
+	struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int irq, ret;
+	struct ehci_mxc_priv *priv;
+	struct device *dev = &pdev->dev;
+	struct ehci_hcd *ehci;
+
+	if (!pdata) {
+		dev_err(dev, "No platform data given, bailing out.\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+
+	hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto err_alloc;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	hcd->has_tt = 1;
+	ehci = hcd_to_ehci(hcd);
+	priv = (struct ehci_mxc_priv *) ehci->priv;
+
+	/* enable clocks */
+	priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(priv->usbclk)) {
+		ret = PTR_ERR(priv->usbclk);
+		goto err_alloc;
+	}
+	clk_prepare_enable(priv->usbclk);
+
+	priv->ahbclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(priv->ahbclk)) {
+		ret = PTR_ERR(priv->ahbclk);
+		goto err_clk_ahb;
+	}
+	clk_prepare_enable(priv->ahbclk);
+
+	/* "dr" device has its own clock on i.MX51 */
+	priv->phyclk = devm_clk_get(&pdev->dev, "phy");
+	if (IS_ERR(priv->phyclk))
+		priv->phyclk = NULL;
+	if (priv->phyclk)
+		clk_prepare_enable(priv->phyclk);
+
+
+	/* call platform specific init function */
+	if (pdata->init) {
+		ret = pdata->init(pdev);
+		if (ret) {
+			dev_err(dev, "platform init failed\n");
+			goto err_init;
+		}
+		/* platforms need some time to settle changed IO settings */
+		mdelay(10);
+	}
+
+	/* EHCI registers start at offset 0x100 */
+	ehci->caps = hcd->regs + 0x100;
+	ehci->regs = hcd->regs + 0x100 +
+		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* set up the PORTSCx register */
+	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
+
+	/* is this really needed? */
+	msleep(10);
+
+	/* Initialize the transceiver */
+	if (pdata->otg) {
+		pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
+		ret = usb_phy_init(pdata->otg);
+		if (ret) {
+			dev_err(dev, "unable to init transceiver, probably missing\n");
+			ret = -ENODEV;
+			goto err_add;
+		}
+		ret = otg_set_vbus(pdata->otg->otg, 1);
+		if (ret) {
+			dev_err(dev, "unable to enable vbus on transceiver\n");
+			goto err_add;
+		}
+	}
+
+	platform_set_drvdata(pdev, hcd);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto err_add;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_add:
+	if (pdata && pdata->exit)
+		pdata->exit(pdev);
+err_init:
+	if (priv->phyclk)
+		clk_disable_unprepare(priv->phyclk);
+
+	clk_disable_unprepare(priv->ahbclk);
+err_clk_ahb:
+	clk_disable_unprepare(priv->usbclk);
+err_alloc:
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+static int ehci_mxc_drv_remove(struct platform_device *pdev)
+{
+	struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+	usb_remove_hcd(hcd);
+
+	if (pdata && pdata->exit)
+		pdata->exit(pdev);
+
+	if (pdata && pdata->otg)
+		usb_phy_shutdown(pdata->otg);
+
+	clk_disable_unprepare(priv->usbclk);
+	clk_disable_unprepare(priv->ahbclk);
+
+	if (priv->phyclk)
+		clk_disable_unprepare(priv->phyclk);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+MODULE_ALIAS("platform:mxc-ehci");
+
+static struct platform_driver ehci_mxc_driver = {
+	.probe = ehci_mxc_drv_probe,
+	.remove = ehci_mxc_drv_remove,
+	.shutdown = usb_hcd_platform_shutdown,
+	.driver = {
+		   .name = "mxc-ehci",
+	},
+};
+
+static int __init ehci_mxc_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+	return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+	platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
new file mode 100644
index 0000000..adaf8fb
--- /dev/null
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM7xx driver for EHCI HCD
+ *
+ * Copyright (C) 2018 Nuvoton Technologies,
+ * Avi Fishman <avi.fishman@nuvoton.com> <avifishman70@gmail.com>
+ * Tomer Maimon <tomer.maimon@nuvoton.com> <tmaimon77@gmail.com>
+ *
+ * Based on the ehci-spear.c driver
+ */
+
+
+#include <linux/dma-mapping.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define DRIVER_DESC "EHCI npcm7xx driver"
+
+static const char hcd_name[] = "npcm7xx-ehci";
+
+#define  USB2PHYCTL_OFFSET 0x144
+
+#define  IPSRST2_OFFSET 0x24
+#define  IPSRST3_OFFSET 0x34
+
+
+static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_npcm7xx_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+
+	return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_npcm7xx_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	ehci_resume(hcd, false);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ehci_npcm7xx_pm_ops, ehci_npcm7xx_drv_suspend,
+		ehci_npcm7xx_drv_resume);
+
+static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res;
+	struct regmap *gcr_regmap;
+	struct regmap *rst_regmap;
+	const struct hc_driver *driver = &ehci_npcm7xx_hc_driver;
+	int irq;
+	int retval;
+
+	dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
+
+	gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+	if (IS_ERR(gcr_regmap)) {
+		dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-gcr\n",
+			__func__);
+		return PTR_ERR(gcr_regmap);
+	}
+
+	rst_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
+	if (IS_ERR(rst_regmap)) {
+		dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-rst\n",
+			__func__);
+		return PTR_ERR(rst_regmap);
+	}
+
+	/********* phy init *********/
+	// reset usb host
+	regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
+			(0x1 << 26), (0x1 << 26));
+	regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
+			(0x1 << 25), (0x1 << 25));
+	regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
+			(0x1 << 28), 0);
+
+	udelay(1);
+
+	// enable phy
+	regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
+			(0x1 << 25), 0);
+
+	udelay(50); // wait for the phy to settle
+
+	regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
+			(0x1 << 28), (0x1 << 28));
+
+	// enable host
+	regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
+			(0x1 << 26), 0);
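+
+	/*
+	 * The sequence above: assert the host reset (IPSRST2 bit 26) and
+	 * the PHY reset (IPSRST3 bit 25), clear USB2PHYCTL bit 28, release
+	 * the PHY reset and wait 50us for it to settle, set USB2PHYCTL
+	 * bit 28 again, then finally release the host reset.
+	 */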
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto fail;
+	}
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail;
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err_put_hcd;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	/* registers start at offset 0x0 */
+	hcd_to_ehci(hcd)->caps = hcd->regs;
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval)
+		goto err_put_hcd;
+
+	device_wakeup_enable(hcd->self.controller);
+	return retval;
+
+err_put_hcd:
+	usb_put_hcd(hcd);
+fail:
+	dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+	return retval;
+}
+
+static int npcm7xx_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_remove_hcd(hcd);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id npcm7xx_ehci_id_table[] = {
+	{ .compatible = "nuvoton,npcm750-ehci" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table);
+
+static struct platform_driver npcm7xx_ehci_hcd_driver = {
+	.probe		= npcm7xx_ehci_hcd_drv_probe,
+	.remove		= npcm7xx_ehci_hcd_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name = "npcm7xx-ehci",
+		.bus = &platform_bus_type,
+		.pm = &ehci_npcm7xx_pm_ops,
+		.of_match_table = npcm7xx_ehci_id_table,
+	}
+};
+
+static int __init ehci_npcm7xx_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_npcm7xx_hc_driver, NULL);
+	return platform_driver_register(&npcm7xx_ehci_hcd_driver);
+}
+module_init(ehci_npcm7xx_init);
+
+static void __exit ehci_npcm7xx_cleanup(void)
+{
+	platform_driver_unregister(&npcm7xx_ehci_hcd_driver);
+}
+module_exit(ehci_npcm7xx_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:npcm7xx-ehci");
+MODULE_AUTHOR("Avi Fishman");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 0000000..7e4c133
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ehci-omap.c - driver for USBHOST on OMAP3/4 processors
+ *
+ * Bus Glue for the EHCI controllers in OMAP3/4
+ * Tested on several OMAP3 boards, and OMAP4 Pandaboard
+ *
+ * Copyright (C) 2007-2013 Texas Instruments, Inc.
+ *	Author: Vikram Pandita <vikram.pandita@ti.com>
+ *	Author: Anand Gadiyar <gadiyar@ti.com>
+ *	Author: Keshava Munegowda <keshava_mgowda@ti.com>
+ *	Author: Roger Quadros <rogerq@ti.com>
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *	Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb/ulpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+
+#include "ehci.h"
+
+#include <linux/platform_data/usb-omap.h>
+
+/* EHCI Register Set */
+#define EHCI_INSNREG04					(0xA0)
+#define EHCI_INSNREG04_DISABLE_UNSUSPEND		(1 << 5)
+#define	EHCI_INSNREG05_ULPI				(0xA4)
+#define	EHCI_INSNREG05_ULPI_CONTROL_SHIFT		31
+#define	EHCI_INSNREG05_ULPI_PORTSEL_SHIFT		24
+#define	EHCI_INSNREG05_ULPI_OPSEL_SHIFT			22
+#define	EHCI_INSNREG05_ULPI_REGADD_SHIFT		16
+#define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8
+#define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0
+
+#define DRIVER_DESC "OMAP-EHCI Host Controller driver"
+
+static const char hcd_name[] = "ehci-omap";
+
+/*-------------------------------------------------------------------------*/
+
+struct omap_hcd {
+	struct usb_phy *phy[OMAP3_HS_USB_PORTS]; /* one PHY for each port */
+	int nports;
+};
+
+static inline void ehci_write(void __iomem *base, u32 reg, u32 val)
+{
+	__raw_writel(val, base + reg);
+}
+
+static inline u32 ehci_read(void __iomem *base, u32 reg)
+{
+	return __raw_readl(base + reg);
+}
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+static struct hc_driver __read_mostly ehci_omap_hc_driver;
+
+static const struct ehci_driver_overrides ehci_omap_overrides __initconst = {
+	.extra_priv_size = sizeof(struct omap_hcd),
+};
+
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
+	struct resource	*res;
+	struct usb_hcd	*hcd;
+	void __iomem *regs;
+	int ret;
+	int irq;
+	int i;
+	struct omap_hcd	*omap;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (!dev->parent) {
+		dev_err(dev, "Missing parent device\n");
+		return -ENODEV;
+	}
+
+	/* For DT boot, get platform data from parent. i.e. usbhshost */
+	if (dev->of_node) {
+		pdata = dev_get_platdata(dev->parent);
+		dev->platform_data = pdata;
+	}
+
+	if (!pdata) {
+		dev_err(dev, "Missing platform data\n");
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "EHCI irq failed: %d\n", irq);
+		return irq;
+	}
+
+	res =  platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	ret = -ENODEV;
+	hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
+			dev_name(dev));
+	if (!hcd) {
+		dev_err(dev, "Failed to create HCD\n");
+		return -ENOMEM;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+	hcd->regs = regs;
+	hcd_to_ehci(hcd)->caps = regs;
+
+	omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
+	omap->nports = pdata->nports;
+
+	platform_set_drvdata(pdev, hcd);
+
+	/* get the PHY devices if needed */
+	for (i = 0; i < omap->nports; i++) {
+		struct usb_phy *phy;
+
+		/* get the PHY device */
+		phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
+		if (IS_ERR(phy)) {
+			/* Don't bail out if PHY is not absolutely necessary */
+			if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY)
+				continue;
+
+			ret = PTR_ERR(phy);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Can't get PHY for port %d: %d\n",
+					i, ret);
+			goto err_phy;
+		}
+
+		omap->phy[i] = phy;
+
+		if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
+			usb_phy_init(omap->phy[i]);
+			/* bring PHY out of suspend */
+			usb_phy_set_suspend(omap->phy[i], 0);
+		}
+	}
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	/*
+	 * An undocumented "feature" in the OMAP3 EHCI controller
+	 * causes suspended ports to be taken out of suspend when
+	 * the USBCMD.Run/Stop bit is cleared (for example when
+	 * we do ehci_bus_suspend).
+	 * This breaks suspend-resume if the root-hub is allowed
+	 * to suspend. Writing 1 to this undocumented register bit
+	 * disables this feature and restores normal behavior.
+	 */
+	ehci_write(regs, EHCI_INSNREG04,
+				EHCI_INSNREG04_DISABLE_UNSUSPEND);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret) {
+		dev_err(dev, "failed to add hcd with err %d\n", ret);
+		goto err_pm_runtime;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	/*
+	 * Bring PHYs out of reset for non PHY modes.
+	 * Even though HSIC mode is a PHY-less mode, the reset
+	 * line exists between the chips and can be modelled
+	 * as a PHY device for reset control.
+	 */
+	for (i = 0; i < omap->nports; i++) {
+		if (!omap->phy[i] ||
+		     pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
+			continue;
+
+		usb_phy_init(omap->phy[i]);
+		/* bring PHY out of suspend */
+		usb_phy_set_suspend(omap->phy[i], 0);
+	}
+
+	return 0;
+
+err_pm_runtime:
+	pm_runtime_put_sync(dev);
+
+err_phy:
+	for (i = 0; i < omap->nports; i++) {
+		if (omap->phy[i])
+			usb_phy_shutdown(omap->phy[i]);
+	}
+
+	usb_put_hcd(hcd);
+
+	return ret;
+}
+
+
+/**
+ * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int ehci_hcd_omap_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct omap_hcd *omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
+	int i;
+
+	usb_remove_hcd(hcd);
+
+	for (i = 0; i < omap->nports; i++) {
+		if (omap->phy[i])
+			usb_phy_shutdown(omap->phy[i]);
+	}
+
+	usb_put_hcd(hcd);
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+static const struct of_device_id omap_ehci_dt_ids[] = {
+	{ .compatible = "ti,ehci-omap" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
+
+static struct platform_driver ehci_hcd_omap_driver = {
+	.probe			= ehci_hcd_omap_probe,
+	.remove			= ehci_hcd_omap_remove,
+	.shutdown		= usb_hcd_platform_shutdown,
+	/*.suspend		= ehci_hcd_omap_suspend, */
+	/*.resume		= ehci_hcd_omap_resume, */
+	.driver = {
+		.name		= hcd_name,
+		.of_match_table = omap_ehci_dt_ids,
+	}
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init ehci_omap_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
+	return platform_driver_register(&ehci_hcd_omap_driver);
+}
+module_init(ehci_omap_init);
+
+static void __exit ehci_omap_cleanup(void)
+{
+	platform_driver_unregister(&ehci_hcd_omap_driver);
+}
+module_exit(ehci_omap_cleanup);
+
+MODULE_ALIAS("platform:ehci-omap");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
new file mode 100644
index 0000000..1ad7264
--- /dev/null
+++ b/drivers/usb/host/ehci-orion.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/usb/host/ehci-orion.c
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mbus.h>
+#include <linux/clk.h>
+#include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ehci.h"
+
+#define rdl(off)	readl_relaxed(hcd->regs + (off))
+#define wrl(off, val)	writel_relaxed((val), hcd->regs + (off))
+
+#define USB_CMD			0x140
+#define   USB_CMD_RUN		BIT(0)
+#define   USB_CMD_RESET		BIT(1)
+#define USB_MODE		0x1a8
+#define   USB_MODE_MASK		GENMASK(1, 0)
+#define   USB_MODE_DEVICE	0x2
+#define   USB_MODE_HOST		0x3
+#define   USB_MODE_SDIS		BIT(4)
+#define USB_CAUSE		0x310
+#define USB_MASK		0x314
+#define USB_WINDOW_CTRL(i)	(0x320 + ((i) << 4))
+#define USB_WINDOW_BASE(i)	(0x324 + ((i) << 4))
+#define USB_IPG			0x360
+#define USB_PHY_PWR_CTRL	0x400
+#define USB_PHY_TX_CTRL		0x420
+#define USB_PHY_RX_CTRL		0x430
+#define USB_PHY_IVREF_CTRL	0x440
+#define USB_PHY_TST_GRP_CTRL	0x450
+
+#define USB_SBUSCFG		0x90
+
+/* BAWR = BARD = 3 : Align read/write bursts packets larger than 128 bytes */
+#define USB_SBUSCFG_BAWR_ALIGN_128B	(0x3 << 6)
+#define USB_SBUSCFG_BARD_ALIGN_128B	(0x3 << 3)
+/* AHBBRST = 3	   : Align AHB Burst to INCR16 (64 bytes) */
+#define USB_SBUSCFG_AHBBRST_INCR16	(0x3 << 0)
+
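+/* The three fields above OR together to 0xdb. */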
+#define USB_SBUSCFG_DEF_VAL (USB_SBUSCFG_BAWR_ALIGN_128B	\
+			     | USB_SBUSCFG_BARD_ALIGN_128B	\
+			     | USB_SBUSCFG_AHBBRST_INCR16)
+
+#define DRIVER_DESC "EHCI Orion driver"
+
+#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
+
+struct orion_ehci_hcd {
+	struct clk *clk;
+	struct phy *phy;
+};
+
+static const char hcd_name[] = "ehci-orion";
+
+static struct hc_driver __read_mostly ehci_orion_hc_driver;
+
+/*
+ * Implement Orion USB controller specification guidelines
+ */
+static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
+{
+	/* The GL# workarounds below follow the Orion errata document. */
+	/*
+	 * Clear interrupt cause and mask
+	 */
+	wrl(USB_CAUSE, 0);
+	wrl(USB_MASK, 0);
+
+	/*
+	 * Reset controller
+	 */
+	wrl(USB_CMD, rdl(USB_CMD) | USB_CMD_RESET);
+	while (rdl(USB_CMD) & USB_CMD_RESET);
+
+	/*
+	 * GL# USB-10: Set IPG for non start of frame packets
+	 * Bits[14:8]=0xc
+	 */
+	wrl(USB_IPG, (rdl(USB_IPG) & ~0x7f00) | 0xc00);
+
+	/*
+	 * GL# USB-9: USB 2.0 Power Control
+	 * BG_VSEL[7:6]=0x1
+	 */
+	wrl(USB_PHY_PWR_CTRL, (rdl(USB_PHY_PWR_CTRL) & ~0xc0) | 0x40);
+
+	/*
+	 * GL# USB-1: USB PHY Tx Control - force calibration to '8'
+	 * TXDATA_BLOCK_EN[21]=0x1, EXT_RCAL_EN[13]=0x1, IMP_CAL[6:3]=0x8
+	 */
+	wrl(USB_PHY_TX_CTRL, (rdl(USB_PHY_TX_CTRL) & ~0x78) | 0x202040);
+
+	/*
+	 * GL# USB-3 GL# USB-9: USB PHY Rx Control
+	 * RXDATA_BLOCK_LENGTH[31:30]=0x3, EDGE_DET_SEL[27:26]=0,
+	 * CDR_FASTLOCK_EN[21]=0, DISCON_THRESHOLD[9:8]=0, SQ_THRESH[7:4]=0x1
+	 */
+	wrl(USB_PHY_RX_CTRL, (rdl(USB_PHY_RX_CTRL) & ~0xc2003f0) | 0xc0000010);
+
+	/*
+	 * GL# USB-3 GL# USB-9: USB PHY IVREF Control
+	 * PLLVDD12[1:0]=0x2, RXVDD[5:4]=0x3, Reserved[19]=0
+	 */
+	wrl(USB_PHY_IVREF_CTRL, (rdl(USB_PHY_IVREF_CTRL) & ~0x80003) | 0x32);
+
+	/*
+	 * GL# USB-3 GL# USB-9: USB PHY Test Group Control
+	 * REG_FIFO_SQ_RST[15]=0
+	 */
+	wrl(USB_PHY_TST_GRP_CTRL, rdl(USB_PHY_TST_GRP_CTRL) & ~0x8000);
+
+	/*
+	 * Stop and reset controller
+	 */
+	wrl(USB_CMD, rdl(USB_CMD) & ~USB_CMD_RUN);
+	wrl(USB_CMD, rdl(USB_CMD) | USB_CMD_RESET);
+	while (rdl(USB_CMD) & USB_CMD_RESET);
+
+	/*
+	 * GL# USB-5 Streaming disable REG_USB_MODE[4]=1
+	 * TBD: This needs to be done after each reset!
+	 * GL# USB-4 Setup USB Host mode
+	 */
+	wrl(USB_MODE, USB_MODE_SDIS | USB_MODE_HOST);
+}
+
+static void
+ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
+			     const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		wrl(USB_WINDOW_CTRL(i), 0);
+		wrl(USB_WINDOW_BASE(i), 0);
+	}
+
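+	/*
+	 * One decode window per DRAM chip select: bits [31:16] hold the
+	 * size mask, [15:8] the attribute, [7:4] the target ID, and bit 0
+	 * enables the window.
+	 */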
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
+					(cs->mbus_attr << 8) |
+					(dram->mbus_dram_target_id << 4) | 1);
+		wrl(USB_WINDOW_BASE(i), cs->base);
+	}
+}
+
+static int ehci_orion_drv_reset(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	int ret;
+
+	ret = ehci_setup(hcd);
+	if (ret)
+		return ret;
+
+	/*
+	 * On SoCs without hlock, the sbuscfg value must be programmed to
+	 * guarantee that the AHB master's bursts neither overrun nor
+	 * underrun the FIFO.
+	 *
+	 * The sbuscfg register has to be set after the USB controller
+	 * reset; otherwise its value is overridden to 0.
+	 */
+	if (of_device_is_compatible(dev->of_node, "marvell,armada-3700-ehci"))
+		wrl(USB_SBUSCFG, USB_SBUSCFG_DEF_VAL);
+
+	return ret;
+}
+
+static const struct ehci_driver_overrides orion_overrides __initconst = {
+	.extra_priv_size =	sizeof(struct orion_ehci_hcd),
+	.reset = ehci_orion_drv_reset,
+};
+
+static int ehci_orion_drv_probe(struct platform_device *pdev)
+{
+	struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
+	const struct mbus_dram_target_info *dram;
+	struct resource *res;
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	void __iomem *regs;
+	int irq, err;
+	enum orion_ehci_phy_ver phy_version;
+	struct orion_ehci_hcd *priv;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_debug("Initializing Orion-SoC USB Host Controller\n");
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		err = -ENODEV;
+		goto err;
+	}
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask
+	 * set. Since shared usb code relies on it, set it here for
+	 * now. Once we have dma capability bindings this can go away.
+	 */
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		goto err;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs)) {
+		err = PTR_ERR(regs);
+		goto err;
+	}
+
+	hcd = usb_create_hcd(&ehci_orion_hc_driver,
+			&pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+	hcd->regs = regs;
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = hcd->regs + 0x100;
+	hcd->has_tt = 1;
+
+	priv = hcd_to_orion_priv(hcd);
+	/*
+	 * Not all platforms can gate the clock, so it is not an error if
+	 * the clock does not exist.
+	 */
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(priv->clk))
+		clk_prepare_enable(priv->clk);
+
+	priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
+	if (IS_ERR(priv->phy)) {
+		err = PTR_ERR(priv->phy);
+		if (err != -ENOSYS)
+			goto err_phy_get;
+	} else {
+		err = phy_init(priv->phy);
+		if (err)
+			goto err_phy_init;
+
+		err = phy_power_on(priv->phy);
+		if (err)
+			goto err_phy_power_on;
+	}
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	dram = mv_mbus_dram_info();
+	if (dram)
+		ehci_orion_conf_mbus_windows(hcd, dram);
+
+	/*
+	 * setup Orion USB controller.
+	 */
+	if (pdev->dev.of_node)
+		phy_version = EHCI_PHY_NA;
+	else
+		phy_version = pd->phy_version;
+
+	switch (phy_version) {
+	case EHCI_PHY_NA:	/* don't change USB PHY settings */
+		break;
+	case EHCI_PHY_ORION:
+		orion_usb_phy_v1_setup(hcd);
+		break;
+	case EHCI_PHY_DD:
+	case EHCI_PHY_KW:
+	default:
+		dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
+	}
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err)
+		goto err_add_hcd;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_add_hcd:
+	if (!IS_ERR(priv->phy))
+		phy_power_off(priv->phy);
+err_phy_power_on:
+	if (!IS_ERR(priv->phy))
+		phy_exit(priv->phy);
+err_phy_init:
+err_phy_get:
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+	usb_put_hcd(hcd);
+err:
+	dev_err(&pdev->dev, "init %s failed, error %d\n",
+		dev_name(&pdev->dev), err);
+
+	return err;
+}
+
+static int ehci_orion_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct orion_ehci_hcd *priv = hcd_to_orion_priv(hcd);
+
+	usb_remove_hcd(hcd);
+
+	if (!IS_ERR(priv->phy)) {
+		phy_power_off(priv->phy);
+		phy_exit(priv->phy);
+	}
+
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id ehci_orion_dt_ids[] = {
+	{ .compatible = "marvell,orion-ehci", },
+	{ .compatible = "marvell,armada-3700-ehci", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
+
+static struct platform_driver ehci_orion_driver = {
+	.probe		= ehci_orion_drv_probe,
+	.remove		= ehci_orion_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name	= "orion-ehci",
+		.of_match_table = ehci_orion_dt_ids,
+	},
+};
+
+static int __init ehci_orion_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
+	return platform_driver_register(&ehci_orion_driver);
+}
+module_init(ehci_orion_init);
+
+static void __exit ehci_orion_cleanup(void)
+{
+	platform_driver_unregister(&ehci_orion_driver);
+}
+module_exit(ehci_orion_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:orion-ehci");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
new file mode 100644
index 0000000..fe9422d
--- /dev/null
+++ b/drivers/usb/host/ehci-pci.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EHCI HCD (Host Controller Driver) PCI Bus Glue.
+ *
+ * Copyright (c) 2000-2004 by David Brownell
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "EHCI PCI platform driver"
+
+static const char hcd_name[] = "ehci-pci";
+
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB	0x2e70
+
+/*-------------------------------------------------------------------------*/
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC		0x0939
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+{
+	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
+}
+
+/*
+ * This is the list of PCI IDs for the devices that have EHCI USB class and
+ * specific drivers for that. One example is a ChipIdea device installed
+ * on some Intel MID platforms.
+ */
+static const struct pci_device_id bypass_pci_id_table[] = {
+	/* ChipIdea on Intel MID platform */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), },
+	{}
+};
+
+static inline bool is_bypassed_id(struct pci_dev *pdev)
+{
+	return !!pci_match_id(bypass_pci_id_table, pdev);
+}
+
+/*
+ * 0x84 is the offset of the in/out threshold register,
+ * which is the same offset as that of the 'hostpc' register.
+ */
+#define	intel_quark_x1000_insnreg01	hostpc
+
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD	0x007f007f
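+/* i.e. 0x7f packed into both 16-bit halves of the register */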
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+{
+	int			retval;
+
+	/* we expect static quirk code to handle the "extended capabilities"
+	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
+	 */
+
+	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+	retval = pci_set_mwi(pdev);
+	if (!retval)
+		ehci_dbg(ehci, "MWI active\n");
+
+	/* Reset the threshold limit */
+	if (is_intel_quark_x1000(pdev)) {
+		/*
+		 * For the Intel QUARK X1000, raise the I/O threshold to the
+		 * maximum usable value in order to improve performance.
+		 */
+		ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
+			ehci->regs->intel_quark_x1000_insnreg01);
+	}
+
+	return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_pci_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	u32			temp;
+	int			retval;
+
+	ehci->caps = hcd->regs;
+
+	/*
+	 * ehci_init() causes memory for DMA transfers to be
+	 * allocated.  Thus, any vendor-specific workarounds based on
+	 * limiting the type of memory used for DMA transfers must
+	 * happen before ehci_setup() is called.
+	 *
+	 * Most other workarounds can be done either before or after
+	 * init and reset; they are located here too.
+	 */
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_TOSHIBA_2:
+		/* celleb's companion chip */
+		if (pdev->device == 0x01b5) {
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+			ehci->big_endian_mmio = 1;
+#else
+			ehci_warn(ehci,
+				  "unsupported big endian Toshiba quirk\n");
+#endif
+		}
+		break;
+	case PCI_VENDOR_ID_NVIDIA:
+		/* NVidia reports that certain chips don't handle
+		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
+		 * data buffer, and periodic schedule are normal.)
+		 */
+		switch (pdev->device) {
+		case 0x003c:	/* MCP04 */
+		case 0x005b:	/* CK804 */
+		case 0x00d8:	/* CK8 */
+		case 0x00e8:	/* CK8S */
+			if (pci_set_consistent_dma_mask(pdev,
+						DMA_BIT_MASK(31)) < 0)
+				ehci_warn(ehci, "can't enable NVidia "
+					"workaround for >2GB RAM\n");
+			break;
+
+		/* Some NForce2 chips have problems with selective suspend;
+		 * fixed in newer silicon.
+		 */
+		case 0x0068:
+			if (pdev->revision < 0xa4)
+				ehci->no_selective_suspend = 1;
+			break;
+		}
+		break;
+	case PCI_VENDOR_ID_INTEL:
+		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
+			hcd->has_tt = 1;
+		break;
+	case PCI_VENDOR_ID_TDI:
+		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
+			hcd->has_tt = 1;
+		break;
+	case PCI_VENDOR_ID_AMD:
+		/* AMD PLL quirk */
+		if (usb_amd_find_chipset_info())
+			ehci->amd_pll_fix = 1;
+		/* AMD8111 EHCI doesn't work, according to AMD errata */
+		if (pdev->device == 0x7463) {
+			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
+			retval = -EIO;
+			goto done;
+		}
+
+		/*
+		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+		 * read/write memory space which does not belong to it when
+		 * there is a NULL pointer with the T-bit set to 1 in the
+		 * frame list table. To avoid the issue, the frame list link
+		 * pointer should always contain a valid pointer to an
+		 * inactive qh.
+		 */
+		if (pdev->device == 0x7808) {
+			ehci->use_dummy_qh = 1;
+			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+		}
+		break;
+	case PCI_VENDOR_ID_VIA:
+		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
+			u8 tmp;
+
+			/* The VT6212 defaults to a 1 usec EHCI sleep time which
+			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
+			 * that sleep time use the conventional 10 usec.
+			 */
+			pci_read_config_byte(pdev, 0x4b, &tmp);
+			if (tmp & 0x20)
+				break;
+			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
+		}
+		break;
+	case PCI_VENDOR_ID_ATI:
+		/* AMD PLL quirk */
+		if (usb_amd_find_chipset_info())
+			ehci->amd_pll_fix = 1;
+
+		/*
+		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+		 * read/write memory space which does not belong to it when
+		 * there is a NULL pointer with the T-bit set to 1 in the
+		 * frame list table. To avoid the issue, the frame list link
+		 * pointer should always contain a valid pointer to an
+		 * inactive qh.
+		 */
+		if (pdev->device == 0x4396) {
+			ehci->use_dummy_qh = 1;
+			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+		}
+		/* The SB600 and older revisions of the SB700 have a bug in
+		 * the EHCI controller that causes USB devices to lose
+		 * response in some cases.
+		 */
+		if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
+				usb_amd_hang_symptom_quirk()) {
+			u8 tmp;
+			ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
+			pci_read_config_byte(pdev, 0x53, &tmp);
+			pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
+		}
+		break;
+	case PCI_VENDOR_ID_NETMOS:
+		/* MosChip frame-index-register bug */
+		ehci_info(ehci, "applying MosChip frame-index workaround\n");
+		ehci->frame_index_bug = 1;
+		break;
+	}
+
+	/* optional debug port, normally in the first BAR */
+	temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+	if (temp) {
+		pci_read_config_dword(pdev, temp, &temp);
+		temp >>= 16;
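+		/*
+		 * The capability word packs the BAR number in bits [15:13]
+		 * and the register offset in bits [12:0]; only BAR 1 (the
+		 * memory-mapped EHCI register BAR) is handled here.
+		 */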
+		if (((temp >> 13) & 7) == 1) {
+			u32 hcs_params = ehci_readl(ehci,
+						    &ehci->caps->hcs_params);
+
+			temp &= 0x1fff;
+			ehci->debug = hcd->regs + temp;
+			temp = ehci_readl(ehci, &ehci->debug->control);
+			ehci_info(ehci, "debug port %d%s\n",
+				  HCS_DEBUG_PORT(hcs_params),
+				  (temp & DBGP_ENABLED) ? " IN USE" : "");
+			if (!(temp & DBGP_ENABLED))
+				ehci->debug = NULL;
+		}
+	}
+
+	retval = ehci_setup(hcd);
+	if (retval)
+		return retval;
+
+	/* These workarounds need to be applied after ehci_setup() */
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_NEC:
+	case PCI_VENDOR_ID_INTEL:
+	case PCI_VENDOR_ID_AMD:
+		ehci->need_io_watchdog = 0;
+		break;
+	case PCI_VENDOR_ID_NVIDIA:
+		switch (pdev->device) {
+		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
+		 * fetching device descriptors unless LPM is disabled.
+		 * There are also intermittent problems enumerating
+		 * devices with PPCD enabled.
+		 */
+		case 0x0d9d:
+			ehci_info(ehci, "disable ppcd for nvidia mcp89\n");
+			ehci->has_ppcd = 0;
+			ehci->command &= ~CMD_PPCEE;
+			break;
+		}
+		break;
+	}
+
+	/* at least the Genesys GL880S needs fixup here */
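+	/*
+	 * Sanity check: companion controllers (N_CC) times ports per
+	 * companion (N_PCC) should cover every root-hub port in
+	 * HCS_N_PORTS; a smaller non-zero product means bogus parameters.
+	 */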
+	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
+	temp &= 0x0f;
+	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
+		ehci_dbg(ehci, "bogus port configuration: "
+			"cc=%d x pcc=%d < ports=%d\n",
+			HCS_N_CC(ehci->hcs_params),
+			HCS_N_PCC(ehci->hcs_params),
+			HCS_N_PORTS(ehci->hcs_params));
+
+		switch (pdev->vendor) {
+		case 0x17a0:		/* GENESYS */
+			/* GL880S: should be PORTS=2 */
+			temp |= (ehci->hcs_params & ~0xf);
+			ehci->hcs_params = temp;
+			break;
+		case PCI_VENDOR_ID_NVIDIA:
+			/* NF4: should be PCC=10 */
+			break;
+		}
+	}
+
+	/* Serial Bus Release Number is at PCI 0x60 offset */
+	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
+	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
+		;	/* ConneXT has no sbrn register */
+	else
+		pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
+
+	/* Keep this around for a while just in case some EHCI
+	 * implementation uses legacy PCI PM support.  This test
+	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
+	 * been triggered by then.
+	 */
+	if (!device_can_wakeup(&pdev->dev)) {
+		u16	port_wake;
+
+		pci_read_config_word(pdev, 0x62, &port_wake);
+		if (port_wake & 0x0001) {
+			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
+			device_set_wakeup_capable(&pdev->dev, 1);
+		}
+	}
+
+#ifdef	CONFIG_PM
+	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
+		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
+#endif
+
+	retval = ehci_pci_reinit(ehci, pdev);
+done:
+	return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+
+/* suspend/resume, section 4.3 */
+
+/* These routines rely on the PCI bus glue
+ * to handle powerdown and wakeup, and currently also on
+ * transceivers that don't need any software attention to set up
+ * the right sort of wakeup.
+ * Also they depend on separate root hub suspend/resume.
+ */
+
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+
+	if (ehci_resume(hcd, hibernated) != 0)
+		(void) ehci_pci_reinit(ehci, pdev);
+	return 0;
+}
+
+#else
+
+#define ehci_suspend		NULL
+#define ehci_pci_resume		NULL
+#endif	/* CONFIG_PM */
+
+static struct hc_driver __read_mostly ehci_pci_hc_driver;
+
+static const struct ehci_driver_overrides pci_overrides __initconst = {
+	.reset =		ehci_pci_setup,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	if (is_bypassed_id(pdev))
+		return -ENODEV;
+	return usb_hcd_pci_probe(pdev, id);
+}
+
+static void ehci_pci_remove(struct pci_dev *pdev)
+{
+	pci_clear_mwi(pdev);
+	usb_hcd_pci_remove(pdev);
+}
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+	/* handle any USB 2.0 EHCI controller */
+	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
+	.driver_data =	(unsigned long) &ehci_pci_hc_driver,
+	}, {
+	PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
+	.driver_data = (unsigned long) &ehci_pci_hc_driver,
+	},
+	{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver ehci_pci_driver = {
+	.name =		(char *) hcd_name,
+	.id_table =	pci_ids,
+
+	.probe =	ehci_pci_probe,
+	.remove =	ehci_pci_remove,
+	.shutdown = 	usb_hcd_pci_shutdown,
+
+#ifdef CONFIG_PM
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
+};
+
+static int __init ehci_pci_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
+
+	/* Entries for the PCI suspend/resume callbacks are special */
+	ehci_pci_hc_driver.pci_suspend = ehci_suspend;
+	ehci_pci_hc_driver.pci_resume = ehci_pci_resume;
+
+	return pci_register_driver(&ehci_pci_driver);
+}
+module_init(ehci_pci_init);
+
+static void __exit ehci_pci_cleanup(void)
+{
+	pci_unregister_driver(&ehci_pci_driver);
+}
+module_exit(ehci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
new file mode 100644
index 0000000..4c306fb
--- /dev/null
+++ b/drivers/usb/host/ehci-platform.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic platform ehci driver
+ *
+ * Copyright 2007 Steven Brown <sbrown@cortland.com>
+ * Copyright 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Derived from the ohci-ssb driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the EHCI-PCI driver
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * Derived from the ohci-pci driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ */
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/of.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI generic platform driver"
+#define EHCI_MAX_CLKS 4
+#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+struct ehci_platform_priv {
+	struct clk *clks[EHCI_MAX_CLKS];
+	struct reset_control *rsts;
+	bool reset_on_resume;
+};
+
+static const char hcd_name[] = "ehci-platform";
+
+static int ehci_platform_reset(struct usb_hcd *hcd)
+{
+	struct platform_device *pdev = to_platform_device(hcd->self.controller);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
+
+	if (pdata->pre_setup) {
+		retval = pdata->pre_setup(hcd);
+		if (retval < 0)
+			return retval;
+	}
+
+	ehci->caps = hcd->regs + pdata->caps_offset;
+	retval = ehci_setup(hcd);
+	if (retval)
+		return retval;
+
+	if (pdata->no_io_watchdog)
+		ehci->need_io_watchdog = 0;
+	return 0;
+}
+
+static int ehci_platform_power_on(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk, ret;
+
+	for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+		ret = clk_prepare_enable(priv->clks[clk]);
+		if (ret)
+			goto err_disable_clks;
+	}
+
+	return 0;
+
+err_disable_clks:
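+	/* clk indexes the clock that failed; unwind the earlier ones in reverse */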
+	while (--clk >= 0)
+		clk_disable_unprepare(priv->clks[clk]);
+
+	return ret;
+}
+
+static void ehci_platform_power_off(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk;
+
+	for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
+		if (priv->clks[clk])
+			clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+	.reset =		ehci_platform_reset,
+	.extra_priv_size =	sizeof(struct ehci_platform_priv),
+};
+
+static struct usb_ehci_pdata ehci_platform_defaults = {
+	.power_on =		ehci_platform_power_on,
+	.power_suspend =	ehci_platform_power_off,
+	.power_off =		ehci_platform_power_off,
+};
+
+static int ehci_platform_probe(struct platform_device *dev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res_mem;
+	struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct ehci_platform_priv *priv;
+	struct ehci_hcd *ehci;
+	int err, irq, clk = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * Use reasonable defaults so platforms don't have to provide these
+	 * with DT probing on ARM.
+	 */
+	if (!pdata)
+		pdata = &ehci_platform_defaults;
+
+	err = dma_coerce_mask_and_coherent(&dev->dev,
+		pdata->dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&dev->dev, "Error: DMA mask configuration failed\n");
+		return err;
+	}
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		dev_err(&dev->dev, "no IRQ provided\n");
+		return irq;
+	}
+
+	hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+			     dev_name(&dev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	platform_set_drvdata(dev, hcd);
+	dev->dev.platform_data = pdata;
+	priv = hcd_to_ehci_priv(hcd);
+	ehci = hcd_to_ehci(hcd);
+
+	if (pdata == &ehci_platform_defaults && dev->dev.of_node) {
+		if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+			ehci->big_endian_mmio = 1;
+
+		if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+			ehci->big_endian_desc = 1;
+
+		if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+			ehci->big_endian_mmio = ehci->big_endian_desc = 1;
+
+		if (of_property_read_bool(dev->dev.of_node,
+					  "needs-reset-on-resume"))
+			priv->reset_on_resume = true;
+
+		if (of_property_read_bool(dev->dev.of_node,
+					  "has-transaction-translator"))
+			hcd->has_tt = 1;
+
+		for (clk = 0; clk < EHCI_MAX_CLKS; clk++) {
+			priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+			if (IS_ERR(priv->clks[clk])) {
+				err = PTR_ERR(priv->clks[clk]);
+				if (err == -EPROBE_DEFER)
+					goto err_put_clks;
+				priv->clks[clk] = NULL;
+				break;
+			}
+		}
+	}
+
+	priv->rsts = devm_reset_control_array_get_optional_shared(&dev->dev);
+	if (IS_ERR(priv->rsts)) {
+		err = PTR_ERR(priv->rsts);
+		goto err_put_clks;
+	}
+
+	err = reset_control_deassert(priv->rsts);
+	if (err)
+		goto err_put_clks;
+
+	if (pdata->big_endian_desc)
+		ehci->big_endian_desc = 1;
+	if (pdata->big_endian_mmio)
+		ehci->big_endian_mmio = 1;
+	if (pdata->has_tt)
+		hcd->has_tt = 1;
+	if (pdata->reset_on_resume)
+		priv->reset_on_resume = true;
+
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+	if (ehci->big_endian_mmio) {
+		dev_err(&dev->dev,
+			"Error: CONFIG_USB_EHCI_BIG_ENDIAN_MMIO not set\n");
+		err = -EINVAL;
+		goto err_reset;
+	}
+#endif
+#ifndef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+	if (ehci->big_endian_desc) {
+		dev_err(&dev->dev,
+			"Error: CONFIG_USB_EHCI_BIG_ENDIAN_DESC not set\n");
+		err = -EINVAL;
+		goto err_reset;
+	}
+#endif
+
+	if (pdata->power_on) {
+		err = pdata->power_on(dev);
+		if (err < 0)
+			goto err_reset;
+	}
+
+	res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto err_power;
+	}
+	hcd->rsrc_start = res_mem->start;
+	hcd->rsrc_len = resource_size(res_mem);
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err)
+		goto err_power;
+
+	device_wakeup_enable(hcd->self.controller);
+	device_enable_async_suspend(hcd->self.controller);
+	platform_set_drvdata(dev, hcd);
+
+	return err;
+
+err_power:
+	if (pdata->power_off)
+		pdata->power_off(dev);
+err_reset:
+	reset_control_assert(priv->rsts);
+err_put_clks:
+	while (--clk >= 0)
+		clk_put(priv->clks[clk]);
+
+	if (pdata == &ehci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	usb_put_hcd(hcd);
+
+	return err;
+}
+
+static int ehci_platform_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk;
+
+	usb_remove_hcd(hcd);
+
+	if (pdata->power_off)
+		pdata->power_off(dev);
+
+	reset_control_assert(priv->rsts);
+
+	for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
+		clk_put(priv->clks[clk]);
+
+	usb_put_hcd(hcd);
+
+	if (pdata == &ehci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_platform_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	ret = ehci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	if (pdata->power_suspend)
+		pdata->power_suspend(pdev);
+
+	return ret;
+}
+
+static int ehci_platform_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	struct device *companion_dev;
+
+	if (pdata->power_on) {
+		int err = pdata->power_on(pdev);
+		if (err < 0)
+			return err;
+	}
+
+	companion_dev = usb_of_get_companion_dev(hcd->self.controller);
+	if (companion_dev) {
+		device_pm_wait_for_dev(hcd->self.controller, companion_dev);
+		put_device(companion_dev);
+	}
+
+	ehci_resume(hcd, priv->reset_on_resume);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id vt8500_ehci_ids[] = {
+	{ .compatible = "via,vt8500-ehci", },
+	{ .compatible = "wm,prizm-ehci", },
+	{ .compatible = "generic-ehci", },
+	{ .compatible = "cavium,octeon-6335-ehci", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
+
+static const struct acpi_device_id ehci_acpi_match[] = {
+	{ "PNP0D20", 0 }, /* EHCI controller without debug */
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, ehci_acpi_match);
+
+static const struct platform_device_id ehci_platform_table[] = {
+	{ "ehci-platform", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, ehci_platform_table);
+
+static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend,
+	ehci_platform_resume);
+
+static struct platform_driver ehci_platform_driver = {
+	.id_table	= ehci_platform_table,
+	.probe		= ehci_platform_probe,
+	.remove		= ehci_platform_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "ehci-platform",
+		.pm	= &ehci_platform_pm_ops,
+		.of_match_table = vt8500_ehci_ids,
+		.acpi_match_table = ACPI_PTR(ehci_acpi_match),
+	}
+};
+
+static int __init ehci_platform_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+	return platform_driver_register(&ehci_platform_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+	platform_driver_unregister(&ehci_platform_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
new file mode 100644
index 0000000..46e1603
--- /dev/null
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PMC MSP EHCI (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2006-2010 PMC-Sierra Inc
+ */
+
+/* includes */
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/usb.h>
+#include <msp_usb.h>
+
+/* stream disable */
+#define USB_CTRL_MODE_STREAM_DISABLE	0x10
+
+/* threshold */
+#define USB_CTRL_FIFO_THRESH		0x00300000
+
+/* register offset for usb_mode */
+#define USB_EHCI_REG_USB_MODE		0x68
+
+/* register offset for usb fifo */
+#define USB_EHCI_REG_USB_FIFO		0x24
+
+/* register offset for usb status */
+#define USB_EHCI_REG_USB_STATUS		0x44
+
+/* serial/parallel transceiver */
+#define USB_EHCI_REG_BIT_STAT_STS	(1<<29)
+
+/* TWI USB0 host device pin */
+#define MSP_PIN_USB0_HOST_DEV		49
+
+/* TWI USB1 host device pin */
+#define MSP_PIN_USB1_HOST_DEV		50
+
+
+static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
+{
+	u8 *base;
+	u8 *statreg;
+	u8 *fiforeg;
+	u32 val;
+	struct ehci_regs *reg_base = ehci->regs;
+
+	/* get register base */
+	base = (u8 *)reg_base + USB_EHCI_REG_USB_MODE;
+	statreg = (u8 *)reg_base + USB_EHCI_REG_USB_STATUS;
+	fiforeg = (u8 *)reg_base + USB_EHCI_REG_USB_FIFO;
+
+	/* Disable controller mode stream */
+	val = ehci_readl(ehci, (u32 *)base);
+	ehci_writel(ehci, (val | USB_CTRL_MODE_STREAM_DISABLE),
+			(u32 *)base);
+
+	/* clear STS to select parallel transceiver interface */
+	val = ehci_readl(ehci, (u32 *)statreg);
+	val = val & ~USB_EHCI_REG_BIT_STAT_STS;
+	ehci_writel(ehci, val, (u32 *)statreg);
+
+	/* write to set the proper fifo threshold */
+	ehci_writel(ehci, USB_CTRL_FIFO_THRESH, (u32 *)fiforeg);
+
+	/* set TWI GPIO USB_HOST_DEV pin high */
+	gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_msp_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	int			retval;
+
+	ehci->big_endian_mmio = 1;
+	ehci->big_endian_desc = 1;
+
+	ehci->caps = hcd->regs;
+	hcd->has_tt = 1;
+
+	retval = ehci_setup(hcd);
+	if (retval)
+		return retval;
+
+	usb_hcd_tdi_set_mode(ehci);
+
+	return retval;
+}
+
+/* Configure so that an HC device and id are always provided.
+ * Always called with process context; sleeping is OK.
+ */
+
+static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
+{
+	struct resource *res;
+	struct platform_device *pdev = &dev->dev;
+	u32 res_len;
+	int retval;
+
+	/* MAB register space */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res == NULL)
+		return -ENOMEM;
+	res_len = resource_size(res);
+	if (!request_mem_region(res->start, res_len, "mab regs"))
+		return -EBUSY;
+
+	dev->mab_regs = ioremap_nocache(res->start, res_len);
+	if (dev->mab_regs == NULL) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	/* MSP USB register space */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (res == NULL) {
+		retval = -ENOMEM;
+		goto err2;
+	}
+	res_len = resource_size(res);
+	if (!request_mem_region(res->start, res_len, "usbid regs")) {
+		retval = -EBUSY;
+		goto err2;
+	}
+	dev->usbid_regs = ioremap_nocache(res->start, res_len);
+	if (dev->usbid_regs == NULL) {
+		retval = -ENOMEM;
+		goto err3;
+	}
+
+	return 0;
+err3:
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	res_len = resource_size(res);
+	release_mem_region(res->start, res_len);
+err2:
+	iounmap(dev->mab_regs);
+err1:
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	res_len = resource_size(res);
+	release_mem_region(res->start, res_len);
+	dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
+	return retval;
+}
+
+/**
+ * usb_hcd_msp_probe - initialize PMC MSP-based HCDs
+ * @driver: hc_driver to bind to this host controller
+ * @dev: platform device of the host controller
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_hcd_msp_probe(const struct hc_driver *driver,
+			  struct platform_device *dev)
+{
+	int retval;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	struct ehci_hcd		*ehci;
+
+	hcd = usb_create_hcd(driver, &dev->dev, "pmcmsp");
+	if (!hcd)
+		return -ENOMEM;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		pr_debug("No IOMEM resource info for %s.\n", dev->name);
+		retval = -ENOMEM;
+		goto err1;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
+		retval = -EBUSY;
+		goto err1;
+	}
+	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		pr_debug("ioremap failed");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&dev->dev, "No IRQ resource info for %s.\n", dev->name);
+		retval = -ENOMEM;
+		goto err3;
+	}
+
+	/* Map non-EHCI register spaces */
+	retval = usb_hcd_msp_map_regs(to_mspusb_device(dev));
+	if (retval != 0)
+		goto err3;
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->big_endian_mmio = 1;
+	ehci->big_endian_desc = 1;
+
+	retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
+	if (retval == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return 0;
+	}
+
+	/* usb_add_hcd() failed; fall through and release what we mapped */
+err3:
+	iounmap(hcd->regs);
+err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+	usb_put_hcd(hcd);
+
+	return retval;
+}
+
+/**
+ * usb_hcd_msp_remove - shutdown processing for PMC MSP-based HCDs
+ * @hcd: USB Host Controller being removed
+ * @dev: platform device of the host controller
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_msp_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
+{
+	usb_remove_hcd(hcd);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_msp_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"PMC MSP EHCI",
+	.hcd_priv_size =	sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ehci_irq,
+	.flags =		HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_msp_setup,
+	.shutdown		= ehci_shutdown,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	pr_debug("In ehci_hcd_msp_drv_probe");
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
+
+	ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
+
+	return ret;
+}
+
+static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_hcd_msp_remove(hcd, pdev);
+
+	/* free TWI GPIO USB_HOST_DEV pin */
+	gpio_free(MSP_PIN_USB0_HOST_DEV);
+
+	return 0;
+}
+
+MODULE_ALIAS("pmcmsp-ehci");
+
+static struct platform_driver ehci_hcd_msp_driver = {
+	.probe		= ehci_hcd_msp_drv_probe,
+	.remove		= ehci_hcd_msp_drv_remove,
+	.driver		= {
+		.name	= "pmcmsp-ehci",
+	},
+};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
new file mode 100644
index 0000000..576f7d7
--- /dev/null
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * Bus Glue for PPC On-Chip EHCI driver on the of_platform bus
+ * Tested on AMCC PPC 440EPx
+ *
+ * Valentine Barshak <vbarshak@ru.mvista.com>
+ *
+ * Based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+
+static const struct hc_driver ehci_ppc_of_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "OF EHCI",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_setup,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+#endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+
+/*
+ * 440EPx Errata USBH_3
+ * Fix: Enable Break Memory Transfer (BMT) in INSNREG3
+ */
+#define PPC440EPX_EHCI0_INSREG_BMT	(0x1 << 0)
+static int
+ppc44x_enable_bmt(struct device_node *dn)
+{
+	u32 __iomem *insreg_virt;
+
+	insreg_virt = of_iomap(dn, 1);
+	if (!insreg_virt)
+		return -EINVAL;
+
+	out_be32(insreg_virt + 3, PPC440EPX_EHCI0_INSREG_BMT);
+
+	iounmap(insreg_virt);
+	return 0;
+}
+
+
+static int ehci_hcd_ppc_of_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct usb_hcd *hcd;
+	struct ehci_hcd	*ehci = NULL;
+	struct resource res;
+	int irq;
+	int rv;
+
+	struct device_node *np;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	hcd = usb_create_hcd(&ehci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = resource_size(&res);
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+			__FILE__);
+		rv = -EBUSY;
+		goto err_irq;
+	}
+
+	hcd->regs = devm_ioremap_resource(&op->dev, &res);
+	if (IS_ERR(hcd->regs)) {
+		rv = PTR_ERR(hcd->regs);
+		goto err_ioremap;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+	np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+	if (np != NULL) {
+		/* claim we are really affected by the usb23 erratum */
+		if (!of_address_to_resource(np, 0, &res))
+			ehci->ohci_hcctrl_reg =
+				devm_ioremap(&op->dev,
+					     res.start + OHCI_HCCTRL_OFFSET,
+					     OHCI_HCCTRL_LEN);
+		else
+			pr_debug("%s: no ohci offset in fdt\n", __FILE__);
+		if (!ehci->ohci_hcctrl_reg) {
+			pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
+		} else {
+			ehci->has_amcc_usb23 = 1;
+		}
+	}
+
+	if (of_get_property(dn, "big-endian", NULL)) {
+		ehci->big_endian_mmio = 1;
+		ehci->big_endian_desc = 1;
+	}
+	if (of_get_property(dn, "big-endian-regs", NULL))
+		ehci->big_endian_mmio = 1;
+	if (of_get_property(dn, "big-endian-desc", NULL))
+		ehci->big_endian_desc = 1;
+
+	ehci->caps = hcd->regs;
+
+	if (of_device_is_compatible(dn, "ibm,usb-ehci-440epx")) {
+		rv = ppc44x_enable_bmt(dn);
+		ehci_dbg(ehci, "Break Memory Transfer (BMT) is %senabled!\n",
+				rv ? "NOT ": "");
+	}
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv)
+		goto err_ioremap;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_ioremap:
+	irq_dispose_mapping(irq);
+err_irq:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+
+static int ehci_hcd_ppc_of_remove(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+	struct device_node *np;
+	struct resource res;
+
+	dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	irq_dispose_mapping(hcd->irq);
+
+	/* Use request_mem_region() to test whether the OHCI driver is
+	 * loaded; if it is, ensure the OHCI core is operational.
+	 */
+	if (ehci->has_amcc_usb23) {
+		np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+		if (np != NULL) {
+			if (!of_address_to_resource(np, 0, &res)) {
+				if (!request_mem_region(res.start, 0x4,
+							hcd_name))
+					set_ohci_hcfs(ehci, 1);
+				else
+					release_mem_region(res.start, 0x4);
+			} else {
+				pr_debug("%s: no ohci offset in fdt\n",
+					 __FILE__);
+			}
+			of_node_put(np);
+		}
+	}
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+
+static const struct of_device_id ehci_hcd_ppc_of_match[] = {
+	{
+		.compatible = "usb-ehci",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
+
+
+static struct platform_driver ehci_hcd_ppc_of_driver = {
+	.probe		= ehci_hcd_ppc_of_probe,
+	.remove		= ehci_hcd_ppc_of_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "ppc-of-ehci",
+		.of_match_table = ehci_hcd_ppc_of_match,
+	},
+};
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
new file mode 100644
index 0000000..454d8c6
--- /dev/null
+++ b/drivers/usb/host/ehci-ps3.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  PS3 EHCI Host Controller driver
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ */
+
+#include <asm/firmware.h>
+#include <asm/ps3.h>
+
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
+{
+	/* PS3 HC internal setup register offsets. */
+
+	enum ps3_ehci_hc_insnreg {
+		ps3_ehci_hc_insnreg01 = 0x084,
+		ps3_ehci_hc_insnreg02 = 0x088,
+		ps3_ehci_hc_insnreg03 = 0x08c,
+	};
+
+	/* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+	 * internal INSNREGXX setup regs back to the chip default values
+	 * on Host Controller Reset (CMD_RESET) or Light Host Controller
+	 * Reset (CMD_LRESET).  The work-around for this is for the HC
+	 * driver to re-initialise these regs whenever the HC is reset.
+	 */
+
+	/* Set burst transfer counts to 256 out, 32 in. */
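+	/* 0x01000020: upper halfword 0x0100 = 256 (out), lower 0x0020 = 32 (in) */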
+
+	writel_be(0x01000020, (void __iomem *)ehci->regs +
+		ps3_ehci_hc_insnreg01);
+
+	/* Enable burst transfer counts. */
+
+	writel_be(0x00000001, (void __iomem *)ehci->regs +
+		ps3_ehci_hc_insnreg03);
+}
+
+static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
+{
+	int result;
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+	ehci->big_endian_mmio = 1;
+	ehci->caps = hcd->regs;
+
+	result = ehci_setup(hcd);
+	if (result)
+		return result;
+
+	ps3_ehci_setup_insnreg(ehci);
+
+	return result;
+}
+
+static const struct hc_driver ps3_ehci_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "PS3 EHCI Host Controller",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.reset			= ps3_ehci_hc_reset,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+	.get_frame_number	= ehci_get_frame,
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+#if defined(CONFIG_PM)
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+#endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
+{
+	int result;
+	struct usb_hcd *hcd;
+	unsigned int virq;
+	static u64 dummy_mask;
+
+	if (usb_disabled()) {
+		result = -ENODEV;
+		goto fail_start;
+	}
+
+	result = ps3_open_hv_device(dev);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
+			__func__, __LINE__);
+		goto fail_open;
+	}
+
+	result = ps3_dma_region_create(dev->d_region);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
+			"(%d)\n", __func__, __LINE__, result);
+		BUG_ON("check region type");
+		goto fail_dma_region;
+	}
+
+	result = ps3_mmio_region_create(dev->m_region);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
+			__func__, __LINE__);
+		result = -EPERM;
+		goto fail_mmio_region;
+	}
+
+	dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
+		__LINE__, dev->m_region->lpar_addr);
+
+	result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
+			__func__, __LINE__, virq);
+		result = -EPERM;
+		goto fail_irq;
+	}
+
+	dummy_mask = DMA_BIT_MASK(32);
+	dev->core.dma_mask = &dummy_mask;
+	dma_set_coherent_mask(&dev->core, dummy_mask);
+
+	hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core));
+
+	if (!hcd) {
+		dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
+			__LINE__);
+		result = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	hcd->rsrc_start = dev->m_region->lpar_addr;
+	hcd->rsrc_len = dev->m_region->len;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
+		dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
+			__func__, __LINE__);
+
+	hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
+
+	if (!hcd->regs) {
+		dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		result = -EPERM;
+		goto fail_ioremap;
+	}
+
+	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->rsrc_start);
+	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len   %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->rsrc_len);
+	dev_dbg(&dev->core, "%s:%d: hcd->regs       %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->regs);
+	dev_dbg(&dev->core, "%s:%d: virq            %lu\n", __func__, __LINE__,
+		(unsigned long)virq);
+
+	ps3_system_bus_set_drvdata(dev, hcd);
+
+	result = usb_add_hcd(hcd, virq, 0);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
+			__func__, __LINE__, result);
+		goto fail_add_hcd;
+	}
+
+	device_wakeup_enable(hcd->self.controller);
+	return result;
+
+fail_add_hcd:
+	iounmap(hcd->regs);
+fail_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	ps3_io_irq_destroy(virq);
+fail_irq:
+	ps3_free_mmio_region(dev->m_region);
+fail_mmio_region:
+	ps3_dma_region_free(dev->d_region);
+fail_dma_region:
+	ps3_close_hv_device(dev);
+fail_open:
+fail_start:
+	return result;
+}
+
+static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
+{
+	unsigned int tmp;
+	struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
+
+	BUG_ON(!hcd);
+
+	dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
+	dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
+
+	tmp = hcd->irq;
+
+	usb_remove_hcd(hcd);
+
+	ps3_system_bus_set_drvdata(dev, NULL);
+
+	BUG_ON(!hcd->regs);
+	iounmap(hcd->regs);
+
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+
+	ps3_io_irq_destroy(tmp);
+	ps3_free_mmio_region(dev->m_region);
+
+	ps3_dma_region_free(dev->d_region);
+	ps3_close_hv_device(dev);
+
+	return 0;
+}
+
+static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
+{
+	return firmware_has_feature(FW_FEATURE_PS3_LV1)
+		? ps3_system_bus_driver_register(drv)
+		: 0;
+}
+
+static void ps3_ehci_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+		ps3_system_bus_driver_unregister(drv);
+}
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_EHCI);
+
+static struct ps3_system_bus_driver ps3_ehci_driver = {
+	.core.name = "ps3-ehci-driver",
+	.core.owner = THIS_MODULE,
+	.match_id = PS3_MATCH_ID_EHCI,
+	.probe = ps3_ehci_probe,
+	.remove = ps3_ehci_remove,
+	.shutdown = ps3_ehci_remove,
+};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
new file mode 100644
index 0000000..3276304
--- /dev/null
+++ b/drivers/usb/host/ehci-q.c
@@ -0,0 +1,1518 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2001-2004 by David Brownell
+ */
+
+/* this file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number).  We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd and sitd) records, and (along with
+ * interrupts) needs careful scheduling.  Performance improvements can be
+ * an ongoing challenge.  That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries.  TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
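+
+/*
+ * Worked example of the per-qtd limit: a qtd carries five 4 KiB buffer
+ * pointers, so a transfer whose buffer starts 512 bytes into a page fits
+ * 3584 bytes from the first page plus four full pages, 19968 bytes total;
+ * a page-aligned buffer gets the full 5 * 4096 = 20480 bytes.
+ */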
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+		  size_t len, int token, int maxpacket)
+{
+	int	i, count;
+	u64	addr = buf;
+
+	/* one buffer entry per 4K ... first might be short or unaligned */
+	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
+	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
+	if (likely (len < count))		/* ... iff needed */
+		count = len;
+	else {
+		buf += 0x1000;
+		buf &= ~0x0fff;
+
+		/* per-qtd limit: from 16K to 20K (best alignment) */
+		for (i = 1; count < len && i < 5; i++) {
+			addr = buf;
+			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
+			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
+					(u32)(addr >> 32));
+			buf += 0x1000;
+			if ((count + 0x1000) < len)
+				count += 0x1000;
+			else
+				count = len;
+		}
+
+		/* short packets may only terminate transfers */
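+		/*
+		 * Trim to a whole number of maxpacket units so any
+		 * following qtd starts on a packet boundary.
+		 */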
+		if (count != len)
+			count -= (count % maxpacket);
+	}
+	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
+	qtd->length = count;
+
+	return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
+{
+	struct ehci_qh_hw *hw = qh->hw;
+
+	/* writes to an active overlay are unsafe */
+	WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	hw->hw_alt_next = EHCI_LIST_END(ehci);
+
+	/* Except for control endpoints, we make hardware maintain data
+	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+	 * ever clear it.
+	 */
+	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
+		unsigned	is_out, epnum;
+
+		is_out = qh->is_out;
+		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
+		if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
+			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+			usb_settoggle(qh->ps.udev, epnum, is_out, 1);
+		}
+	}
+
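+	/* keep only the data toggle and ping state; clear all status bits */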
+	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qtd *qtd;
+
+	qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
+
+	/*
+	 * first qtd may already be partially processed.
+	 * If we come here during unlink, the QH overlay region
+	 * might have reference to the just unlinked qtd. The
+	 * qtd is updated in qh_completions(). Update the QH
+	 * overlay here.
+	 */
+	if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
+		qh->hw->hw_qtd_next = qtd->hw_next;
+		if (qh->should_be_inactive)
+			ehci_warn(ehci, "qh %p should be inactive!\n", qh);
+	} else {
+		qh_update(ehci, qh, qtd);
+	}
+	qh->should_be_inactive = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
+static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct ehci_qh		*qh = ep->hcpriv;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+	qh->clearing_tt = 0;
+	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+			&& ehci->rh_state == EHCI_RH_RUNNING)
+		qh_link_async(ehci, qh);
+	spin_unlock_irqrestore(&ehci->lock, flags);
+}
+
+static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
+		struct urb *urb, u32 token)
+{
+	/* If an async split transaction gets an error or is unlinked,
+	 * the TT buffer may be left in an indeterminate state.  We
+	 * have to clear the TT buffer.
+	 *
+	 * Note: this routine is never called for Isochronous transfers.
+	 */
+	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+#ifdef CONFIG_DYNAMIC_DEBUG
+		struct usb_device *tt = urb->dev->tt->hub;
+		dev_dbg(&tt->dev,
+			"clear tt buffer port %d, a%d ep%d t%08x\n",
+			urb->dev->ttport, urb->dev->devnum,
+			usb_pipeendpoint(urb->pipe), token);
+#endif /* CONFIG_DYNAMIC_DEBUG */
+		if (!ehci_is_TDI(ehci)
+				|| urb->dev->tt->hub !=
+				   ehci_to_hcd(ehci)->self.root_hub) {
+			if (usb_hub_clear_tt_buffer(urb) == 0)
+				qh->clearing_tt = 1;
+		} else {
+
+			/* REVISIT ARC-derived cores don't clear the root
+			 * hub TT buffer in this way...
+			 */
+		}
+	}
+}
+
+static int qtd_copy_status (
+	struct ehci_hcd *ehci,
+	struct urb *urb,
+	size_t length,
+	u32 token
+)
+{
+	int	status = -EINPROGRESS;
+
+	/* count IN/OUT bytes, not SETUP (even short packets) */
+	if (likely (QTD_PID (token) != 2))
+		urb->actual_length += length - QTD_LENGTH (token);
+
+	/* don't modify error codes */
+	if (unlikely(urb->unlinked))
+		return status;
+
+	/* force cleanup after short read; not always an error */
+	if (unlikely (IS_SHORT_READ (token)))
+		status = -EREMOTEIO;
+
+	/* serious "can't proceed" faults reported by the hardware */
+	if (token & QTD_STS_HALT) {
+		if (token & QTD_STS_BABBLE) {
+			/* FIXME "must" disable babbling device's port too */
+			status = -EOVERFLOW;
+		/* CERR nonzero + halt --> stall */
+		} else if (QTD_CERR(token)) {
+			status = -EPIPE;
+
+		/* In theory, more than one of the following bits can be set
+		 * since they are sticky and the transaction is retried.
+		 * Which to test first is rather arbitrary.
+		 */
+		} else if (token & QTD_STS_MMF) {
+			/* fs/ls interrupt xfer missed the complete-split */
+			status = -EPROTO;
+		} else if (token & QTD_STS_DBE) {
+			status = (QTD_PID (token) == 1) /* IN ? */
+				? -ENOSR  /* hc couldn't read data */
+				: -ECOMM; /* hc couldn't write data */
+		} else if (token & QTD_STS_XACT) {
+			/* timeout, bad CRC, wrong PID, etc */
+			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
+				urb->dev->devpath,
+				usb_pipeendpoint(urb->pipe),
+				usb_pipein(urb->pipe) ? "in" : "out");
+			status = -EPROTO;
+		} else {	/* unknown */
+			status = -EPROTO;
+		}
+	}
+
+	return status;
+}
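+
+/*
+ * Quick reference for the mapping above (summary, not new behavior):
+ *
+ *	QTD_STS_BABBLE		-EOVERFLOW
+ *	halt + CERR != 0	-EPIPE (stall)
+ *	QTD_STS_MMF		-EPROTO (missed complete-split)
+ *	QTD_STS_DBE		-ENOSR (IN) / -ECOMM (OUT)
+ *	QTD_STS_XACT		-EPROTO (three strikes)
+ *	other halts		-EPROTO
+ */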
+
+static void
+ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+{
+	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+		/* ... update hc-wide periodic stats */
+		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
+	}
+
+	if (unlikely(urb->unlinked)) {
+		COUNT(ehci->stats.unlink);
+	} else {
+		/* report non-error and short read status as zero */
+		if (status == -EINPROGRESS || status == -EREMOTEIO)
+			status = 0;
+		COUNT(ehci->stats.complete);
+	}
+
+#ifdef EHCI_URB_TRACE
+	ehci_dbg (ehci,
+		"%s %s urb %p ep%d%s status %d len %d/%d\n",
+		__func__, urb->dev->devpath, urb,
+		usb_pipeendpoint (urb->pipe),
+		usb_pipein (urb->pipe) ? "in" : "out",
+		status,
+		urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
+}
+
+static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current.  Returns nonzero if the caller should
+ * unlink qh.
+ */
+static unsigned
+qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qtd		*last, *end = qh->dummy;
+	struct list_head	*entry, *tmp;
+	int			last_status;
+	int			stopped;
+	u8			state;
+	struct ehci_qh_hw	*hw = qh->hw;
+
+	/* completions (or tasks on other cpus) must never clobber HALT
+	 * till we've gone through and cleaned everything up, even when
+	 * they add urbs to this qh's queue or mark them for unlinking.
+	 *
+	 * NOTE:  unlinking expects to be done in queue order.
+	 *
+	 * It's a bug for qh->qh_state to be anything other than
+	 * QH_STATE_IDLE, unless our caller is scan_async() or
+	 * scan_intr().
+	 */
+	state = qh->qh_state;
+	qh->qh_state = QH_STATE_COMPLETING;
+	stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+	last = NULL;
+	last_status = -EINPROGRESS;
+	qh->dequeue_during_giveback = 0;
+
+	/* remove de-activated QTDs from front of queue.
+	 * after faults (including short reads), cleanup this urb
+	 * then let the queue advance.
+	 * if queue is stopped, handles unlinks.
+	 */
+	list_for_each_safe (entry, tmp, &qh->qtd_list) {
+		struct ehci_qtd	*qtd;
+		struct urb	*urb;
+		u32		token = 0;
+
+		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
+		urb = qtd->urb;
+
+		/* clean up any state from previous QTD ...*/
+		if (last) {
+			if (likely (last->urb != urb)) {
+				ehci_urb_done(ehci, last->urb, last_status);
+				last_status = -EINPROGRESS;
+			}
+			ehci_qtd_free (ehci, last);
+			last = NULL;
+		}
+
+		/* ignore urbs submitted during completions we reported */
+		if (qtd == end)
+			break;
+
+		/* hardware copies qtd out of qh overlay */
+		rmb ();
+		token = hc32_to_cpu(ehci, qtd->hw_token);
+
+		/* always clean up qtds the hc de-activated */
+ retry_xacterr:
+		if ((token & QTD_STS_ACTIVE) == 0) {
+
+			/* Report Data Buffer Error: non-fatal but useful */
+			if (token & QTD_STS_DBE)
+				ehci_dbg(ehci,
+					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+					urb,
+					usb_endpoint_num(&urb->ep->desc),
+					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+					urb->transfer_buffer_length,
+					qtd,
+					qh);
+
+			/* on STALL, error, and short reads this urb must
+			 * complete and all its qtds must be recycled.
+			 */
+			if ((token & QTD_STS_HALT) != 0) {
+
+				/* retry transaction errors until we
+				 * reach the software xacterr limit
+				 */
+				if ((token & QTD_STS_XACT) &&
+						QTD_CERR(token) == 0 &&
+						++qh->xacterrs < QH_XACTERR_MAX &&
+						!urb->unlinked) {
+					ehci_dbg(ehci,
+	"detected XactErr len %zu/%zu retry %d\n",
+	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+					/* reset the token in the qtd and the
+					 * qh overlay (which still contains
+					 * the qtd) so that we pick up from
+					 * where we left off
+					 */
+					token &= ~QTD_STS_HALT;
+					token |= QTD_STS_ACTIVE |
+							(EHCI_TUNE_CERR << 10);
+					qtd->hw_token = cpu_to_hc32(ehci,
+							token);
+					wmb();
+					hw->hw_token = cpu_to_hc32(ehci,
+							token);
+					goto retry_xacterr;
+				}
+				stopped = 1;
+				qh->unlink_reason |= QH_UNLINK_HALTED;
+
+			/* magic dummy for some short reads; qh won't advance.
+			 * that silicon quirk can kick in with this dummy too.
+			 *
+			 * other short reads won't stop the queue, including
+			 * control transfers (status stage handles that) or
+			 * most other single-qtd reads ... the queue stops if
+			 * URB_SHORT_NOT_OK was set so the driver submitting
+			 * the urbs could clean it up.
+			 */
+			} else if (IS_SHORT_READ (token)
+					&& !(qtd->hw_alt_next
+						& EHCI_LIST_END(ehci))) {
+				stopped = 1;
+				qh->unlink_reason |= QH_UNLINK_SHORT_READ;
+			}
+
+		/* stop scanning when we reach qtds the hc is using */
+		} else if (likely (!stopped
+				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
+			break;
+
+		/* scan the whole queue for unlinks whenever it stops */
+		} else {
+			stopped = 1;
+
+			/* cancel everything if we halt, suspend, etc */
+			if (ehci->rh_state < EHCI_RH_RUNNING) {
+				last_status = -ESHUTDOWN;
+				qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
+			}
+
+			/* this qtd is active; skip it unless a previous qtd
+			 * for its urb faulted, or its urb was canceled.
+			 */
+			else if (last_status == -EINPROGRESS && !urb->unlinked)
+				continue;
+
+			/*
+			 * If this was the active qtd when the qh was unlinked
+			 * and the overlay's token is active, then the overlay
+			 * hasn't been written back to the qtd yet so use its
+			 * token instead of the qtd's.  After the qtd is
+			 * processed and removed, the overlay won't be valid
+			 * any more.
+			 */
+			if (state == QH_STATE_IDLE &&
+					qh->qtd_list.next == &qtd->qtd_list &&
+					(hw->hw_token & ACTIVE_BIT(ehci))) {
+				token = hc32_to_cpu(ehci, hw->hw_token);
+				hw->hw_token &= ~ACTIVE_BIT(ehci);
+				qh->should_be_inactive = 1;
+
+				/* An unlink may leave an incomplete
+				 * async transaction in the TT buffer.
+				 * We have to clear it.
+				 */
+				ehci_clear_tt_buffer(ehci, qh, urb, token);
+			}
+		}
+
+		/* unless we already know the urb's status, collect qtd status
+		 * and update count of bytes transferred.  in common short read
+		 * cases with only one data qtd (including control transfers),
+		 * queue processing won't halt.  but with two or more qtds (for
+		 * example, with a 32 KB transfer), when the first qtd gets a
+		 * short read the second must be removed by hand.
+		 */
+		if (last_status == -EINPROGRESS) {
+			last_status = qtd_copy_status(ehci, urb,
+					qtd->length, token);
+			if (last_status == -EREMOTEIO
+					&& (qtd->hw_alt_next
+						& EHCI_LIST_END(ehci)))
+				last_status = -EINPROGRESS;
+
+			/* As part of low/full-speed endpoint-halt processing
+			 * we must clear the TT buffer (11.17.5).
+			 */
+			if (unlikely(last_status != -EINPROGRESS &&
+					last_status != -EREMOTEIO)) {
+				/* The TTs in some hubs malfunction when they
+				 * receive this request following a STALL (they
+				 * stop sending isochronous packets).  Since a
+				 * STALL can't leave the TT buffer in a busy
+				 * state (if you believe Figures 11-48 - 11-51
+				 * in the USB 2.0 spec), we won't clear the TT
+				 * buffer in this case.  Strictly speaking this
+				 * is a violation of the spec.
+				 */
+				if (last_status != -EPIPE)
+					ehci_clear_tt_buffer(ehci, qh, urb,
+							token);
+			}
+		}
+
+		/* if we're removing something not at the queue head,
+		 * patch the hardware queue pointer.
+		 */
+		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+			last = list_entry (qtd->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			last->hw_next = qtd->hw_next;
+		}
+
+		/* remove qtd; it's recycled after possible urb completion */
+		list_del (&qtd->qtd_list);
+		last = qtd;
+
+		/* reinit the xacterr counter for the next qtd */
+		qh->xacterrs = 0;
+	}
+
+	/* last urb's completion might still need calling */
+	if (likely (last != NULL)) {
+		ehci_urb_done(ehci, last->urb, last_status);
+		ehci_qtd_free (ehci, last);
+	}
+
+	/* Do we need to rescan for URBs dequeued during a giveback? */
+	if (unlikely(qh->dequeue_during_giveback)) {
+		/* If the QH is already unlinked, do the rescan now. */
+		if (state == QH_STATE_IDLE)
+			goto rescan;
+
+		/* Otherwise the caller must unlink the QH. */
+	}
+
+	/* restore original state; caller must unlink or relink */
+	qh->qh_state = state;
+
+	/* be sure the hardware's done with the qh before refreshing
+	 * it after fault cleanup, or recovering from silicon wrongly
+	 * overlaying the dummy qtd (which reduces DMA chatter).
+	 *
+	 * We won't refresh a QH that's linked (after the HC
+	 * stopped the queue).  That avoids a race:
+	 *  - HC reads first part of QH;
+	 *  - CPU updates that first part and the token;
+	 *  - HC reads rest of that QH, including token
+	 * Result:  HC gets an inconsistent image, and then
+	 * DMAs to/from the wrong memory (corrupting it).
+	 *
+	 * That should be rare for interrupt transfers,
+	 * except maybe high bandwidth ...
+	 */
+	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
+		qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;
+
+	/* Let the caller know if the QH needs to be unlinked. */
+	return qh->unlink_reason;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * reverse of qh_urb_transaction:  free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*qtd_list
+) {
+	struct list_head	*entry, *temp;
+
+	list_for_each_safe (entry, temp, qtd_list) {
+		struct ehci_qtd	*qtd;
+
+		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
+		list_del (&qtd->qtd_list);
+		ehci_qtd_free (ehci, qtd);
+	}
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*head,
+	gfp_t			flags
+) {
+	struct ehci_qtd		*qtd, *qtd_prev;
+	dma_addr_t		buf;
+	int			len, this_sg_len, maxpacket;
+	int			is_input;
+	u32			token;
+	int			i;
+	struct scatterlist	*sg;
+
+	/*
+	 * URBs map to sequences of QTDs:  one logical transaction
+	 */
+	qtd = ehci_qtd_alloc (ehci, flags);
+	if (unlikely (!qtd))
+		return NULL;
+	list_add_tail (&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+	/* for split transactions, SplitXState initialized to zero */
+
+	len = urb->transfer_buffer_length;
+	is_input = usb_pipein (urb->pipe);
+	if (usb_pipecontrol (urb->pipe)) {
+		/* SETUP pid */
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof (struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+
+		/* ... and always at least one more pid */
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc (ehci, flags);
+		if (unlikely (!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+		list_add_tail (&qtd->qtd_list, head);
+
+		/* for zero length DATA stages, STATUS is always IN */
+		if (len == 0)
+			token |= (1 /* "in" */ << 8);
+	}
+
+	/*
+	 * data transfer stage:  buffer setup
+	 */
+	i = urb->num_mapped_sgs;
+	if (len > 0 && i > 0) {
+		sg = urb->sg;
+		buf = sg_dma_address(sg);
+
+		/* urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), len);
+	} else {
+		sg = NULL;
+		buf = urb->transfer_dma;
+		this_sg_len = len;
+	}
+
+	if (is_input)
+		token |= (1 /* "in" */ << 8);
+	/* else it's already initted to "out" pid (0 << 8) */
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last one may be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+	for (;;) {
+		int this_qtd_len;
+
+		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
+				maxpacket);
+		this_sg_len -= this_qtd_len;
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+
+		/*
+		 * short reads advance to a "magic" dummy instead of the next
+		 * qtd ... that forces the queue to stop, for manual cleanup.
+		 * (this will usually be overridden later.)
+		 */
+		if (is_input)
+			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
+
+		/* qh makes control packets use qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+			token ^= QTD_TOGGLE;
+
+		if (likely(this_sg_len <= 0)) {
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+			buf = sg_dma_address(sg);
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+		}
+
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc (ehci, flags);
+		if (unlikely (!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+		list_add_tail (&qtd->qtd_list, head);
+	}
+
+	/*
+	 * unless the caller requires manual cleanup after short reads,
+	 * have the alt_next mechanism keep the queue running after the
+	 * last data qtd (the only one, for control and most other cases).
+	 */
+	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+				|| usb_pipecontrol (urb->pipe)))
+		qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+	/*
+	 * control requests may need a terminating data "status" ack;
+	 * other OUT ones may need a terminating short packet
+	 * (zero length).
+	 */
+	if (likely (urb->transfer_buffer_length != 0)) {
+		int	one_more = 0;
+
+		if (usb_pipecontrol (urb->pipe)) {
+			one_more = 1;
+			token ^= 0x0100;	/* "in" <--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+		} else if (usb_pipeout(urb->pipe)
+				&& (urb->transfer_flags & URB_ZERO_PACKET)
+				&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = ehci_qtd_alloc (ehci, flags);
+			if (unlikely (!qtd))
+				goto cleanup;
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+			list_add_tail (&qtd->qtd_list, head);
+
+			/* never any data in such packets */
+			qtd_fill(ehci, qtd, 0, 0, token, 0);
+		}
+	}
+
+	/* by default, enable interrupt on urb completion */
+	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
+		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+	return head;
+
+cleanup:
+	qtd_list_free (ehci, urb, head);
+	return NULL;
+}
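+
+/*
+ * Worked example for the qtd math above (illustration only: a standalone
+ * userspace sketch, not driver code).  Assuming a page-aligned transfer
+ * with no scatterlist, a 32 KB bulk URB with 512 byte maxpacket needs
+ * two data qtds, since each qtd maps 20480 bytes and non-final qtds
+ * must end on a packet boundary:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		size_t len = 32768, maxp = 512, cap = 20480, n = 0;
+ *
+ *		while (len > 0) {
+ *			size_t this_len = len < cap ? len : cap;
+ *
+ *			// short packets may only terminate transfers
+ *			if (this_len < len)
+ *				this_len -= this_len % maxp;
+ *			len -= this_len;
+ *			n++;
+ *		}
+ *		printf("%zu qtds\n", n);	// prints "2 qtds"
+ *		return 0;
+ *	}
+ */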
+
+/*-------------------------------------------------------------------------*/
+
+// Would be best to create all qh's from config descriptors,
+// when each interface/altsetting is established.  Unlink
+// any previous qh and cancel its urbs first; endpoints are
+// implicitly reset then (data toggle too).
+// That'd mean updating how usbcore talks to HCDs. (2.7?)
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled.  For highspeed, that's
+ * just one microframe in the s-mask.  For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct ehci_qh *
+qh_make (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	gfp_t			flags
+) {
+	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
+	struct usb_host_endpoint *ep;
+	u32			info1 = 0, info2 = 0;
+	int			is_input, type;
+	int			maxp = 0;
+	int			mult;
+	struct usb_tt		*tt = urb->dev->tt;
+	struct ehci_qh_hw	*hw;
+
+	if (!qh)
+		return qh;
+
+	/*
+	 * init endpoint/device data for this QH
+	 */
+	info1 |= usb_pipeendpoint (urb->pipe) << 8;
+	info1 |= usb_pipedevice (urb->pipe) << 0;
+
+	is_input = usb_pipein (urb->pipe);
+	type = usb_pipetype (urb->pipe);
+	ep = usb_pipe_endpoint (urb->dev, urb->pipe);
+	maxp = usb_endpoint_maxp (&ep->desc);
+	mult = usb_endpoint_maxp_mult (&ep->desc);
+
+	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
+	 * acts like up to 3KB, but is built from smaller packets.
+	 */
+	if (maxp > 1024) {
+		ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
+		goto done;
+	}
+
+	/* Compute interrupt scheduling parameters just once, and save.
+	 * - allowing for high bandwidth, how many nsec/uframe are used?
+	 * - split transactions need a second CSPLIT uframe; same question
+	 * - splits also need a schedule gap (for full/low speed I/O)
+	 * - qh has a polling interval
+	 *
+	 * For control/bulk requests, the HC or TT handles these.
+	 */
+	if (type == PIPE_INTERRUPT) {
+		unsigned	tmp;
+
+		qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+				is_input, 0, mult * maxp));
+		qh->ps.phase = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->ps.c_usecs = 0;
+			qh->gap_uf = 0;
+
+			if (urb->interval > 1 && urb->interval < 8) {
+				/* NOTE interval 2 or 4 uframes could work.
+				 * But interval 1 scheduling is simpler, and
+				 * includes high bandwidth.
+				 */
+				urb->interval = 1;
+			} else if (urb->interval > ehci->periodic_size << 3) {
+				urb->interval = ehci->periodic_size << 3;
+			}
+			qh->ps.period = urb->interval >> 3;
+
+			/* period for bandwidth allocation */
+			tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+					1 << (urb->ep->desc.bInterval - 1));
+
+			/* Allow urb->interval to override */
+			qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+			qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
+		} else {
+			int		think_time;
+
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
+					is_input, 0, maxp) / (125 * 1000);
+
+			/* FIXME this just approximates SPLIT/CSPLIT times */
+			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
+				qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
+				qh->ps.usecs = HS_USECS(1);
+			} else {		// SPLIT+DATA, gap, CSPLIT
+				qh->ps.usecs += HS_USECS(1);
+				qh->ps.c_usecs = HS_USECS(0);
+			}
+
+			think_time = tt ? tt->think_time : 0;
+			qh->ps.tt_usecs = NS_TO_US(think_time +
+					usb_calc_bus_time (urb->dev->speed,
+					is_input, 0, maxp));
+			if (urb->interval > ehci->periodic_size)
+				urb->interval = ehci->periodic_size;
+			qh->ps.period = urb->interval;
+
+			/* period for bandwidth allocation */
+			tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+					urb->ep->desc.bInterval);
+			tmp = rounddown_pow_of_two(tmp);
+
+			/* Allow urb->interval to override */
+			qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+			qh->ps.bw_uperiod = qh->ps.bw_period << 3;
+		}
+	}
+
+	/* support for tt scheduling, and access to toggles */
+	qh->ps.udev = urb->dev;
+	qh->ps.ep = urb->ep;
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= QH_LOW_SPEED;
+		/* FALL THROUGH */
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT)
+			info1 |= (EHCI_TUNE_RL_TT << 28);
+		if (type == PIPE_CONTROL) {
+			info1 |= QH_CONTROL_EP;		/* for TT */
+			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (EHCI_TUNE_MULT_TT << 30);
+
+		/* Some Freescale processors have an erratum in which the
+		 * port number in the queue head is 0..N-1 instead of 1..N.
+		 */
+		if (ehci_has_fsl_portno_bug(ehci))
+			info2 |= (urb->dev->ttport-1) << 23;
+		else
+			info2 |= urb->dev->ttport << 23;
+
+		/* set the address of the TT; for TDI's integrated
+		 * root hub tt, leave it zeroed.
+		 */
+		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
+			info2 |= tt->hub->devnum << 16;
+
+		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+		break;
+
+	case USB_SPEED_HIGH:		/* no TT involved */
+		info1 |= QH_HIGH_SPEED;
+		if (type == PIPE_CONTROL) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
+			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else if (type == PIPE_BULK) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			/* The USB spec says that high speed bulk endpoints
+			 * always use 512 byte maxpacket.  But some device
+			 * vendors decided to ignore that, and MSFT is happy
+			 * to help them do so.  So now people expect to use
+			 * such nonconformant devices with Linux too; sigh.
+			 */
+			info1 |= maxp << 16;
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else {		/* PIPE_INTERRUPT */
+			info1 |= maxp << 16;
+			info2 |= mult << 30;
+		}
+		break;
+	default:
+		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
+			urb->dev->speed);
+done:
+		qh_destroy(ehci, qh);
+		return NULL;
+	}
+
+	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+	/* init as live, toggle clear */
+	qh->qh_state = QH_STATE_IDLE;
+	hw = qh->hw;
+	hw->hw_info1 = cpu_to_hc32(ehci, info1);
+	hw->hw_info2 = cpu_to_hc32(ehci, info2);
+	qh->is_out = !is_input;
+	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
+	return qh;
+}
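+
+/*
+ * Worked example of the interrupt-scheduling math above (illustration
+ * only).  For a high-speed interrupt endpoint with bInterval = 5,
+ * usbcore sets urb->interval = 2^(5-1) = 16 uframes; assuming
+ * EHCI_BANDWIDTH_SIZE >= 16, qh_make() then computes
+ *
+ *	qh->ps.period     = 16 >> 3 = 2 frames
+ *	tmp               = min(EHCI_BANDWIDTH_SIZE, 1 << 4) = 16
+ *	qh->ps.bw_uperiod = min(tmp, 16) = 16 uframes
+ *	qh->ps.bw_period  = 16 >> 3 = 2 frames
+ */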
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_async(struct ehci_hcd *ehci)
+{
+	if (ehci->async_count++)
+		return;
+
+	/* Stop waiting to turn off the async schedule */
+	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
+
+	/* Don't start the schedule until ASS is 0 */
+	ehci_poll_ASS(ehci);
+	turn_on_io_watchdog(ehci);
+}
+
+static void disable_async(struct ehci_hcd *ehci)
+{
+	if (--ehci->async_count)
+		return;
+
+	/* The async schedule and unlink lists are supposed to be empty */
+	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
+			!list_empty(&ehci->async_idle));
+
+	/* Don't turn off the schedule until ASS is 1 */
+	ehci_poll_ASS(ehci);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue.  */
+
+static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
+	struct ehci_qh	*head;
+
+	/* Don't link a QH if there's a Clear-TT-Buffer pending */
+	if (unlikely(qh->clearing_tt))
+		return;
+
+	WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	qh_refresh(ehci, qh);
+
+	/* splice right after start */
+	head = ehci->async;
+	qh->qh_next = head->qh_next;
+	qh->hw->hw_next = head->hw->hw_next;
+	wmb ();
+
+	head->qh_next.qh = qh;
+	head->hw->hw_next = dma;
+
+	qh->qh_state = QH_STATE_LINKED;
+	qh->xacterrs = 0;
+	qh->unlink_reason = 0;
+	/* qtd completions reported later by interrupt */
+
+	enable_async(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns NULL if it can't allocate a QH when it needs one.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct ehci_qh *qh_append_tds (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*qtd_list,
+	int			epnum,
+	void			**ptr
+)
+{
+	struct ehci_qh		*qh = NULL;
+	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
+
+	qh = (struct ehci_qh *) *ptr;
+	if (unlikely (qh == NULL)) {
+		/* can't sleep here, we have ehci->lock... */
+		qh = qh_make (ehci, urb, GFP_ATOMIC);
+		*ptr = qh;
+	}
+	if (likely (qh != NULL)) {
+		struct ehci_qtd	*qtd;
+
+		if (unlikely (list_empty (qtd_list)))
+			qtd = NULL;
+		else
+			qtd = list_entry (qtd_list->next, struct ehci_qtd,
+					qtd_list);
+
+		/* control qh may need patching ... */
+		if (unlikely (epnum == 0)) {
+
+			/* usb_reset_device() briefly reverts to address 0 */
+			if (usb_pipedevice (urb->pipe) == 0)
+				qh->hw->hw_info1 &= ~qh_addr_mask;
+		}
+
+		/* just one way to queue requests: swap with the dummy qtd.
+		 * only hc or qh_refresh() ever modify the overlay.
+		 */
+		if (likely (qtd != NULL)) {
+			struct ehci_qtd		*dummy;
+			dma_addr_t		dma;
+			__hc32			token;
+
+			/* to avoid racing the HC, use the dummy td instead of
+			 * the first td of our list (becomes new dummy).  both
+			 * tds stay deactivated until we're done, when the
+			 * HC is allowed to fetch the old dummy (4.10.2).
+			 */
+			token = qtd->hw_token;
+			qtd->hw_token = HALT_BIT(ehci);
+
+			dummy = qh->dummy;
+
+			dma = dummy->qtd_dma;
+			*dummy = *qtd;
+			dummy->qtd_dma = dma;
+
+			list_del (&qtd->qtd_list);
+			list_add (&dummy->qtd_list, qtd_list);
+			list_splice_tail(qtd_list, &qh->qtd_list);
+
+			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
+			qh->dummy = qtd;
+
+			/* hc must see the new dummy at list end */
+			dma = qtd->qtd_dma;
+			qtd = list_entry (qh->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			qtd->hw_next = QTD_NEXT(ehci, dma);
+
+			/* let the hc process these next qtds */
+			wmb ();
+			dummy->hw_token = token;
+
+			urb->hcpriv = qh;
+		}
+	}
+	return qh;
+}
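+
+/*
+ * Sketch of the dummy swap above (illustration only).  Before, with new
+ * qtds A -> B to append:
+ *
+ *	qh:  ... -> dummy (inactive)		new list:  A -> B
+ *
+ * A's contents are copied into the old dummy (which keeps its own DMA
+ * address), A is re-initialized to become the new dummy, and only then
+ * is the old dummy's token made active:
+ *
+ *	qh:  ... -> "A" (old dummy, active) -> B -> new dummy (inactive)
+ *
+ * so the HC never fetches a half-built queue.
+ */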
+
+/*-------------------------------------------------------------------------*/
+
+static int
+submit_async (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*qtd_list,
+	gfp_t			mem_flags
+) {
+	int			epnum;
+	unsigned long		flags;
+	struct ehci_qh		*qh = NULL;
+	int			rc;
+
+	epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef EHCI_URB_TRACE
+	{
+		struct ehci_qtd *qtd;
+		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+		ehci_dbg(ehci,
+			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+			 __func__, urb->dev->devpath, urb,
+			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+			 urb->transfer_buffer_length,
+			 qtd, urb->ep->hcpriv);
+	}
+#endif
+
+	spin_lock_irqsave (&ehci->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
+		rc = -ESHUTDOWN;
+		goto done;
+	}
+	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+	if (unlikely(rc))
+		goto done;
+
+	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	if (unlikely(qh == NULL)) {
+		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Control/bulk operations through TTs don't need scheduling,
+	 * the HC and TT handle it when the TT has a buffer ready.
+	 */
+	if (likely (qh->qh_state == QH_STATE_IDLE))
+		qh_link_async(ehci, qh);
+ done:
+	spin_unlock_irqrestore (&ehci->lock, flags);
+	if (unlikely (qh == NULL))
+		qtd_list_free (ehci, urb, qtd_list);
+	return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+/*
+ * This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE Test.
+ * This is done in two parts: first the SETUP request for GetDesc is sent;
+ * then, 15 seconds later, the IN stage for GetDesc requests data from the
+ * device.
+ *
+ * is_setup : input argument deciding which of the two stages is performed;
+ * TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 on success
+ */
+static int submit_single_step_set_feature(
+	struct usb_hcd  *hcd,
+	struct urb      *urb,
+	int             is_setup
+) {
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct list_head	qtd_list;
+	struct list_head	*head;
+
+	struct ehci_qtd		*qtd, *qtd_prev;
+	dma_addr_t		buf;
+	int			len, maxpacket;
+	u32			token;
+
+	INIT_LIST_HEAD(&qtd_list);
+	head = &qtd_list;
+
+	/* URBs map to sequences of QTDs:  one logical transaction */
+	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+	if (unlikely(!qtd))
+		return -1;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+
+	len = urb->transfer_buffer_length;
+	/*
+	 * Check if the request is to perform just the SETUP stage (getDesc);
+	 * in the SINGLE_STEP_SET_FEATURE test the DATA stage (IN) happens
+	 * 15 secs after the SETUP.
+	 */
+	if (is_setup) {
+		/* SETUP pid, and interrupt after SETUP completion */
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				QTD_IOC | token | (2 /* "setup" */ << 8), 8);
+
+		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+		return 0; /* Return now; we shall come back after 15 seconds */
+	}
+
+	/*
+	 * IN: data transfer stage: buffer setup; start the IN txn phase for
+	 * the GetDesc SETUP which was sent 15 seconds back
+	 */
+	token ^= QTD_TOGGLE;   /* start IN with DATA1 PID sequence */
+	buf = urb->transfer_dma;
+
+	token |= (1 /* "in" */ << 8);  /*This is IN stage*/
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
+
+	qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+	/*
+	 * Our IN phase shall always be a short read, so keep the queue running
+	 * and let it advance to the next qtd, which is the zero-length OUT
+	 * status.
+	 */
+	qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+	/* STATUS stage for GetDesc control request */
+	token ^= 0x0100;        /* "in" <--> "out"  */
+	token |= QTD_TOGGLE;    /* force DATA1 */
+
+	qtd_prev = qtd;
+	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+	if (unlikely(!qtd))
+		goto cleanup;
+	qtd->urb = urb;
+	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	list_add_tail(&qtd->qtd_list, head);
+
+	/* Interrupt after STATUS completion */
+	qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
+
+	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+	return 0;
+
+cleanup:
+	qtd_list_free(ehci, urb, head);
+	return -1;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
+
+/*-------------------------------------------------------------------------*/
+
+static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qh		*prev;
+
+	/* Add to the end of the list of QHs waiting for the next IAAD */
+	qh->qh_state = QH_STATE_UNLINK_WAIT;
+	list_add_tail(&qh->unlink_node, &ehci->async_unlink);
+
+	/* Unlink it from the schedule */
+	prev = ehci->async;
+	while (prev->qh_next.qh != qh)
+		prev = prev->qh_next.qh;
+
+	prev->hw->hw_next = qh->hw->hw_next;
+	prev->qh_next = qh->qh_next;
+	if (ehci->qh_scan_next == qh)
+		ehci->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct ehci_hcd *ehci)
+{
+	/* If the controller isn't running, we don't have to wait for it */
+	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+		end_unlink_async(ehci);
+
+	/* Otherwise start a new IAA cycle if one isn't already running */
+	} else if (ehci->rh_state == EHCI_RH_RUNNING &&
+			!ehci->iaa_in_progress) {
+
+		/* Make sure the unlinks are all visible to the hardware */
+		wmb();
+
+		ehci_writel(ehci, ehci->command | CMD_IAAD,
+				&ehci->regs->command);
+		ehci_readl(ehci, &ehci->regs->command);
+		ehci->iaa_in_progress = true;
+		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
+	}
+}
+
+static void end_iaa_cycle(struct ehci_hcd *ehci)
+{
+	if (ehci->has_synopsys_hc_bug)
+		ehci_writel(ehci, (u32) ehci->async->qh_dma,
+			    &ehci->regs->async_next);
+
+	/* The current IAA cycle has ended */
+	ehci->iaa_in_progress = false;
+
+	end_unlink_async(ehci);
+}
+
+/* See if the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+	bool			early_exit;
+
+	if (list_empty(&ehci->async_unlink))
+		return;
+	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+			unlink_node);	/* QH whose IAA cycle just ended */
+
+	/*
+	 * If async_unlinking is set then this routine is already running,
+	 * either on the stack or on another CPU.
+	 */
+	early_exit = ehci->async_unlinking;
+
+	/* If the controller isn't running, process all the waiting QHs */
+	if (ehci->rh_state < EHCI_RH_RUNNING)
+		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);
+
+	/*
+	 * Intel (?) bug: The HC can write back the overlay region even
+	 * after the IAA interrupt occurs.  In self-defense, always go
+	 * through two IAA cycles for each QH.
+	 */
+	else if (qh->qh_state == QH_STATE_UNLINK) {
+		/*
+		 * Second IAA cycle has finished.  Process only the first
+		 * waiting QH (NVIDIA (?) bug).
+		 */
+		list_move_tail(&qh->unlink_node, &ehci->async_idle);
+	}
+
+	/*
+	 * AMD/ATI (?) bug: The HC can continue to use an active QH long
+	 * after the IAA interrupt occurs.  To prevent problems, QHs that
+	 * may still be active will wait until 2 ms have passed with no
+	 * change to the hw_current and hw_token fields (this delay occurs
+	 * between the two IAA cycles).
+	 *
+	 * The EHCI spec (4.8.2) says that active QHs must not be removed
+	 * from the async schedule and recommends waiting until the QH
+	 * goes inactive.  This is ridiculous because the QH will _never_
+	 * become inactive if the endpoint NAKs indefinitely.
+	 */
+
+	/* Some reasons for unlinking guarantee the QH can't be active */
+	else if (qh->unlink_reason & (QH_UNLINK_HALTED |
+			QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
+		goto DelayDone;
+
+	/* The QH can't be active if the queue was and still is empty... */
+	else if	((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
+			list_empty(&qh->qtd_list))
+		goto DelayDone;
+
+	/* ... or if the QH has halted */
+	else if	(qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
+		goto DelayDone;
+
+	/* Otherwise we have to wait until the QH stops changing */
+	else {
+		__hc32		qh_current, qh_token;
+
+		qh_current = qh->hw->hw_current;
+		qh_token = qh->hw->hw_token;
+		if (qh_current != ehci->old_current ||
+				qh_token != ehci->old_token) {
+			ehci->old_current = qh_current;
+			ehci->old_token = qh_token;
+			ehci_enable_event(ehci,
+					EHCI_HRTIMER_ACTIVE_UNLINK, true);
+			return;
+		}
+ DelayDone:
+		qh->qh_state = QH_STATE_UNLINK;
+		early_exit = true;
+	}
+	ehci->old_current = ~0;		/* Prepare for next QH */
+
+	/* Start a new IAA cycle if any QHs are waiting for it */
+	if (!list_empty(&ehci->async_unlink))
+		start_iaa_cycle(ehci);
+
+	/*
+	 * Don't allow nesting or concurrent calls,
+	 * or wait for the second IAA cycle for the next QH.
+	 */
+	if (early_exit)
+		return;
+
+	/* Process the idle QHs */
+	ehci->async_unlinking = true;
+	while (!list_empty(&ehci->async_idle)) {
+		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
+				unlink_node);
+		list_del(&qh->unlink_node);
+
+		qh->qh_state = QH_STATE_IDLE;
+		qh->qh_next.qh = NULL;
+
+		if (!list_empty(&qh->qtd_list))
+			qh_completions(ehci, qh);
+		if (!list_empty(&qh->qtd_list) &&
+				ehci->rh_state == EHCI_RH_RUNNING)
+			qh_link_async(ehci, qh);
+		disable_async(ehci);
+	}
+	ehci->async_unlinking = false;
+}
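+
+/*
+ * Rough QH state flow during an async unlink (illustration only):
+ *
+ *	LINKED -> UNLINK_WAIT	single_unlink_async(): queued on
+ *				async_unlink, first IAA cycle started
+ *	UNLINK_WAIT -> UNLINK	end_unlink_async(): second IAA cycle,
+ *				possibly after the 2 ms "still active" wait
+ *	UNLINK -> IDLE		end_unlink_async(): moved to async_idle,
+ *				remaining qtds completed or relinked
+ */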
+
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
+static void unlink_empty_async(struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+	struct ehci_qh		*qh_to_unlink = NULL;
+	int			count = 0;
+
+	/* Find the last async QH which has been empty for a timer cycle */
+	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
+		if (list_empty(&qh->qtd_list) &&
+				qh->qh_state == QH_STATE_LINKED) {
+			++count;
+			if (qh->unlink_cycle != ehci->async_unlink_cycle)
+				qh_to_unlink = qh;
+		}
+	}
+
+	/* If nothing else is being unlinked, unlink the last empty QH */
+	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
+		qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
+		start_unlink_async(ehci, qh_to_unlink);
+		--count;
+	}
+
+	/* Other QHs will be handled later */
+	if (count > 0) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+		++ehci->async_unlink_cycle;
+	}
+}
+
+#ifdef	CONFIG_PM
+
+/* The root hub is suspended; unlink all the async QHs */
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+
+	while (ehci->async->qh_next.qh) {
+		qh = ehci->async->qh_next.qh;
+		WARN_ON(!list_empty(&qh->qtd_list));
+		single_unlink_async(ehci, qh);
+	}
+}
+
+#endif
+
+/* makes sure the async qh will become idle */
+/* caller must own ehci->lock */
+
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	/* If the QH isn't linked then there's nothing we can do. */
+	if (qh->qh_state != QH_STATE_LINKED)
+		return;
+
+	single_unlink_async(ehci, qh);
+	start_iaa_cycle(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async (struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+	bool			check_unlinks_later = false;
+
+	ehci->qh_scan_next = ehci->async->qh_next.qh;
+	while (ehci->qh_scan_next) {
+		qh = ehci->qh_scan_next;
+		ehci->qh_scan_next = qh->qh_next.qh;
+
+		/* clean any finished work for this qh */
+		if (!list_empty(&qh->qtd_list)) {
+			int temp;
+
+			/*
+			 * Unlinks could happen here; completion reporting
+			 * drops the lock.  That's why ehci->qh_scan_next
+			 * always holds the next qh to scan; if the next qh
+			 * gets unlinked then ehci->qh_scan_next is adjusted
+			 * in single_unlink_async().
+			 */
+			temp = qh_completions(ehci, qh);
+			if (unlikely(temp)) {
+				start_unlink_async(ehci, qh);
+			} else if (list_empty(&qh->qtd_list)
+					&& qh->qh_state == QH_STATE_LINKED) {
+				qh->unlink_cycle = ehci->async_unlink_cycle;
+				check_unlinks_later = true;
+			}
+		}
+	}
+
+	/*
+	 * Unlink empty entries, reducing DMA usage as well
+	 * as HCD schedule-scanning costs.  Delay for any qh
+	 * we just scanned; it's not unusual for it to become
+	 * busy again before long.
+	 */
+	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
+			!(ehci->enabled_hrtimer_events &
+				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+		++ehci->async_unlink_cycle;
+	}
+}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
new file mode 100644
index 0000000..da7b00a
--- /dev/null
+++ b/drivers/usb/host/ehci-sched.c
@@ -0,0 +1,2500 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2001-2004 by David Brownell
+ * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
+ */
+
+/* this file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI scheduled transaction support:  interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int ehci_get_frame(struct usb_hcd *hcd);
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd/sitd
+ * @tag: hardware tag for type of this record
+ */
+static union ehci_shadow *
+periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+		__hc32 tag)
+{
+	switch (hc32_to_cpu(ehci, tag)) {
+	case Q_TYPE_QH:
+		return &periodic->qh->qh_next;
+	case Q_TYPE_FSTN:
+		return &periodic->fstn->fstn_next;
+	case Q_TYPE_ITD:
+		return &periodic->itd->itd_next;
+	/* case Q_TYPE_SITD: */
+	default:
+		return &periodic->sitd->sitd_next;
+	}
+}
+
+static __hc32 *
+shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+		__hc32 tag)
+{
+	switch (hc32_to_cpu(ehci, tag)) {
+	/* our ehci_shadow.qh is actually the software part */
+	case Q_TYPE_QH:
+		return &periodic->qh->hw->hw_next;
+	/* others are hw parts */
+	default:
+		return periodic->hw_next;
+	}
+}
+
+/* caller must hold ehci->lock */
+static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
+{
+	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
+	__hc32			*hw_p = &ehci->periodic[frame];
+	union ehci_shadow	here = *prev_p;
+
+	/* find predecessor of "ptr"; hw and shadow lists are in sync */
+	while (here.ptr && here.ptr != ptr) {
+		prev_p = periodic_next_shadow(ehci, prev_p,
+				Q_NEXT_TYPE(ehci, *hw_p));
+		hw_p = shadow_next_periodic(ehci, &here,
+				Q_NEXT_TYPE(ehci, *hw_p));
+		here = *prev_p;
+	}
+	/* an interrupt entry (at list end) could have been shared */
+	if (!here.ptr)
+		return;
+
+	/* update shadow and hardware lists ... the old "next" pointers
+	 * from ptr may still be in use, the caller updates them.
+	 */
+	*prev_p = *periodic_next_shadow(ehci, &here,
+			Q_NEXT_TYPE(ehci, *hw_p));
+
+	if (!ehci->use_dummy_qh ||
+	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
+			!= EHCI_LIST_END(ehci))
+		*hw_p = *shadow_next_periodic(ehci, &here,
+				Q_NEXT_TYPE(ehci, *hw_p));
+	else
+		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+}
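+
+/*
+ * Illustration only: the periodic schedule is two parallel structures.
+ * ehci->periodic[frame] holds the hc-visible hc32 links, and
+ * ehci->pshadow[frame] mirrors it with typed software pointers:
+ *
+ *	periodic[f]: itd -> sitd -> qh -> ...	(DMA addresses)
+ *	pshadow[f]:  itd -> sitd -> qh -> ...	(CPU pointers)
+ *
+ * periodic_unlink() walks both lists in lockstep so they stay in sync.
+ */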
+
+/*-------------------------------------------------------------------------*/
+
+/* Bandwidth and TT management */
+
+/* Find the TT data structure for this device; create it if necessary */
+static struct ehci_tt *find_tt(struct usb_device *udev)
+{
+	struct usb_tt		*utt = udev->tt;
+	struct ehci_tt		*tt, **tt_index, **ptt;
+	unsigned		port;
+	bool			allocated_index = false;
+
+	if (!utt)
+		return NULL;		/* Not below a TT */
+
+	/*
+	 * Find/create our data structure.
+	 * For hubs with a single TT, we get it directly.
+	 * For hubs with multiple TTs, there's an extra level of pointers.
+	 */
+	tt_index = NULL;
+	if (utt->multi) {
+		tt_index = utt->hcpriv;
+		if (!tt_index) {		/* Create the index array */
+			tt_index = kcalloc(utt->hub->maxchild,
+					   sizeof(*tt_index),
+					   GFP_ATOMIC);
+			if (!tt_index)
+				return ERR_PTR(-ENOMEM);
+			utt->hcpriv = tt_index;
+			allocated_index = true;
+		}
+		port = udev->ttport - 1;
+		ptt = &tt_index[port];
+	} else {
+		port = 0;
+		ptt = (struct ehci_tt **) &utt->hcpriv;
+	}
+
+	tt = *ptt;
+	if (!tt) {				/* Create the ehci_tt */
+		struct ehci_hcd		*ehci =
+				hcd_to_ehci(bus_to_hcd(udev->bus));
+
+		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
+		if (!tt) {
+			if (allocated_index) {
+				utt->hcpriv = NULL;
+				kfree(tt_index);
+			}
+			return ERR_PTR(-ENOMEM);
+		}
+		list_add_tail(&tt->tt_list, &ehci->tt_list);
+		INIT_LIST_HEAD(&tt->ps_list);
+		tt->usb_tt = utt;
+		tt->tt_port = port;
+		*ptt = tt;
+	}
+
+	return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+	struct usb_tt		*utt = udev->tt;
+	struct ehci_tt		*tt, **tt_index, **ptt;
+	int			cnt, i;
+
+	if (!utt || !utt->hcpriv)
+		return;		/* Not below a TT, or never allocated */
+
+	cnt = 0;
+	if (utt->multi) {
+		tt_index = utt->hcpriv;
+		ptt = &tt_index[udev->ttport - 1];
+
+		/* How many entries are left in tt_index? */
+		for (i = 0; i < utt->hub->maxchild; ++i)
+			cnt += !!tt_index[i];
+	} else {
+		tt_index = NULL;
+		ptt = (struct ehci_tt **) &utt->hcpriv;
+	}
+
+	tt = *ptt;
+	if (!tt || !list_empty(&tt->ps_list))
+		return;		/* never allocated, or still in use */
+
+	list_del(&tt->tt_list);
+	*ptt = NULL;
+	kfree(tt);
+	if (cnt == 1) {
+		utt->hcpriv = NULL;
+		kfree(tt_index);
+	}
+}
+
+static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
+		struct ehci_per_sched *ps)
+{
+	dev_dbg(&ps->udev->dev,
+			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
+			ps->ep->desc.bEndpointAddress,
+			(sign >= 0 ? "reserve" : "release"), type,
+			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
+			ps->phase, ps->phase_uf, ps->period,
+			ps->usecs, ps->c_usecs, ps->cs_mask);
+}
+
+static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
+		struct ehci_qh *qh, int sign)
+{
+	unsigned		start_uf;
+	unsigned		i, j, m;
+	int			usecs = qh->ps.usecs;
+	int			c_usecs = qh->ps.c_usecs;
+	int			tt_usecs = qh->ps.tt_usecs;
+	struct ehci_tt		*tt;
+
+	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
+		return;
+	start_uf = qh->ps.bw_phase << 3;
+
+	bandwidth_dbg(ehci, sign, "intr", &qh->ps);
+
+	if (sign < 0) {		/* Release bandwidth */
+		usecs = -usecs;
+		c_usecs = -c_usecs;
+		tt_usecs = -tt_usecs;
+	}
+
+	/* Entire transaction (high speed) or start-split (full/low speed) */
+	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+			i += qh->ps.bw_uperiod)
+		ehci->bandwidth[i] += usecs;
+
+	/* Complete-split (full/low speed) */
+	if (qh->ps.c_usecs) {
+		/* NOTE: adjustments needed for FSTN */
+		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
+				i += qh->ps.bw_uperiod) {
+			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
+				if (qh->ps.cs_mask & m)
+					ehci->bandwidth[i+j] += c_usecs;
+			}
+		}
+	}
+
+	/* FS/LS bus bandwidth */
+	if (tt_usecs) {
+		tt = find_tt(qh->ps.udev);
+		if (sign > 0)
+			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
+		else
+			list_del(&qh->ps.ps_list);
+
+		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
+				i += qh->ps.bw_period)
+			tt->bandwidth[i] += tt_usecs;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+		struct ehci_tt *tt)
+{
+	struct ehci_per_sched	*ps;
+	unsigned		uframe, uf, x;
+	u8			*budget_line;
+
+	if (!tt)
+		return;
+	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
+
+	/* Add up the contributions from all the endpoints using this TT */
+	list_for_each_entry(ps, &tt->ps_list, ps_list) {
+		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
+				uframe += ps->bw_uperiod) {
+			budget_line = &budget_table[uframe];
+			x = ps->tt_usecs;
+
+			/* propagate the time forward */
+			for (uf = ps->phase_uf; uf < 8; ++uf) {
+				x += budget_line[uf];
+
+				/* Each microframe lasts 125 us */
+				if (x <= 125) {
+					budget_line[uf] = x;
+					break;
+				}
+				budget_line[uf] = 125;
+				x -= 125;
+			}
+		}
+	}
+}
+
+static int __maybe_unused same_tt(struct usb_device *dev1,
+		struct usb_device *dev2)
+{
+	if (!dev1->tt || !dev2->tt)
+		return 0;
+	if (dev1->tt != dev2->tt)
+		return 0;
+	if (dev1->tt->multi)
+		return dev1->ttport == dev2->ttport;
+	else
+		return 1;
+}
+
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+
+/* Which uframe does the low/fullspeed transfer start in?
+ *
+ * The parameter is the mask of ssplits in "H-frame" terms
+ * and this returns the transfer start uframe in "B-frame" terms,
+ * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
+ * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
+ * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
+ */
+static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+{
+	unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
+
+	if (!smask) {
+		ehci_err(ehci, "invalid empty smask!\n");
+		/* uframe 7 can't have bw so this will indicate failure */
+		return 7;
+	}
+	return ffs(smask) - 1;
+}
+
+static const unsigned char
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+
+/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
+static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+{
+	int i;
+
+	for (i = 0; i < 7; i++) {
+		if (max_tt_usecs[i] < tt_usecs[i]) {
+			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
+			tt_usecs[i] = max_tt_usecs[i];
+		}
+	}
+}
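+
+/*
+ * Worked example (illustration only): a 190 us full/low-speed budget
+ * entry at uframe 0 exceeds the 125 us uframe limit, so the excess
+ * spills into uframe 1:
+ *
+ *	tt_usecs before:  { 190,  0, 0, 0, 0, 0, 0, 0 }
+ *	tt_usecs after:   { 125, 65, 0, 0, 0, 0, 0, 0 }
+ *
+ * Since uframe 6 may hold only 30 us and uframe 7 none at all, a large
+ * carryover near the end of a frame lands in tt_usecs[7], where the
+ * caller's final max_tt_usecs[7] check rejects the schedule.
+ */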
+
+/*
+ * Return true if the device's tt's downstream bus is available for a
+ * periodic transfer of the specified length (usecs), starting at the
+ * specified frame/uframe.  Note that (as summarized in section 11.19
+ * of the usb 2.0 spec) TTs can buffer multiple transactions for each
+ * uframe.
+ *
+ * The uframe parameter is when the fullspeed/lowspeed transfer
+ * should be executed in "B-frame" terms, which is the same as the
+ * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
+ * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
+ * See the EHCI spec sec 4.5 and fig 4.7.
+ *
+ * This checks if the full/lowspeed bus, at the specified starting uframe,
+ * has the specified bandwidth available, according to rules listed
+ * in USB 2.0 spec section 11.18.1 fig 11-60.
+ *
+ * This does not check if the transfer would exceed the max ssplit
+ * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
+ * since proper scheduling limits ssplits to less than 16 per uframe.
+ */
+static int tt_available(
+	struct ehci_hcd		*ehci,
+	struct ehci_per_sched	*ps,
+	struct ehci_tt		*tt,
+	unsigned		frame,
+	unsigned		uframe
+)
+{
+	unsigned		period = ps->bw_period;
+	unsigned		usecs = ps->tt_usecs;
+
+	if ((period == 0) || (uframe >= 7))	/* error */
+		return 0;
+
+	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
+			frame += period) {
+		unsigned	i, uf;
+		unsigned short	tt_usecs[8];
+
+		if (tt->bandwidth[frame] + usecs > 900)
+			return 0;
+
+		uf = frame << 3;
+		for (i = 0; i < 8; (++i, ++uf))
+			tt_usecs[i] = ehci->tt_budget[uf];
+
+		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
+			return 0;
+
+		/* special case for isoc transfers larger than 125us:
+		 * the first and each subsequent fully used uframe
+		 * must be empty, so as to not illegally delay
+		 * already scheduled transactions
+		 */
+		if (usecs > 125) {
+			int ufs = (usecs / 125);
+
+			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
+				if (tt_usecs[i] > 0)
+					return 0;
+		}
+
+		tt_usecs[uframe] += usecs;
+
+		carryover_tt_bandwidth(tt_usecs);
+
+		/* fail if the carryover pushed bw past the last uframe's limit */
+		if (max_tt_usecs[7] < tt_usecs[7])
+			return 0;
+	}
+
+	return 1;
+}
+
+#else
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision(
+	struct ehci_hcd		*ehci,
+	unsigned		period,
+	struct usb_device	*dev,
+	unsigned		frame,
+	u32			uf_mask
+)
+{
+	if (period == 0)	/* error */
+		return 0;
+
+	/* note bandwidth wastage:  split never follows csplit
+	 * (different dev or endpoint) until the next uframe.
+	 * calling convention doesn't make that distinction.
+	 */
+	for (; frame < ehci->periodic_size; frame += period) {
+		union ehci_shadow	here;
+		__hc32			type;
+		struct ehci_qh_hw	*hw;
+
+		here = ehci->pshadow[frame];
+		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
+		while (here.ptr) {
+			switch (hc32_to_cpu(ehci, type)) {
+			case Q_TYPE_ITD:
+				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
+				here = here.itd->itd_next;
+				continue;
+			case Q_TYPE_QH:
+				hw = here.qh->hw;
+				if (same_tt(dev, here.qh->ps.udev)) {
+					u32		mask;
+
+					mask = hc32_to_cpu(ehci,
+							hw->hw_info2);
+					/* "knows" no gap is needed */
+					mask |= mask >> 8;
+					if (mask & uf_mask)
+						break;
+				}
+				type = Q_NEXT_TYPE(ehci, hw->hw_next);
+				here = here.qh->qh_next;
+				continue;
+			case Q_TYPE_SITD:
+				if (same_tt(dev, here.sitd->urb->dev)) {
+					u16		mask;
+
+					mask = hc32_to_cpu(ehci, here.sitd
+								->hw_uframe);
+					/* FIXME assumes no gap for IN! */
+					mask |= mask >> 8;
+					if (mask & uf_mask)
+						break;
+				}
+				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
+				here = here.sitd->sitd_next;
+				continue;
+			/* case Q_TYPE_FSTN: */
+			default:
+				ehci_dbg(ehci,
+					"periodic frame %d bogus type %d\n",
+					frame, type);
+			}
+
+			/* collision or error */
+			return 0;
+		}
+	}
+
+	/* no collision */
+	return 1;
+}
+
+#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct ehci_hcd *ehci)
+{
+	if (ehci->periodic_count++)
+		return;
+
+	/* Stop waiting to turn off the periodic schedule */
+	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
+
+	/* Don't start the schedule until PSS is 0 */
+	ehci_poll_PSS(ehci);
+	turn_on_io_watchdog(ehci);
+}
+
+static void disable_periodic(struct ehci_hcd *ehci)
+{
+	if (--ehci->periodic_count)
+		return;
+
+	/* Don't turn off the schedule until PSS is 1 */
+	ehci_poll_PSS(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; ehci 0.96+)
+ */
+static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	unsigned	i;
+	unsigned	period = qh->ps.period;
+
+	dev_dbg(&qh->ps.udev->dev,
+		"link qh%d-%04x/%p start %d [%d/%d us]\n",
+		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
+			& (QH_CMASK | QH_SMASK),
+		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
+
+	/* high bandwidth, or otherwise every microframe */
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
+		union ehci_shadow	*prev = &ehci->pshadow[i];
+		__hc32			*hw_p = &ehci->periodic[i];
+		union ehci_shadow	here = *prev;
+		__hc32			type = 0;
+
+		/* skip the iso nodes at list head */
+		while (here.ptr) {
+			type = Q_NEXT_TYPE(ehci, *hw_p);
+			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+				break;
+			prev = periodic_next_shadow(ehci, prev, type);
+			hw_p = shadow_next_periodic(ehci, &here, type);
+			here = *prev;
+		}
+
+		/* sorting each branch by period (slow-->fast)
+		 * enables sharing interior tree nodes
+		 */
+		while (here.ptr && qh != here.qh) {
+			if (qh->ps.period > here.qh->ps.period)
+				break;
+			prev = &here.qh->qh_next;
+			hw_p = &here.qh->hw->hw_next;
+			here = *prev;
+		}
+		/* link in this qh, unless some earlier pass did that */
+		if (qh != here.qh) {
+			qh->qh_next = here;
+			if (here.qh)
+				qh->hw->hw_next = *hw_p;
+			wmb();
+			prev->qh = qh;
+			*hw_p = QH_NEXT(ehci, qh->qh_dma);
+		}
+	}
+	qh->qh_state = QH_STATE_LINKED;
+	qh->xacterrs = 0;
+	qh->unlink_reason = 0;
+
+	/* update per-qh bandwidth for debugfs */
+	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
+		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+		: (qh->ps.usecs * 8);
+
+	list_add(&qh->intr_node, &ehci->intr_qh_list);
+
+	/* maybe enable periodic schedule processing */
+	++ehci->intr_count;
+	enable_periodic(ehci);
+}
+
+static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	unsigned	i;
+	unsigned	period;
+
+	/*
+	 * If qh is for a low/full-speed device, simply unlinking it
+	 * could interfere with an ongoing split transaction.  To unlink
+	 * it safely would require setting the QH_INACTIVATE bit and
+	 * waiting at least one frame, as described in EHCI 4.12.2.5.
+	 *
+	 * We won't bother with any of this.  Instead, we assume that the
+	 * only reason for unlinking an interrupt QH while the current URB
+	 * is still active is to dequeue all the URBs (flush the whole
+	 * endpoint queue).
+	 *
+	 * If rebalancing the periodic schedule is ever implemented, this
+	 * approach will no longer be valid.
+	 */
+
+	/* high bandwidth, or otherwise part of every microframe */
+	period = qh->ps.period ? : 1;
+
+	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
+		periodic_unlink(ehci, i, qh);
+
+	/* update per-qh bandwidth for debugfs */
+	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
+		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
+		: (qh->ps.usecs * 8);
+
+	dev_dbg(&qh->ps.udev->dev,
+		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+		qh->ps.period,
+		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
+		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
+
+	/* qh->qh_next still "live" to HC */
+	qh->qh_state = QH_STATE_UNLINK;
+	qh->qh_next.ptr = NULL;
+
+	if (ehci->qh_scan_next == qh)
+		ehci->qh_scan_next = list_entry(qh->intr_node.next,
+				struct ehci_qh, intr_node);
+	list_del(&qh->intr_node);
+}
+
+static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	if (qh->qh_state != QH_STATE_LINKED ||
+			list_empty(&qh->unlink_node))
+		return;
+
+	list_del_init(&qh->unlink_node);
+
+	/*
+	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
+	 * avoiding unnecessary CPU wakeup
+	 */
+}
+
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	/* If the QH isn't linked then there's nothing we can do. */
+	if (qh->qh_state != QH_STATE_LINKED)
+		return;
+
+	/* if the qh is waiting for unlink, cancel it now */
+	cancel_unlink_wait_intr(ehci, qh);
+
+	qh_unlink_periodic(ehci, qh);
+
+	/* Make sure the unlinks are visible before starting the timer */
+	wmb();
+
+	/*
+	 * The EHCI spec doesn't say how long it takes the controller to
+	 * stop accessing an unlinked interrupt QH.  The timer delay is
+	 * 9 uframes; presumably that will be long enough.
+	 */
+	qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+	/* New entries go at the end of the intr_unlink list */
+	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
+
+	if (ehci->intr_unlinking)
+		;	/* Avoid recursive calls */
+	else if (ehci->rh_state < EHCI_RH_RUNNING)
+		ehci_handle_intr_unlinks(ehci);
+	else if (ehci->intr_unlink.next == &qh->unlink_node) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+		++ehci->intr_unlink_cycle;
+	}
+}
+
+/*
+ * It is common for only one intr URB to be scheduled on a qh, and
+ * since complete() is run in tasklet context, introduce a small
+ * delay to avoid unlinking the qh too early.
+ */
+static void start_unlink_intr_wait(struct ehci_hcd *ehci,
+				   struct ehci_qh *qh)
+{
+	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
+
+	/* New entries go at the end of the intr_unlink_wait list */
+	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
+
+	if (ehci->rh_state < EHCI_RH_RUNNING)
+		ehci_handle_start_intr_unlinks(ehci);
+	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+		++ehci->intr_unlink_wait_cycle;
+	}
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qh_hw	*hw = qh->hw;
+	int			rc;
+
+	qh->qh_state = QH_STATE_IDLE;
+	hw->hw_next = EHCI_LIST_END(ehci);
+
+	if (!list_empty(&qh->qtd_list))
+		qh_completions(ehci, qh);
+
+	/* reschedule QH iff another request is queued */
+	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
+		rc = qh_schedule(ehci, qh);
+		if (rc == 0) {
+			qh_refresh(ehci, qh);
+			qh_link_periodic(ehci, qh);
+		}
+
+		/* An error here likely indicates handshake failure
+		 * or no space left in the schedule.  Neither fault
+		 * should happen often ...
+		 *
+		 * FIXME kill the now-dysfunctional queued urbs
+		 */
+		else {
+			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
+					qh, rc);
+		}
+	}
+
+	/* maybe turn off periodic schedule */
+	--ehci->intr_count;
+	disable_periodic(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period(
+	struct ehci_hcd *ehci,
+	unsigned	frame,
+	unsigned	uframe,
+	unsigned	uperiod,
+	unsigned	usecs
+) {
+	/* complete split running into next frame?
+	 * given FSTN support, we could sometimes check...
+	 */
+	if (uframe >= 8)
+		return 0;
+
+	/* convert "usecs we need" to "max already claimed" */
+	usecs = ehci->uframe_periodic_max - usecs;
+
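+	/*
+	 * ehci->bandwidth[] tracks the usecs already committed in each
+	 * uframe of the bandwidth window; a candidate slot fails if any
+	 * uframe in its period chain is too full to take this load.
+	 */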
+	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
+			uframe += uperiod) {
+		if (ehci->bandwidth[uframe] > usecs)
+			return 0;
+	}
+
+	/* success! */
+	return 1;
+}
+
+static int check_intr_schedule(
+	struct ehci_hcd		*ehci,
+	unsigned		frame,
+	unsigned		uframe,
+	struct ehci_qh		*qh,
+	unsigned		*c_maskp,
+	struct ehci_tt		*tt
+)
+{
+	int		retval = -ENOSPC;
+	u8		mask = 0;
+
+	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
+		goto done;
+
+	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
+		goto done;
+	if (!qh->ps.c_usecs) {
+		retval = 0;
+		*c_maskp = 0;
+		goto done;
+	}
+
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
+		unsigned i;
+
+		/* TODO: this may need FSTN for SSPLIT in uframe 5. */
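+		/*
+		 * CSPLITs go in the 2nd through 4th uframes after the
+		 * SSPLIT, per the USB 2.0 split-transaction rules.
+		 */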
+		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
+			if (!check_period(ehci, frame, i,
+					qh->ps.bw_uperiod, qh->ps.c_usecs))
+				goto done;
+			else
+				mask |= 1 << i;
+
+		retval = 0;
+
+		*c_maskp = mask;
+	}
+#else
+	/* Make sure this tt's buffer is also available for CSPLITs.
+	 * We pessimize a bit; probably the typical full speed case
+	 * doesn't need the second CSPLIT.
+	 *
+	 * NOTE:  both SPLIT and CSPLIT could be checked in just
+	 * one smart pass...
+	 */
+	mask = 0x03 << (uframe + qh->gap_uf);
+	*c_maskp = mask;
+
+	mask |= 1 << uframe;
+	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
+		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
+				qh->ps.bw_uperiod, qh->ps.c_usecs))
+			goto done;
+		if (!check_period(ehci, frame, uframe + qh->gap_uf,
+				qh->ps.bw_uperiod, qh->ps.c_usecs))
+			goto done;
+		retval = 0;
+	}
+#endif
+done:
+	return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	int		status = 0;
+	unsigned	uframe;
+	unsigned	c_mask;
+	struct ehci_qh_hw	*hw = qh->hw;
+	struct ehci_tt		*tt;
+
+	hw->hw_next = EHCI_LIST_END(ehci);
+
+	/* reuse the previous schedule slots, if we can */
+	if (qh->ps.phase != NO_FRAME) {
+		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
+		return 0;
+	}
+
+	uframe = 0;
+	c_mask = 0;
+	tt = find_tt(qh->ps.udev);
+	if (IS_ERR(tt)) {
+		status = PTR_ERR(tt);
+		goto done;
+	}
+	compute_tt_budget(ehci->tt_budget, tt);
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	/* "normal" case, uframing flexible except with splits */
+	if (qh->ps.bw_period) {
+		int		i;
+		unsigned	frame;
+
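+		/*
+		 * Starting the search at a pseudo-random frame spreads
+		 * periodic load across the schedule instead of piling
+		 * everything into the same frames.
+		 */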
+		for (i = qh->ps.bw_period; i > 0; --i) {
+			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
+			for (uframe = 0; uframe < 8; uframe++) {
+				status = check_intr_schedule(ehci,
+						frame, uframe, qh, &c_mask, tt);
+				if (status == 0)
+					goto got_it;
+			}
+		}
+
+	/* qh->ps.bw_period == 0 means every uframe */
+	} else {
+		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
+	}
+	if (status)
+		goto done;
+
+ got_it:
+	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
+			(qh->ps.period - 1) : 0);
+	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
+	qh->ps.phase_uf = uframe;
+	qh->ps.cs_mask = qh->ps.period ?
+			(c_mask << 8) | (1 << uframe) :
+			QH_SMASK;
+
+	/* reset S-frame and (maybe) C-frame masks */
+	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
+	reserve_release_intr_bandwidth(ehci, qh, 1);
+
+done:
+	return status;
+}
+
+static int intr_submit(
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*qtd_list,
+	gfp_t			mem_flags
+) {
+	unsigned		epnum;
+	unsigned long		flags;
+	struct ehci_qh		*qh;
+	int			status;
+	struct list_head	empty;
+
+	/* get endpoint and transfer/schedule data */
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
+		status = -ESHUTDOWN;
+		goto done_not_linked;
+	}
+	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+	if (unlikely(status))
+		goto done_not_linked;
+
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD(&empty);
+	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
+	if (qh == NULL) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		status = qh_schedule(ehci, qh);
+		if (status)
+			goto done;
+	}
+
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	BUG_ON(qh == NULL);
+
+	/* stuff into the periodic schedule */
+	if (qh->qh_state == QH_STATE_IDLE) {
+		qh_refresh(ehci, qh);
+		qh_link_periodic(ehci, qh);
+	} else {
+		/* cancel unlink wait for the qh */
+		cancel_unlink_wait_intr(ehci, qh);
+	}
+
+	/* ... update usbfs periodic stats */
+	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
+
+done:
+	if (unlikely(status))
+		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+done_not_linked:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	if (status)
+		qtd_list_free(ehci, urb, qtd_list);
+
+	return status;
+}
+
+static void scan_intr(struct ehci_hcd *ehci)
+{
+	struct ehci_qh		*qh;
+
+	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
+			intr_node) {
+
+		/* clean any finished work for this qh */
+		if (!list_empty(&qh->qtd_list)) {
+			int temp;
+
+			/*
+			 * Unlinks could happen here; completion reporting
+			 * drops the lock.  That's why ehci->qh_scan_next
+			 * always holds the next qh to scan; if the next qh
+			 * gets unlinked then ehci->qh_scan_next is adjusted
+			 * in qh_unlink_periodic().
+			 */
+			temp = qh_completions(ehci, qh);
+			if (unlikely(temp))
+				start_unlink_intr(ehci, qh);
+			else if (unlikely(list_empty(&qh->qtd_list) &&
+					qh->qh_state == QH_STATE_LINKED))
+				start_unlink_intr_wait(ehci, qh);
+		}
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ehci_iso_stream ops work with both ITD and SITD */
+
+static struct ehci_iso_stream *
+iso_stream_alloc(gfp_t mem_flags)
+{
+	struct ehci_iso_stream *stream;
+
+	stream = kzalloc(sizeof(*stream), mem_flags);
+	if (likely(stream != NULL)) {
+		INIT_LIST_HEAD(&stream->td_list);
+		INIT_LIST_HEAD(&stream->free_list);
+		stream->next_uframe = NO_FRAME;
+		stream->ps.phase = NO_FRAME;
+	}
+	return stream;
+}
+
+static void
+iso_stream_init(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_stream	*stream,
+	struct urb		*urb
+)
+{
+	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+
+	struct usb_device	*dev = urb->dev;
+	u32			buf1;
+	unsigned		epnum, maxp;
+	int			is_input;
+	unsigned		tmp;
+
+	/*
+	 * this might be a "high bandwidth" highspeed endpoint,
+	 * as encoded in the ep descriptor's wMaxPacket field
+	 */
+	epnum = usb_pipeendpoint(urb->pipe);
+	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
+	maxp = usb_endpoint_maxp(&urb->ep->desc);
+	buf1 = is_input ? 1 << 11 : 0;
+
+	/* knows about ITD vs SITD */
+	if (dev->speed == USB_SPEED_HIGH) {
+		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
+
+		stream->highspeed = 1;
+
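+		/*
+		 * buf1 carries the per-transaction max packet size for
+		 * the hardware; the bandwidth math below uses the
+		 * per-uframe total, maxp times the high-bandwidth
+		 * multiplier.
+		 */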
+		buf1 |= maxp;
+		maxp *= multi;
+
+		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
+		stream->buf1 = cpu_to_hc32(ehci, buf1);
+		stream->buf2 = cpu_to_hc32(ehci, multi);
+
+		/* usbfs wants to report the average usecs per frame tied up
+		 * when transfers on this endpoint are scheduled ...
+		 */
+		stream->ps.usecs = HS_USECS_ISO(maxp);
+
+		/* period for bandwidth allocation */
+		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+				1 << (urb->ep->desc.bInterval - 1));
+
+		/* Allow urb->interval to override */
+		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+
+		stream->uperiod = urb->interval;
+		stream->ps.period = urb->interval >> 3;
+		stream->bandwidth = stream->ps.usecs * 8 /
+				stream->ps.bw_uperiod;
+
+	} else {
+		u32		addr;
+		int		think_time;
+		int		hs_transfers;
+
+		addr = dev->ttport << 24;
+		if (!ehci_is_TDI(ehci)
+				|| (dev->tt->hub !=
+					ehci_to_hcd(ehci)->self.root_hub))
+			addr |= dev->tt->hub->devnum << 16;
+		addr |= epnum << 8;
+		addr |= dev->devnum;
+		stream->ps.usecs = HS_USECS_ISO(maxp);
+		think_time = dev->tt->think_time;
+		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
+				dev->speed, is_input, 1, maxp));
+		hs_transfers = max(1u, (maxp + 187) / 188);
+		if (is_input) {
+			u32	tmp;
+
+			addr |= 1 << 31;
+			stream->ps.c_usecs = stream->ps.usecs;
+			stream->ps.usecs = HS_USECS_ISO(1);
+			stream->ps.cs_mask = 1;
+
+			/* c-mask as specified in USB 2.0 11.18.4 3.c */
+			tmp = (1 << (hs_transfers + 2)) - 1;
+			stream->ps.cs_mask |= tmp << (8 + 2);
+		} else
+			stream->ps.cs_mask = smask_out[hs_transfers - 1];
+
+		/* period for bandwidth allocation */
+		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+				1 << (urb->ep->desc.bInterval - 1));
+
+		/* Allow urb->interval to override */
+		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+		stream->ps.bw_uperiod = stream->ps.bw_period << 3;
+
+		stream->ps.period = urb->interval;
+		stream->uperiod = urb->interval << 3;
+		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
+				stream->ps.bw_period;
+
+		/* stream->splits gets created from cs_mask later */
+		stream->address = cpu_to_hc32(ehci, addr);
+	}
+
+	stream->ps.udev = dev;
+	stream->ps.ep = urb->ep;
+
+	stream->bEndpointAddress = is_input | epnum;
+	stream->maxp = maxp;
+}
+
+static struct ehci_iso_stream *
+iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
+{
+	unsigned		epnum;
+	struct ehci_iso_stream	*stream;
+	struct usb_host_endpoint *ep;
+	unsigned long		flags;
+
+	epnum = usb_pipeendpoint(urb->pipe);
+	if (usb_pipein(urb->pipe))
+		ep = urb->dev->ep_in[epnum];
+	else
+		ep = urb->dev->ep_out[epnum];
+
+	spin_lock_irqsave(&ehci->lock, flags);
+	stream = ep->hcpriv;
+
+	if (unlikely(stream == NULL)) {
+		stream = iso_stream_alloc(GFP_ATOMIC);
+		if (likely(stream != NULL)) {
+			ep->hcpriv = stream;
+			iso_stream_init(ehci, stream, urb);
+		}
+
+	/* if dev->ep[epnum] is a QH, hw is set */
+	} else if (unlikely(stream->hw != NULL)) {
+		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
+			urb->dev->devpath, epnum,
+			usb_pipein(urb->pipe) ? "in" : "out");
+		stream = NULL;
+	}
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ehci_iso_sched ops can be ITD-only or SITD-only */
+
+static struct ehci_iso_sched *
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
+{
+	struct ehci_iso_sched	*iso_sched;
+	int			size = sizeof(*iso_sched);
+
+	size += packets * sizeof(struct ehci_iso_packet);
+	iso_sched = kzalloc(size, mem_flags);
+	if (likely(iso_sched != NULL))
+		INIT_LIST_HEAD(&iso_sched->td_list);
+
+	return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_sched	*iso_sched,
+	struct ehci_iso_stream	*stream,
+	struct urb		*urb
+)
+{
+	unsigned	i;
+	dma_addr_t	dma = urb->transfer_dma;
+
+	/* how many uframes are needed for these transfers */
+	iso_sched->span = urb->number_of_packets * stream->uperiod;
+
+	/* figure out per-uframe itd fields that we'll need later
+	 * when we fit new itds into the schedule.
+	 */
+	for (i = 0; i < urb->number_of_packets; i++) {
+		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
+		unsigned		length;
+		dma_addr_t		buf;
+		u32			trans;
+
+		length = urb->iso_frame_desc[i].length;
+		buf = dma + urb->iso_frame_desc[i].offset;
+
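+		/*
+		 * iTD transaction dword: status in the top bits, transfer
+		 * length in bits 27:16, IOC in bit 15, page offset in bits
+		 * 11:0; the PG field (bits 14:12) is patched in later by
+		 * itd_patch().
+		 */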
+		trans = EHCI_ISOC_ACTIVE;
+		trans |= buf & 0x0fff;
+		if (unlikely(((i + 1) == urb->number_of_packets))
+				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
+			trans |= EHCI_ITD_IOC;
+		trans |= length << 16;
+		uframe->transaction = cpu_to_hc32(ehci, trans);
+
+		/* might need to cross a buffer page within a uframe */
+		uframe->bufp = (buf & ~(u64)0x0fff);
+		buf += length;
+		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
+			uframe->cross = 1;
+	}
+}
+
+static void
+iso_sched_free(
+	struct ehci_iso_stream	*stream,
+	struct ehci_iso_sched	*iso_sched
+)
+{
+	if (!iso_sched)
+		return;
+	/* caller must hold ehci->lock! */
+	list_splice(&iso_sched->td_list, &stream->free_list);
+	kfree(iso_sched);
+}
+
+static int
+itd_urb_transaction(
+	struct ehci_iso_stream	*stream,
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	gfp_t			mem_flags
+)
+{
+	struct ehci_itd		*itd;
+	dma_addr_t		itd_dma;
+	int			i;
+	unsigned		num_itds;
+	struct ehci_iso_sched	*sched;
+	unsigned long		flags;
+
+	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+	if (unlikely(sched == NULL))
+		return -ENOMEM;
+
+	itd_sched_init(ehci, sched, stream, urb);
+
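+	/*
+	 * Each iTD covers one frame.  With sub-frame intervals several
+	 * packets share an iTD, so span/8 frames suffice (plus one in
+	 * case the slot straddles a frame boundary); longer intervals
+	 * need one iTD per packet.
+	 */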
+	if (urb->interval < 8)
+		num_itds = 1 + (sched->span + 7) / 8;
+	else
+		num_itds = urb->number_of_packets;
+
+	/* allocate/init ITDs */
+	spin_lock_irqsave(&ehci->lock, flags);
+	for (i = 0; i < num_itds; i++) {
+
+		/*
+		 * Use iTDs from the free list, but not iTDs that may
+		 * still be in use by the hardware.
+		 */
+		if (likely(!list_empty(&stream->free_list))) {
+			itd = list_first_entry(&stream->free_list,
+					struct ehci_itd, itd_list);
+			if (itd->frame == ehci->now_frame)
+				goto alloc_itd;
+			list_del(&itd->itd_list);
+			itd_dma = itd->itd_dma;
+		} else {
+ alloc_itd:
+			spin_unlock_irqrestore(&ehci->lock, flags);
+			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
+					&itd_dma);
+			spin_lock_irqsave(&ehci->lock, flags);
+			if (!itd) {
+				iso_sched_free(stream, sched);
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				return -ENOMEM;
+			}
+		}
+
+		memset(itd, 0, sizeof(*itd));
+		itd->itd_dma = itd_dma;
+		itd->frame = NO_FRAME;
+		list_add(&itd->itd_list, &sched->td_list);
+	}
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	/* temporarily store schedule info in hcpriv */
+	urb->hcpriv = sched;
+	urb->error_count = 0;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
+		struct ehci_iso_stream *stream, int sign)
+{
+	unsigned		uframe;
+	unsigned		i, j;
+	unsigned		s_mask, c_mask, m;
+	int			usecs = stream->ps.usecs;
+	int			c_usecs = stream->ps.c_usecs;
+	int			tt_usecs = stream->ps.tt_usecs;
+	struct ehci_tt		*tt;
+
+	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
+		return;
+	uframe = stream->ps.bw_phase << 3;
+
+	bandwidth_dbg(ehci, sign, "iso", &stream->ps);
+
+	if (sign < 0) {		/* Release bandwidth */
+		usecs = -usecs;
+		c_usecs = -c_usecs;
+		tt_usecs = -tt_usecs;
+	}
+
+	if (!stream->splits) {		/* High speed */
+		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
+				i += stream->ps.bw_uperiod)
+			ehci->bandwidth[i] += usecs;
+
+	} else {			/* Full speed */
+		s_mask = stream->ps.cs_mask;
+		c_mask = s_mask >> 8;
+
+		/* NOTE: adjustment needed for frame overflow */
+		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
+				i += stream->ps.bw_uperiod) {
+			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
+					(++j, m <<= 1)) {
+				if (s_mask & m)
+					ehci->bandwidth[i+j] += usecs;
+				else if (c_mask & m)
+					ehci->bandwidth[i+j] += c_usecs;
+			}
+		}
+
+		tt = find_tt(stream->ps.udev);
+		if (sign > 0)
+			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
+		else
+			list_del(&stream->ps.ps_list);
+
+		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
+				i += stream->ps.bw_period)
+			tt->bandwidth[i] += tt_usecs;
+	}
+}
+
+static inline int
+itd_slot_ok(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_stream	*stream,
+	unsigned		uframe
+)
+{
+	unsigned		usecs;
+
+	/* convert "usecs we need" to "max already claimed" */
+	usecs = ehci->uframe_periodic_max - stream->ps.usecs;
+
+	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
+			uframe += stream->ps.bw_uperiod) {
+		if (ehci->bandwidth[uframe] > usecs)
+			return 0;
+	}
+	return 1;
+}
+
+static inline int
+sitd_slot_ok(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_stream	*stream,
+	unsigned		uframe,
+	struct ehci_iso_sched	*sched,
+	struct ehci_tt		*tt
+)
+{
+	unsigned		mask, tmp;
+	unsigned		frame, uf;
+
+	mask = stream->ps.cs_mask << (uframe & 7);
+
+	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
+	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
+		return 0;
+
+	/* for IN, don't wrap CSPLIT into the next frame */
+	if (mask & ~0xffff)
+		return 0;
+
+	/* check bandwidth */
+	uframe &= stream->ps.bw_uperiod - 1;
+	frame = uframe >> 3;
+
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+	/* The tt's fullspeed bus bandwidth must be available.
+	 * tt_available scheduling guarantees 10+% for control/bulk.
+	 */
+	uf = uframe & 7;
+	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
+		return 0;
+#else
+	/* tt must be idle for start(s), any gap, and csplit.
+	 * assume scheduling slop leaves 10+% for control/bulk.
+	 */
+	if (!tt_no_collision(ehci, stream->ps.bw_period,
+			stream->ps.udev, frame, mask))
+		return 0;
+#endif
+
+	do {
+		unsigned	max_used;
+		unsigned	i;
+
+		/* check starts (OUT uses more than one) */
+		uf = uframe;
+		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
+		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
+			if (ehci->bandwidth[uf] > max_used)
+				return 0;
+		}
+
+		/* for IN, check CSPLIT */
+		if (stream->ps.c_usecs) {
+			max_used = ehci->uframe_periodic_max -
+					stream->ps.c_usecs;
+			uf = uframe & ~7;
+			tmp = 1 << (2+8);
+			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
+				if ((stream->ps.cs_mask & tmp) == 0)
+					continue;
+				if (ehci->bandwidth[uf+i] > max_used)
+					return 0;
+			}
+		}
+
+		uframe += stream->ps.bw_uperiod;
+	} while (uframe < EHCI_BANDWIDTH_SIZE);
+
+	stream->ps.cs_mask <<= uframe & 7;
+	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
+	return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.)  That limits the size
+ * of transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than ehci's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
+ */
+
+static int
+iso_stream_schedule(
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct ehci_iso_stream	*stream
+)
+{
+	u32			now, base, next, start, period, span, now2;
+	u32			wrap = 0, skip = 0;
+	int			status = 0;
+	unsigned		mod = ehci->periodic_size << 3;
+	struct ehci_iso_sched	*sched = urb->hcpriv;
+	bool			empty = list_empty(&stream->td_list);
+	bool			new_stream = false;
+
+	period = stream->uperiod;
+	span = sched->span;
+	if (!stream->highspeed)
+		span <<= 3;
+
+	/* Start a new isochronous stream? */
+	if (unlikely(empty && !hcd_periodic_completion_in_progress(
+			ehci_to_hcd(ehci), urb->ep))) {
+
+		/* Schedule the endpoint */
+		if (stream->ps.phase == NO_FRAME) {
+			int		done = 0;
+			struct ehci_tt	*tt = find_tt(stream->ps.udev);
+
+			if (IS_ERR(tt)) {
+				status = PTR_ERR(tt);
+				goto fail;
+			}
+			compute_tt_budget(ehci->tt_budget, tt);
+
+			start = ((-(++ehci->random_frame)) << 3) & (period - 1);
+
+			/* find a uframe slot with enough bandwidth.
+			 * Early uframes are more precious because full-speed
+			 * iso IN transfers can't use late uframes,
+			 * and therefore they should be allocated last.
+			 */
+			next = start;
+			start += period;
+			do {
+				start--;
+				/* check schedule: enough space? */
+				if (stream->highspeed) {
+					if (itd_slot_ok(ehci, stream, start))
+						done = 1;
+				} else {
+					if ((start % 8) >= 6)
+						continue;
+					if (sitd_slot_ok(ehci, stream, start,
+							sched, tt))
+						done = 1;
+				}
+			} while (start > next && !done);
+
+			/* no room in the schedule */
+			if (!done) {
+				ehci_dbg(ehci, "iso sched full %p\n", urb);
+				status = -ENOSPC;
+				goto fail;
+			}
+			stream->ps.phase = (start >> 3) &
+					(stream->ps.period - 1);
+			stream->ps.bw_phase = stream->ps.phase &
+					(stream->ps.bw_period - 1);
+			stream->ps.phase_uf = start & 7;
+			reserve_release_iso_bandwidth(ehci, stream, 1);
+		}
+
+		/* New stream is already scheduled; use the upcoming slot */
+		else {
+			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
+		}
+
+		stream->next_uframe = start;
+		new_stream = true;
+	}
+
+	now = ehci_read_frame_index(ehci) & (mod - 1);
+
+	/* Take the isochronous scheduling threshold into account */
+	if (ehci->i_thresh)
+		next = now + ehci->i_thresh;	/* uframe cache */
+	else
+		next = (now + 2 + 7) & ~0x07;	/* full frame cache */
+
+	/* If needed, initialize last_iso_frame so that this URB will be seen */
+	if (ehci->isoc_count == 0)
+		ehci->last_iso_frame = now >> 3;
+
+	/*
+	 * Use ehci->last_iso_frame as the base.  There can't be any
+	 * TDs scheduled for earlier than that.
+	 */
+	base = ehci->last_iso_frame << 3;
+	next = (next - base) & (mod - 1);
+	start = (stream->next_uframe - base) & (mod - 1);
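+	/*
+	 * next and start are now uframe offsets relative to base,
+	 * reduced mod the schedule size, so the wraparound checks
+	 * below become simple unsigned comparisons.
+	 */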
+
+	if (unlikely(new_stream))
+		goto do_ASAP;
+
+	/*
+	 * Typical case: reuse current schedule, stream may still be active.
+	 * Hopefully there are no gaps from the host falling behind
+	 * (irq delays etc).  If there are, the behavior depends on
+	 * whether URB_ISO_ASAP is set.
+	 */
+	now2 = (now - base) & (mod - 1);
+
+	/* Is the schedule about to wrap around? */
+	if (unlikely(!empty && start < period)) {
+		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
+				urb, stream->next_uframe, base, period, mod);
+		status = -EFBIG;
+		goto fail;
+	}
+
+	/* Is the next packet scheduled after the base time? */
+	if (likely(!empty || start <= now2 + period)) {
+
+		/* URB_ISO_ASAP: make sure that start >= next */
+		if (unlikely(start < next &&
+				(urb->transfer_flags & URB_ISO_ASAP)))
+			goto do_ASAP;
+
+		/* Otherwise use start, if it's not in the past */
+		if (likely(start >= now2))
+			goto use_start;
+
+	/* Otherwise we got an underrun while the queue was empty */
+	} else {
+		if (urb->transfer_flags & URB_ISO_ASAP)
+			goto do_ASAP;
+		wrap = mod;
+		now2 += mod;
+	}
+
+	/* How many uframes and packets do we need to skip? */
+	skip = (now2 - start + period - 1) & -period;
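+	/* i.e. the underrun rounded up to a whole number of periods */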
+	if (skip >= span) {		/* Entirely in the past? */
+		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
+				urb, start + base, span - period, now2 + base,
+				base);
+
+		/* Try to keep the last TD intact for scanning later */
+		skip = span - period;
+
+		/* Will it come before the current scan position? */
+		if (empty) {
+			skip = span;	/* Skip the entire URB */
+			status = 1;	/* and give it back immediately */
+			iso_sched_free(stream, sched);
+			sched = NULL;
+		}
+	}
+	urb->error_count = skip / period;
+	if (sched)
+		sched->first_packet = urb->error_count;
+	goto use_start;
+
+ do_ASAP:
+	/* Use the first slot after "next" */
+	start = next + ((start - next) & (period - 1));
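+	/* i.e. the first slot at or after next that preserves the
+	 * stream's uframe phase within its period */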
+
+ use_start:
+	/* Tried to schedule too far into the future? */
+	if (unlikely(start + span - period >= mod + wrap)) {
+		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
+				urb, start, span - period, mod + wrap);
+		status = -EFBIG;
+		goto fail;
+	}
+
+	start += base;
+	stream->next_uframe = (start + skip) & (mod - 1);
+
+	/* report high speed start in uframes; full speed, in frames */
+	urb->start_frame = start & (mod - 1);
+	if (!stream->highspeed)
+		urb->start_frame >>= 3;
+	return status;
+
+ fail:
+	iso_sched_free(stream, sched);
+	urb->hcpriv = NULL;
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
+		struct ehci_itd *itd)
+{
+	int i;
+
+	/* it's been recently zeroed */
+	itd->hw_next = EHCI_LIST_END(ehci);
+	itd->hw_bufp[0] = stream->buf0;
+	itd->hw_bufp[1] = stream->buf1;
+	itd->hw_bufp[2] = stream->buf2;
+
+	for (i = 0; i < 8; i++)
+		itd->index[i] = -1;
+
+	/* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+	struct ehci_hcd		*ehci,
+	struct ehci_itd		*itd,
+	struct ehci_iso_sched	*iso_sched,
+	unsigned		index,
+	u16			uframe
+)
+{
+	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
+	unsigned		pg = itd->pg;
+
+	/* BUG_ON(pg == 6 && uf->cross); */
+
+	uframe &= 0x07;
+	itd->index[uframe] = index;
+
+	itd->hw_transaction[uframe] = uf->transaction;
+	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
+	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
+	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
+
+	/* iso_frame_desc[].offset must be strictly increasing */
+	if (unlikely(uf->cross)) {
+		u64	bufp = uf->bufp + 4096;
+
+		itd->pg = ++pg;
+		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
+		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
+	}
+}
+
+static inline void
+itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
+{
+	union ehci_shadow	*prev = &ehci->pshadow[frame];
+	__hc32			*hw_p = &ehci->periodic[frame];
+	union ehci_shadow	here = *prev;
+	__hc32			type = 0;
+
+	/* skip any iso nodes which might belong to previous microframes */
+	while (here.ptr) {
+		type = Q_NEXT_TYPE(ehci, *hw_p);
+		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+			break;
+		prev = periodic_next_shadow(ehci, prev, type);
+		hw_p = shadow_next_periodic(ehci, &here, type);
+		here = *prev;
+	}
+
+	itd->itd_next = here;
+	itd->hw_next = *hw_p;
+	prev->itd = itd;
+	itd->frame = frame;
+	wmb();
+	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	unsigned		mod,
+	struct ehci_iso_stream	*stream
+)
+{
+	int			packet;
+	unsigned		next_uframe, uframe, frame;
+	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
+	struct ehci_itd		*itd;
+
+	next_uframe = stream->next_uframe & (mod - 1);
+
+	if (unlikely(list_empty(&stream->td_list)))
+		ehci_to_hcd(ehci)->self.bandwidth_allocated
+				+= stream->bandwidth;
+
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_pll_fix == 1)
+			usb_amd_quirk_pll_disable();
+	}
+
+	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
+
+	/* fill iTDs uframe by uframe */
+	for (packet = iso_sched->first_packet, itd = NULL;
+			packet < urb->number_of_packets;) {
+		if (itd == NULL) {
+			/* ASSERT:  we have all necessary itds */
+			/* BUG_ON(list_empty(&iso_sched->td_list)); */
+
+			/* ASSERT:  no itds for this endpoint in this uframe */
+
+			itd = list_entry(iso_sched->td_list.next,
+					struct ehci_itd, itd_list);
+			list_move_tail(&itd->itd_list, &stream->td_list);
+			itd->stream = stream;
+			itd->urb = urb;
+			itd_init(ehci, stream, itd);
+		}
+
+		uframe = next_uframe & 0x07;
+		frame = next_uframe >> 3;
+
+		itd_patch(ehci, itd, iso_sched, packet, uframe);
+
+		next_uframe += stream->uperiod;
+		next_uframe &= mod - 1;
+		packet++;
+
+		/* link completed itds into the schedule */
+		if (((next_uframe >> 3) != frame)
+				|| packet == urb->number_of_packets) {
+			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
+			itd = NULL;
+		}
+	}
+	stream->next_uframe = next_uframe;
+
+	/* don't need that schedule data any more */
+	iso_sched_free(stream, iso_sched);
+	urb->hcpriv = stream;
+
+	++ehci->isoc_count;
+	enable_periodic(ehci);
+}
+
+#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD.  Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly.  That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs.  It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
+{
+	struct urb				*urb = itd->urb;
+	struct usb_iso_packet_descriptor	*desc;
+	u32					t;
+	unsigned				uframe;
+	int					urb_index = -1;
+	struct ehci_iso_stream			*stream = itd->stream;
+	bool					retval = false;
+
+	/* for each uframe with a packet */
+	for (uframe = 0; uframe < 8; uframe++) {
+		if (likely(itd->index[uframe] == -1))
+			continue;
+		urb_index = itd->index[uframe];
+		desc = &urb->iso_frame_desc[urb_index];
+
+		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
+		itd->hw_transaction[uframe] = 0;
+
+		/* report transfer status */
+		if (unlikely(t & ISO_ERRS)) {
+			urb->error_count++;
+			if (t & EHCI_ISOC_BUF_ERR)
+				desc->status = usb_pipein(urb->pipe)
+					? -ENOSR  /* hc couldn't read */
+					: -ECOMM; /* hc couldn't write */
+			else if (t & EHCI_ISOC_BABBLE)
+				desc->status = -EOVERFLOW;
+			else /* (t & EHCI_ISOC_XACTERR) */
+				desc->status = -EPROTO;
+
+			/* HC need not update length with this error */
+			if (!(t & EHCI_ISOC_BABBLE)) {
+				desc->actual_length = EHCI_ITD_LENGTH(t);
+				urb->actual_length += desc->actual_length;
+			}
+		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
+			desc->status = 0;
+			desc->actual_length = EHCI_ITD_LENGTH(t);
+			urb->actual_length += desc->actual_length;
+		} else {
+			/* URB was too late */
+			urb->error_count++;
+		}
+	}
+
+	/* handle completion now? */
+	if (likely((urb_index + 1) != urb->number_of_packets))
+		goto done;
+
+	/*
+	 * ASSERT: it's really the last itd for this urb
+	 * list_for_each_entry (itd, &stream->td_list, itd_list)
+	 *	 BUG_ON(itd->urb == urb);
+	 */
+
+	/* give urb back to the driver; completion often (re)submits */
+	ehci_urb_done(ehci, urb, 0);
+	retval = true;
+	urb = NULL;
+
+	--ehci->isoc_count;
+	disable_periodic(ehci);
+
+	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_pll_fix == 1)
+			usb_amd_quirk_pll_enable();
+	}
+
+	if (unlikely(list_is_singular(&stream->td_list)))
+		ehci_to_hcd(ehci)->self.bandwidth_allocated
+				-= stream->bandwidth;
+
+done:
+	itd->urb = NULL;
+
+	/* Add to the end of the free list for later reuse */
+	list_move_tail(&itd->itd_list, &stream->free_list);
+
+	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+	if (list_empty(&stream->td_list)) {
+		list_splice_tail_init(&stream->free_list,
+				&ehci->cached_itd_list);
+		start_free_itds(ehci);
+	}
+
+	return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
+	gfp_t mem_flags)
+{
+	int			status = -EINVAL;
+	unsigned long		flags;
+	struct ehci_iso_stream	*stream;
+
+	/* Get iso_stream head */
+	stream = iso_stream_find(ehci, urb);
+	if (unlikely(stream == NULL)) {
+		ehci_dbg(ehci, "can't get iso stream\n");
+		return -ENOMEM;
+	}
+	if (unlikely(urb->interval != stream->uperiod)) {
+		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
+			stream->uperiod, urb->interval);
+		goto done;
+	}
+
+#ifdef EHCI_URB_TRACE
+	ehci_dbg(ehci,
+		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
+		__func__, urb->dev->devpath, urb,
+		usb_pipeendpoint(urb->pipe),
+		usb_pipein(urb->pipe) ? "in" : "out",
+		urb->transfer_buffer_length,
+		urb->number_of_packets, urb->interval,
+		stream);
+#endif
+
+	/* allocate ITDs w/o locking anything */
+	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
+	if (unlikely(status < 0)) {
+		ehci_dbg(ehci, "can't init itds\n");
+		goto done;
+	}
+
+	/* schedule ... need to lock */
+	spin_lock_irqsave(&ehci->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
+		status = -ESHUTDOWN;
+		goto done_not_linked;
+	}
+	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+	if (unlikely(status))
+		goto done_not_linked;
+	status = iso_stream_schedule(ehci, urb, stream);
+	if (likely(status == 0)) {
+		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
+	} else if (status > 0) {
+		status = 0;
+		ehci_urb_done(ehci, urb, 0);
+	} else {
+		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+	}
+ done_not_linked:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+ done:
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * "Split ISO TDs" ... used for USB 1.1 devices going through the
+ * TTs in USB 2.0 hubs.  These need microframe scheduling.
+ */
+
+static inline void
+sitd_sched_init(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_sched	*iso_sched,
+	struct ehci_iso_stream	*stream,
+	struct urb		*urb
+)
+{
+	unsigned	i;
+	dma_addr_t	dma = urb->transfer_dma;
+
+	/* how many frames are needed for these transfers */
+	iso_sched->span = urb->number_of_packets * stream->ps.period;
+
+	/* figure out per-frame sitd fields that we'll need later
+	 * when we fit new sitds into the schedule.
+	 */
+	for (i = 0; i < urb->number_of_packets; i++) {
+		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
+		unsigned		length;
+		dma_addr_t		buf;
+		u32			trans;
+
+		length = urb->iso_frame_desc[i].length & 0x03ff;
+		buf = dma + urb->iso_frame_desc[i].offset;
+
+		trans = SITD_STS_ACTIVE;
+		if (((i + 1) == urb->number_of_packets)
+				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
+			trans |= SITD_IOC;
+		trans |= length << 16;
+		packet->transaction = cpu_to_hc32(ehci, trans);
+
+		/* might need to cross a buffer page within a td */
+		packet->bufp = buf;
+		packet->buf1 = (buf + length) & ~0x0fff;
+		if (packet->buf1 != (buf & ~(u64)0x0fff))
+			packet->cross = 1;
+
+		/* OUT uses multiple start-splits */
+		if (stream->bEndpointAddress & USB_DIR_IN)
+			continue;
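+		/*
+		 * Low bits of buffer dword 1: T-count (number of 188-byte
+		 * start-splits) in bits 2:0, TP in bits 4:3 (00 = ALL,
+		 * 01 = BEGIN).
+		 */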
+		length = (length + 187) / 188;
+		if (length > 1) /* BEGIN vs ALL */
+			length |= 1 << 3;
+		packet->buf1 |= length;
+	}
+}
+
+static int
+sitd_urb_transaction(
+	struct ehci_iso_stream	*stream,
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	gfp_t			mem_flags
+)
+{
+	struct ehci_sitd	*sitd;
+	dma_addr_t		sitd_dma;
+	int			i;
+	struct ehci_iso_sched	*iso_sched;
+	unsigned long		flags;
+
+	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+	if (iso_sched == NULL)
+		return -ENOMEM;
+
+	sitd_sched_init(ehci, iso_sched, stream, urb);
+
+	/* allocate/init sITDs */
+	spin_lock_irqsave(&ehci->lock, flags);
+	for (i = 0; i < urb->number_of_packets; i++) {
+
+		/* NOTE:  for now, we don't try to handle wraparound cases
+		 * for IN (using sitd->hw_backpointer, like a FSTN), which
+		 * means we never need two sitds for full speed packets.
+		 */
+
+		/*
+		 * Use siTDs from the free list, but not siTDs that may
+		 * still be in use by the hardware.
+		 */
+		if (likely(!list_empty(&stream->free_list))) {
+			sitd = list_first_entry(&stream->free_list,
+					 struct ehci_sitd, sitd_list);
+			if (sitd->frame == ehci->now_frame)
+				goto alloc_sitd;
+			list_del(&sitd->sitd_list);
+			sitd_dma = sitd->sitd_dma;
+		} else {
+ alloc_sitd:
+			spin_unlock_irqrestore(&ehci->lock, flags);
+			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
+					&sitd_dma);
+			spin_lock_irqsave(&ehci->lock, flags);
+			if (!sitd) {
+				iso_sched_free(stream, iso_sched);
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				return -ENOMEM;
+			}
+		}
+
+		memset(sitd, 0, sizeof(*sitd));
+		sitd->sitd_dma = sitd_dma;
+		sitd->frame = NO_FRAME;
+		list_add(&sitd->sitd_list, &iso_sched->td_list);
+	}
+
+	/* temporarily store schedule info in hcpriv */
+	urb->hcpriv = iso_sched;
+	urb->error_count = 0;
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+sitd_patch(
+	struct ehci_hcd		*ehci,
+	struct ehci_iso_stream	*stream,
+	struct ehci_sitd	*sitd,
+	struct ehci_iso_sched	*iso_sched,
+	unsigned		index
+)
+{
+	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
+	u64			bufp;
+
+	sitd->hw_next = EHCI_LIST_END(ehci);
+	sitd->hw_fullspeed_ep = stream->address;
+	sitd->hw_uframe = stream->splits;
+	sitd->hw_results = uf->transaction;
+	sitd->hw_backpointer = EHCI_LIST_END(ehci);
+
+	bufp = uf->bufp;
+	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
+	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
+
+	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
+	if (uf->cross)
+		bufp += 4096;
+	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
+	sitd->index = index;
+}
+
+static inline void
+sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
+{
+	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
+	sitd->sitd_next = ehci->pshadow[frame];
+	sitd->hw_next = ehci->periodic[frame];
+	ehci->pshadow[frame].sitd = sitd;
+	sitd->frame = frame;
+	wmb();
+	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
+}
+
+/* fit urb's sitds into the selected schedule slot; activate as needed */
+static void sitd_link_urb(
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	unsigned		mod,
+	struct ehci_iso_stream	*stream
+)
+{
+	int			packet;
+	unsigned		next_uframe;
+	struct ehci_iso_sched	*sched = urb->hcpriv;
+	struct ehci_sitd	*sitd;
+
+	next_uframe = stream->next_uframe;
+
+	if (list_empty(&stream->td_list))
+		/* usbfs ignores TT bandwidth */
+		ehci_to_hcd(ehci)->self.bandwidth_allocated
+				+= stream->bandwidth;
+
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_pll_fix == 1)
+			usb_amd_quirk_pll_disable();
+	}
+
+	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
+
+	/* fill sITDs frame by frame */
+	for (packet = sched->first_packet, sitd = NULL;
+			packet < urb->number_of_packets;
+			packet++) {
+
+		/* ASSERT:  we have all necessary sitds */
+		BUG_ON(list_empty(&sched->td_list));
+
+		/* ASSERT:  no itds for this endpoint in this frame */
+
+		sitd = list_entry(sched->td_list.next,
+				struct ehci_sitd, sitd_list);
+		list_move_tail(&sitd->sitd_list, &stream->td_list);
+		sitd->stream = stream;
+		sitd->urb = urb;
+
+		sitd_patch(ehci, stream, sitd, sched, packet);
+		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
+				sitd);
+
+		next_uframe += stream->uperiod;
+	}
+	stream->next_uframe = next_uframe & (mod - 1);
+
+	/* don't need that schedule data any more */
+	iso_sched_free(stream, sched);
+	urb->hcpriv = stream;
+
+	++ehci->isoc_count;
+	enable_periodic(ehci);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
+				| SITD_STS_XACT | SITD_STS_MMF)
+
+/* Process and recycle a completed SITD.  Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly.  That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs.  It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+{
+	struct urb				*urb = sitd->urb;
+	struct usb_iso_packet_descriptor	*desc;
+	u32					t;
+	int					urb_index;
+	struct ehci_iso_stream			*stream = sitd->stream;
+	bool					retval = false;
+
+	urb_index = sitd->index;
+	desc = &urb->iso_frame_desc[urb_index];
+	t = hc32_to_cpup(ehci, &sitd->hw_results);
+
+	/* report transfer status */
+	if (unlikely(t & SITD_ERRS)) {
+		urb->error_count++;
+		if (t & SITD_STS_DBE)
+			desc->status = usb_pipein(urb->pipe)
+				? -ENOSR  /* hc couldn't read */
+				: -ECOMM; /* hc couldn't write */
+		else if (t & SITD_STS_BABBLE)
+			desc->status = -EOVERFLOW;
+		else /* XACT, MMF, etc */
+			desc->status = -EPROTO;
+	} else if (unlikely(t & SITD_STS_ACTIVE)) {
+		/* URB was too late */
+		urb->error_count++;
+	} else {
+		desc->status = 0;
+		desc->actual_length = desc->length - SITD_LENGTH(t);
+		urb->actual_length += desc->actual_length;
+	}
+
+	/* handle completion now? */
+	if ((urb_index + 1) != urb->number_of_packets)
+		goto done;
+
+	/*
+	 * ASSERT: it's really the last sitd for this urb
+	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
+	 *	 BUG_ON(sitd->urb == urb);
+	 */
+
+	/* give urb back to the driver; completion often (re)submits */
+	ehci_urb_done(ehci, urb, 0);
+	retval = true;
+	urb = NULL;
+
+	--ehci->isoc_count;
+	disable_periodic(ehci);
+
+	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_pll_fix == 1)
+			usb_amd_quirk_pll_enable();
+	}
+
+	if (list_is_singular(&stream->td_list))
+		ehci_to_hcd(ehci)->self.bandwidth_allocated
+				-= stream->bandwidth;
+
+done:
+	sitd->urb = NULL;
+
+	/* Add to the end of the free list for later reuse */
+	list_move_tail(&sitd->sitd_list, &stream->free_list);
+
+	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
+	if (list_empty(&stream->td_list)) {
+		list_splice_tail_init(&stream->free_list,
+				&ehci->cached_sitd_list);
+		start_free_itds(ehci);
+	}
+
+	return retval;
+}
+
+static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
+	gfp_t mem_flags)
+{
+	int			status = -EINVAL;
+	unsigned long		flags;
+	struct ehci_iso_stream	*stream;
+
+	/* Get iso_stream head */
+	stream = iso_stream_find(ehci, urb);
+	if (stream == NULL) {
+		ehci_dbg(ehci, "can't get iso stream\n");
+		return -ENOMEM;
+	}
+	if (urb->interval != stream->ps.period) {
+		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
+			stream->ps.period, urb->interval);
+		goto done;
+	}
+
+#ifdef EHCI_URB_TRACE
+	ehci_dbg(ehci,
+		"submit %p dev%s ep%d%s-iso len %d\n",
+		urb, urb->dev->devpath,
+		usb_pipeendpoint(urb->pipe),
+		usb_pipein(urb->pipe) ? "in" : "out",
+		urb->transfer_buffer_length);
+#endif
+
+	/* allocate SITDs */
+	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
+	if (status < 0) {
+		ehci_dbg(ehci, "can't init sitds\n");
+		goto done;
+	}
+
+	/* schedule ... need to lock */
+	spin_lock_irqsave(&ehci->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
+		status = -ESHUTDOWN;
+		goto done_not_linked;
+	}
+	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+	if (unlikely(status))
+		goto done_not_linked;
+	status = iso_stream_schedule(ehci, urb, stream);
+	if (likely(status == 0)) {
+		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
+	} else if (status > 0) {
+		status = 0;
+		ehci_urb_done(ehci, urb, 0);
+	} else {
+		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+	}
+ done_not_linked:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+ done:
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct ehci_hcd *ehci)
+{
+	unsigned		uf, now_frame, frame;
+	unsigned		fmask = ehci->periodic_size - 1;
+	bool			modified, live;
+	union ehci_shadow	q, *q_p;
+	__hc32			type, *hw_p;
+
+	/*
+	 * When running, scan from last scan point up to "now"
+	 * else clean up by scanning everything that's left.
+	 * Touches as few pages as possible:  cache-friendly.
+	 */
+	if (ehci->rh_state >= EHCI_RH_RUNNING) {
+		uf = ehci_read_frame_index(ehci);
+		now_frame = (uf >> 3) & fmask;
+		live = true;
+	} else {
+		now_frame = (ehci->last_iso_frame - 1) & fmask;
+		live = false;
+	}
+	ehci->now_frame = now_frame;
+
+	frame = ehci->last_iso_frame;
+
+restart:
+	/* Scan each element in frame's queue for completions */
+	q_p = &ehci->pshadow[frame];
+	hw_p = &ehci->periodic[frame];
+	q.ptr = q_p->ptr;
+	type = Q_NEXT_TYPE(ehci, *hw_p);
+	modified = false;
+
+	while (q.ptr != NULL) {
+		switch (hc32_to_cpu(ehci, type)) {
+		case Q_TYPE_ITD:
+			/*
+			 * If this ITD is still active, leave it for
+			 * later processing ... check the next entry.
+			 * No need to check for activity unless the
+			 * frame is current.
+			 */
+			if (frame == now_frame && live) {
+				rmb();
+				for (uf = 0; uf < 8; uf++) {
+					if (q.itd->hw_transaction[uf] &
+							ITD_ACTIVE(ehci))
+						break;
+				}
+				if (uf < 8) {
+					q_p = &q.itd->itd_next;
+					hw_p = &q.itd->hw_next;
+					type = Q_NEXT_TYPE(ehci,
+							q.itd->hw_next);
+					q = *q_p;
+					break;
+				}
+			}
+
+			/*
+			 * Take finished ITDs out of the schedule
+			 * and process them:  recycle, maybe report
+			 * URB completion.  HC won't cache the
+			 * pointer for much longer, if at all.
+			 */
+			*q_p = q.itd->itd_next;
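+			/*
+			 * Hardware that prefetches periodic pointers
+			 * (ehci->use_dummy_qh) must not be left with an
+			 * empty frame slot; give it the dummy QH instead
+			 * of a list-end terminator.
+			 */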
+			if (!ehci->use_dummy_qh ||
+					q.itd->hw_next != EHCI_LIST_END(ehci))
+				*hw_p = q.itd->hw_next;
+			else
+				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
+			wmb();
+			modified = itd_complete(ehci, q.itd);
+			q = *q_p;
+			break;
+		case Q_TYPE_SITD:
+			/*
+			 * If this SITD is still active, leave it for
+			 * later processing ... check the next entry.
+			 * No need to check for activity unless the
+			 * frame is current.
+			 */
+			if (((frame == now_frame) ||
+					(((frame + 1) & fmask) == now_frame))
+				&& live
+				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
+
+				q_p = &q.sitd->sitd_next;
+				hw_p = &q.sitd->hw_next;
+				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
+				q = *q_p;
+				break;
+			}
+
+			/*
+			 * Take finished SITDs out of the schedule
+			 * and process them:  recycle, maybe report
+			 * URB completion.
+			 */
+			*q_p = q.sitd->sitd_next;
+			if (!ehci->use_dummy_qh ||
+					q.sitd->hw_next != EHCI_LIST_END(ehci))
+				*hw_p = q.sitd->hw_next;
+			else
+				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
+			wmb();
+			modified = sitd_complete(ehci, q.sitd);
+			q = *q_p;
+			break;
+		default:
+			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
+					type, frame, q.ptr);
+			/* BUG(); */
+			/* FALL THROUGH */
+		case Q_TYPE_QH:
+		case Q_TYPE_FSTN:
+			/* End of the iTDs and siTDs */
+			q.ptr = NULL;
+			break;
+		}
+
+		/* Assume completion callbacks modify the queue */
+		if (unlikely(modified && ehci->isoc_count > 0))
+			goto restart;
+	}
+
+	/* Stop when we have reached the current frame */
+	if (frame == now_frame)
+		return;
+
+	/* The last frame may still have active siTDs */
+	ehci->last_iso_frame = frame;
+	frame = (frame + 1) & fmask;
+
+	goto restart;
+}
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
new file mode 100644
index 0000000..a9ee767
--- /dev/null
+++ b/drivers/usb/host/ehci-sh.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH EHCI host controller driver
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * Based on ohci-sh.c and ehci-atmel.c.
+ */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/platform_data/ehci-sh.h>
+
+struct ehci_sh_priv {
+	struct clk *iclk, *fclk;
+	struct usb_hcd *hcd;
+};
+
+static int ehci_sh_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+
+	ehci->caps = hcd->regs;
+
+	return ehci_setup(hcd);
+}
+
+static const struct hc_driver ehci_sh_hc_driver = {
+	.description			= hcd_name,
+	.product_desc			= "SuperH EHCI",
+	.hcd_priv_size			= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq				= ehci_irq,
+	.flags				= HCD_USB2 | HCD_MEMORY | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset				= ehci_sh_reset,
+	.start				= ehci_run,
+	.stop				= ehci_stop,
+	.shutdown			= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue			= ehci_urb_enqueue,
+	.urb_dequeue			= ehci_urb_dequeue,
+	.endpoint_disable		= ehci_endpoint_disable,
+	.endpoint_reset			= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number		= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data		= ehci_hub_status_data,
+	.hub_control			= ehci_hub_control,
+
+#ifdef CONFIG_PM
+	.bus_suspend			= ehci_bus_suspend,
+	.bus_resume			= ehci_bus_resume,
+#endif
+
+	.relinquish_port		= ehci_relinquish_port,
+	.port_handed_over		= ehci_port_handed_over,
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sh_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct ehci_sh_priv *priv;
+	struct ehci_sh_platdata *pdata;
+	struct usb_hcd *hcd;
+	int irq, ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		ret = -ENODEV;
+		goto fail_create_hcd;
+	}
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	/* initialize hcd */
+	hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
+			     dev_name(&pdev->dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto fail_request_resource;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct ehci_sh_priv),
+			    GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		goto fail_request_resource;
+	}
+
+	/* These are optional, we don't care if they fail */
+	priv->fclk = devm_clk_get(&pdev->dev, "usb_fck");
+	if (IS_ERR(priv->fclk))
+		priv->fclk = NULL;
+
+	priv->iclk = devm_clk_get(&pdev->dev, "usb_ick");
+	if (IS_ERR(priv->iclk))
+		priv->iclk = NULL;
+
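+	/* clk_enable() treats a NULL clk as a no-op, so the optional
+	 * clocks need no further checks here */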
+	clk_enable(priv->fclk);
+	clk_enable(priv->iclk);
+
+	if (pdata && pdata->phy_init)
+		pdata->phy_init();
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to add hcd");
+		goto fail_add_hcd;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	priv->hcd = hcd;
+	platform_set_drvdata(pdev, priv);
+
+	return ret;
+
+fail_add_hcd:
+	clk_disable(priv->iclk);
+	clk_disable(priv->fclk);
+
+fail_request_resource:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+
+	return ret;
+}
+
+static int ehci_hcd_sh_remove(struct platform_device *pdev)
+{
+	struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	clk_disable(priv->fclk);
+	clk_disable(priv->iclk);
+
+	return 0;
+}
+
+static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
+{
+	struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_sh_driver = {
+	.probe		= ehci_hcd_sh_probe,
+	.remove		= ehci_hcd_sh_remove,
+	.shutdown	= ehci_hcd_sh_shutdown,
+	.driver		= {
+		.name	= "sh_ehci",
+	},
+};
+
+MODULE_ALIAS("platform:sh_ehci");
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
new file mode 100644
index 0000000..add796c
--- /dev/null
+++ b/drivers/usb/host/ehci-spear.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for EHCI HCD on SPEAr SOC
+ *
+ * Copyright (C) 2010 ST Micro Electronics,
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * Based on various ehci-*.c drivers
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+#define DRIVER_DESC "EHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ehci";
+
+struct spear_ehci {
+	struct clk *clk;
+};
+
+#define to_spear_ehci(hcd)	(struct spear_ehci *)(hcd_to_ehci(hcd)->priv)
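+
+/*
+ * Annotation (not in the original source): the driver never allocates
+ * struct spear_ehci itself.  ehci_init_driver() reserves room for it at
+ * the end of struct ehci_hcd via spear_overrides.extra_priv_size, and
+ * the macro above recovers the pointer from that private area.
+ */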
+
+static struct hc_driver __read_mostly ehci_spear_hc_driver;
+
+#ifdef CONFIG_PM_SLEEP
+static int ehci_spear_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+
+	return ehci_suspend(hcd, do_wakeup);
+}
+
+static int ehci_spear_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	ehci_resume(hcd, false);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
+		ehci_spear_drv_resume);
+
+static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct spear_ehci *sehci;
+	struct resource *res;
+	struct clk *usbh_clk;
+	const struct hc_driver *driver = &ehci_spear_hc_driver;
+	int irq, retval;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto fail;
+	}
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail;
+
+	usbh_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(usbh_clk)) {
+		dev_err(&pdev->dev, "Error getting interface clock\n");
+		retval = PTR_ERR(usbh_clk);
+		goto fail;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err_put_hcd;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	sehci = to_spear_ehci(hcd);
+	sehci->clk = usbh_clk;
+
+	/* registers start at offset 0x0 */
+	hcd_to_ehci(hcd)->caps = hcd->regs;
+
+	clk_prepare_enable(sehci->clk);
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval)
+		goto err_stop_ehci;
+
+	device_wakeup_enable(hcd->self.controller);
+	return retval;
+
+err_stop_ehci:
+	clk_disable_unprepare(sehci->clk);
+err_put_hcd:
+	usb_put_hcd(hcd);
+fail:
+	dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+	return retval;
+}
+
+static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct spear_ehci *sehci = to_spear_ehci(hcd);
+
+	usb_remove_hcd(hcd);
+
+	if (sehci->clk)
+		clk_disable_unprepare(sehci->clk);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id spear_ehci_id_table[] = {
+	{ .compatible = "st,spear600-ehci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, spear_ehci_id_table);
+
+static struct platform_driver spear_ehci_hcd_driver = {
+	.probe		= spear_ehci_hcd_drv_probe,
+	.remove		= spear_ehci_hcd_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name = "spear-ehci",
+		.bus = &platform_bus_type,
+		.pm = &ehci_spear_pm_ops,
+		.of_match_table = spear_ehci_id_table,
+	}
+};
+
+static const struct ehci_driver_overrides spear_overrides __initconst = {
+	.extra_priv_size = sizeof(struct spear_ehci),
+};
+
+static int __init ehci_spear_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
+	return platform_driver_register(&spear_ehci_hcd_driver);
+}
+module_init(ehci_spear_init);
+
+static void __exit ehci_spear_cleanup(void)
+{
+	platform_driver_unregister(&spear_ehci_hcd_driver);
+}
+module_exit(ehci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:spear-ehci");
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
new file mode 100644
index 0000000..dc42981
--- /dev/null
+++ b/drivers/usb/host/ehci-st.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ST EHCI driver
+ *
+ * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Derived from ehci-platform.c
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "ehci.h"
+
+#define USB_MAX_CLKS 3
+
+struct st_ehci_platform_priv {
+	struct clk *clks[USB_MAX_CLKS];
+	struct clk *clk48;
+	struct reset_control *rst;
+	struct reset_control *pwr;
+	struct phy *phy;
+};
+
+#define DRIVER_DESC "EHCI STMicroelectronics driver"
+
+#define hcd_to_ehci_priv(h) \
+	((struct st_ehci_platform_priv *)hcd_to_ehci(h)->priv)
+
+static const char hcd_name[] = "ehci-st";
+
+#define EHCI_CAPS_SIZE 0x10
+#define AHB2STBUS_INSREG01 (EHCI_CAPS_SIZE + 0x84)
+
+static int st_ehci_platform_reset(struct usb_hcd *hcd)
+{
+	struct platform_device *pdev = to_platform_device(hcd->self.controller);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	u32 threshold;
+
+	/* Set EHCI packet buffer IN/OUT threshold to 128 bytes */
+	threshold = 128 | (128 << 16);
+	writel(threshold, hcd->regs + AHB2STBUS_INSREG01);
+
+	ehci->caps = hcd->regs + pdata->caps_offset;
+	return ehci_setup(hcd);
+}
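+
+/*
+ * Annotation (not in the original source): AHB2STBUS_INSREG01 carries the
+ * packet buffer thresholds for both directions, one per 16-bit half, so
+ * 128 | (128 << 16) programs 128 bytes for IN as well as OUT transfers.
+ */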
+
+static int st_ehci_platform_power_on(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk, ret;
+
+	ret = reset_control_deassert(priv->pwr);
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(priv->rst);
+	if (ret)
+		goto err_assert_power;
+
+	/*
+	 * Some SoCs don't have a dedicated 48 MHz clock, but those that
+	 * do need the rate to be explicitly set.
+	 */
+	if (priv->clk48) {
+		ret = clk_set_rate(priv->clk48, 48000000);
+		if (ret)
+			goto err_assert_reset;
+	}
+
+	for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++) {
+		ret = clk_prepare_enable(priv->clks[clk]);
+		if (ret)
+			goto err_disable_clks;
+	}
+
+	ret = phy_init(priv->phy);
+	if (ret)
+		goto err_disable_clks;
+
+	ret = phy_power_on(priv->phy);
+	if (ret)
+		goto err_exit_phy;
+
+	return 0;
+
+err_exit_phy:
+	phy_exit(priv->phy);
+err_disable_clks:
+	while (--clk >= 0)
+		clk_disable_unprepare(priv->clks[clk]);
+err_assert_reset:
+	reset_control_assert(priv->rst);
+err_assert_power:
+	reset_control_assert(priv->pwr);
+
+	return ret;
+}
+
+static void st_ehci_platform_power_off(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk;
+
+	reset_control_assert(priv->pwr);
+
+	reset_control_assert(priv->rst);
+
+	phy_power_off(priv->phy);
+
+	phy_exit(priv->phy);
+
+	for (clk = USB_MAX_CLKS - 1; clk >= 0; clk--)
+		if (priv->clks[clk])
+			clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+	.reset =		st_ehci_platform_reset,
+	.extra_priv_size =	sizeof(struct st_ehci_platform_priv),
+};
+
+static struct usb_ehci_pdata ehci_platform_defaults = {
+	.power_on =		st_ehci_platform_power_on,
+	.power_suspend =	st_ehci_platform_power_off,
+	.power_off =		st_ehci_platform_power_off,
+};
+
+static int st_ehci_platform_probe(struct platform_device *dev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res_mem;
+	struct usb_ehci_pdata *pdata = &ehci_platform_defaults;
+	struct st_ehci_platform_priv *priv;
+	struct ehci_hcd *ehci;
+	int err, irq, clk = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		dev_err(&dev->dev, "no irq provided");
+		return irq;
+	}
+	res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res_mem) {
+		dev_err(&dev->dev, "no memory resource provided");
+		return -ENXIO;
+	}
+
+	hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
+			     dev_name(&dev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	platform_set_drvdata(dev, hcd);
+	dev->dev.platform_data = pdata;
+	priv = hcd_to_ehci_priv(hcd);
+	ehci = hcd_to_ehci(hcd);
+
+	priv->phy = devm_phy_get(&dev->dev, "usb");
+	if (IS_ERR(priv->phy)) {
+		err = PTR_ERR(priv->phy);
+		goto err_put_hcd;
+	}
+
+	for (clk = 0; clk < USB_MAX_CLKS; clk++) {
+		priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+		if (IS_ERR(priv->clks[clk])) {
+			err = PTR_ERR(priv->clks[clk]);
+			if (err == -EPROBE_DEFER)
+				goto err_put_clks;
+			priv->clks[clk] = NULL;
+			break;
+		}
+	}
+
+	/*
+	 * Some SoCs don't have a dedicated 48 MHz clock, but those that
+	 * do need the rate to be explicitly set.
+	 */
+	priv->clk48 = devm_clk_get(&dev->dev, "clk48");
+	if (IS_ERR(priv->clk48)) {
+		dev_info(&dev->dev, "48MHz clk not found\n");
+		priv->clk48 = NULL;
+	}
+
+	priv->pwr =
+		devm_reset_control_get_optional_shared(&dev->dev, "power");
+	if (IS_ERR(priv->pwr)) {
+		err = PTR_ERR(priv->pwr);
+		if (err == -EPROBE_DEFER)
+			goto err_put_clks;
+		priv->pwr = NULL;
+	}
+
+	priv->rst =
+		devm_reset_control_get_optional_shared(&dev->dev, "softreset");
+	if (IS_ERR(priv->rst)) {
+		err = PTR_ERR(priv->rst);
+		if (err == -EPROBE_DEFER)
+			goto err_put_clks;
+		priv->rst = NULL;
+	}
+
+	if (pdata->power_on) {
+		err = pdata->power_on(dev);
+		if (err < 0)
+			goto err_put_clks;
+	}
+
+	hcd->rsrc_start = res_mem->start;
+	hcd->rsrc_len = resource_size(res_mem);
+
+	hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto err_put_clks;
+	}
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err)
+		goto err_put_clks;
+
+	device_wakeup_enable(hcd->self.controller);
+	platform_set_drvdata(dev, hcd);
+
+	return err;
+
+err_put_clks:
+	while (--clk >= 0)
+		clk_put(priv->clks[clk]);
+err_put_hcd:
+	if (pdata == &ehci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	usb_put_hcd(hcd);
+
+	return err;
+}
+
+static int st_ehci_platform_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct st_ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+	int clk;
+
+	usb_remove_hcd(hcd);
+
+	if (pdata->power_off)
+		pdata->power_off(dev);
+
+	for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++)
+		clk_put(priv->clks[clk]);
+
+	usb_put_hcd(hcd);
+
+	if (pdata == &ehci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int st_ehci_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	ret = ehci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	if (pdata->power_suspend)
+		pdata->power_suspend(pdev);
+
+	pinctrl_pm_select_sleep_state(dev);
+
+	return ret;
+}
+
+static int st_ehci_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	int err;
+
+	pinctrl_pm_select_default_state(dev);
+
+	if (pdata->power_on) {
+		err = pdata->power_on(pdev);
+		if (err < 0)
+			return err;
+	}
+
+	ehci_resume(hcd, false);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(st_ehci_pm_ops, st_ehci_suspend, st_ehci_resume);
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id st_ehci_ids[] = {
+	{ .compatible = "st,st-ehci-300x", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st_ehci_ids);
+
+static struct platform_driver ehci_platform_driver = {
+	.probe		= st_ehci_platform_probe,
+	.remove		= st_ehci_platform_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "st-ehci",
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &st_ehci_pm_ops,
+#endif
+		.of_match_table = st_ehci_ids,
+	}
+};
+
+static int __init ehci_platform_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+	return platform_driver_register(&ehci_platform_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+	platform_driver_unregister(&ehci_platform_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
new file mode 100644
index 0000000..8f75cb7
--- /dev/null
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2007 by Alan Stern
+ */
+
+/* this file is part of ehci-hcd.c */
+
+
+/* Display the ports dedicated to the companion controller */
+static ssize_t companion_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct ehci_hcd		*ehci;
+	int			nports, index, n;
+	int			count = PAGE_SIZE;
+	char			*ptr = buf;
+
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+	nports = HCS_N_PORTS(ehci->hcs_params);
+
+	for (index = 0; index < nports; ++index) {
+		if (test_bit(index, &ehci->companion_ports)) {
+			n = scnprintf(ptr, count, "%d\n", index + 1);
+			ptr += n;
+			count -= n;
+		}
+	}
+	return ptr - buf;
+}
+
+/*
+ * Dedicate or undedicate a port to the companion controller.
+ * Syntax is "[-]portnum", where a leading '-' sign means
+ * return control of the port to the EHCI controller.
+ */
+static ssize_t companion_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ehci_hcd		*ehci;
+	int			portnum, new_owner;
+
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+	new_owner = PORT_OWNER;		/* Owned by companion */
+	if (sscanf(buf, "%d", &portnum) != 1)
+		return -EINVAL;
+	if (portnum < 0) {
+		portnum = -portnum;
+		new_owner = 0;		/* Owned by EHCI */
+	}
+	if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
+		return -ENOENT;
+	portnum--;
+	if (new_owner)
+		set_bit(portnum, &ehci->companion_ports);
+	else
+		clear_bit(portnum, &ehci->companion_ports);
+	set_owner(ehci, portnum, new_owner);
+	return count;
+}
+static DEVICE_ATTR_RW(companion);
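+
+/*
+ * Usage sketch (annotation, not part of the original source): the
+ * attribute above lets userspace hand a port to the companion controller,
+ * written from the EHCI controller's sysfs directory (the exact path
+ * depends on which bus the controller sits on):
+ *
+ *	echo 4 > companion	# dedicate port 4 to the companion
+ *	echo -4 > companion	# give port 4 back to EHCI
+ */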
+
+
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t uframe_periodic_max_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ehci_hcd		*ehci;
+	int			n;
+
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+	n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+	return n;
+}
+
+
+static ssize_t uframe_periodic_max_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct ehci_hcd		*ehci;
+	unsigned		uframe_periodic_max;
+	unsigned		uframe;
+	unsigned long		flags;
+	ssize_t			ret;
+
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+		return -EINVAL;
+
+	if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+		ehci_info(ehci, "rejecting invalid request for "
+				"uframe_periodic_max=%u\n", uframe_periodic_max);
+		return -EINVAL;
+	}
+
+	ret = -EINVAL;
+
+	/*
+	 * lock, so that our checking does not race with possible periodic
+	 * bandwidth allocation through submitting new urbs.
+	 */
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	/*
+	 * for request to decrease max periodic bandwidth, we have to check
+	 * to see whether the decrease is possible.
+	 */
+	if (uframe_periodic_max < ehci->uframe_periodic_max) {
+		u8		allocated_max = 0;
+
+		for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
+			allocated_max = max(allocated_max,
+					ehci->bandwidth[uframe]);
+
+		if (allocated_max > uframe_periodic_max) {
+			ehci_info(ehci,
+				"cannot decrease uframe_periodic_max because "
+				"periodic bandwidth is already allocated "
+				"(%u > %u)\n",
+				allocated_max, uframe_periodic_max);
+			goto out_unlock;
+		}
+	}
+
+	/* increasing is always ok */
+
+	ehci_info(ehci, "setting max periodic bandwidth to %u%% "
+			"(== %u usec/uframe)\n",
+			100*uframe_periodic_max/125, uframe_periodic_max);
+
+	if (uframe_periodic_max != 100)
+		ehci_warn(ehci, "max periodic bandwidth set is non-standard\n");
+
+	ehci->uframe_periodic_max = uframe_periodic_max;
+	ret = count;
+
+out_unlock:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return ret;
+}
+static DEVICE_ATTR_RW(uframe_periodic_max);
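+
+/*
+ * Worked example (annotation, not part of the original source): the value
+ * is in usec per 125-usec microframe, so the default of 100 is the
+ * EHCI-standard 80% periodic reservation and the store handler above
+ * accepts 100..124:
+ *
+ *	echo 110 > uframe_periodic_max	# allow 88% (110/125) periodic
+ */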
+
+
+static inline int create_sysfs_files(struct ehci_hcd *ehci)
+{
+	struct device	*controller = ehci_to_hcd(ehci)->self.controller;
+	int	i = 0;
+
+	/* with integrated TT there is no companion! */
+	if (!ehci_is_TDI(ehci))
+		i = device_create_file(controller, &dev_attr_companion);
+	if (i)
+		goto out;
+
+	i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+	return i;
+}
+
+static inline void remove_sysfs_files(struct ehci_hcd *ehci)
+{
+	struct device	*controller = ehci_to_hcd(ehci)->self.controller;
+
+	/* with integrated TT there is no companion! */
+	if (!ehci_is_TDI(ehci))
+		device_remove_file(controller, &dev_attr_companion);
+
+	device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
new file mode 100644
index 0000000..4d2cdec
--- /dev/null
+++ b/drivers/usb/host/ehci-tegra.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2009 - 2013 NVIDIA Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/tegra_usb_phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include "ehci.h"
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#define TEGRA_USB_DMA_ALIGN 32
+
+#define DRIVER_DESC "Tegra EHCI driver"
+#define DRV_NAME "tegra-ehci"
+
+static struct hc_driver __read_mostly tegra_ehci_hc_driver;
+
+struct tegra_ehci_soc_config {
+	bool has_hostpc;
+};
+
+struct tegra_ehci_hcd {
+	struct tegra_usb_phy *phy;
+	struct clk *clk;
+	struct reset_control *rst;
+	int port_resuming;
+	bool needs_double_reset;
+	enum tegra_usb_phy_port_speed port_speed;
+};
+
+static int tegra_reset_usb_controller(struct platform_device *pdev)
+{
+	struct device_node *phy_np;
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct tegra_ehci_hcd *tegra =
+		(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+	struct reset_control *rst;
+	int err;
+
+	phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
+	if (!phy_np)
+		return -ENOENT;
+
+	/*
+	 * The 1st USB controller contains some UTMI pad registers that are
+	 * global for all the controllers on the chip. Those registers are
+	 * also cleared when reset is asserted to the 1st controller.
+	 */
+	rst = of_reset_control_get_shared(phy_np, "utmi-pads");
+	if (IS_ERR(rst)) {
+		dev_warn(&pdev->dev,
+			 "can't get utmi-pads reset from the PHY\n");
+		dev_warn(&pdev->dev,
+			 "continuing, but please update your DT\n");
+	} else {
+		/*
+		 * PHY driver performs UTMI-pads reset in a case of
+		 * non-legacy DT.
+		 */
+		reset_control_put(rst);
+	}
+
+	of_node_put(phy_np);
+
+	/* reset control is shared, hence initialize it first */
+	err = reset_control_deassert(tegra->rst);
+	if (err)
+		return err;
+
+	err = reset_control_assert(tegra->rst);
+	if (err)
+		return err;
+
+	udelay(1);
+
+	err = reset_control_deassert(tegra->rst);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int tegra_ehci_internal_port_reset(
+	struct ehci_hcd	*ehci,
+	u32 __iomem	*portsc_reg
+)
+{
+	u32		temp;
+	unsigned long	flags;
+	int		retval = 0;
+	int		i, tries;
+	u32		saved_usbintr;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+	saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
+	/* disable USB interrupt */
+	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	/*
+	 * Here we have to do Port Reset at most twice for
+	 * Port Enable bit to be set.
+	 */
+	for (i = 0; i < 2; i++) {
+		temp = ehci_readl(ehci, portsc_reg);
+		temp |= PORT_RESET;
+		ehci_writel(ehci, temp, portsc_reg);
+		mdelay(10);
+		temp &= ~PORT_RESET;
+		ehci_writel(ehci, temp, portsc_reg);
+		mdelay(1);
+		tries = 100;
+		do {
+			mdelay(1);
+			/*
+			 * Up to this point, Port Enable bit is
+			 * expected to be set after 2 ms waiting.
+			 * USB1 usually takes extra 45 ms, for safety,
+			 * we take 100 ms as timeout.
+			 */
+			temp = ehci_readl(ehci, portsc_reg);
+		} while (!(temp & PORT_PE) && tries--);
+		if (temp & PORT_PE)
+			break;
+	}
+	if (i == 2)
+		retval = -ETIMEDOUT;
+
+	/*
+	 * Clear Connect Status Change bit if it's set.
+	 * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
+	 */
+	if (temp & PORT_CSC)
+		ehci_writel(ehci, PORT_CSC, portsc_reg);
+
+	/*
+	 * Write to clear any interrupt status bits that might be set
+	 * during port reset.
+	 */
+	temp = ehci_readl(ehci, &ehci->regs->status);
+	ehci_writel(ehci, temp, &ehci->regs->status);
+
+	/* restore original interrupt enable bits */
+	ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
+	return retval;
+}
+
+static int tegra_ehci_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
+	u32 __iomem	*status_reg;
+	u32		temp;
+	unsigned long	flags;
+	int		retval = 0;
+
+	status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	if (typeReq == GetPortStatus) {
+		temp = ehci_readl(ehci, status_reg);
+		if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
+			/* Resume completed, re-enable disconnect detection */
+			tegra->port_resuming = 0;
+			tegra_usb_phy_postresume(hcd->usb_phy);
+		}
+	}
+
+	else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+		temp = ehci_readl(ehci, status_reg);
+		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+			retval = -EPIPE;
+			goto done;
+		}
+
+		temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
+		temp |= PORT_WKDISC_E | PORT_WKOC_E;
+		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+		/*
+		 * If a transaction is in progress, there may be a delay in
+		 * suspending the port. Poll until the port is suspended.
+		 */
+		if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
+						PORT_SUSPEND, 5000))
+			pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+		set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+		goto done;
+	}
+
+	/* For USB1 port we need to issue Port Reset twice internally */
+	if (tegra->needs_double_reset &&
+	   (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
+		spin_unlock_irqrestore(&ehci->lock, flags);
+		return tegra_ehci_internal_port_reset(ehci, status_reg);
+	}
+
+	/*
+	 * Tegra host controller will time the resume operation to clear the bit
+	 * when the port control state switches to HS or FS Idle. This behavior
+	 * is different from EHCI where the host controller driver is required
+	 * to set this bit to a zero after the resume duration is timed in the
+	 * driver.
+	 */
+	else if (typeReq == ClearPortFeature &&
+					wValue == USB_PORT_FEAT_SUSPEND) {
+		temp = ehci_readl(ehci, status_reg);
+		if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
+			retval = -EPIPE;
+			goto done;
+		}
+
+		if (!(temp & PORT_SUSPEND))
+			goto done;
+
+		/* Disable disconnect detection during port resume */
+		tegra_usb_phy_preresume(hcd->usb_phy);
+
+		ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
+
+		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+		/* start resume signalling */
+		ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+		set_bit(wIndex-1, &ehci->resuming_ports);
+
+		spin_unlock_irqrestore(&ehci->lock, flags);
+		msleep(20);
+		spin_lock_irqsave(&ehci->lock, flags);
+
+		/* Poll until the controller clears RESUME and SUSPEND */
+		if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
+			pr_err("%s: timeout waiting for RESUME\n", __func__);
+		if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
+			pr_err("%s: timeout waiting for SUSPEND\n", __func__);
+
+		ehci->reset_done[wIndex-1] = 0;
+		clear_bit(wIndex-1, &ehci->resuming_ports);
+
+		tegra->port_resuming = 1;
+		goto done;
+	}
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	/* Handle the hub control events here */
+	return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+
+done:
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return retval;
+}
+
+struct dma_aligned_buffer {
+	void *kmalloc_ptr;
+	void *old_xfer_buffer;
+	u8 data[];
+};
+
+static void free_dma_aligned_buffer(struct urb *urb)
+{
+	struct dma_aligned_buffer *temp;
+	size_t length;
+
+	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+		return;
+
+	temp = container_of(urb->transfer_buffer,
+		struct dma_aligned_buffer, data);
+
+	if (usb_urb_dir_in(urb)) {
+		if (usb_pipeisoc(urb->pipe))
+			length = urb->transfer_buffer_length;
+		else
+			length = urb->actual_length;
+
+		memcpy(temp->old_xfer_buffer, temp->data, length);
+	}
+	urb->transfer_buffer = temp->old_xfer_buffer;
+	kfree(temp->kmalloc_ptr);
+
+	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+	struct dma_aligned_buffer *temp, *kmalloc_ptr;
+	size_t kmalloc_size;
+
+	if (urb->num_sgs || urb->sg ||
+	    urb->transfer_buffer_length == 0 ||
+	    !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
+		return 0;
+
+	/* Allocate a buffer with enough padding for alignment */
+	kmalloc_size = urb->transfer_buffer_length +
+		sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1;
+
+	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+	if (!kmalloc_ptr)
+		return -ENOMEM;
+
+	/* Position our struct dma_aligned_buffer such that data is aligned */
+	temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
+	temp->kmalloc_ptr = kmalloc_ptr;
+	temp->old_xfer_buffer = urb->transfer_buffer;
+	if (usb_urb_dir_out(urb))
+		memcpy(temp->data, urb->transfer_buffer,
+		       urb->transfer_buffer_length);
+	urb->transfer_buffer = temp->data;
+
+	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+	return 0;
+}
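+
+/*
+ * Layout note (annotation, not original code): PTR_ALIGN(kmalloc_ptr + 1,
+ * TEGRA_USB_DMA_ALIGN) - 1 steps one struct past the allocation, rounds
+ * up, then steps back, so data[] begins exactly on the aligned boundary.
+ * On a 64-bit kernel the header is 16 bytes: kmalloc_ptr = 0x1008 gives
+ * kmalloc_ptr + 1 = 0x1018, aligned up to 0x1020, minus one struct =
+ * 0x1010, and temp->data then starts at the 32-byte-aligned 0x1020.
+ */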
+
+static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+				      gfp_t mem_flags)
+{
+	int ret;
+
+	ret = alloc_dma_aligned_buffer(urb, mem_flags);
+	if (ret)
+		return ret;
+
+	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+	if (ret)
+		free_dma_aligned_buffer(urb);
+
+	return ret;
+}
+
+static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+	usb_hcd_unmap_urb_for_dma(hcd, urb);
+	free_dma_aligned_buffer(urb);
+}
+
+static const struct tegra_ehci_soc_config tegra30_soc_config = {
+	.has_hostpc = true,
+};
+
+static const struct tegra_ehci_soc_config tegra20_soc_config = {
+	.has_hostpc = false,
+};
+
+static const struct of_device_id tegra_ehci_of_match[] = {
+	{ .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
+	{ .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
+	{ },
+};
+
+static int tegra_ehci_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct tegra_ehci_soc_config *soc_config;
+	struct resource *res;
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	struct tegra_ehci_hcd *tegra;
+	int err = 0;
+	int irq;
+	struct usb_phy *u_phy;
+
+	match = of_match_device(tegra_ehci_of_match, &pdev->dev);
+	if (!match) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+	soc_config = match->data;
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
+					dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_err(&pdev->dev, "Unable to create HCD\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, hcd);
+	ehci = hcd_to_ehci(hcd);
+	tegra = (struct tegra_ehci_hcd *)ehci->priv;
+
+	hcd->has_tt = 1;
+
+	tegra->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(tegra->clk)) {
+		dev_err(&pdev->dev, "Can't get ehci clock\n");
+		err = PTR_ERR(tegra->clk);
+		goto cleanup_hcd_create;
+	}
+
+	tegra->rst = devm_reset_control_get_shared(&pdev->dev, "usb");
+	if (IS_ERR(tegra->rst)) {
+		dev_err(&pdev->dev, "Can't get ehci reset\n");
+		err = PTR_ERR(tegra->rst);
+		goto cleanup_hcd_create;
+	}
+
+	err = clk_prepare_enable(tegra->clk);
+	if (err)
+		goto cleanup_hcd_create;
+
+	err = tegra_reset_usb_controller(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to reset controller\n");
+		goto cleanup_clk_en;
+	}
+
+	u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
+	if (IS_ERR(u_phy)) {
+		err = -EPROBE_DEFER;
+		goto cleanup_clk_en;
+	}
+	hcd->usb_phy = u_phy;
+	hcd->skip_phy_initialization = 1;
+
+	tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
+		"nvidia,needs-double-reset");
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto cleanup_clk_en;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	ehci->caps = hcd->regs + 0x100;
+	ehci->has_hostpc = soc_config->has_hostpc;
+
+	err = usb_phy_init(hcd->usb_phy);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize phy\n");
+		goto cleanup_clk_en;
+	}
+
+	u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
+			     GFP_KERNEL);
+	if (!u_phy->otg) {
+		err = -ENOMEM;
+		goto cleanup_phy;
+	}
+	u_phy->otg->host = hcd_to_bus(hcd);
+
+	err = usb_phy_set_suspend(hcd->usb_phy, 0);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to power on the phy\n");
+		goto cleanup_phy;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ\n");
+		err = irq;
+		goto cleanup_phy;
+	}
+
+	otg_set_host(u_phy->otg, &hcd->self);
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to add USB HCD\n");
+		goto cleanup_otg_set_host;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	return err;
+
+cleanup_otg_set_host:
+	otg_set_host(u_phy->otg, NULL);
+cleanup_phy:
+	usb_phy_shutdown(hcd->usb_phy);
+cleanup_clk_en:
+	clk_disable_unprepare(tegra->clk);
+cleanup_hcd_create:
+	usb_put_hcd(hcd);
+	return err;
+}
+
+static int tegra_ehci_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct tegra_ehci_hcd *tegra =
+		(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+
+	otg_set_host(hcd->usb_phy->otg, NULL);
+
+	usb_phy_shutdown(hcd->usb_phy);
+	usb_remove_hcd(hcd);
+
+	reset_control_assert(tegra->rst);
+	udelay(1);
+
+	clk_disable_unprepare(tegra->clk);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver tegra_ehci_driver = {
+	.probe		= tegra_ehci_probe,
+	.remove		= tegra_ehci_remove,
+	.shutdown	= tegra_ehci_hcd_shutdown,
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = tegra_ehci_of_match,
+	}
+};
+
+static int tegra_ehci_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+	int txfifothresh;
+
+	retval = ehci_setup(hcd);
+	if (retval)
+		return retval;
+
+	/*
+	 * We should really pull this value out of tegra_ehci_soc_config, but
+	 * to avoid needing access to it, make use of the fact that Tegra20 is
+	 * the only one so far that needs a value of 10, and Tegra20 is the
+	 * only one which doesn't set has_hostpc.
+	 */
+	txfifothresh = ehci->has_hostpc ? 0x10 : 10;
+	ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
+
+	return 0;
+}
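+
+/*
+ * Annotation (not in the original source): the TX FIFO threshold field of
+ * TXFILLTUNING sits at bits 21:16, hence the << 16 shift above; 0x10
+ * versus 10 is the Tegra30-versus-Tegra20 distinction described in the
+ * comment.
+ */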
+
+static const struct ehci_driver_overrides tegra_overrides __initconst = {
+	.extra_priv_size	= sizeof(struct tegra_ehci_hcd),
+	.reset			= tegra_ehci_reset,
+};
+
+static int __init ehci_tegra_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info(DRV_NAME ": " DRIVER_DESC "\n");
+
+	ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
+
+	/*
+	 * The Tegra HW has some unusual quirks, which require Tegra-specific
+	 * workarounds. We override certain hc_driver functions here to
+	 * achieve that. We explicitly do not enhance ehci_driver_overrides to
+	 * allow this more easily, since this is an unusual case, and we don't
+	 * want to encourage others to override these functions by making it
+	 * too easy.
+	 */
+
+	tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
+	tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
+	tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
+
+	return platform_driver_register(&tegra_ehci_driver);
+}
+module_init(ehci_tegra_init);
+
+static void __exit ehci_tegra_cleanup(void)
+{
+	platform_driver_unregister(&tegra_ehci_driver);
+}
+module_exit(ehci_tegra_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
new file mode 100644
index 0000000..4fcebda
--- /dev/null
+++ b/drivers/usb/host/ehci-timer.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 by Alan Stern
+ */
+
+/* This file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+	ehci->command |= bit;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+	/* unblock posted write */
+	ehci_readl(ehci, &ehci->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+	ehci->command &= ~bit;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+	/* unblock posted write */
+	ehci_readl(ehci, &ehci->regs->command);
+}
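+
+/*
+ * Annotation (not in the original source): the read-back in both helpers
+ * flushes the posted MMIO write, so the controller is guaranteed to have
+ * observed the new USBCMD value before the caller continues.
+ */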
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support...  Now using hrtimers.
+ *
+ * Lots of different events are triggered from ehci->hrtimer.  Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * ehci->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * ehci->next_hrtimer_event.  Whenever ehci->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay.  This doesn't
+ * matter, because none of the events are especially time-critical.  The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay.  In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum ehci_hrtimer_event in ehci.h.
+ */
+static unsigned event_delays_ns[] = {
+	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_ASS */
+	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_PSS */
+	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_DEAD */
+	1125 * NSEC_PER_USEC,	/* EHCI_HRTIMER_UNLINK_INTR */
+	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_FREE_ITDS */
+	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ACTIVE_UNLINK */
+	5 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_START_UNLINK_INTR */
+	6 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ASYNC_UNLINKS */
+	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IAA_WATCHDOG */
+	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_PERIODIC */
+	15 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_ASYNC */
+	100 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
+		bool resched)
+{
+	ktime_t		*timeout = &ehci->hr_timeouts[event];
+
+	if (resched)
+		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
+	ehci->enabled_hrtimer_events |= (1 << event);
+
+	/* Track only the lowest-numbered pending event */
+	if (event < ehci->next_hrtimer_event) {
+		ehci->next_hrtimer_event = event;
+		hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
+				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+	}
+}
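+
+/*
+ * Worked example (annotation, not original code): if POLL_ASS (event 0)
+ * and IO_WATCHDOG (event 11) are both pending, next_hrtimer_event is 0 and
+ * the hrtimer fires after roughly 1 ms.  The handler runs POLL_ASS and
+ * then re-enables IO_WATCHDOG with resched == false, keeping its original
+ * deadline in hr_timeouts[] instead of pushing it back.
+ */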
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void ehci_poll_ASS(struct ehci_hcd *ehci)
+{
+	unsigned	actual, want;
+
+	/* Don't enable anything if the controller isn't running (e.g., died) */
+	if (ehci->rh_state != EHCI_RH_RUNNING)
+		return;
+
+	want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
+	actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
+
+	if (want != actual) {
+
+		/* Poll again later, but give up after about 2-4 ms */
+		if (ehci->ASS_poll_count++ < 2) {
+			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+			return;
+		}
+		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
+				want, actual);
+	}
+	ehci->ASS_poll_count = 0;
+
+	/* The status is up-to-date; restart or stop the schedule as needed */
+	if (want == 0) {	/* Stopped */
+		if (ehci->async_count > 0)
+			ehci_set_command_bit(ehci, CMD_ASE);
+
+	} else {		/* Running */
+		if (ehci->async_count == 0) {
+
+			/* Turn off the schedule after a while */
+			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
+					true);
+		}
+	}
+}
+
+/* Turn off the async schedule after a brief delay */
+static void ehci_disable_ASE(struct ehci_hcd *ehci)
+{
+	ehci_clear_command_bit(ehci, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void ehci_poll_PSS(struct ehci_hcd *ehci)
+{
+	unsigned	actual, want;
+
+	/* Don't do anything if the controller isn't running (e.g., died) */
+	if (ehci->rh_state != EHCI_RH_RUNNING)
+		return;
+
+	want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
+	actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
+
+	if (want != actual) {
+
+		/* Poll again later, but give up after about 2-4 ms */
+		if (ehci->PSS_poll_count++ < 2) {
+			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+			return;
+		}
+		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+				want, actual);
+	}
+	ehci->PSS_poll_count = 0;
+
+	/* The status is up-to-date; restart or stop the schedule as needed */
+	if (want == 0) {	/* Stopped */
+		if (ehci->periodic_count > 0)
+			ehci_set_command_bit(ehci, CMD_PSE);
+
+	} else {		/* Running */
+		if (ehci->periodic_count == 0) {
+
+			/* Turn off the schedule after a while */
+			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
+					true);
+		}
+	}
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void ehci_disable_PSE(struct ehci_hcd *ehci)
+{
+	ehci_clear_command_bit(ehci, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void ehci_handle_controller_death(struct ehci_hcd *ehci)
+{
+	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
+
+		/* Give up after a few milliseconds */
+		if (ehci->died_poll_count++ < 5) {
+			/* Try again later */
+			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
+			return;
+		}
+		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
+	}
+
+	/* Clean up the mess */
+	ehci->rh_state = EHCI_RH_HALTED;
+	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
+	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+	ehci_work(ehci);
+	end_unlink_async(ehci);
+
+	/* Not in process context, so don't try to reset the controller */
+}
+
+/* Start to unlink interrupt QHs */
+static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
+{
+	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+	/*
+	 * Process all the QHs on the intr_unlink_wait list that were added
+	 * before the current unlink cycle began.  The list is in
+	 * temporal order, so stop when we reach the first entry in the
+	 * current cycle.  But if the root hub isn't running then
+	 * process all the QHs on the list.
+	 */
+	while (!list_empty(&ehci->intr_unlink_wait)) {
+		struct ehci_qh	*qh;
+
+		qh = list_first_entry(&ehci->intr_unlink_wait,
+				struct ehci_qh, unlink_node);
+		if (!stopped && (qh->unlink_cycle ==
+				ehci->intr_unlink_wait_cycle))
+			break;
+		list_del_init(&qh->unlink_node);
+		qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
+		start_unlink_intr(ehci, qh);
+	}
+
+	/* Handle remaining entries later */
+	if (!list_empty(&ehci->intr_unlink_wait)) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+		++ehci->intr_unlink_wait_cycle;
+	}
+}
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
+{
+	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+	/*
+	 * Process all the QHs on the intr_unlink list that were added
+	 * before the current unlink cycle began.  The list is in
+	 * temporal order, so stop when we reach the first entry in the
+	 * current cycle.  But if the root hub isn't running then
+	 * process all the QHs on the list.
+	 */
+	ehci->intr_unlinking = true;
+	while (!list_empty(&ehci->intr_unlink)) {
+		struct ehci_qh	*qh;
+
+		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
+				unlink_node);
+		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
+			break;
+		list_del_init(&qh->unlink_node);
+		end_unlink_intr(ehci, qh);
+	}
+
+	/* Handle remaining entries later */
+	if (!list_empty(&ehci->intr_unlink)) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+		++ehci->intr_unlink_cycle;
+	}
+	ehci->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct ehci_hcd *ehci)
+{
+	if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
+		ehci->last_itd_to_free = list_entry(
+				ehci->cached_itd_list.prev,
+				struct ehci_itd, itd_list);
+		ehci->last_sitd_to_free = list_entry(
+				ehci->cached_sitd_list.prev,
+				struct ehci_sitd, sitd_list);
+		ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
+	}
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct ehci_hcd *ehci)
+{
+	struct ehci_itd		*itd, *n;
+	struct ehci_sitd	*sitd, *sn;
+
+	if (ehci->rh_state < EHCI_RH_RUNNING) {
+		ehci->last_itd_to_free = NULL;
+		ehci->last_sitd_to_free = NULL;
+	}
+
+	list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
+		list_del(&itd->itd_list);
+		dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
+		if (itd == ehci->last_itd_to_free)
+			break;
+	}
+	list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+		list_del(&sitd->sitd_list);
+		dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
+		if (sitd == ehci->last_sitd_to_free)
+			break;
+	}
+
+	if (!list_empty(&ehci->cached_itd_list) ||
+			!list_empty(&ehci->cached_sitd_list))
+		start_free_itds(ehci);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
+{
+	u32 cmd, status;
+
+	/*
+	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
+	 * So we need this watchdog, but must protect it against both
+	 * (a) SMP races against real IAA firing and retriggering, and
+	 * (b) clean HC shutdown, when IAA watchdog was pending.
+	 */
+	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
+		return;
+
+	/* If we get here, IAA is *REALLY* late.  It's barely
+	 * conceivable that the system is so busy that CMD_IAAD
+	 * is still legitimately set, so let's be sure it's
+	 * clear before we read STS_IAA.  (The HC should clear
+	 * CMD_IAAD when it sets STS_IAA.)
+	 */
+	cmd = ehci_readl(ehci, &ehci->regs->command);
+
+	/*
+	 * If IAA is set here it either legitimately triggered
+	 * after the watchdog timer expired (_way_ late, so we'll
+	 * still count it as lost) ... or a silicon erratum:
+	 * - VIA seems to set IAA without triggering the IRQ;
+	 * - IAAD potentially cleared without setting IAA.
+	 */
+	status = ehci_readl(ehci, &ehci->regs->status);
+	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+		COUNT(ehci->stats.lost_iaa);
+		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+	}
+
+	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
+	end_iaa_cycle(ehci);
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct ehci_hcd *ehci)
+{
+	/* Not needed if the controller isn't running or it's already enabled */
+	if (ehci->rh_state != EHCI_RH_RUNNING ||
+			(ehci->enabled_hrtimer_events &
+				BIT(EHCI_HRTIMER_IO_WATCHDOG)))
+		return;
+
+	/*
+	 * Isochronous transfers always need the watchdog.
+	 * For other sorts we use it only if the flag is set.
+	 */
+	if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
+			ehci->async_count + ehci->intr_count > 0))
+		ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum ehci_hrtimer_event in ehci.h.
+ */
+static void (*event_handlers[])(struct ehci_hcd *) = {
+	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
+	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
+	ehci_handle_controller_death,	/* EHCI_HRTIMER_POLL_DEAD */
+	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
+	end_free_itds,			/* EHCI_HRTIMER_FREE_ITDS */
+	end_unlink_async,		/* EHCI_HRTIMER_ACTIVE_UNLINK */
+	ehci_handle_start_intr_unlinks,	/* EHCI_HRTIMER_START_UNLINK_INTR */
+	unlink_empty_async,		/* EHCI_HRTIMER_ASYNC_UNLINKS */
+	ehci_iaa_watchdog,		/* EHCI_HRTIMER_IAA_WATCHDOG */
+	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
+	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
+	ehci_work,			/* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
+{
+	struct ehci_hcd	*ehci = container_of(t, struct ehci_hcd, hrtimer);
+	ktime_t		now;
+	unsigned long	events;
+	unsigned long	flags;
+	unsigned	e;
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	events = ehci->enabled_hrtimer_events;
+	ehci->enabled_hrtimer_events = 0;
+	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+	/*
+	 * Check each pending event.  If its time has expired, handle
+	 * the event; otherwise re-enable it.
+	 */
+	now = ktime_get();
+	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
+		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
+			event_handlers[e](ehci);
+		else
+			ehci_enable_event(ehci, e, false);
+	}
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	return HRTIMER_NORESTART;
+}
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
new file mode 100644
index 0000000..6d77ace
--- /dev/null
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/usb/host/ehci-w90x900.c
+ *
+ * Copyright (c) 2008 Nuvoton Technology Corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+/* enable phy0 and phy1 for w90p910 */
+#define	ENPHY		(0x01<<8)
+#define PHY0_CTR	(0xA4)
+#define PHY1_CTR	(0xA8)
+
+#define DRIVER_DESC "EHCI w90x900 driver"
+
+static const char hcd_name[] = "ehci-w90x900";
+
+static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
+
+static int ehci_w90x900_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	struct resource *res;
+	int retval = 0, irq;
+	unsigned long val;
+
+	hcd = usb_create_hcd(&ehci_w90x900_hc_driver,
+			&pdev->dev, "w90x900 EHCI");
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err2;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = hcd->regs;
+	ehci->regs = hcd->regs +
+		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/*
+	 * Enable PHY 0 and PHY 1.  These registers only apply to the
+	 * w90p910: 0xA4 and 0xA8 are the offsets of the PHY0 and PHY1
+	 * controllers relative to ehci->regs.
+	 */
+	val = __raw_readl(ehci->regs+PHY0_CTR);
+	val |= ENPHY;
+	__raw_writel(val, ehci->regs+PHY0_CTR);
+
+	val = __raw_readl(ehci->regs+PHY1_CTR);
+	val |= ENPHY;
+	__raw_writel(val, ehci->regs+PHY1_CTR);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto err2;
+	}
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval != 0)
+		goto err2;
+
+	device_wakeup_enable(hcd->self.controller);
+	return retval;
+err2:
+	usb_put_hcd(hcd);
+err1:
+	return retval;
+}
+
+static int ehci_w90x900_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static struct platform_driver ehci_hcd_w90x900_driver = {
+	.probe  = ehci_w90x900_probe,
+	.remove = ehci_w90x900_remove,
+	.driver = {
+		.name = "w90x900-ehci",
+	},
+};
+
+static int __init ehci_w90X900_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ehci_init_driver(&ehci_w90x900_hc_driver, NULL);
+	return platform_driver_register(&ehci_hcd_w90x900_driver);
+}
+module_init(ehci_w90X900_init);
+
+static void __exit ehci_w90X900_cleanup(void)
+{
+	platform_driver_unregister(&ehci_hcd_w90x900_driver);
+}
+module_exit(ehci_w90X900_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_ALIAS("platform:w90p910-ehci");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 0000000..d2a2757
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * Bus Glue for Xilinx EHCI core on the of_platform bus
+ *
+ * Copyright (c) 2009 Xilinx, Inc.
+ *
+ * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
+ * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ */
+
+#include <linux/err.h>
+#include <linux/signal.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+/**
+ * ehci_xilinx_port_handed_over - hand the port over if it could not be enabled
+ * @hcd:	Pointer to the usb_hcd device to which the host controller is
+ *		bound
+ * @portnum:	Port number to which the device is attached.
+ *
+ * This function is used as a place to tell the user that the Xilinx USB host
+ * controller does not support LS devices. And in an HS-only configuration it
+ * does not support FS devices either. It is hoped that this can help a
+ * confused user.
+ *
+ * There are cases when the host controller fails to enable the port due to,
+ * for example, insufficient power that can be supplied to the device from
+ * the USB bus. In those cases, the messages printed here are not helpful.
+ */
+static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+	dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
+	if (hcd->has_tt) {
+		dev_warn(hcd->self.controller,
+			"Maybe you have connected a low speed device?\n");
+
+		dev_warn(hcd->self.controller,
+			"We do not support low speed devices\n");
+	} else {
+		dev_warn(hcd->self.controller,
+			"Maybe your device is not a high speed device?\n");
+		dev_warn(hcd->self.controller,
+			"The USB host controller does not support full speed "
+			"nor low speed devices\n");
+		dev_warn(hcd->self.controller,
+			"You can reconfigure the host controller to have "
+			"full speed support\n");
+	}
+
+	return 0;
+}
+
+
+static const struct hc_driver ehci_xilinx_of_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "OF EHCI",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2 | HCD_BH,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_setup,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+#endif
+	.relinquish_port	= NULL,
+	.port_handed_over	= ehci_xilinx_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/**
+ * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
+ * @op:		pointer to the platform_device bound to the host controller
+ *
+ * This function requests resources and sets up appropriate properties for the
+ * host controller. Because the Xilinx USB host controller can be configured
+ * as HS only or HS/FS only, it checks the configuration in the device tree
+ * entry, and sets an appropriate value for hcd->has_tt.
+ */
+static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct usb_hcd *hcd;
+	struct ehci_hcd	*ehci;
+	struct resource res;
+	int irq;
+	int rv;
+	int *value;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
+				"XILINX-OF USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = resource_size(&res);
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (!irq) {
+		dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+			__FILE__);
+		rv = -EBUSY;
+		goto err_irq;
+	}
+
+	hcd->regs = devm_ioremap_resource(&op->dev, &res);
+	if (IS_ERR(hcd->regs)) {
+		rv = PTR_ERR(hcd->regs);
+		goto err_irq;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+
+	/* This core always has big-endian register interface and uses
+	 * big-endian memory descriptors.
+	 */
+	ehci->big_endian_mmio = 1;
+	ehci->big_endian_desc = 1;
+
+	/* Check whether the FS support option is selected in the hardware. */
+	value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
+	if (value && (*value == 1)) {
+		ehci_dbg(ehci, "USB host controller supports FS devices\n");
+		hcd->has_tt = 1;
+	} else {
+		ehci_dbg(ehci,
+			"USB host controller is HS only\n");
+		hcd->has_tt = 0;
+	}
+
+	/* The debug registers occupy the first 0x100 bytes, so the
+	 * capability registers start at offset 0x100.
+	 */
+	ehci->caps = hcd->regs + 0x100;
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return 0;
+	}
+
+err_irq:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+/**
+ * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
+ * @op:		pointer to platform_device structure that is to be removed
+ *
+ * Remove the hcd structure, and release resources that have been requested
+ * during probe.
+ */
+static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
+	{ .compatible = "xlnx,xps-usb-host-1.00.a", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
+
+static struct platform_driver ehci_hcd_xilinx_of_driver = {
+	.probe		= ehci_hcd_xilinx_of_probe,
+	.remove		= ehci_hcd_xilinx_of_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "xilinx-of-ehci",
+		.of_match_table = ehci_hcd_xilinx_of_match,
+	},
+};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
new file mode 100644
index 0000000..c8e9a48
--- /dev/null
+++ b/drivers/usb/host/ehci.h
@@ -0,0 +1,894 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2001-2002 by David Brownell
+ */
+
+#ifndef __LINUX_EHCI_HCD_H
+#define __LINUX_EHCI_HCD_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+#else
+#define __hc32	__le32
+#define __hc16	__le16
+#endif
+
+/* statistics can be kept for tuning/monitoring */
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define EHCI_STATS
+#endif
+
+struct ehci_stats {
+	/* irq usage */
+	unsigned long		normal;
+	unsigned long		error;
+	unsigned long		iaa;
+	unsigned long		lost_iaa;
+
+	/* termination of urbs from core */
+	unsigned long		complete;
+	unsigned long		unlink;
+};
+
+/*
+ * Scheduling and budgeting information for periodic transfers, for both
+ * high-speed devices and full/low-speed devices lying behind a TT.
+ */
+struct ehci_per_sched {
+	struct usb_device	*udev;		/* access to the TT */
+	struct usb_host_endpoint *ep;
+	struct list_head	ps_list;	/* node on ehci_tt's ps_list */
+	u16			tt_usecs;	/* time on the FS/LS bus */
+	u16			cs_mask;	/* C-mask and S-mask bytes */
+	u16			period;		/* actual period in frames */
+	u16			phase;		/* actual phase, frame part */
+	u8			bw_phase;	/* same, for bandwidth
+						   reservation */
+	u8			phase_uf;	/* uframe part of the phase */
+	u8			usecs, c_usecs;	/* times on the HS bus */
+	u8			bw_uperiod;	/* period in microframes, for
+						   bandwidth reservation */
+	u8			bw_period;	/* same, in frames */
+};
+#define NO_FRAME	29999			/* frame not assigned yet */
+
+/* ehci_hcd->lock guards shared data against other CPUs:
+ *   ehci_hcd:	async, unlink, periodic (and shadow), ...
+ *   usb_host_endpoint: hcpriv
+ *   ehci_qh:	qh_next, qtd_list
+ *   ehci_qtd:	qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */
+
+/*
+ * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the
+ * controller may be doing DMA.  Lower values mean there's no DMA.
+ */
+enum ehci_rh_state {
+	EHCI_RH_HALTED,
+	EHCI_RH_SUSPENDED,
+	EHCI_RH_RUNNING,
+	EHCI_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum ehci_hrtimer_event {
+	EHCI_HRTIMER_POLL_ASS,		/* Poll for async schedule off */
+	EHCI_HRTIMER_POLL_PSS,		/* Poll for periodic schedule off */
+	EHCI_HRTIMER_POLL_DEAD,		/* Wait for dead controller to stop */
+	EHCI_HRTIMER_UNLINK_INTR,	/* Wait for interrupt QH unlink */
+	EHCI_HRTIMER_FREE_ITDS,		/* Wait for unused iTDs and siTDs */
+	EHCI_HRTIMER_ACTIVE_UNLINK,	/* Wait while unlinking an active QH */
+	EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
+	EHCI_HRTIMER_ASYNC_UNLINKS,	/* Unlink empty async QHs */
+	EHCI_HRTIMER_IAA_WATCHDOG,	/* Handle lost IAA interrupts */
+	EHCI_HRTIMER_DISABLE_PERIODIC,	/* Wait to disable periodic sched */
+	EHCI_HRTIMER_DISABLE_ASYNC,	/* Wait to disable async sched */
+	EHCI_HRTIMER_IO_WATCHDOG,	/* Check for missing IRQs */
+	EHCI_HRTIMER_NUM_EVENTS		/* Must come last */
+};
+#define EHCI_HRTIMER_NO_EVENT	99
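+
+/*
+ * Sketch (illustrative, not the actual tables) of the parallel arrays in
+ * ehci-timer.c that this enum indexes; keeping them in the same order as
+ * the enum is what the comment above requires:
+ *
+ *	static unsigned event_delays_ns[] = {
+ *		[EHCI_HRTIMER_POLL_ASS] = 1 * NSEC_PER_MSEC,
+ *		...
+ *	};
+ *	static void (*event_handlers[])(struct ehci_hcd *) = {
+ *		[EHCI_HRTIMER_POLL_ASS] = ehci_poll_ASS,
+ *		...
+ *	};
+ */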
+
+struct ehci_hcd {			/* one per controller */
+	/* timing support */
+	enum ehci_hrtimer_event	next_hrtimer_event;
+	unsigned		enabled_hrtimer_events;
+	ktime_t			hr_timeouts[EHCI_HRTIMER_NUM_EVENTS];
+	struct hrtimer		hrtimer;
+
+	int			PSS_poll_count;
+	int			ASS_poll_count;
+	int			died_poll_count;
+
+	/* glue to PCI and HCD framework */
+	struct ehci_caps __iomem *caps;
+	struct ehci_regs __iomem *regs;
+	struct ehci_dbg_port __iomem *debug;
+
+	__u32			hcs_params;	/* cached register copy */
+	spinlock_t		lock;
+	enum ehci_rh_state	rh_state;
+
+	/* general schedule support */
+	bool			scanning:1;
+	bool			need_rescan:1;
+	bool			intr_unlinking:1;
+	bool			iaa_in_progress:1;
+	bool			async_unlinking:1;
+	bool			shutdown:1;
+	struct ehci_qh		*qh_scan_next;
+
+	/* async schedule support */
+	struct ehci_qh		*async;
+	struct ehci_qh		*dummy;		/* For AMD quirk use */
+	struct list_head	async_unlink;
+	struct list_head	async_idle;
+	unsigned		async_unlink_cycle;
+	unsigned		async_count;	/* async activity count */
+	__hc32			old_current;	/* Test for QH becoming */
+	__hc32			old_token;	/*  inactive during unlink */
+
+	/* periodic schedule support */
+#define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
+	unsigned		periodic_size;
+	__hc32			*periodic;	/* hw periodic table */
+	dma_addr_t		periodic_dma;
+	struct list_head	intr_qh_list;
+	unsigned		i_thresh;	/* uframes HC might cache */
+
+	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
+	struct list_head	intr_unlink_wait;
+	struct list_head	intr_unlink;
+	unsigned		intr_unlink_wait_cycle;
+	unsigned		intr_unlink_cycle;
+	unsigned		now_frame;	/* frame from HC hardware */
+	unsigned		last_iso_frame;	/* last frame scanned for iso */
+	unsigned		intr_count;	/* intr activity count */
+	unsigned		isoc_count;	/* isoc activity count */
+	unsigned		periodic_count;	/* periodic activity count */
+	unsigned		uframe_periodic_max; /* max periodic time per uframe */
+
+
+	/* list of itds & sitds completed while now_frame was still active */
+	struct list_head	cached_itd_list;
+	struct ehci_itd		*last_itd_to_free;
+	struct list_head	cached_sitd_list;
+	struct ehci_sitd	*last_sitd_to_free;
+
+	/* per root hub port */
+	unsigned long		reset_done[EHCI_MAX_ROOT_PORTS];
+
+	/* bit vectors (one bit per port) */
+	unsigned long		bus_suspended;		/* which ports were
+			already suspended at the start of a bus suspend */
+	unsigned long		companion_ports;	/* which ports are
+			dedicated to the companion controller */
+	unsigned long		owned_ports;		/* which ports are
+			owned by the companion during a bus suspend */
+	unsigned long		port_c_suspend;		/* which ports have
+			the change-suspend feature turned on */
+	unsigned long		suspended_ports;	/* which ports are
+			suspended */
+	unsigned long		resuming_ports;		/* which ports have
+			started to resume */
+
+	/* per-HC memory pools (could be per-bus, but ...) */
+	struct dma_pool		*qh_pool;	/* qh per active urb */
+	struct dma_pool		*qtd_pool;	/* one or more per qh */
+	struct dma_pool		*itd_pool;	/* itd per iso urb */
+	struct dma_pool		*sitd_pool;	/* sitd per split iso urb */
+
+	unsigned		random_frame;
+	unsigned long		next_statechange;
+	ktime_t			last_periodic_enable;
+	u32			command;
+
+	/* SILICON QUIRKS */
+	unsigned		no_selective_suspend:1;
+	unsigned		has_fsl_port_bug:1; /* Freescale */
+	unsigned		has_fsl_hs_errata:1;	/* Freescale HS quirk */
+	unsigned		has_fsl_susp_errata:1;	/* NXP SUSP quirk */
+	unsigned		big_endian_mmio:1;
+	unsigned		big_endian_desc:1;
+	unsigned		big_endian_capbase:1;
+	unsigned		has_amcc_usb23:1;
+	unsigned		need_io_watchdog:1;
+	unsigned		amd_pll_fix:1;
+	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
+	unsigned		has_synopsys_hc_bug:1; /* Synopsys HC */
+	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */
+	unsigned		need_oc_pp_cycle:1; /* MPC834X port power */
+	unsigned		imx28_write_fix:1; /* For Freescale i.MX28 */
+
+	/* required for usb32 quirk */
+	#define OHCI_CTRL_HCFS          (3 << 6)
+	#define OHCI_USB_OPER           (2 << 6)
+	#define OHCI_USB_SUSPEND        (3 << 6)
+
+	#define OHCI_HCCTRL_OFFSET      0x4
+	#define OHCI_HCCTRL_LEN         0x4
+	__hc32			*ohci_hcctrl_reg;
+	unsigned		has_hostpc:1;
+	unsigned		has_tdi_phy_lpm:1;
+	unsigned		has_ppcd:1; /* support per-port change bits */
+	u8			sbrn;		/* packed release number */
+
+	/* irq statistics */
+#ifdef EHCI_STATS
+	struct ehci_stats	stats;
+#	define COUNT(x) ((x)++)
+#else
+#	define COUNT(x)
+#endif
+
+	/* debug files */
+#ifdef CONFIG_DYNAMIC_DEBUG
+	struct dentry		*debug_dir;
+#endif
+
+	/* bandwidth usage */
+#define EHCI_BANDWIDTH_SIZE	64
+#define EHCI_BANDWIDTH_FRAMES	(EHCI_BANDWIDTH_SIZE >> 3)
+	u8			bandwidth[EHCI_BANDWIDTH_SIZE];
+						/* us allocated per uframe */
+	u8			tt_budget[EHCI_BANDWIDTH_SIZE];
+						/* us budgeted per uframe */
+	struct list_head	tt_list;
+
+	/* platform-specific data -- must come last */
+	unsigned long		priv[0] __aligned(sizeof(s64));
+};
+
+/* convert between an HCD pointer and the corresponding EHCI_HCD */
+static inline struct ehci_hcd *hcd_to_ehci(struct usb_hcd *hcd)
+{
+	return (struct ehci_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *ehci_to_hcd(struct ehci_hcd *ehci)
+{
+	return container_of((void *) ehci, struct usb_hcd, hcd_priv);
+}
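+
+/*
+ * Typical use (hypothetical callback, for illustration): the USB core hands
+ * hc_driver methods a struct usb_hcd, and the EHCI-private state sits
+ * directly behind it:
+ *
+ *	static int example_reset(struct usb_hcd *hcd)
+ *	{
+ *		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ *
+ *		spin_lock_irq(&ehci->lock);
+ *		...
+ *		spin_unlock_irq(&ehci->lock);
+ *		return 0;
+ *	}
+ */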
+
+/*-------------------------------------------------------------------------*/
+
+#include <linux/usb/ehci_def.h>
+
+/*-------------------------------------------------------------------------*/
+
+#define	QTD_NEXT(ehci, dma)	cpu_to_hc32(ehci, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct ehci_qtd {
+	/* first part defined by EHCI spec */
+	__hc32			hw_next;	/* see EHCI 3.5.1 */
+	__hc32			hw_alt_next;    /* see EHCI 3.5.2 */
+	__hc32			hw_token;       /* see EHCI 3.5.3 */
+#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
+#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
+#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
+#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
+#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
+#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
+
+#define ACTIVE_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_ACTIVE)
+#define HALT_BIT(ehci)		cpu_to_hc32(ehci, QTD_STS_HALT)
+#define STATUS_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_STS)
+
+	__hc32			hw_buf[5];        /* see EHCI 3.5.4 */
+	__hc32			hw_buf_hi[5];        /* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		qtd_dma;		/* qtd address */
+	struct list_head	qtd_list;		/* sw qtd list */
+	struct urb		*urb;			/* qtd's urb */
+	size_t			length;			/* length of buffer */
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(ehci)	cpu_to_hc32(ehci, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,sitd,fstn}->hw_next */
+#define Q_NEXT_TYPE(ehci, dma)	((dma) & cpu_to_hc32(ehci, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD	(0 << 1)
+#define Q_TYPE_QH	(1 << 1)
+#define Q_TYPE_SITD	(2 << 1)
+#define Q_TYPE_FSTN	(3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(ehci, dma) \
+		(cpu_to_hc32(ehci, (((u32) dma) & ~0x01f) | Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define EHCI_LIST_END(ehci)	cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure.  That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule.  Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union ehci_shadow {
+	struct ehci_qh		*qh;		/* Q_TYPE_QH */
+	struct ehci_itd		*itd;		/* Q_TYPE_ITD */
+	struct ehci_sitd	*sitd;		/* Q_TYPE_SITD */
+	struct ehci_fstn	*fstn;		/* Q_TYPE_FSTN */
+	__hc32			*hw_next;	/* (all types) */
+	void			*ptr;
+};
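+
+/*
+ * Illustrative sketch (hypothetical; scan_qh/scan_itd are placeholder
+ * names): dispatch on the hardware type tag before picking a union member:
+ *
+ *	union ehci_shadow here = ehci->pshadow[frame];
+ *	__hc32 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
+ *
+ *	if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+ *		scan_qh(here.qh);
+ *	else if (type == cpu_to_hc32(ehci, Q_TYPE_ITD))
+ *		scan_itd(here.itd);
+ */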
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct ehci_qh_hw {
+	__hc32			hw_next;	/* see EHCI 3.6.1 */
+	__hc32			hw_info1;       /* see EHCI 3.6.2 */
+#define	QH_CONTROL_EP	(1 << 27)	/* FS/LS control endpoint */
+#define	QH_HEAD		(1 << 15)	/* Head of async reclamation list */
+#define	QH_TOGGLE_CTL	(1 << 14)	/* Data toggle control */
+#define	QH_HIGH_SPEED	(2 << 12)	/* Endpoint speed */
+#define	QH_LOW_SPEED	(1 << 12)
+#define	QH_FULL_SPEED	(0 << 12)
+#define	QH_INACTIVATE	(1 << 7)	/* Inactivate on next transaction */
+	__hc32			hw_info2;        /* see EHCI 3.6.2 */
+#define	QH_SMASK	0x000000ff
+#define	QH_CMASK	0x0000ff00
+#define	QH_HUBADDR	0x007f0000
+#define	QH_HUBPORT	0x3f800000
+#define	QH_MULT		0xc0000000
+	__hc32			hw_current;	/* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct ehci_qtd) */
+	__hc32			hw_qtd_next;
+	__hc32			hw_alt_next;
+	__hc32			hw_token;
+	__hc32			hw_buf[5];
+	__hc32			hw_buf_hi[5];
+} __aligned(32);
+
+struct ehci_qh {
+	struct ehci_qh_hw	*hw;		/* Must come first */
+	/* the rest is HCD-private */
+	dma_addr_t		qh_dma;		/* address of qh */
+	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
+	struct list_head	qtd_list;	/* sw qtd list */
+	struct list_head	intr_node;	/* list of intr QHs */
+	struct ehci_qtd		*dummy;
+	struct list_head	unlink_node;
+	struct ehci_per_sched	ps;		/* scheduling info */
+
+	unsigned		unlink_cycle;
+
+	u8			qh_state;
+#define	QH_STATE_LINKED		1		/* HC sees this */
+#define	QH_STATE_UNLINK		2		/* HC may still see this */
+#define	QH_STATE_IDLE		3		/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on unlink q */
+#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */
+
+	u8			xacterrs;	/* XactErr retry counter */
+#define	QH_XACTERR_MAX		32		/* XactErr retry limit */
+
+	u8			unlink_reason;
+#define QH_UNLINK_HALTED	0x01		/* Halt flag is set */
+#define QH_UNLINK_SHORT_READ	0x02		/* Recover from a short read */
+#define QH_UNLINK_DUMMY_OVERLAY	0x04		/* QH overlaid the dummy TD */
+#define QH_UNLINK_SHUTDOWN	0x08		/* The HC isn't running */
+#define QH_UNLINK_QUEUE_EMPTY	0x10		/* Reached end of the queue */
+#define QH_UNLINK_REQUESTED	0x20		/* Disable, reset, or dequeue */
+
+	u8			gap_uf;		/* uframes split/csplit gap */
+
+	unsigned		is_out:1;	/* bulk or intr OUT */
+	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
+	unsigned		dequeue_during_giveback:1;
+	unsigned		should_be_inactive:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct ehci_iso_packet {
+	/* These will be copied to iTD when scheduling */
+	u64			bufp;		/* itd->hw_bufp{,_hi}[pg] |= */
+	__hc32			transaction;	/* itd->hw_transaction[i] |= */
+	u8			cross;		/* buf crosses pages */
+	/* for full speed OUT splits */
+	u32			buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct ehci_iso_sched {
+	struct list_head	td_list;
+	unsigned		span;
+	unsigned		first_packet;
+	struct ehci_iso_packet	packet[0];
+};
+
+/*
+ * ehci_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct ehci_iso_stream {
+	/* first field matches ehci_qh, but is NULL */
+	struct ehci_qh_hw	*hw;
+
+	u8			bEndpointAddress;
+	u8			highspeed;
+	struct list_head	td_list;	/* queued itds/sitds */
+	struct list_head	free_list;	/* list of unused itds/sitds */
+
+	/* output of (re)scheduling */
+	struct ehci_per_sched	ps;		/* scheduling info */
+	unsigned		next_uframe;
+	__hc32			splits;
+
+	/* the rest is derived from the endpoint descriptor,
+	 * including the extra info for hw_bufp[0..2]
+	 */
+	u16			uperiod;	/* period in uframes */
+	u16			maxp;
+	unsigned		bandwidth;
+
+	/* This is used to initialize iTD's hw_bufp fields */
+	__hc32			buf0;
+	__hc32			buf1;
+	__hc32			buf2;
+
+	/* this is used to initialize sITD's tt info */
+	__hc32			address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct ehci_itd {
+	/* first part defined by EHCI spec */
+	__hc32			hw_next;           /* see EHCI 3.3.1 */
+	__hc32			hw_transaction[8]; /* see EHCI 3.3.2 */
+#define EHCI_ISOC_ACTIVE        (1<<31)        /* activate transfer this slot */
+#define EHCI_ISOC_BUF_ERR       (1<<30)        /* Data buffer error */
+#define EHCI_ISOC_BABBLE        (1<<29)        /* babble detected */
+#define EHCI_ISOC_XACTERR       (1<<28)        /* XactErr - transaction error */
+#define	EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x0fff)
+#define	EHCI_ITD_IOC		(1 << 15)	/* interrupt on complete */
+
+#define ITD_ACTIVE(ehci)	cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)
+
+	__hc32			hw_bufp[7];	/* see EHCI 3.3.3 */
+	__hc32			hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		itd_dma;	/* for this itd */
+	union ehci_shadow	itd_next;	/* ptr to periodic q entry */
+
+	struct urb		*urb;
+	struct ehci_iso_stream	*stream;	/* endpoint's queue */
+	struct list_head	itd_list;	/* list of stream's itds */
+
+	/* any/all hw_transactions here may be used by that urb */
+	unsigned		frame;		/* where scheduled */
+	unsigned		pg;
+	unsigned		index[8];	/* in urb->iso_frame_desc */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.4
+ * siTD, aka split-transaction isochronous Transfer Descriptor
+ *       ... describe full speed iso xfers through TT in hubs
+ * see Figure 3-5 "Split-transaction Isochronous Transaction Descriptor (siTD)"
+ */
+struct ehci_sitd {
+	/* first part defined by EHCI spec */
+	__hc32			hw_next;
+/* uses bit field macros above - see EHCI 0.95 Table 3-8 */
+	__hc32			hw_fullspeed_ep;	/* EHCI table 3-9 */
+	__hc32			hw_uframe;		/* EHCI table 3-10 */
+	__hc32			hw_results;		/* EHCI table 3-11 */
+#define	SITD_IOC	(1 << 31)	/* interrupt on completion */
+#define	SITD_PAGE	(1 << 30)	/* buffer 0/1 */
+#define	SITD_LENGTH(x)	(((x) >> 16) & 0x3ff)
+#define	SITD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
+#define	SITD_STS_ERR	(1 << 6)	/* error from TT */
+#define	SITD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
+#define	SITD_STS_BABBLE	(1 << 4)	/* device was babbling */
+#define	SITD_STS_XACT	(1 << 3)	/* illegal IN response */
+#define	SITD_STS_MMF	(1 << 2)	/* incomplete split transaction */
+#define	SITD_STS_STS	(1 << 1)	/* split transaction state */
+
+#define SITD_ACTIVE(ehci)	cpu_to_hc32(ehci, SITD_STS_ACTIVE)
+
+	__hc32			hw_buf[2];		/* EHCI table 3-12 */
+	__hc32			hw_backpointer;		/* EHCI table 3-13 */
+	__hc32			hw_buf_hi[2];		/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		sitd_dma;
+	union ehci_shadow	sitd_next;	/* ptr to periodic q entry */
+
+	struct urb		*urb;
+	struct ehci_iso_stream	*stream;	/* endpoint's queue */
+	struct list_head	sitd_list;	/* list of stream's sitds */
+	unsigned		frame;
+	unsigned		index;
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct ehci_fstn {
+	__hc32			hw_next;	/* any periodic q entry */
+	__hc32			hw_prev;	/* qh or EHCI_LIST_END */
+
+	/* the rest is HCD-private */
+	dma_addr_t		fstn_dma;
+	union ehci_shadow	fstn_next;	/* ptr to periodic q entry */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * USB-2.0 Specification Sections 11.14 and 11.18
+ * Scheduling and budgeting split transactions using TTs
+ *
+ * A hub can have a single TT for all its ports, or multiple TTs (one for each
+ * port).  The bandwidth and budgeting information for the full/low-speed bus
+ * below each TT is self-contained and independent of the other TTs or the
+ * high-speed bus.
+ *
+ * "Bandwidth" refers to the number of microseconds on the FS/LS bus allocated
+ * to an interrupt or isochronous endpoint for each frame.  "Budget" refers to
+ * the best-case estimate of the number of full-speed bytes allocated to an
+ * endpoint for each microframe within an allocated frame.
+ *
+ * Removal of an endpoint invalidates a TT's budget.  Instead of trying to
+ * keep an up-to-date record, we recompute the budget when it is needed.
+ */
+
+struct ehci_tt {
+	u16			bandwidth[EHCI_BANDWIDTH_FRAMES];
+
+	struct list_head	tt_list;	/* List of all ehci_tt's */
+	struct list_head	ps_list;	/* Items using this TT */
+	struct usb_tt		*usb_tt;
+	int			tt_port;	/* TT port number */
+};
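+
+/*
+ * Numeric sketch (illustrative): EHCI_BANDWIDTH_FRAMES is 64 >> 3 = 8, so
+ * each TT's FS/LS bandwidth is tracked over an 8-frame window. An interrupt
+ * endpoint with ps.bw_period = 4 and phase 0 charges its ps.tt_usecs into
+ * bandwidth[0] and bandwidth[4] of the TT it sits behind.
+ */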
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup)	\
+		ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup)
+
+#define ehci_prepare_ports_for_controller_resume(ehci)			\
+		ehci_adjust_port_wakeup_flags(ehci, false, false)
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature.  Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+#define	ehci_is_TDI(e)			(ehci_to_hcd(e)->has_tt)
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
+{
+	if (ehci_is_TDI(ehci)) {
+		switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) {
+		case 0:
+			return 0;
+		case 1:
+			return USB_PORT_STAT_LOW_SPEED;
+		case 2:
+		default:
+			return USB_PORT_STAT_HIGH_SPEED;
+		}
+	}
+	return USB_PORT_STAT_HIGH_SPEED;
+}
+
+#else
+
+#define	ehci_is_TDI(e)			(0)
+
+#define	ehci_port_speed(ehci, portsc)	USB_PORT_STAT_HIGH_SPEED
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PPC_83xx
+/* Some Freescale processors have an erratum in which the TT
+ * port number in the queue head was 0..N-1 instead of 1..N.
+ */
+#define	ehci_has_fsl_portno_bug(e)		((e)->has_fsl_port_bug)
+#else
+#define	ehci_has_fsl_portno_bug(e)		(0)
+#endif
+
+#define PORTSC_FSL_PFSC	24	/* Port Force Full-Speed Connect */
+
+#if defined(CONFIG_PPC_85xx)
+/* Some Freescale processors have an erratum (USB A-005275) in which
+ * incoming packets get corrupted in HS mode
+ */
+#define ehci_has_fsl_hs_errata(e)	((e)->has_fsl_hs_errata)
+#else
+#define ehci_has_fsl_hs_errata(e)	(0)
+#endif
+
+/*
+ * Some Freescale/NXP processors have an erratum (USB A-005697)
+ * in which we need to wait for 10ms for bus to enter suspend mode
+ * after setting SUSP bit.
+ */
+#define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (e.g. the celleb companion chip)
+ * implements them in big-endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ * ehci_big_endian_capbase is a special quirk for controllers that
+ * implement the HC capability registers as separate registers and not
+ * as fields of a 32-bit register.
+ */
+
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+#define ehci_big_endian_mmio(e)		((e)->big_endian_mmio)
+#define ehci_big_endian_capbase(e)	((e)->big_endian_capbase)
+#else
+#define ehci_big_endian_mmio(e)		0
+#define ehci_big_endian_capbase(e)	0
+#endif
+
+/*
+ * Big-endian read/write functions are arch-specific.
+ * Other arches can be added if/when they're needed.
+ */
+#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_IXP4XX)
+#define readl_be(addr)		__raw_readl((__force unsigned *)addr)
+#define writel_be(val, addr)	__raw_writel(val, (__force unsigned *)addr)
+#endif
+
+static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
+		__u32 __iomem *regs)
+{
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+	return ehci_big_endian_mmio(ehci) ?
+		readl_be(regs) :
+		readl(regs);
+#else
+	return readl(regs);
+#endif
+}
+
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ehci_writel(const unsigned int val,
+		volatile __u32 __iomem *addr)
+{
+	__asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ehci_writel(const unsigned int val,
+		volatile __u32 __iomem *addr)
+{
+}
+#endif
+static inline void ehci_writel(const struct ehci_hcd *ehci,
+		const unsigned int val, __u32 __iomem *regs)
+{
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+	ehci_big_endian_mmio(ehci) ?
+		writel_be(val, regs) :
+		writel(val, regs);
+#else
+	if (ehci->imx28_write_fix)
+		imx28_ehci_writel(val, regs);
+	else
+		writel(val, regs);
+#endif
+}
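+
+/*
+ * Illustrative caller (hypothetical snippet): all MMIO goes through the
+ * helpers above, so the BE/LE choice and the i.MX28 workaround are decided
+ * in one place. CMD_RUN is from <linux/usb/ehci_def.h>:
+ *
+ *	u32 cmd = ehci_readl(ehci, &ehci->regs->command);
+ *	ehci_writel(ehci, cmd | CMD_RUN, &ehci->regs->command);
+ */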
+
+/*
+ * On certain ppc-44x SoCs there is a HW issue that can only be worked around
+ * by explicitly switching the companion OHCI between its suspend and
+ * operational states. This function therefore only makes sense on that arch.
+ * Other common bits are dependent on the has_amcc_usb23 quirk flag.
+ */
+#ifdef CONFIG_44x
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{
+	u32 hc_control;
+
+	hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS);
+	if (operational)
+		hc_control |= OHCI_USB_OPER;
+	else
+		hc_control |= OHCI_USB_SUSPEND;
+
+	writel_be(hc_control, ehci->ohci_hcctrl_reg);
+	(void) readl_be(ehci->ohci_hcctrl_reg);
+}
+#else
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{ }
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The AMCC 440EPx not only implements its EHCI registers in big-endian
+ * format, but also its DMA data structures (descriptors).
+ *
+ * EHCI controllers accessed through PCI work normally (little-endian
+ * everywhere), so we won't bother supporting a BE-only mode for now.
+ */
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+#define ehci_big_endian_desc(e)		((e)->big_endian_desc)
+
+/* cpu to ehci */
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
+{
+	return ehci_big_endian_desc(ehci)
+		? (__force __hc32)cpu_to_be32(x)
+		: (__force __hc32)cpu_to_le32(x);
+}
+
+/* ehci to cpu */
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
+{
+	return ehci_big_endian_desc(ehci)
+		? be32_to_cpu((__force __be32)x)
+		: le32_to_cpu((__force __le32)x);
+}
+
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
+{
+	return ehci_big_endian_desc(ehci)
+		? be32_to_cpup((__force __be32 *)x)
+		: le32_to_cpup((__force __le32 *)x);
+}
+
+#else
+
+/* cpu to ehci */
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
+{
+	return cpu_to_le32(x);
+}
+
+/* ehci to cpu */
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
+{
+	return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
+{
+	return le32_to_cpup(x);
+}
+
+#endif
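+
+/*
+ * Illustrative use (hypothetical snippet): every hardware-visible
+ * descriptor word is built and tested through these helpers, so the same
+ * code serves LE and BE controllers alike:
+ *
+ *	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | QTD_STS_ACTIVE);
+ *	...
+ *	if (qtd->hw_token & ACTIVE_BIT(ehci))
+ *		return;		still owned by the hardware
+ */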
+
+/*-------------------------------------------------------------------------*/
+
+#define ehci_dbg(ehci, fmt, args...) \
+	dev_dbg(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
+#define ehci_err(ehci, fmt, args...) \
+	dev_err(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
+#define ehci_info(ehci, fmt, args...) \
+	dev_info(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
+#define ehci_warn(ehci, fmt, args...) \
+	dev_warn(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* Declarations of things exported for use by ehci platform drivers */
+
+struct ehci_driver_overrides {
+	size_t		extra_priv_size;
+	int		(*reset)(struct usb_hcd *hcd);
+	int		(*port_power)(struct usb_hcd *hcd,
+				int portnum, bool enable);
+};
+
+extern void	ehci_init_driver(struct hc_driver *drv,
+				const struct ehci_driver_overrides *over);
+extern int	ehci_setup(struct usb_hcd *hcd);
+extern int	ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+				u32 mask, u32 done, int usec);
+extern int	ehci_reset(struct ehci_hcd *ehci);
+
+extern int	ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int	ehci_resume(struct usb_hcd *hcd, bool force_reset);
+extern void	ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+			bool suspending, bool do_wakeup);
+
+extern int	ehci_hub_control(struct usb_hcd	*hcd, u16 typeReq, u16 wValue,
+				 u16 wIndex, char *buf, u16 wLength);
+
+#endif /* __LINUX_EHCI_HCD_H */
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
new file mode 100644
index 0000000..100048b
--- /dev/null
+++ b/drivers/usb/host/fhci-dbg.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "fhci.h"
+
+void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
+{
+	int i;
+
+	if (usb_er == -1) {
+		fhci->usb_irq_stat[12]++;
+		return;
+	}
+
+	for (i = 0; i < 12; ++i) {
+		if (usb_er & (1 << i))
+			fhci->usb_irq_stat[i]++;
+	}
+}
+
+static int fhci_dfs_regs_show(struct seq_file *s, void *v)
+{
+	struct fhci_hcd *fhci = s->private;
+	struct qe_usb_ctlr __iomem *regs = fhci->regs;
+
+	seq_printf(s,
+		"mode: 0x%x\n" "addr: 0x%x\n"
+		"command: 0x%x\n" "ep0: 0x%x\n"
+		"event: 0x%x\n" "mask: 0x%x\n"
+		"status: 0x%x\n" "SOF timer: %d\n"
+		"frame number: %d\n"
+		"lines status: 0x%x\n",
+		in_8(&regs->usb_usmod), in_8(&regs->usb_usadr),
+		in_8(&regs->usb_uscom), in_be16(&regs->usb_usep[0]),
+		in_be16(&regs->usb_usber), in_be16(&regs->usb_usbmr),
+		in_8(&regs->usb_usbs), in_be16(&regs->usb_ussft),
+		in_be16(&regs->usb_usfrn),
+		fhci_ioports_check_bus_state(fhci));
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fhci_dfs_regs);
+
+static int fhci_dfs_irq_stat_show(struct seq_file *s, void *v)
+{
+	struct fhci_hcd *fhci = s->private;
+	int *usb_irq_stat = fhci->usb_irq_stat;
+
+	seq_printf(s,
+		"RXB: %d\n" "TXB: %d\n" "BSY: %d\n"
+		"SOF: %d\n" "TXE0: %d\n" "TXE1: %d\n"
+		"TXE2: %d\n" "TXE3: %d\n" "IDLE: %d\n"
+		"RESET: %d\n" "SFT: %d\n" "MSF: %d\n"
+		"IDLE_ONLY: %d\n",
+		usb_irq_stat[0], usb_irq_stat[1], usb_irq_stat[2],
+		usb_irq_stat[3], usb_irq_stat[4], usb_irq_stat[5],
+		usb_irq_stat[6], usb_irq_stat[7], usb_irq_stat[8],
+		usb_irq_stat[9], usb_irq_stat[10], usb_irq_stat[11],
+		usb_irq_stat[12]);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fhci_dfs_irq_stat);
+
+void fhci_dfs_create(struct fhci_hcd *fhci)
+{
+	struct device *dev = fhci_to_hcd(fhci)->self.controller;
+
+	fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
+
+	debugfs_create_file("regs", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
+			    &fhci_dfs_regs_fops);
+	debugfs_create_file("irq_stat", S_IFREG | S_IRUGO, fhci->dfs_root, fhci,
+			    &fhci_dfs_irq_stat_fops);
+}
+
+void fhci_dfs_destroy(struct fhci_hcd *fhci)
+{
+	debugfs_remove_recursive(fhci->dfs_root);
+}
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
new file mode 100644
index 0000000..48fe9e6
--- /dev/null
+++ b/drivers/usb/host/fhci-hcd.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/fsl_gtm.h>
+#include "fhci.h"
+
+void fhci_start_sof_timer(struct fhci_hcd *fhci)
+{
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	/* clear frame_n */
+	out_be16(&fhci->pram->frame_num, 0);
+
+	out_be16(&fhci->regs->usb_ussft, 0);
+	setbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+void fhci_stop_sof_timer(struct fhci_hcd *fhci)
+{
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	clrbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
+	gtm_stop_timer16(fhci->timer);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
+{
+	/* in_be16() already returns CPU byte order; the SOF timer counts
+	 * 12 MHz bit times, so dividing by 12 yields microseconds */
+	return in_be16(&usb->fhci->regs->usb_ussft) / 12;
+}
+
+/* initialize endpoint zero */
+static u32 endpoint_zero_init(struct fhci_usb *usb,
+			      enum fhci_mem_alloc data_mem,
+			      u32 ring_len)
+{
+	u32 rc;
+
+	rc = fhci_create_ep(usb, data_mem, ring_len);
+	if (rc)
+		return rc;
+
+	/* initialize endpoint registers */
+	fhci_init_ep_registers(usb, usb->ep0, data_mem);
+
+	return 0;
+}
+
+/* enable the USB interrupts */
+void fhci_usb_enable_interrupt(struct fhci_usb *usb)
+{
+	struct fhci_hcd *fhci = usb->fhci;
+
+	if (usb->intr_nesting_cnt == 1) {
+		/* initialize the USB interrupt */
+		enable_irq(fhci_to_hcd(fhci)->irq);
+
+		/* initialize the event register and mask register */
+		out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+		out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+
+		/* enable the timer interrupts */
+		enable_irq(fhci->timer->irq);
+	} else if (usb->intr_nesting_cnt > 1)
+		fhci_info(fhci, "unbalanced USB interrupt nesting\n");
+	usb->intr_nesting_cnt--;
+}
+
+/* disable the usb interrupt */
+void fhci_usb_disable_interrupt(struct fhci_usb *usb)
+{
+	struct fhci_hcd *fhci = usb->fhci;
+
+	if (usb->intr_nesting_cnt == 0) {
+		/* disable the timer interrupt */
+		disable_irq_nosync(fhci->timer->irq);
+
+		/* disable the usb interrupt */
+		disable_irq_nosync(fhci_to_hcd(fhci)->irq);
+		out_be16(&usb->fhci->regs->usb_usbmr, 0);
+	}
+	usb->intr_nesting_cnt++;
+}
+
+/* enable the USB controller */
+static u32 fhci_usb_enable(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+	out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+	setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
+
+	mdelay(100);
+
+	return 0;
+}
+
+/* disable the USB controller */
+static u32 fhci_usb_disable(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	fhci_usb_disable_interrupt(usb);
+	fhci_port_disable(fhci);
+
+	/* disable the usb controller */
+	if (usb->port_status == FHCI_PORT_FULL ||
+			usb->port_status == FHCI_PORT_LOW)
+		fhci_device_disconnected_interrupt(fhci);
+
+	clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
+
+	return 0;
+}
+
+/* check the bus state by polling the USB line-state GPIOs */
+int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
+{
+	u8 bits = 0;
+
+	/* check USBOE; if transmitting, exit */
+	if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
+		return -1;
+
+	/* check USBRP */
+	if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
+		bits |= 0x2;
+
+	/* check USBRN */
+	if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
+		bits |= 0x1;
+
+	return bits;
+}
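+
+/*
+ * A hedged reading of the return value: the two bits mirror the D+/D-
+ * line state seen on the GPIOs, so 0x2 (USBRP/D+ high) looks like
+ * full-speed idle, 0x1 (USBRN/D- high) like low-speed idle, 0x0 like
+ * SE0, and -1 means the controller is currently driving the bus.
+ */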
+
+static void fhci_mem_free(struct fhci_hcd *fhci)
+{
+	struct ed *ed;
+	struct ed *next_ed;
+	struct td *td;
+	struct td *next_td;
+
+	list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
+		list_del(&ed->node);
+		kfree(ed);
+	}
+
+	list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
+		list_del(&td->node);
+		kfree(td);
+	}
+
+	kfree(fhci->vroot_hub);
+	fhci->vroot_hub = NULL;
+
+	kfree(fhci->hc_list);
+	fhci->hc_list = NULL;
+}
+
+static int fhci_mem_init(struct fhci_hcd *fhci)
+{
+	int i;
+
+	fhci->hc_list = kzalloc(sizeof(*fhci->hc_list), GFP_KERNEL);
+	if (!fhci->hc_list)
+		goto err;
+
+	INIT_LIST_HEAD(&fhci->hc_list->ctrl_list);
+	INIT_LIST_HEAD(&fhci->hc_list->bulk_list);
+	INIT_LIST_HEAD(&fhci->hc_list->iso_list);
+	INIT_LIST_HEAD(&fhci->hc_list->intr_list);
+	INIT_LIST_HEAD(&fhci->hc_list->done_list);
+
+	fhci->vroot_hub = kzalloc(sizeof(*fhci->vroot_hub), GFP_KERNEL);
+	if (!fhci->vroot_hub)
+		goto err;
+
+	INIT_LIST_HEAD(&fhci->empty_eds);
+	INIT_LIST_HEAD(&fhci->empty_tds);
+
+	/* initialize the tasklet that handles the done list */
+	fhci_tasklet.data = (unsigned long)fhci;
+	fhci->process_done_task = &fhci_tasklet;
+
+	for (i = 0; i < MAX_TDS; i++) {
+		struct td *td;
+
+		td = kmalloc(sizeof(*td), GFP_KERNEL);
+		if (!td)
+			goto err;
+		fhci_recycle_empty_td(fhci, td);
+	}
+	for (i = 0; i < MAX_EDS; i++) {
+		struct ed *ed;
+
+		ed = kmalloc(sizeof(*ed), GFP_KERNEL);
+		if (!ed)
+			goto err;
+		fhci_recycle_empty_ed(fhci, ed);
+	}
+
+	fhci->active_urbs = 0;
+	return 0;
+err:
+	fhci_mem_free(fhci);
+	return -ENOMEM;
+}
+
+/* destroy the fhci_usb structure */
+static void fhci_usb_free(void *lld)
+{
+	struct fhci_usb *usb = lld;
+	struct fhci_hcd *fhci;
+
+	if (usb) {
+		fhci = usb->fhci;
+		fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
+		fhci_ep0_free(usb);
+		kfree(usb->actual_frame);
+		kfree(usb);
+	}
+}
+
+/* initialize the USB */
+static int fhci_usb_init(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	memset_io(usb->fhci->pram, 0, FHCI_PRAM_SIZE);
+
+	usb->port_status = FHCI_PORT_DISABLED;
+	usb->max_frame_usage = FRAME_TIME_USAGE;
+	usb->sw_transaction_time = SW_FIX_TIME_BETWEEN_TRANSACTION;
+
+	usb->actual_frame = kzalloc(sizeof(*usb->actual_frame), GFP_KERNEL);
+	if (!usb->actual_frame) {
+		fhci_usb_free(usb);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&usb->actual_frame->tds_list);
+
+	/* initializing registers on chip, clear frame number */
+	out_be16(&fhci->pram->frame_num, 0);
+
+	/* clear rx state */
+	out_be32(&fhci->pram->rx_state, 0);
+
+	/* set mask register */
+	usb->saved_msk = (USB_E_TXB_MASK |
+			  USB_E_TXE1_MASK |
+			  USB_E_IDLE_MASK |
+			  USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);
+
+	out_8(&usb->fhci->regs->usb_usmod, USB_MODE_HOST | USB_MODE_EN);
+
+	/* clearing the mask register */
+	out_be16(&usb->fhci->regs->usb_usbmr, 0);
+
+	/* initializing the event register */
+	out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+
+	if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
+		fhci_usb_free(usb);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* initialize the fhci_usb struct and the corresponding data structures */
+static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb;
+
+	/* allocate memory for SCC data structure */
+	usb = kzalloc(sizeof(*usb), GFP_KERNEL);
+	if (!usb)
+		return NULL;
+
+	usb->fhci = fhci;
+	usb->hc_list = fhci->hc_list;
+	usb->vroot_hub = fhci->vroot_hub;
+
+	usb->transfer_confirm = fhci_transfer_confirm_callback;
+
+	return usb;
+}
+
+static int fhci_start(struct usb_hcd *hcd)
+{
+	int ret;
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+
+	ret = fhci_mem_init(fhci);
+	if (ret) {
+		fhci_err(fhci, "failed to allocate memory\n");
+		goto err;
+	}
+
+	fhci->usb_lld = fhci_create_lld(fhci);
+	if (!fhci->usb_lld) {
+		fhci_err(fhci, "low level driver config failed\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = fhci_usb_init(fhci);
+	if (ret) {
+		fhci_err(fhci, "low level driver initialize failed\n");
+		goto err;
+	}
+
+	spin_lock_init(&fhci->lock);
+
+	/* connect the virtual root hub */
+	fhci->vroot_hub->dev_num = 1;	/* this field may need to be fixed */
+	fhci->vroot_hub->hub.wHubStatus = 0;
+	fhci->vroot_hub->hub.wHubChange = 0;
+	fhci->vroot_hub->port.wPortStatus = 0;
+	fhci->vroot_hub->port.wPortChange = 0;
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/*
+	 * From here on, hub_wq concurrently accesses the root
+	 * hub; drivers will be talking to enumerated devices.
+	 * (On restart paths, hub_wq already knows about the root
+	 * hub and could find work as soon as we wrote FLAG_CF.)
+	 *
+	 * Before this point the HC was idle/ready.  After, hub_wq
+	 * and device drivers may start it running.
+	 */
+	fhci_usb_enable(fhci);
+	return 0;
+err:
+	fhci_mem_free(fhci);
+	return ret;
+}
+
+static void fhci_stop(struct usb_hcd *hcd)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+
+	fhci_usb_disable_interrupt(fhci->usb_lld);
+	fhci_usb_disable(fhci);
+
+	fhci_usb_free(fhci->usb_lld);
+	fhci->usb_lld = NULL;
+	fhci_mem_free(fhci);
+}
+
+static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+			    gfp_t mem_flags)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	u32 pipe = urb->pipe;
+	int ret;
+	int i;
+	int size = 0;
+	struct urb_priv *urb_priv;
+	unsigned long flags;
+
+	switch (usb_pipetype(pipe)) {
+	case PIPE_CONTROL:
+		/* 1 td for setup, 1 for ack */
+		size = 2;
+		/* fall through */
+	case PIPE_BULK:
+		/* one td for every 4096 bytes (can be up to 8k) */
+		size += urb->transfer_buffer_length / 4096;
+		/* ...add for any remaining bytes... */
+		if ((urb->transfer_buffer_length % 4096) != 0)
+			size++;
+		/* ...and maybe a zero length packet to wrap it up */
+		if (size == 0)
+			size++;
+		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
+			 && (urb->transfer_buffer_length
+			     % usb_maxpacket(urb->dev, pipe,
+					     usb_pipeout(pipe))) != 0)
+			size++;
+		break;
+	case PIPE_ISOCHRONOUS:
+		size = urb->number_of_packets;
+		if (size <= 0)
+			return -EINVAL;
+		for (i = 0; i < urb->number_of_packets; i++) {
+			urb->iso_frame_desc[i].actual_length = 0;
+			urb->iso_frame_desc[i].status = (u32) (-EXDEV);
+		}
+		break;
+	case PIPE_INTERRUPT:
+		size = 1;
+	}
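+
+	/*
+	 * Worked example: a 10000-byte bulk OUT with URB_ZERO_PACKET and a
+	 * 64-byte maxpacket gets 10000 / 4096 = 2 TDs, plus one for the
+	 * 1808-byte remainder, plus one zero-length TD: size = 4.
+	 */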
+
+	/* allocate the private part of the URB */
+	urb_priv = kzalloc(sizeof(*urb_priv), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	/* allocate the array of TDs for the URB's private part */
+	urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
+	if (!urb_priv->tds) {
+		kfree(urb_priv);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&fhci->lock, flags);
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto err;
+
+	/* fill the private part of the URB */
+	urb_priv->num_of_tds = size;
+
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->error_count = 0;
+	urb->hcpriv = urb_priv;
+
+	fhci_queue_urb(fhci, urb);
+err:
+	if (ret) {
+		kfree(urb_priv->tds);
+		kfree(urb_priv);
+	}
+	spin_unlock_irqrestore(&fhci->lock, flags);
+	return ret;
+}
+
+/* dequeue FHCI URB */
+static int fhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	struct fhci_usb *usb = fhci->usb_lld;
+	int ret = -EINVAL;
+	unsigned long flags;
+
+	if (!urb || !urb->dev || !urb->dev->bus)
+		goto out;
+
+	spin_lock_irqsave(&fhci->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto out2;
+
+	if (usb->port_status != FHCI_PORT_DISABLED) {
+		struct urb_priv *urb_priv;
+
+		/*
+		 * flag the urb's data for deletion in an upcoming
+		 * SOF interrupt's delete-list processing
+		 */
+		urb_priv = urb->hcpriv;
+
+		if (!urb_priv || (urb_priv->state == URB_DEL))
+			goto out2;
+
+		urb_priv->state = URB_DEL;
+
+		/* already pending? */
+		urb_priv->ed->state = FHCI_ED_URB_DEL;
+	} else {
+		fhci_urb_complete_free(fhci, urb);
+	}
+
+out2:
+	spin_unlock_irqrestore(&fhci->lock, flags);
+out:
+	return ret;
+}
+
+static void fhci_endpoint_disable(struct usb_hcd *hcd,
+				  struct usb_host_endpoint *ep)
+{
+	struct fhci_hcd *fhci;
+	struct ed *ed;
+	unsigned long flags;
+
+	fhci = hcd_to_fhci(hcd);
+	spin_lock_irqsave(&fhci->lock, flags);
+	ed = ep->hcpriv;
+	if (ed) {
+		while (ed->td_head != NULL) {
+			struct td *td = fhci_remove_td_from_ed(ed);
+			fhci_urb_complete_free(fhci, td->urb);
+		}
+		fhci_recycle_empty_ed(fhci, ed);
+		ep->hcpriv = NULL;
+	}
+	spin_unlock_irqrestore(&fhci->lock, flags);
+}
+
+static int fhci_get_frame_number(struct usb_hcd *hcd)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+
+	return get_frame_num(fhci);
+}
+
+static const struct hc_driver fhci_driver = {
+	.description = "fsl,usb-fhci",
+	.product_desc = "FHCI HOST Controller",
+	.hcd_priv_size = sizeof(struct fhci_hcd),
+
+	/* generic hardware linkage */
+	.irq = fhci_irq,
+	.flags = HCD_USB11 | HCD_MEMORY,
+
+	/* basic lifecycle operation */
+	.start = fhci_start,
+	.stop = fhci_stop,
+
+	/* managing i/o requests and associated device resources */
+	.urb_enqueue = fhci_urb_enqueue,
+	.urb_dequeue = fhci_urb_dequeue,
+	.endpoint_disable = fhci_endpoint_disable,
+
+	/* scheduling support */
+	.get_frame_number = fhci_get_frame_number,
+
+	/* root hub support */
+	.hub_status_data = fhci_hub_status_data,
+	.hub_control = fhci_hub_control,
+};
+
+static int of_fhci_probe(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct device_node *node = dev->of_node;
+	struct usb_hcd *hcd;
+	struct fhci_hcd *fhci;
+	struct resource usb_regs;
+	unsigned long pram_addr;
+	unsigned int usb_irq;
+	const char *sprop;
+	const u32 *iprop;
+	int size;
+	int ret;
+	int i;
+	int j;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	sprop = of_get_property(node, "mode", NULL);
+	if (sprop && strcmp(sprop, "host"))
+		return -ENODEV;
+
+	hcd = usb_create_hcd(&fhci_driver, dev, dev_name(dev));
+	if (!hcd) {
+		dev_err(dev, "could not create hcd\n");
+		return -ENOMEM;
+	}
+
+	fhci = hcd_to_fhci(hcd);
+	hcd->self.controller = dev;
+	dev_set_drvdata(dev, hcd);
+
+	iprop = of_get_property(node, "hub-power-budget", &size);
+	if (iprop && size == sizeof(*iprop))
+		hcd->power_budget = *iprop;
+
+	/* FHCI registers. */
+	ret = of_address_to_resource(node, 0, &usb_regs);
+	if (ret) {
+		dev_err(dev, "could not get regs\n");
+		goto err_regs;
+	}
+
+	hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
+	if (!hcd->regs) {
+		dev_err(dev, "could not ioremap regs\n");
+		ret = -ENOMEM;
+		goto err_regs;
+	}
+	fhci->regs = hcd->regs;
+
+	/* Parameter RAM. */
+	iprop = of_get_property(node, "reg", &size);
+	if (!iprop || size < sizeof(*iprop) * 4) {
+		dev_err(dev, "can't get pram offset\n");
+		ret = -EINVAL;
+		goto err_pram;
+	}
+
+	pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
+	if (IS_ERR_VALUE(pram_addr)) {
+		dev_err(dev, "failed to allocate usb pram\n");
+		ret = -ENOMEM;
+		goto err_pram;
+	}
+
+	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
+		     QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
+	fhci->pram = cpm_muram_addr(pram_addr);
+
+	/* GPIOs and pins */
+	for (i = 0; i < NUM_GPIOS; i++) {
+		int gpio;
+		enum of_gpio_flags flags;
+
+		gpio = of_get_gpio_flags(node, i, &flags);
+		fhci->gpios[i] = gpio;
+		fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;
+
+		if (!gpio_is_valid(gpio)) {
+			if (i < GPIO_SPEED) {
+				dev_err(dev, "incorrect GPIO%d: %d\n",
+					i, gpio);
+				ret = -EINVAL;
+				goto err_gpios;
+			} else {
+				dev_info(dev, "assuming board doesn't have "
+					"%s gpio\n", i == GPIO_SPEED ?
+					"speed" : "power");
+				continue;
+			}
+		}
+
+		ret = gpio_request(gpio, dev_name(dev));
+		if (ret) {
+			dev_err(dev, "failed to request gpio %d", i);
+			goto err_gpios;
+		}
+
+		if (i >= GPIO_SPEED) {
+			ret = gpio_direction_output(gpio, 0);
+			if (ret) {
+				dev_err(dev, "failed to set gpio %d as "
+					"an output\n", i);
+				i++;
+				goto err_gpios;
+			}
+		}
+	}
+
+	for (j = 0; j < NUM_PINS; j++) {
+		fhci->pins[j] = qe_pin_request(node, j);
+		if (IS_ERR(fhci->pins[j])) {
+			ret = PTR_ERR(fhci->pins[j]);
+			dev_err(dev, "can't get pin %d: %d\n", j, ret);
+			goto err_pins;
+		}
+	}
+
+	/* Frame limit timer and its interrupt. */
+	fhci->timer = gtm_get_timer16();
+	if (IS_ERR(fhci->timer)) {
+		ret = PTR_ERR(fhci->timer);
+		dev_err(dev, "failed to request qe timer: %i", ret);
+		goto err_get_timer;
+	}
+
+	ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
+			  0, "qe timer (usb)", hcd);
+	if (ret) {
+		dev_err(dev, "failed to request timer irq");
+		goto err_timer_irq;
+	}
+
+	/* USB Host interrupt. */
+	usb_irq = irq_of_parse_and_map(node, 0);
+	if (usb_irq == NO_IRQ) {
+		dev_err(dev, "could not get usb irq\n");
+		ret = -EINVAL;
+		goto err_usb_irq;
+	}
+
+	/* Clocks. */
+	sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
+	if (sprop) {
+		fhci->fullspeed_clk = qe_clock_source(sprop);
+		if (fhci->fullspeed_clk == QE_CLK_DUMMY) {
+			dev_err(dev, "wrong fullspeed-clock\n");
+			ret = -EINVAL;
+			goto err_clocks;
+		}
+	}
+
+	sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
+	if (sprop) {
+		fhci->lowspeed_clk = qe_clock_source(sprop);
+		if (fhci->lowspeed_clk == QE_CLK_DUMMY) {
+			dev_err(dev, "wrong lowspeed-clock\n");
+			ret = -EINVAL;
+			goto err_clocks;
+		}
+	}
+
+	if (fhci->fullspeed_clk == QE_CLK_NONE &&
+			fhci->lowspeed_clk == QE_CLK_NONE) {
+		dev_err(dev, "no clocks specified\n");
+		ret = -EINVAL;
+		goto err_clocks;
+	}
+
+	dev_info(dev, "at 0x%p, irq %d\n", hcd->regs, usb_irq);
+
+	fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
+
+	/* Start with full-speed, if possible. */
+	if (fhci->fullspeed_clk != QE_CLK_NONE) {
+		fhci_config_transceiver(fhci, FHCI_PORT_FULL);
+		qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
+	} else {
+		fhci_config_transceiver(fhci, FHCI_PORT_LOW);
+		qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
+	}
+
+	/* Clear and disable any pending interrupts. */
+	out_be16(&fhci->regs->usb_usber, 0xffff);
+	out_be16(&fhci->regs->usb_usbmr, 0);
+
+	ret = usb_add_hcd(hcd, usb_irq, 0);
+	if (ret < 0)
+		goto err_add_hcd;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	fhci_dfs_create(fhci);
+
+	return 0;
+
+err_add_hcd:
+err_clocks:
+	irq_dispose_mapping(usb_irq);
+err_usb_irq:
+	free_irq(fhci->timer->irq, hcd);
+err_timer_irq:
+	gtm_put_timer16(fhci->timer);
+err_get_timer:
+err_pins:
+	while (--j >= 0)
+		qe_pin_free(fhci->pins[j]);
+err_gpios:
+	while (--i >= 0) {
+		if (gpio_is_valid(fhci->gpios[i]))
+			gpio_free(fhci->gpios[i]);
+	}
+	cpm_muram_free(pram_addr);
+err_pram:
+	iounmap(hcd->regs);
+err_regs:
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+static int fhci_remove(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	int i;
+	int j;
+
+	usb_remove_hcd(hcd);
+	free_irq(fhci->timer->irq, hcd);
+	gtm_put_timer16(fhci->timer);
+	cpm_muram_free(cpm_muram_offset(fhci->pram));
+	for (i = 0; i < NUM_GPIOS; i++) {
+		if (!gpio_is_valid(fhci->gpios[i]))
+			continue;
+		gpio_free(fhci->gpios[i]);
+	}
+	for (j = 0; j < NUM_PINS; j++)
+		qe_pin_free(fhci->pins[j]);
+	fhci_dfs_destroy(fhci);
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+static int of_fhci_remove(struct platform_device *ofdev)
+{
+	return fhci_remove(&ofdev->dev);
+}
+
+static const struct of_device_id of_fhci_match[] = {
+	{ .compatible = "fsl,mpc8323-qe-usb", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_fhci_match);
+
+static struct platform_driver of_fhci_driver = {
+	.driver = {
+		.name = "fsl,usb-fhci",
+		.of_match_table = of_fhci_match,
+	},
+	.probe		= of_fhci_probe,
+	.remove		= of_fhci_remove,
+};
+
+module_platform_driver(of_fhci_driver);
+
+MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
+MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
+	      "Jerry Huang <Chang-Ming.Huang@freescale.com>, "
+	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
new file mode 100644
index 0000000..c359dcd
--- /dev/null
+++ b/drivers/usb/host/fhci-hub.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/gpio.h>
+#include <soc/fsl/qe/qe.h>
+#include "fhci.h"
+
+/* virtual root hub specific descriptor */
+static u8 root_hub_des[] = {
+	0x09, /* bLength */
+	USB_DT_HUB, /* bDescriptorType: hub descriptor */
+	0x01, /* bNbrPorts */
+	HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM, /* wHubCharacteristics */
+	0x00, /* per-port power, no overcurrent */
+	0x01, /* bPwrOn2PwrGood: 2ms */
+	0x00, /* bHubContrCurrent: 0mA */
+	0x00, /* DeviceRemovable */
+	0xff, /* PortPwrCtrlMask */
+};
+
+static void fhci_gpio_set_value(struct fhci_hcd *fhci, int gpio_nr, bool on)
+{
+	int gpio = fhci->gpios[gpio_nr];
+	bool alow = fhci->alow_gpios[gpio_nr];
+
+	if (!gpio_is_valid(gpio))
+		return;
+
+	gpio_set_value(gpio, on ^ alow);
+	mdelay(5);
+}
+
+void fhci_config_transceiver(struct fhci_hcd *fhci,
+			     enum fhci_port_status status)
+{
+	fhci_dbg(fhci, "-> %s: %d\n", __func__, status);
+
+	switch (status) {
+	case FHCI_PORT_POWER_OFF:
+		fhci_gpio_set_value(fhci, GPIO_POWER, false);
+		break;
+	case FHCI_PORT_DISABLED:
+	case FHCI_PORT_WAITING:
+		fhci_gpio_set_value(fhci, GPIO_POWER, true);
+		break;
+	case FHCI_PORT_LOW:
+		fhci_gpio_set_value(fhci, GPIO_SPEED, false);
+		break;
+	case FHCI_PORT_FULL:
+		fhci_gpio_set_value(fhci, GPIO_SPEED, true);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	fhci_dbg(fhci, "<- %s: %d\n", __func__, status);
+}
+
+/* disable the USB port by clearing the EN bit in the USBMOD register */
+void fhci_port_disable(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = (struct fhci_usb *)fhci->usb_lld;
+	enum fhci_port_status port_status;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	fhci_stop_sof_timer(fhci);
+
+	fhci_flush_all_transmissions(usb);
+
+	fhci_usb_disable_interrupt((struct fhci_usb *)fhci->usb_lld);
+	port_status = usb->port_status;
+	usb->port_status = FHCI_PORT_DISABLED;
+
+	/* Enable IDLE since we want to know if something comes along */
+	usb->saved_msk |= USB_E_IDLE_MASK;
+	out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+
+	/* check if a new device was attached during the disconnection process */
+	if (port_status == FHCI_PORT_WAITING)
+		fhci_device_connected_interrupt(fhci);
+	usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_ENABLE;
+	usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
+	fhci_usb_enable_interrupt((struct fhci_usb *)fhci->usb_lld);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+/* enable the USB port by setting the EN bit in the USBMOD register */
+void fhci_port_enable(void *lld)
+{
+	struct fhci_usb *usb = (struct fhci_usb *)lld;
+	struct fhci_hcd *fhci = usb->fhci;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	fhci_config_transceiver(fhci, usb->port_status);
+
+	if ((usb->port_status != FHCI_PORT_FULL) &&
+			(usb->port_status != FHCI_PORT_LOW))
+		fhci_start_sof_timer(fhci);
+
+	usb->vroot_hub->port.wPortStatus |= USB_PORT_STAT_ENABLE;
+	usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_ENABLE;
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+void fhci_io_port_generate_reset(struct fhci_hcd *fhci)
+{
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	gpio_direction_output(fhci->gpios[GPIO_USBOE], 0);
+	gpio_direction_output(fhci->gpios[GPIO_USBTP], 0);
+	gpio_direction_output(fhci->gpios[GPIO_USBTN], 0);
+
+	mdelay(5);
+
+	qe_pin_set_dedicated(fhci->pins[PIN_USBOE]);
+	qe_pin_set_dedicated(fhci->pins[PIN_USBTP]);
+	qe_pin_set_dedicated(fhci->pins[PIN_USBTN]);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+/* generate the RESET condition on the bus */
+void fhci_port_reset(void *lld)
+{
+	struct fhci_usb *usb = (struct fhci_usb *)lld;
+	struct fhci_hcd *fhci = usb->fhci;
+	u8 mode;
+	u16 mask;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	fhci_stop_sof_timer(fhci);
+	/* disable the USB controller */
+	mode = in_8(&fhci->regs->usb_usmod);
+	out_8(&fhci->regs->usb_usmod, mode & (~USB_MODE_EN));
+
+	/* disable idle interrupts */
+	mask = in_be16(&fhci->regs->usb_usbmr);
+	out_be16(&fhci->regs->usb_usbmr, mask & (~USB_E_IDLE_MASK));
+
+	fhci_io_port_generate_reset(fhci);
+
+	/* enable interrupt on this endpoint */
+	out_be16(&fhci->regs->usb_usbmr, mask);
+
+	/* enable the USB controller */
+	mode = in_8(&fhci->regs->usb_usmod);
+	out_8(&fhci->regs->usb_usmod, mode | USB_MODE_EN);
+	fhci_start_sof_timer(fhci);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+int fhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	int ret = 0;
+	unsigned long flags;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	spin_lock_irqsave(&fhci->lock, flags);
+
+	if (fhci->vroot_hub->port.wPortChange & (USB_PORT_STAT_C_CONNECTION |
+			USB_PORT_STAT_C_ENABLE | USB_PORT_STAT_C_SUSPEND |
+			USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_OVERCURRENT)) {
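+		/* bit 0 of the change bitmap is the hub, bit 1 is port 1 */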
+		*buf = 1 << 1;
+		ret = 1;
+		fhci_dbg(fhci, "-- %s\n", __func__);
+	}
+
+	spin_unlock_irqrestore(&fhci->lock, flags);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+
+	return ret;
+}
+
+int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			    u16 wIndex, char *buf, u16 wLength)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	int retval = 0;
+	struct usb_hub_status *hub_status;
+	struct usb_port_status *port_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fhci->lock, flags);
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		fhci->vroot_hub->feature &= ~(1 << wValue);
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			fhci->vroot_hub->port.wPortStatus &=
+			    ~USB_PORT_STAT_ENABLE;
+			fhci_port_disable(fhci);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			fhci->vroot_hub->port.wPortChange &=
+			    ~USB_PORT_STAT_C_ENABLE;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			fhci->vroot_hub->port.wPortStatus &=
+			    ~USB_PORT_STAT_SUSPEND;
+			fhci_stop_sof_timer(fhci);
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			fhci->vroot_hub->port.wPortChange &=
+			    ~USB_PORT_STAT_C_SUSPEND;
+			break;
+		case USB_PORT_FEAT_POWER:
+			fhci->vroot_hub->port.wPortStatus &=
+			    ~USB_PORT_STAT_POWER;
+			fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			fhci->vroot_hub->port.wPortChange &=
+			    ~USB_PORT_STAT_C_CONNECTION;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			fhci->vroot_hub->port.wPortChange &=
+			    ~USB_PORT_STAT_C_OVERCURRENT;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			fhci->vroot_hub->port.wPortChange &=
+			    ~USB_PORT_STAT_C_RESET;
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case GetHubDescriptor:
+		memcpy(buf, root_hub_des, sizeof(root_hub_des));
+		break;
+	case GetHubStatus:
+		hub_status = (struct usb_hub_status *)buf;
+		hub_status->wHubStatus =
+		    cpu_to_le16(fhci->vroot_hub->hub.wHubStatus);
+		hub_status->wHubChange =
+		    cpu_to_le16(fhci->vroot_hub->hub.wHubChange);
+		break;
+	case GetPortStatus:
+		port_status = (struct usb_port_status *)buf;
+		port_status->wPortStatus =
+		    cpu_to_le16(fhci->vroot_hub->port.wPortStatus);
+		port_status->wPortChange =
+		    cpu_to_le16(fhci->vroot_hub->port.wPortChange);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		fhci->vroot_hub->feature |= (1 << wValue);
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			fhci->vroot_hub->port.wPortStatus |=
+			    USB_PORT_STAT_ENABLE;
+			fhci_port_enable(fhci->usb_lld);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			fhci->vroot_hub->port.wPortStatus |=
+			    USB_PORT_STAT_SUSPEND;
+			fhci_stop_sof_timer(fhci);
+			break;
+		case USB_PORT_FEAT_RESET:
+			fhci->vroot_hub->port.wPortStatus |=
+			    USB_PORT_STAT_RESET;
+			fhci_port_reset(fhci->usb_lld);
+			fhci->vroot_hub->port.wPortStatus |=
+			    USB_PORT_STAT_ENABLE;
+			fhci->vroot_hub->port.wPortStatus &=
+			    ~USB_PORT_STAT_RESET;
+			break;
+		case USB_PORT_FEAT_POWER:
+			fhci->vroot_hub->port.wPortStatus |=
+			    USB_PORT_STAT_POWER;
+			fhci_config_transceiver(fhci, FHCI_PORT_WAITING);
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+error:
+		retval = -EPIPE;
+	}
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+
+	spin_unlock_irqrestore(&fhci->lock, flags);
+
+	return retval;
+}
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
new file mode 100644
index 0000000..658aedc
--- /dev/null
+++ b/drivers/usb/host/fhci-mem.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "fhci.h"
+
+static void init_td(struct td *td)
+{
+	memset(td, 0, sizeof(*td));
+	INIT_LIST_HEAD(&td->node);
+	INIT_LIST_HEAD(&td->frame_lh);
+}
+
+static void init_ed(struct ed *ed)
+{
+	memset(ed, 0, sizeof(*ed));
+	INIT_LIST_HEAD(&ed->td_list);
+	INIT_LIST_HEAD(&ed->node);
+}
+
+static struct td *get_empty_td(struct fhci_hcd *fhci)
+{
+	struct td *td;
+
+	if (!list_empty(&fhci->empty_tds)) {
+		td = list_entry(fhci->empty_tds.next, struct td, node);
+		list_del(fhci->empty_tds.next);
+	} else {
+		td = kmalloc(sizeof(*td), GFP_ATOMIC);
+		if (!td)
+			fhci_err(fhci, "No memory to allocate to TD\n");
+		else
+			init_td(td);
+	}
+
+	return td;
+}
+
+void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td)
+{
+	init_td(td);
+	list_add(&td->node, &fhci->empty_tds);
+}
+
+struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci)
+{
+	struct ed *ed;
+
+	if (!list_empty(&fhci->empty_eds)) {
+		ed = list_entry(fhci->empty_eds.next, struct ed, node);
+		list_del(fhci->empty_eds.next);
+	} else {
+		ed = kmalloc(sizeof(*ed), GFP_ATOMIC);
+		if (!ed)
+			fhci_err(fhci, "No memory to allocate to ED\n");
+		else
+			init_ed(ed);
+	}
+
+	return ed;
+}
+
+void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed)
+{
+	init_ed(ed);
+	list_add(&ed->node, &fhci->empty_eds);
+}
+
+struct td *fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb,
+			struct urb_priv *urb_priv, struct ed *ed, u16 index,
+			enum fhci_ta_type type, int toggle, u8 *data, u32 len,
+			u16 interval, u16 start_frame, bool ioc)
+{
+	struct td *td = get_empty_td(fhci);
+
+	if (!td)
+		return NULL;
+
+	td->urb = urb;
+	td->ed = ed;
+	td->type = type;
+	td->toggle = toggle;
+	td->data = data;
+	td->len = len;
+	td->iso_index = index;
+	td->interval = interval;
+	td->start_frame = start_frame;
+	td->ioc = ioc;
+	td->status = USB_TD_OK;
+
+	urb_priv->tds[index] = td;
+
+	return td;
+}
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
new file mode 100644
index 0000000..669c240
--- /dev/null
+++ b/drivers/usb/host/fhci-q.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "fhci.h"
+
+/* maps the hardware error code to the USB error code */
+static int status_to_error(u32 status)
+{
+	if (status == USB_TD_OK)
+		return 0;
+	else if (status & USB_TD_RX_ER_CRC)
+		return -EILSEQ;
+	else if (status & USB_TD_RX_ER_NONOCT)
+		return -EPROTO;
+	else if (status & USB_TD_RX_ER_OVERUN)
+		return -ECOMM;
+	else if (status & USB_TD_RX_ER_BITSTUFF)
+		return -EPROTO;
+	else if (status & USB_TD_RX_ER_PID)
+		return -EILSEQ;
+	else if (status & (USB_TD_TX_ER_NAK | USB_TD_TX_ER_TIMEOUT))
+		return -ETIMEDOUT;
+	else if (status & USB_TD_TX_ER_STALL)
+		return -EPIPE;
+	else if (status & USB_TD_TX_ER_UNDERUN)
+		return -ENOSR;
+	else if (status & USB_TD_RX_DATA_UNDERUN)
+		return -EREMOTEIO;
+	else if (status & USB_TD_RX_DATA_OVERUN)
+		return -EOVERFLOW;
+	else
+		return -EINVAL;
+}
+
+void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td)
+{
+	list_add_tail(&td->frame_lh, &frame->tds_list);
+}
+
+void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number)
+{
+	int i;
+
+	for (i = 0; i < number; i++) {
+		struct td *td = td_list[i];
+		list_add_tail(&td->node, &ed->td_list);
+	}
+	if (ed->td_head == NULL)
+		ed->td_head = td_list[0];
+}
+
+static struct td *peek_td_from_ed(struct ed *ed)
+{
+	struct td *td;
+
+	if (!list_empty(&ed->td_list))
+		td = list_entry(ed->td_list.next, struct td, node);
+	else
+		td = NULL;
+
+	return td;
+}
+
+struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame)
+{
+	struct td *td;
+
+	if (!list_empty(&frame->tds_list)) {
+		td = list_entry(frame->tds_list.next, struct td, frame_lh);
+		list_del_init(frame->tds_list.next);
+	} else
+		td = NULL;
+
+	return td;
+}
+
+struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame)
+{
+	struct td *td;
+
+	if (!list_empty(&frame->tds_list))
+		td = list_entry(frame->tds_list.next, struct td, frame_lh);
+	else
+		td = NULL;
+
+	return td;
+}
+
+struct td *fhci_remove_td_from_ed(struct ed *ed)
+{
+	struct td *td;
+
+	if (!list_empty(&ed->td_list)) {
+		td = list_entry(ed->td_list.next, struct td, node);
+		list_del_init(ed->td_list.next);
+
+		/* if this TD was the ED's head, find next TD */
+		if (!list_empty(&ed->td_list))
+			ed->td_head = list_entry(ed->td_list.next, struct td,
+						 node);
+		else
+			ed->td_head = NULL;
+	} else
+		td = NULL;
+
+	return td;
+}
+
+struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list)
+{
+	struct td *td;
+
+	if (!list_empty(&p_list->done_list)) {
+		td = list_entry(p_list->done_list.next, struct td, node);
+		list_del_init(p_list->done_list.next);
+	} else
+		td = NULL;
+
+	return td;
+}
+
+void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed)
+{
+	struct td *td;
+
+	td = ed->td_head;
+	list_del_init(&td->node);
+
+	/* If this TD was the ED's head, find the next TD */
+	if (!list_empty(&ed->td_list))
+		ed->td_head = list_entry(ed->td_list.next, struct td, node);
+	else {
+		ed->td_head = NULL;
+		ed->state = FHCI_ED_SKIP;
+	}
+	ed->toggle_carry = td->toggle;
+	list_add_tail(&td->node, &usb->hc_list->done_list);
+	if (td->ioc)
+		usb->transfer_confirm(usb->fhci);
+}
+
+/* free done FHCI URB resource such as ED and TD */
+static void free_urb_priv(struct fhci_hcd *fhci, struct urb *urb)
+{
+	int i;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	struct ed *ed = urb_priv->ed;
+
+	for (i = 0; i < urb_priv->num_of_tds; i++) {
+		list_del_init(&urb_priv->tds[i]->node);
+		fhci_recycle_empty_td(fhci, urb_priv->tds[i]);
+	}
+
+	/* if this TD was the ED's head, find the next TD */
+	if (!list_empty(&ed->td_list))
+		ed->td_head = list_entry(ed->td_list.next, struct td, node);
+	else
+		ed->td_head = NULL;
+
+	kfree(urb_priv->tds);
+	kfree(urb_priv);
+	urb->hcpriv = NULL;
+
+	/* if the ED has no TDs left, remove it from its schedule list */
+	if (ed->td_head == NULL)
+		list_del_init(&ed->node);
+	fhci->active_urbs--;
+}
+
+/* this routine called to complete and free done URB */
+void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb)
+{
+	free_urb_priv(fhci, urb);
+
+	if (urb->status == -EINPROGRESS) {
+		if (urb->actual_length != urb->transfer_buffer_length &&
+				urb->transfer_flags & URB_SHORT_NOT_OK)
+			urb->status = -EREMOTEIO;
+		else
+			urb->status = 0;
+	}
+
+	usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);
+
+	spin_unlock(&fhci->lock);
+
+	usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, urb->status);
+
+	spin_lock(&fhci->lock);
+}
+
+/*
+ * calculate transfer length/stats and update the urb
+ * Precondition: irqsafe (only for urb->status locking)
+ */
+void fhci_done_td(struct urb *urb, struct td *td)
+{
+	struct ed *ed = td->ed;
+	u32 cc = td->status;
+
+	/* ISO...drivers see per-TD length/status */
+	if (ed->mode == FHCI_TF_ISO) {
+		u32 len;
+		if (!(urb->transfer_flags & URB_SHORT_NOT_OK &&
+				cc == USB_TD_RX_DATA_UNDERUN))
+			cc = USB_TD_OK;
+
+		if (usb_pipeout(urb->pipe))
+			len = urb->iso_frame_desc[td->iso_index].length;
+		else
+			len = td->actual_len;
+
+		urb->actual_length += len;
+		urb->iso_frame_desc[td->iso_index].actual_length = len;
+		urb->iso_frame_desc[td->iso_index].status =
+			status_to_error(cc);
+	}
+
+	/* BULK, INT, CONTROL... drivers see aggregate length/status,
+	 * except that "setup" bytes aren't counted and "short" transfers
+	 * might not be reported as errors.
+	 */
+	else {
+		if (td->error_cnt >= 3)
+			urb->error_count = 3;
+
+		/* control endpoint only have soft stalls */
+
+		/* update packet status if needed(short may be ok) */
+		if (!(urb->transfer_flags & URB_SHORT_NOT_OK) &&
+				cc == USB_TD_RX_DATA_UNDERUN) {
+			ed->state = FHCI_ED_OPER;
+			cc = USB_TD_OK;
+		}
+		if (cc != USB_TD_OK) {
+			if (urb->status == -EINPROGRESS)
+				urb->status = status_to_error(cc);
+		}
+
+		/* count all non-empty packets except control SETUP packet */
+		if (td->type != FHCI_TA_SETUP || td->iso_index != 0)
+			urb->actual_length += td->actual_len;
+	}
+}
+
+/* there are some pending requests to unlink */
+void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed)
+{
+	struct td *td = peek_td_from_ed(ed);
+	struct urb *urb = td->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	if (urb_priv->state == URB_DEL) {
+		td = fhci_remove_td_from_ed(ed);
+		/* HC may have partly processed this TD */
+		if (td->status != USB_TD_INPROGRESS)
+			fhci_done_td(urb, td);
+
+		/* URB is done; clean up */
+		if (++(urb_priv->tds_cnt) == urb_priv->num_of_tds)
+			fhci_urb_complete_free(fhci, urb);
+	}
+}
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
new file mode 100644
index 0000000..3d12cdd
--- /dev/null
+++ b/drivers/usb/host/fhci-sched.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006, 2011.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/fsl_gtm.h>
+#include "fhci.h"
+
+static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
+{
+	pkt->data = NULL;
+	pkt->len = 0;
+	pkt->status = USB_TD_OK;
+	pkt->info = 0;
+	pkt->priv_data = NULL;
+
+	cq_put(&usb->ep0->empty_frame_Q, pkt);
+}
+
+/* confirm submitted packet */
+void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
+{
+	struct td *td;
+	struct packet *td_pkt;
+	struct ed *ed;
+	u32 trans_len;
+	bool td_done = false;
+
+	td = fhci_remove_td_from_frame(usb->actual_frame);
+	td_pkt = td->pkt;
+	trans_len = pkt->len;
+	td->status = pkt->status;
+	if (td->type == FHCI_TA_IN && td_pkt->info & PKT_DUMMY_PACKET) {
+		if ((td->data + td->actual_len) && trans_len)
+			memcpy(td->data + td->actual_len, pkt->data,
+			       trans_len);
+		cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
+	}
+
+	recycle_frame(usb, pkt);
+
+	ed = td->ed;
+	if (ed->mode == FHCI_TF_ISO) {
+		if (ed->td_list.next->next != &ed->td_list) {
+			struct td *td_next =
+			    list_entry(ed->td_list.next->next, struct td,
+				       node);
+
+			td_next->start_frame = usb->actual_frame->frame_num;
+		}
+		td->actual_len = trans_len;
+		td_done = true;
+	} else if ((td->status & USB_TD_ERROR) &&
+			!(td->status & USB_TD_TX_ER_NAK)) {
+		/*
+		 * There was an error on the transaction (but not NAK).
+		 * If it is a fatal error (data underrun, stall, bad pid or 3
+		 * errors exceeded), mark this TD as done.
+		 */
+		if ((td->status & USB_TD_RX_DATA_UNDERUN) ||
+				(td->status & USB_TD_TX_ER_STALL) ||
+				(td->status & USB_TD_RX_ER_PID) ||
+				(++td->error_cnt >= 3)) {
+			ed->state = FHCI_ED_HALTED;
+			td_done = true;
+
+			if (td->status & USB_TD_RX_DATA_UNDERUN) {
+				fhci_dbg(usb->fhci, "td err fu\n");
+				td->toggle = !td->toggle;
+				td->actual_len += trans_len;
+			} else {
+				fhci_dbg(usb->fhci, "td err f!u\n");
+			}
+		} else {
+			fhci_dbg(usb->fhci, "td err !f\n");
+			/* it is not a fatal error - retry this transaction */
+			td->nak_cnt = 0;
+			td->error_cnt++;
+			td->status = USB_TD_OK;
+		}
+	} else if (td->status & USB_TD_TX_ER_NAK) {
+		/* there was a NAK response */
+		fhci_vdbg(usb->fhci, "td nack\n");
+		td->nak_cnt++;
+		td->error_cnt = 0;
+		td->status = USB_TD_OK;
+	} else {
+		/* there was no error on transaction */
+		td->error_cnt = 0;
+		td->nak_cnt = 0;
+		td->toggle = !td->toggle;
+		td->actual_len += trans_len;
+
+		if (td->len == td->actual_len)
+			td_done = true;
+	}
+
+	if (td_done)
+		fhci_move_td_from_ed_to_done_list(usb, ed);
+}
+
+/*
+ * Flush all transmitted packets from BDs
+ * This routine is called when disabling the USB port to flush all
+ * transmissions that are already scheduled in the BDs
+ */
+void fhci_flush_all_transmissions(struct fhci_usb *usb)
+{
+	u8 mode;
+	struct td *td;
+
+	mode = in_8(&usb->fhci->regs->usb_usmod);
+	clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);
+
+	fhci_flush_bds(usb);
+
+	while ((td = fhci_peek_td_from_frame(usb->actual_frame)) != NULL) {
+		struct packet *pkt = td->pkt;
+
+		pkt->status = USB_TD_TX_ER_TIMEOUT;
+		fhci_transaction_confirm(usb, pkt);
+	}
+
+	usb->actual_frame->frame_status = FRAME_END_TRANSMISSION;
+
+	/* reset the event register */
+	out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+	/* enable the USB controller */
+	out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
+}
+
+/*
+ * This function forms the packet and transmits it. It handles all
+ * endpoint types: ISO, interrupt, control and bulk
+ */
+static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
+{
+	u32 fw_transaction_time, len = 0;
+	struct packet *pkt;
+	u8 *data = NULL;
+
+	/* calculate data address, len and toggle, and then add the transaction */
+	if (td->toggle == USB_TD_TOGGLE_CARRY)
+		td->toggle = ed->toggle_carry;
+
+	switch (ed->mode) {
+	case FHCI_TF_ISO:
+		len = td->len;
+		if (td->type != FHCI_TA_IN)
+			data = td->data;
+		break;
+	case FHCI_TF_CTRL:
+	case FHCI_TF_BULK:
+		len = min(td->len - td->actual_len, ed->max_pkt_size);
+		if (!((td->type == FHCI_TA_IN) &&
+		      ((len + td->actual_len) == td->len)))
+			data = td->data + td->actual_len;
+		break;
+	case FHCI_TF_INTR:
+		len = min(td->len, ed->max_pkt_size);
+		if (!((td->type == FHCI_TA_IN) &&
+		      ((td->len + CRC_SIZE) >= ed->max_pkt_size)))
+			data = td->data;
+		break;
+	default:
+		break;
+	}
+
+	if (usb->port_status == FHCI_PORT_FULL)
+		fw_transaction_time = (((len + PROTOCOL_OVERHEAD) * 11) >> 4);
+	else
+		fw_transaction_time = ((len + PROTOCOL_OVERHEAD) * 6);
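+	/*
+	 * The constants above appear to estimate the wire time in SOF-timer
+	 * ticks (~1 us each, 1000 per frame): full speed carries 12 Mbit/s,
+	 * i.e. ~0.67 us per byte (11/16 = 0.6875 adds a little margin),
+	 * while low speed carries 1.5 Mbit/s, i.e. ~5.33 us per byte,
+	 * rounded up to 6.
+	 */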
+
+	/* check if there's enough space in this frame to submit this TD */
+	if (usb->actual_frame->total_bytes + len + PROTOCOL_OVERHEAD >=
+			usb->max_bytes_per_frame) {
+		fhci_vdbg(usb->fhci, "not enough space in this frame: "
+			  "%d %d %d\n", usb->actual_frame->total_bytes, len,
+			  usb->max_bytes_per_frame);
+		return -1;
+	}
+
+	/* check if there's enough time in this frame to submit this TD */
+	if (usb->actual_frame->frame_status != FRAME_IS_PREPARED &&
+	    (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION ||
+	     (fw_transaction_time + usb->sw_transaction_time >=
+	      1000 - fhci_get_sof_timer_count(usb)))) {
+		fhci_dbg(usb->fhci, "not enough time in this frame\n");
+		return -1;
+	}
+
+	/* update frame object fields before transmitting */
+	pkt = cq_get(&usb->ep0->empty_frame_Q);
+	if (!pkt) {
+		fhci_dbg(usb->fhci, "there is no empty frame\n");
+		return -1;
+	}
+	td->pkt = pkt;
+
+	pkt->info = 0;
+	if (data == NULL) {
+		data = cq_get(&usb->ep0->dummy_packets_Q);
+		BUG_ON(!data);
+		pkt->info = PKT_DUMMY_PACKET;
+	}
+	pkt->data = data;
+	pkt->len = len;
+	pkt->status = USB_TD_OK;
+	/* update TD status field before transmitting */
+	td->status = USB_TD_INPROGRESS;
+	/* update actual frame time object with the actual transmission */
+	usb->actual_frame->total_bytes += (len + PROTOCOL_OVERHEAD);
+	fhci_add_td_to_frame(usb->actual_frame, td);
+
+	if (usb->port_status != FHCI_PORT_FULL &&
+			usb->port_status != FHCI_PORT_LOW) {
+		pkt->status = USB_TD_TX_ER_TIMEOUT;
+		pkt->len = 0;
+		fhci_transaction_confirm(usb, pkt);
+	} else if (fhci_host_transaction(usb, pkt, td->type, ed->dev_addr,
+			ed->ep_addr, ed->mode, ed->speed, td->toggle)) {
+		/* remove TD from actual frame */
+		list_del_init(&td->frame_lh);
+		td->status = USB_TD_OK;
+		if (pkt->info & PKT_DUMMY_PACKET)
+			cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
+		recycle_frame(usb, pkt);
+		usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
+		fhci_err(usb->fhci, "host transaction failed\n");
+		return -1;
+	}
+
+	return len;
+}
+
+static void move_head_to_tail(struct list_head *list)
+{
+	if (!list_empty(list))
+		list_move_tail(list->next, list);
+}
+
+/*
+ * This function goes through the endpoint list and schedules the
+ * transactions within this list
+ */
+static int scan_ed_list(struct fhci_usb *usb,
+			struct list_head *list, enum fhci_tf_mode list_type)
+{
+	static const int frame_part[4] = {
+		[FHCI_TF_CTRL] = MAX_BYTES_PER_FRAME,
+		[FHCI_TF_ISO] = (MAX_BYTES_PER_FRAME *
+				 MAX_PERIODIC_FRAME_USAGE) / 100,
+		[FHCI_TF_BULK] = MAX_BYTES_PER_FRAME,
+		[FHCI_TF_INTR] = (MAX_BYTES_PER_FRAME *
+				  MAX_PERIODIC_FRAME_USAGE) / 100
+	};
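+	/*
+	 * Per-frame byte budget for each list type: periodic transfers
+	 * (ISO and interrupt) are capped at MAX_PERIODIC_FRAME_USAGE
+	 * percent of the frame, while control and bulk may use the whole
+	 * frame.
+	 */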
+	struct ed *ed;
+	struct td *td;
+	int ans = 1;
+	u32 save_transaction_time = usb->sw_transaction_time;
+
+	list_for_each_entry(ed, list, node) {
+		td = ed->td_head;
+
+		if (!td || td->status == USB_TD_INPROGRESS)
+			continue;
+
+		if (ed->state != FHCI_ED_OPER) {
+			if (ed->state == FHCI_ED_URB_DEL) {
+				td->status = USB_TD_OK;
+				fhci_move_td_from_ed_to_done_list(usb, ed);
+				ed->state = FHCI_ED_SKIP;
+			}
+			continue;
+		}
+
+		/*
+		 * for interrupt and ISO pipes, skip this TD until its
+		 * polling interval has elapsed
+		 */
+		if ((list_type == FHCI_TF_INTR || list_type == FHCI_TF_ISO) &&
+				(((usb->actual_frame->frame_num -
+				   td->start_frame) & 0x7ff) < td->interval))
+			continue;
+
+		if (add_packet(usb, ed, td) < 0)
+			continue;
+
+		/* update time stamps in the TD */
+		td->start_frame = usb->actual_frame->frame_num;
+		usb->sw_transaction_time += save_transaction_time;
+
+		if (usb->actual_frame->total_bytes >=
+					usb->max_bytes_per_frame) {
+			usb->actual_frame->frame_status =
+				FRAME_DATA_END_TRANSMISSION;
+			fhci_push_dummy_bd(usb->ep0);
+			ans = 0;
+			break;
+		}
+
+		if (usb->actual_frame->total_bytes >= frame_part[list_type])
+			break;
+	}
+
+	/* be fair to each ED (move the list head around) */
+	move_head_to_tail(list);
+	usb->sw_transaction_time = save_transaction_time;
+
+	return ans;
+}
+
+static u32 rotate_frames(struct fhci_usb *usb)
+{
+	struct fhci_hcd *fhci = usb->fhci;
+
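+	/*
+	 * The frame counter is 11 bits wide, so all frame arithmetic is
+	 * done modulo 2048 (the 0x7ff masks); a prepared frame that has
+	 * drifted more than 5 frames behind the hardware counter is
+	 * flushed as stale.
+	 */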
+	if (!list_empty(&usb->actual_frame->tds_list)) {
+		if ((((in_be16(&fhci->pram->frame_num) & 0x07ff) -
+		      usb->actual_frame->frame_num) & 0x7ff) > 5)
+			fhci_flush_actual_frame(usb);
+		else
+			return -EINVAL;
+	}
+
+	usb->actual_frame->frame_status = FRAME_IS_PREPARED;
+	usb->actual_frame->frame_num = in_be16(&fhci->pram->frame_num) & 0x7ff;
+	usb->actual_frame->total_bytes = 0;
+
+	return 0;
+}
+
+/*
+ * This function schedules the USB transactions and processes the
+ * endpoints in the following order: ISO, interrupt, control and bulk.
+ */
+void fhci_schedule_transactions(struct fhci_usb *usb)
+{
+	int left = 1;
+
+	if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
+		if (rotate_frames(usb) != 0)
+			return;
+
+	if (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)
+		return;
+
+	if (usb->actual_frame->total_bytes == 0) {
+		/*
+		 * schedule the next available ISO transfer
+		 * or the next stage of the ISO transfer
+		 */
+		scan_ed_list(usb, &usb->hc_list->iso_list, FHCI_TF_ISO);
+
+		/*
+		 * schedule the next available interrupt transfer or
+		 * the next stage of the interrupt transfer
+		 */
+		scan_ed_list(usb, &usb->hc_list->intr_list, FHCI_TF_INTR);
+
+		/*
+		 * schedule the next available control transfer
+		 * or the next stage of the control transfer
+		 */
+		left = scan_ed_list(usb, &usb->hc_list->ctrl_list,
+				    FHCI_TF_CTRL);
+	}
+
+	/*
+	 * schedule the next available bulk transfer or the next stage of the
+	 * bulk transfer
+	 */
+	if (left > 0)
+		scan_ed_list(usb, &usb->hc_list->bulk_list, FHCI_TF_BULK);
+}
+
+/* Handles SOF interrupt */
+static void sof_interrupt(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	if ((usb->port_status == FHCI_PORT_DISABLED) &&
+	    (usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_CONNECTION) &&
+	    !(usb->vroot_hub->port.wPortChange & USB_PORT_STAT_C_CONNECTION)) {
+		if (usb->vroot_hub->port.wPortStatus & USB_PORT_STAT_LOW_SPEED)
+			usb->port_status = FHCI_PORT_LOW;
+		else
+			usb->port_status = FHCI_PORT_FULL;
+		/* Disable IDLE */
+		usb->saved_msk &= ~USB_E_IDLE_MASK;
+		out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+	}
+
+	gtm_set_exact_timer16(fhci->timer, usb->max_frame_usage, false);
+
+	fhci_host_transmit_actual_frame(usb);
+	usb->actual_frame->frame_status = FRAME_IS_TRANSMITTED;
+
+	fhci_schedule_transactions(usb);
+}
+
+/* Handles device disconnected interrupt on port */
+void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	fhci_usb_disable_interrupt(usb);
+	clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
+	usb->port_status = FHCI_PORT_DISABLED;
+
+	fhci_stop_sof_timer(fhci);
+
+	/* Enable IDLE since we want to know if something comes along */
+	usb->saved_msk |= USB_E_IDLE_MASK;
+	out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
+
+	usb->vroot_hub->port.wPortStatus &= ~USB_PORT_STAT_CONNECTION;
+	usb->vroot_hub->port.wPortChange |= USB_PORT_STAT_C_CONNECTION;
+	usb->max_bytes_per_frame = 0;
+	fhci_usb_enable_interrupt(usb);
+
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+/* detect a new device connected on the USB port */
+void fhci_device_connected_interrupt(struct fhci_hcd *fhci)
+{
+	struct fhci_usb *usb = fhci->usb_lld;
+	int state;
+	int ret;
+
+	fhci_dbg(fhci, "-> %s\n", __func__);
+
+	fhci_usb_disable_interrupt(usb);
+	state = fhci_ioports_check_bus_state(fhci);
+
+	/* low-speed device was connected to the USB port */
+	if (state == 1) {
+		ret = qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
+		if (ret) {
+			fhci_warn(fhci, "Low-Speed device is not supported, "
+				  "try use BRGx\n");
+			goto out;
+		}
+
+		usb->port_status = FHCI_PORT_LOW;
+		setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
+		usb->vroot_hub->port.wPortStatus |=
+		    (USB_PORT_STAT_LOW_SPEED |
+		     USB_PORT_STAT_CONNECTION);
+		usb->vroot_hub->port.wPortChange |=
+		    USB_PORT_STAT_C_CONNECTION;
+		usb->max_bytes_per_frame =
+		    (MAX_BYTES_PER_FRAME >> 3) - 7;
+		fhci_port_enable(usb);
+	} else if (state == 2) {
+		ret = qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
+		if (ret) {
+			fhci_warn(fhci, "Full-Speed device is not supported, "
+				  "try use CLKx\n");
+			goto out;
+		}
+
+		usb->port_status = FHCI_PORT_FULL;
+		clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_LSS);
+		usb->vroot_hub->port.wPortStatus &=
+		    ~USB_PORT_STAT_LOW_SPEED;
+		usb->vroot_hub->port.wPortStatus |=
+		    USB_PORT_STAT_CONNECTION;
+		usb->vroot_hub->port.wPortChange |=
+		    USB_PORT_STAT_C_CONNECTION;
+		usb->max_bytes_per_frame = (MAX_BYTES_PER_FRAME - 15);
+		fhci_port_enable(usb);
+	}
+out:
+	fhci_usb_enable_interrupt(usb);
+	fhci_dbg(fhci, "<- %s\n", __func__);
+}
+
+irqreturn_t fhci_frame_limit_timer_irq(int irq, void *_hcd)
+{
+	struct usb_hcd *hcd = _hcd;
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	struct fhci_usb *usb = fhci->usb_lld;
+
+	spin_lock(&fhci->lock);
+
+	gtm_set_exact_timer16(fhci->timer, 1000, false);
+
+	if (usb->actual_frame->frame_status == FRAME_IS_TRANSMITTED) {
+		usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
+		fhci_push_dummy_bd(usb->ep0);
+	}
+
+	fhci_schedule_transactions(usb);
+
+	spin_unlock(&fhci->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* Cancel transmission on the USB endpoint */
+static void abort_transmission(struct fhci_usb *usb)
+{
+	fhci_dbg(usb->fhci, "-> %s\n", __func__);
+	/* issue stop Tx command */
+	qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
+	/* flush Tx FIFOs */
+	out_8(&usb->fhci->regs->usb_uscom, USB_CMD_FLUSH_FIFO | EP_ZERO);
+	udelay(1000);
+	/* reset Tx BDs */
+	fhci_flush_bds(usb);
+	/* issue restart Tx command */
+	qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB, EP_ZERO, 0);
+	fhci_dbg(usb->fhci, "<- %s\n", __func__);
+}
+
+irqreturn_t fhci_irq(struct usb_hcd *hcd)
+{
+	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
+	struct fhci_usb *usb;
+	u16 usb_er = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fhci->lock, flags);
+
+	usb = fhci->usb_lld;
+
+	usb_er |= in_be16(&usb->fhci->regs->usb_usber) &
+		  in_be16(&usb->fhci->regs->usb_usbmr);
+
+	/* clear event bits for next time */
+	out_be16(&usb->fhci->regs->usb_usber, usb_er);
+
+	fhci_dbg_isr(fhci, usb_er);
+
+	if (usb_er & USB_E_RESET_MASK) {
+		if ((usb->port_status == FHCI_PORT_FULL) ||
+				(usb->port_status == FHCI_PORT_LOW)) {
+			fhci_device_disconnected_interrupt(fhci);
+			usb_er &= ~USB_E_IDLE_MASK;
+		} else if (usb->port_status == FHCI_PORT_WAITING) {
+			usb->port_status = FHCI_PORT_DISCONNECTING;
+
+			/* Turn on IDLE since we want to disconnect */
+			usb->saved_msk |= USB_E_IDLE_MASK;
+			out_be16(&usb->fhci->regs->usb_usbmr,
+				 usb->saved_msk);
+		} else if (usb->port_status == FHCI_PORT_DISABLED) {
+			if (fhci_ioports_check_bus_state(fhci) == 1)
+				fhci_device_connected_interrupt(fhci);
+		}
+		usb_er &= ~USB_E_RESET_MASK;
+	}
+
+	if (usb_er & USB_E_MSF_MASK) {
+		abort_transmission(fhci->usb_lld);
+		usb_er &= ~USB_E_MSF_MASK;
+	}
+
+	if (usb_er & (USB_E_SOF_MASK | USB_E_SFT_MASK)) {
+		sof_interrupt(fhci);
+		usb_er &= ~(USB_E_SOF_MASK | USB_E_SFT_MASK);
+	}
+
+	if (usb_er & USB_E_TXB_MASK) {
+		fhci_tx_conf_interrupt(fhci->usb_lld);
+		usb_er &= ~USB_E_TXB_MASK;
+	}
+
+	if (usb_er & USB_E_TXE1_MASK) {
+		fhci_tx_conf_interrupt(fhci->usb_lld);
+		usb_er &= ~USB_E_TXE1_MASK;
+	}
+
+	if (usb_er & USB_E_IDLE_MASK) {
+		if (usb->port_status == FHCI_PORT_DISABLED) {
+			usb_er &= ~USB_E_RESET_MASK;
+			fhci_device_connected_interrupt(fhci);
+		} else if (usb->port_status ==
+				FHCI_PORT_DISCONNECTING) {
+			/* XXX usb->port_status = FHCI_PORT_WAITING; */
+			/* Disable IDLE */
+			usb->saved_msk &= ~USB_E_IDLE_MASK;
+			out_be16(&usb->fhci->regs->usb_usbmr,
+				 usb->saved_msk);
+		} else {
+			fhci_dbg_isr(fhci, -1);
+		}
+
+		usb_er &= ~USB_E_IDLE_MASK;
+	}
+
+	spin_unlock_irqrestore(&fhci->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Process normal completions (error or success) and clean the schedule.
+ *
+ * This is the main path for handing URBs back to drivers. The only other
+ * path is process_del_list(), which unlinks URBs by scanning EDs, instead
+ * of scanning the (re-reversed) done list as this does.
+ */
+static void process_done_list(unsigned long data)
+{
+	struct urb *urb;
+	struct ed *ed;
+	struct td *td;
+	struct urb_priv *urb_priv;
+	struct fhci_hcd *fhci = (struct fhci_hcd *)data;
+
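+	/*
+	 * both interrupt sources that take fhci->lock are masked here, so
+	 * the lock can be taken without disabling local interrupts
+	 */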
+	disable_irq(fhci->timer->irq);
+	disable_irq(fhci_to_hcd(fhci)->irq);
+	spin_lock(&fhci->lock);
+
+	td = fhci_remove_td_from_done_list(fhci->hc_list);
+	while (td != NULL) {
+		urb = td->urb;
+		urb_priv = urb->hcpriv;
+		ed = td->ed;
+
+		/* update URB's length and status from TD */
+		fhci_done_td(urb, td);
+		urb_priv->tds_cnt++;
+
+		/*
+		 * if all this urb's TDs are done, call complete()
+		 * Interrupt transfers are the only special case:
+		 * they are reissued, until "deleted" by usb_unlink_urb
+		 * (real work done in a SOF intr, by process_del_list)
+		 */
+		if (urb_priv->tds_cnt == urb_priv->num_of_tds) {
+			fhci_urb_complete_free(fhci, urb);
+		} else if (urb_priv->state == URB_DEL &&
+				ed->state == FHCI_ED_SKIP) {
+			fhci_del_ed_list(fhci, ed);
+			ed->state = FHCI_ED_OPER;
+		} else if (ed->state == FHCI_ED_HALTED) {
+			urb_priv->state = URB_DEL;
+			ed->state = FHCI_ED_URB_DEL;
+			fhci_del_ed_list(fhci, ed);
+			ed->state = FHCI_ED_OPER;
+		}
+
+		td = fhci_remove_td_from_done_list(fhci->hc_list);
+	}
+
+	spin_unlock(&fhci->lock);
+	enable_irq(fhci->timer->irq);
+	enable_irq(fhci_to_hcd(fhci)->irq);
+}
+
+DECLARE_TASKLET(fhci_tasklet, process_done_list, 0);
+
+/* transfer completed callback */
+u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
+{
+	if (!fhci->process_done_task->state)
+		tasklet_schedule(fhci->process_done_task);
+	return 0;
+}
+
+/*
+ * adds urb to the endpoint descriptor list
+ * arguments:
+ * fhci		data structure for the Low level host controller
+ * urb		USB request block data structure
+ */
+void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
+{
+	struct ed *ed = urb->ep->hcpriv;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	u32 data_len = urb->transfer_buffer_length;
+	int urb_state = 0;
+	int toggle = 0;
+	struct td *td;
+	u8 *data;
+	u16 cnt = 0;
+
+	if (ed == NULL) {
+		ed = fhci_get_empty_ed(fhci);
+		ed->dev_addr = usb_pipedevice(urb->pipe);
+		ed->ep_addr = usb_pipeendpoint(urb->pipe);
+		switch (usb_pipetype(urb->pipe)) {
+		case PIPE_CONTROL:
+			ed->mode = FHCI_TF_CTRL;
+			break;
+		case PIPE_BULK:
+			ed->mode = FHCI_TF_BULK;
+			break;
+		case PIPE_INTERRUPT:
+			ed->mode = FHCI_TF_INTR;
+			break;
+		case PIPE_ISOCHRONOUS:
+			ed->mode = FHCI_TF_ISO;
+			break;
+		default:
+			break;
+		}
+		ed->speed = (urb->dev->speed == USB_SPEED_LOW) ?
+			FHCI_LOW_SPEED : FHCI_FULL_SPEED;
+		ed->max_pkt_size = usb_maxpacket(urb->dev,
+			urb->pipe, usb_pipeout(urb->pipe));
+		urb->ep->hcpriv = ed;
+		fhci_dbg(fhci, "new ep speed=%d max_pkt_size=%d\n",
+			 ed->speed, ed->max_pkt_size);
+	}
+
+	/* for ISO transfer calculate start frame index */
+	if (ed->mode == FHCI_TF_ISO) {
+		/* Ignore the possibility of underruns */
+		urb->start_frame = ed->td_head ? ed->next_iso :
+						 get_frame_num(fhci);
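+		/* frame numbers wrap at 2048, hence the 0x07ff mask below */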
+		ed->next_iso = (urb->start_frame + urb->interval *
+				urb->number_of_packets) & 0x07ff;
+	}
+
+	/*
+	 * OHCI handles the DATA toggle itself, we just use the USB
+	 * toggle bits
+	 */
+	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+			  usb_pipeout(urb->pipe)))
+		toggle = USB_TD_TOGGLE_CARRY;
+	else {
+		toggle = USB_TD_TOGGLE_DATA0;
+		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+			      usb_pipeout(urb->pipe), 1);
+	}
+
+	urb_priv->tds_cnt = 0;
+	urb_priv->ed = ed;
+	if (data_len > 0)
+		data = urb->transfer_buffer;
+	else
+		data = NULL;
+
+	switch (ed->mode) {
+	case FHCI_TF_BULK:
+		if (urb->transfer_flags & URB_ZERO_PACKET &&
+				urb->transfer_buffer_length > 0 &&
+				((urb->transfer_buffer_length %
+				usb_maxpacket(urb->dev, urb->pipe,
+				usb_pipeout(urb->pipe))) == 0))
+			urb_state = US_BULK0;
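+		/* split transfers larger than 4096 bytes into 4096-byte TDs */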
+		while (data_len > 4096) {
+			td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
+				usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
+							 FHCI_TA_IN,
+				cnt ? USB_TD_TOGGLE_CARRY :
+				      toggle,
+				data, 4096, 0, 0, true);
+			data += 4096;
+			data_len -= 4096;
+			cnt++;
+		}
+
+		td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
+			usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
+			cnt ? USB_TD_TOGGLE_CARRY : toggle,
+			data, data_len, 0, 0, true);
+		cnt++;
+
+		if (urb->transfer_flags & URB_ZERO_PACKET &&
+				cnt < urb_priv->num_of_tds) {
+			td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
+				usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
+							 FHCI_TA_IN,
+				USB_TD_TOGGLE_CARRY, NULL, 0, 0, 0, true);
+			cnt++;
+		}
+		break;
+	case FHCI_TF_INTR:
+		urb->start_frame = get_frame_num(fhci) + 1;
+		td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+			usb_pipeout(urb->pipe) ? FHCI_TA_OUT : FHCI_TA_IN,
+			USB_TD_TOGGLE_DATA0, data, data_len,
+			urb->interval, urb->start_frame, true);
+		break;
+	case FHCI_TF_CTRL:
+		ed->dev_addr = usb_pipedevice(urb->pipe);
+		ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
+			usb_pipeout(urb->pipe));
+		/* setup stage */
+		td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
+			USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
+
+		/* data stage */
+		if (data_len > 0) {
+			td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+				usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
+							 FHCI_TA_IN,
+				USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
+				true);
+		}
+
+		/* status stage */
+		if (data_len > 0)
+			td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+				(usb_pipeout(urb->pipe) ? FHCI_TA_IN :
+							  FHCI_TA_OUT),
+				USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+		else
+			 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+				FHCI_TA_IN,
+				USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
+		urb_state = US_CTRL_SETUP;
+		break;
+	case FHCI_TF_ISO:
+		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+			u16 frame = urb->start_frame;
+
+			/*
+			 * FIXME scheduling should handle frame counter
+			 * roll-around ... exotic case (and OHCI has
+			 * a 2^16 iso range, vs other HCs max of 2^10)
+			 */
+			frame += cnt * urb->interval;
+			frame &= 0x07ff;
+			td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt,
+				usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
+							 FHCI_TA_IN,
+				USB_TD_TOGGLE_DATA0,
+				data + urb->iso_frame_desc[cnt].offset,
+				urb->iso_frame_desc[cnt].length,
+				urb->interval, frame, true);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * set the state of URB
+	 * control pipe: 3 states -- setup, data, status
+	 * interrupt and bulk pipe: 1 state -- data
+	 */
+	urb->pipe &= ~0x1f;
+	urb->pipe |= urb_state & 0x1f;
+
+	urb_priv->state = URB_INPROGRESS;
+
+	if (!ed->td_head) {
+		ed->state = FHCI_ED_OPER;
+		switch (ed->mode) {
+		case FHCI_TF_CTRL:
+			list_add(&ed->node, &fhci->hc_list->ctrl_list);
+			break;
+		case FHCI_TF_BULK:
+			list_add(&ed->node, &fhci->hc_list->bulk_list);
+			break;
+		case FHCI_TF_INTR:
+			list_add(&ed->node, &fhci->hc_list->intr_list);
+			break;
+		case FHCI_TF_ISO:
+			list_add(&ed->node, &fhci->hc_list->iso_list);
+			break;
+		default:
+			break;
+		}
+	}
+
+	fhci_add_tds_to_ed(ed, urb_priv->tds, urb_priv->num_of_tds);
+	fhci->active_urbs++;
+}
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
new file mode 100644
index 0000000..f3308ce
--- /dev/null
+++ b/drivers/usb/host/fhci-tds.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include "fhci.h"
+
+#define DUMMY_BD_BUFFER  0xdeadbeef
+#define DUMMY2_BD_BUFFER 0xbaadf00d
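+/*
+ * Magic buffer-pointer markers for dummy BDs: DUMMY_BD_BUFFER tags a
+ * dummy BD that is still pending, while DUMMY2_BD_BUFFER tags one that
+ * has already been accounted for, so the confirmation path silently
+ * skips it.
+ */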
+
+/* Transaction Descriptors bits */
+#define TD_R		0x8000 /* ready bit */
+#define TD_W		0x2000 /* wrap bit */
+#define TD_I		0x1000 /* interrupt on completion */
+#define TD_L		0x0800 /* last */
+#define TD_TC		0x0400 /* transmit CRC */
+#define TD_CNF		0x0200 /* CNF - Must be always 1 */
+#define TD_LSP		0x0100 /* Low-speed transaction */
+#define TD_PID		0x00c0 /* packet id */
+#define TD_RXER		0x0020 /* Rx error or not */
+
+#define TD_NAK		0x0010 /* No ack. */
+#define TD_STAL		0x0008 /* Stall received */
+#define TD_TO		0x0004 /* time out */
+#define TD_UN		0x0002 /* underrun */
+#define TD_NO		0x0010 /* Rx Non Octet Aligned Packet */
+#define TD_AB		0x0008 /* Frame Aborted */
+#define TD_CR		0x0004 /* CRC Error */
+#define TD_OV		0x0002 /* Overrun */
+#define TD_BOV		0x0001 /* Buffer Overrun */
+
+#define TD_ERRORS	(TD_NAK | TD_STAL | TD_TO | TD_UN | \
+			 TD_NO | TD_AB | TD_CR | TD_OV | TD_BOV)
+
+#define TD_PID_DATA0	0x0080 /* Data 0 toggle */
+#define TD_PID_DATA1	0x00c0 /* Data 1 toggle */
+#define TD_PID_TOGGLE	0x00c0 /* Data 0/1 toggle mask */
+
+#define TD_TOK_SETUP	0x0000
+#define TD_TOK_OUT	0x4000
+#define TD_TOK_IN	0x8000
+#define TD_ISO		0x1000
+#define TD_ENDP		0x0780
+#define TD_ADDR		0x007f
+
+#define TD_ENDP_SHIFT 7
+
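+/*
+ * Tx buffer descriptor layout as seen by the QE USB controller in
+ * multi-user RAM; all fields are big-endian.
+ */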
+struct usb_td {
+	__be16 status;
+	__be16 length;
+	__be32 buf_ptr;
+	__be16 extra;
+	__be16 reserved;
+};
+
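+/*
+ * Advance to the next TD in the ring; the wrap (TD_W) bit on the last
+ * descriptor sends us back to the ring base.
+ */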
+static struct usb_td __iomem *next_bd(struct usb_td __iomem *base,
+				      struct usb_td __iomem *td,
+				      u16 status)
+{
+	if (status & TD_W)
+		return base;
+	else
+		return ++td;
+}
+
+void fhci_push_dummy_bd(struct endpoint *ep)
+{
+	if (!ep->already_pushed_dummy_bd) {
+		u16 td_status = in_be16(&ep->empty_td->status);
+
+		out_be32(&ep->empty_td->buf_ptr, DUMMY_BD_BUFFER);
+		/* get the next TD in the ring */
+		ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
+		ep->already_pushed_dummy_bd = true;
+	}
+}
+
+/* destroy a USB endpoint */
+void fhci_ep0_free(struct fhci_usb *usb)
+{
+	struct endpoint *ep;
+	int size;
+
+	ep = usb->ep0;
+	if (ep) {
+		if (ep->td_base)
+			cpm_muram_free(cpm_muram_offset(ep->td_base));
+
+		if (kfifo_initialized(&ep->conf_frame_Q)) {
+			size = cq_howmany(&ep->conf_frame_Q);
+			for (; size; size--) {
+				struct packet *pkt = cq_get(&ep->conf_frame_Q);
+
+				kfree(pkt);
+			}
+			cq_delete(&ep->conf_frame_Q);
+		}
+
+		if (kfifo_initialized(&ep->empty_frame_Q)) {
+			size = cq_howmany(&ep->empty_frame_Q);
+			for (; size; size--) {
+				struct packet *pkt = cq_get(&ep->empty_frame_Q);
+
+				kfree(pkt);
+			}
+			cq_delete(&ep->empty_frame_Q);
+		}
+
+		if (kfifo_initialized(&ep->dummy_packets_Q)) {
+			size = cq_howmany(&ep->dummy_packets_Q);
+			for (; size; size--) {
+				u8 *buff = cq_get(&ep->dummy_packets_Q);
+
+				kfree(buff);
+			}
+			cq_delete(&ep->dummy_packets_Q);
+		}
+
+		kfree(ep);
+		usb->ep0 = NULL;
+	}
+}
+
+/*
+ * create the endpoint structure
+ *
+ * arguments:
+ * usb		A pointer to the data structure of the USB
+ * data_mem	The data memory partition (BUS)
+ * ring_len	TD ring length
+ */
+u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
+			   u32 ring_len)
+{
+	struct endpoint *ep;
+	struct usb_td __iomem *td;
+	unsigned long ep_offset;
+	char *err_for = "endpoint PRAM";
+	int ep_mem_size;
+	u32 i;
+
+	/* we need at least 3 TDs in the ring */
+	if (!(ring_len > 2)) {
+		fhci_err(usb->fhci, "illegal TD ring length parameters\n");
+		return -EINVAL;
+	}
+
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	ep_mem_size = ring_len * sizeof(*td) + sizeof(struct fhci_ep_pram);
+	ep_offset = cpm_muram_alloc(ep_mem_size, 32);
+	if (IS_ERR_VALUE(ep_offset))
+		goto err;
+	ep->td_base = cpm_muram_addr(ep_offset);
+
+	/* zero all queue pointers */
+	if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
+	    cq_new(&ep->empty_frame_Q, ring_len + 2) ||
+	    cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
+		err_for = "frame_queues";
+		goto err;
+	}
+
+	for (i = 0; i < (ring_len + 1); i++) {
+		struct packet *pkt;
+		u8 *buff;
+
+		pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+		if (!pkt) {
+			err_for = "frame";
+			goto err;
+		}
+
+		buff = kmalloc_array(1028, sizeof(*buff), GFP_KERNEL);
+		if (!buff) {
+			kfree(pkt);
+			err_for = "buffer";
+			goto err;
+		}
+		cq_put(&ep->empty_frame_Q, pkt);
+		cq_put(&ep->dummy_packets_Q, buff);
+	}
+
+	/* we put the endpoint parameter RAM right behind the TD ring */
+	ep->ep_pram_ptr = (void __iomem *)ep->td_base + sizeof(*td) * ring_len;
+
+	ep->conf_td = ep->td_base;
+	ep->empty_td = ep->td_base;
+
+	ep->already_pushed_dummy_bd = false;
+
+	/* initialize tds */
+	td = ep->td_base;
+	for (i = 0; i < ring_len; i++) {
+		out_be32(&td->buf_ptr, 0);
+		out_be16(&td->status, 0);
+		out_be16(&td->length, 0);
+		out_be16(&td->extra, 0);
+		td++;
+	}
+	td--;
+	out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
+	out_be16(&td->length, 0);
+
+	/* endpoint structure has been created */
+	usb->ep0 = ep;
+
+	return 0;
+err:
+	fhci_ep0_free(usb);
+	kfree(ep);
+	fhci_err(usb->fhci, "no memory for the %s\n", err_for);
+	return -ENOMEM;
+}
+
+/*
+ * initialize the endpoint registers according to the given parameters
+ *
+ * arguments:
+ * usb		A pointer to the data structure of the USB
+ * ep		A pointer to the endpoint structure
+ * data_mem	The data memory partition (BUS)
+ */
+void fhci_init_ep_registers(struct fhci_usb *usb, struct endpoint *ep,
+			    enum fhci_mem_alloc data_mem)
+{
+	u8 rt;
+
+	/* set the endpoint registers according to the endpoint */
+	out_be16(&usb->fhci->regs->usb_usep[0],
+		 USB_TRANS_CTR | USB_EP_MF | USB_EP_RTE);
+	out_be16(&usb->fhci->pram->ep_ptr[0],
+		 cpm_muram_offset(ep->ep_pram_ptr));
+
+	rt = (BUS_MODE_BO_BE | BUS_MODE_GBL);
+#ifdef MULTI_DATA_BUS
+	if (data_mem == MEM_SECONDARY)
+		rt |= BUS_MODE_DTB;
+#endif
+	out_8(&ep->ep_pram_ptr->rx_func_code, rt);
+	out_8(&ep->ep_pram_ptr->tx_func_code, rt);
+	out_be16(&ep->ep_pram_ptr->rx_buff_len, 1028);
+	out_be16(&ep->ep_pram_ptr->rx_base, 0);
+	out_be16(&ep->ep_pram_ptr->tx_base, cpm_muram_offset(ep->td_base));
+	out_be16(&ep->ep_pram_ptr->rx_bd_ptr, 0);
+	out_be16(&ep->ep_pram_ptr->tx_bd_ptr, cpm_muram_offset(ep->td_base));
+	out_be32(&ep->ep_pram_ptr->tx_state, 0);
+}
+
+/*
+ * Collect the submitted frames and inform the application about them
+ * It also prepares the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
+ * confirmation about the submitted frames. Otherwise, the routine is
+ * called from the interrupt service routine during the Tx interrupt.
+ * In that case the application is informed by calling the application
+ * specific 'fhci_transaction_confirm' routine
+ */
+static void fhci_td_transaction_confirm(struct fhci_usb *usb)
+{
+	struct endpoint *ep = usb->ep0;
+	struct packet *pkt;
+	struct usb_td __iomem *td;
+	u16 extra_data;
+	u16 td_status;
+	u16 td_length;
+	u32 buf;
+
+	/*
+	 * collect transmitted BDs from the chip. The routine clears all BDs
+	 * whose R bit is 0 and whose data buffer pointer is not NULL, that
+	 * is, BDs which point to an already transmitted data buffer
+	 */
+	while (1) {
+		td = ep->conf_td;
+		td_status = in_be16(&td->status);
+		td_length = in_be16(&td->length);
+		buf = in_be32(&td->buf_ptr);
+		extra_data = in_be16(&td->extra);
+
+		/* check if the TD is empty */
+		if (!(!(td_status & TD_R) && ((td_status & ~TD_W) || buf)))
+			break;
+		/* check if it is a dummy buffer */
+		else if ((buf == DUMMY_BD_BUFFER) && !(td_status & ~TD_W))
+			break;
+
+		/* mark TD as empty */
+		clrbits16(&td->status, ~TD_W);
+		out_be16(&td->length, 0);
+		out_be32(&td->buf_ptr, 0);
+		out_be16(&td->extra, 0);
+		/* advance the TD pointer */
+		ep->conf_td = next_bd(ep->td_base, ep->conf_td, td_status);
+
+		/* check if it is a dummy buffer(type2) */
+		if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
+			continue;
+
+		pkt = cq_get(&ep->conf_frame_Q);
+		if (!pkt)
+			fhci_err(usb->fhci, "no frame to confirm\n");
+
+		if (td_status & TD_ERRORS) {
+			if (td_status & TD_RXER) {
+				if (td_status & TD_CR)
+					pkt->status = USB_TD_RX_ER_CRC;
+				else if (td_status & TD_AB)
+					pkt->status = USB_TD_RX_ER_BITSTUFF;
+				else if (td_status & TD_OV)
+					pkt->status = USB_TD_RX_ER_OVERUN;
+				else if (td_status & TD_BOV)
+					pkt->status = USB_TD_RX_DATA_OVERUN;
+				else if (td_status & TD_NO)
+					pkt->status = USB_TD_RX_ER_NONOCT;
+				else
+					fhci_err(usb->fhci, "illegal error "
+						 "occurred\n");
+			} else if (td_status & TD_NAK)
+				pkt->status = USB_TD_TX_ER_NAK;
+			else if (td_status & TD_TO)
+				pkt->status = USB_TD_TX_ER_TIMEOUT;
+			else if (td_status & TD_UN)
+				pkt->status = USB_TD_TX_ER_UNDERUN;
+			else if (td_status & TD_STAL)
+				pkt->status = USB_TD_TX_ER_STALL;
+			else
+				fhci_err(usb->fhci, "illegal error occurred\n");
+		} else if ((extra_data & TD_TOK_IN) &&
+				pkt->len > td_length - CRC_SIZE) {
+			pkt->status = USB_TD_RX_DATA_UNDERUN;
+		}
+
+		if (extra_data & TD_TOK_IN)
+			pkt->len = td_length - CRC_SIZE;
+		else if (pkt->info & PKT_ZLP)
+			pkt->len = 0;
+		else
+			pkt->len = td_length;
+
+		fhci_transaction_confirm(usb, pkt);
+	}
+}
+
+/*
+ * Submitting a data frame to a specified endpoint of a USB device
+ * The frame is put in the driver's transmit queue for this endpoint
+ *
+ * Arguments:
+ * usb          A pointer to the USB structure
+ * pkt          A pointer to the user frame structure
+ * trans_type   Transaction type - IN, OUT or SETUP
+ * dest_addr    Device address - 0~127
+ * dest_ep      Endpoint number of the device - 0~15
+ * trans_mode   Pipe type - ISO, interrupt, bulk or control
+ * dest_speed   USB speed - low speed or full speed
+ * data_toggle  Data sequence toggle - 0 or 1
+ */
+u32 fhci_host_transaction(struct fhci_usb *usb,
+			  struct packet *pkt,
+			  enum fhci_ta_type trans_type,
+			  u8 dest_addr,
+			  u8 dest_ep,
+			  enum fhci_tf_mode trans_mode,
+			  enum fhci_speed dest_speed, u8 data_toggle)
+{
+	struct endpoint *ep = usb->ep0;
+	struct usb_td __iomem *td;
+	u16 extra_data;
+	u16 td_status;
+
+	fhci_usb_disable_interrupt(usb);
+	/* start from the next BD that should be filled */
+	td = ep->empty_td;
+	td_status = in_be16(&td->status);
+
+	if (td_status & TD_R && in_be16(&td->length)) {
+		/* if the TD is not free */
+		fhci_usb_enable_interrupt(usb);
+		return -1;
+	}
+
+	/* get the next TD in the ring */
+	ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
+	fhci_usb_enable_interrupt(usb);
+	pkt->priv_data = td;
+	out_be32(&td->buf_ptr, virt_to_phys(pkt->data));
+	/* sets up transaction parameters - addr, endp, dir and type */
+	extra_data = (dest_ep << TD_ENDP_SHIFT) | dest_addr;
+	switch (trans_type) {
+	case FHCI_TA_IN:
+		extra_data |= TD_TOK_IN;
+		break;
+	case FHCI_TA_OUT:
+		extra_data |= TD_TOK_OUT;
+		break;
+	case FHCI_TA_SETUP:
+		extra_data |= TD_TOK_SETUP;
+		break;
+	}
+	if (trans_mode == FHCI_TF_ISO)
+		extra_data |= TD_ISO;
+	out_be16(&td->extra, extra_data);
+
+	/* sets up the buffer descriptor */
+	td_status = ((td_status & TD_W) | TD_R | TD_L | TD_I | TD_CNF);
+	if (!(pkt->info & PKT_NO_CRC))
+		td_status |= TD_TC;
+
+	switch (trans_type) {
+	case FHCI_TA_IN:
+		if (data_toggle)
+			pkt->info |= PKT_PID_DATA1;
+		else
+			pkt->info |= PKT_PID_DATA0;
+		break;
+	default:
+		if (data_toggle) {
+			td_status |= TD_PID_DATA1;
+			pkt->info |= PKT_PID_DATA1;
+		} else {
+			td_status |= TD_PID_DATA0;
+			pkt->info |= PKT_PID_DATA0;
+		}
+		break;
+	}
+
+	if ((dest_speed == FHCI_LOW_SPEED) &&
+	    (usb->port_status == FHCI_PORT_FULL))
+		td_status |= TD_LSP;
+
+	out_be16(&td->status, td_status);
+
+	/* set up buffer length */
+	if (trans_type == FHCI_TA_IN)
+		out_be16(&td->length, pkt->len + CRC_SIZE);
+	else
+		out_be16(&td->length, pkt->len);
+
+	/* put the frame to the confirmation queue */
+	cq_put(&ep->conf_frame_Q, pkt);
+
+	if (cq_howmany(&ep->conf_frame_Q) == 1)
+		out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
+
+	return 0;
+}
+
+/* Reset the Tx BD ring */
+void fhci_flush_bds(struct fhci_usb *usb)
+{
+	u16 extra_data;
+	u16 td_status;
+	u32 buf;
+	struct usb_td __iomem *td;
+	struct endpoint *ep = usb->ep0;
+
+	td = ep->td_base;
+	while (1) {
+		td_status = in_be16(&td->status);
+		buf = in_be32(&td->buf_ptr);
+		extra_data = in_be16(&td->extra);
+
+		/* if the TD is not empty - we'll confirm it as Timeout */
+		if (td_status & TD_R)
+			out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
+		/* if this TD is dummy - let's skip this TD */
+		else if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER)
+			out_be32(&td->buf_ptr, DUMMY2_BD_BUFFER);
+		/* if this is the last TD - break */
+		if (td_status & TD_W)
+			break;
+
+		td++;
+	}
+
+	fhci_td_transaction_confirm(usb);
+
+	td = ep->td_base;
+	do {
+		out_be16(&td->status, 0);
+		out_be16(&td->length, 0);
+		out_be32(&td->buf_ptr, 0);
+		out_be16(&td->extra, 0);
+		td++;
+	} while (!(in_be16(&td->status) & TD_W));
+	out_be16(&td->status, TD_W); /* for last TD set Wrap bit */
+	out_be16(&td->length, 0);
+	out_be32(&td->buf_ptr, 0);
+	out_be16(&td->extra, 0);
+
+	out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
+		 in_be16(&ep->ep_pram_ptr->tx_base));
+	out_be32(&ep->ep_pram_ptr->tx_state, 0);
+	out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
+	ep->empty_td = ep->td_base;
+	ep->conf_td = ep->td_base;
+}
+
+/*
+ * Flush all transmitted packets from TDs in the actual frame.
+ * This routine is called when something goes wrong with the controller
+ * and we want to discard the actual frame and start again on the next one
+ */
+void fhci_flush_actual_frame(struct fhci_usb *usb)
+{
+	u8 mode;
+	u16 tb_ptr;
+	u16 extra_data;
+	u16 td_status;
+	u32 buf_ptr;
+	struct usb_td __iomem *td;
+	struct endpoint *ep = usb->ep0;
+
+	/* disable the USB controller */
+	mode = in_8(&usb->fhci->regs->usb_usmod);
+	out_8(&usb->fhci->regs->usb_usmod, mode & ~USB_MODE_EN);
+
+	tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
+	td = cpm_muram_addr(tb_ptr);
+	td_status = in_be16(&td->status);
+	buf_ptr = in_be32(&td->buf_ptr);
+	extra_data = in_be16(&td->extra);
+	do {
+		if (td_status & TD_R) {
+			out_be16(&td->status, (td_status & ~TD_R) | TD_TO);
+		} else {
+			out_be32(&td->buf_ptr, 0);
+			ep->already_pushed_dummy_bd = false;
+			break;
+		}
+
+		/* advance the TD pointer */
+		td = next_bd(ep->td_base, td, td_status);
+		td_status = in_be16(&td->status);
+		buf_ptr = in_be32(&td->buf_ptr);
+		extra_data = in_be16(&td->extra);
+	} while ((td_status & TD_R) || buf_ptr);
+
+	fhci_td_transaction_confirm(usb);
+
+	out_be16(&ep->ep_pram_ptr->tx_bd_ptr,
+		 in_be16(&ep->ep_pram_ptr->tx_base));
+	out_be32(&ep->ep_pram_ptr->tx_state, 0);
+	out_be16(&ep->ep_pram_ptr->tx_cnt, 0);
+	ep->empty_td = ep->td_base;
+	ep->conf_td = ep->td_base;
+
+	usb->actual_frame->frame_status = FRAME_TIMER_END_TRANSMISSION;
+
+	/* reset the event register */
+	out_be16(&usb->fhci->regs->usb_usber, 0xffff);
+	/* enable the USB controller */
+	out_8(&usb->fhci->regs->usb_usmod, mode | USB_MODE_EN);
+}
+
+/* handles Tx confirm and Tx error interrupt */
+void fhci_tx_conf_interrupt(struct fhci_usb *usb)
+{
+	fhci_td_transaction_confirm(usb);
+
+	/*
+	 * Schedule another transaction in this frame only if we have
+	 * already confirmed all transactions in the frame.
+	 */
+	if (((fhci_get_sof_timer_count(usb) < usb->max_frame_usage) ||
+	     (usb->actual_frame->frame_status & FRAME_END_TRANSMISSION)) &&
+	    (list_empty(&usb->actual_frame->tds_list)))
+		fhci_schedule_transactions(usb);
+}
+
+void fhci_host_transmit_actual_frame(struct fhci_usb *usb)
+{
+	u16 tb_ptr;
+	u16 td_status;
+	struct usb_td __iomem *td;
+	struct endpoint *ep = usb->ep0;
+
+	tb_ptr = in_be16(&ep->ep_pram_ptr->tx_bd_ptr);
+	td = cpm_muram_addr(tb_ptr);
+
+	if (in_be32(&td->buf_ptr) == DUMMY_BD_BUFFER) {
+		struct usb_td __iomem *old_td = td;
+
+		ep->already_pushed_dummy_bd = false;
+		td_status = in_be16(&td->status);
+		/* gets the next TD in the ring */
+		td = next_bd(ep->td_base, td, td_status);
+		tb_ptr = cpm_muram_offset(td);
+		out_be16(&ep->ep_pram_ptr->tx_bd_ptr, tb_ptr);
+
+		/* start transmit only if we have something in the TDs */
+		if (in_be16(&td->status) & TD_R)
+			out_8(&usb->fhci->regs->usb_uscom, USB_CMD_STR_FIFO);
+
+		if (in_be32(&ep->conf_td->buf_ptr) == DUMMY_BD_BUFFER) {
+			out_be32(&old_td->buf_ptr, 0);
+			ep->conf_td = next_bd(ep->td_base, ep->conf_td,
+					      td_status);
+		} else {
+			out_be32(&old_td->buf_ptr, DUMMY2_BD_BUFFER);
+		}
+	}
+}
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
new file mode 100644
index 0000000..2ce5031
--- /dev/null
+++ b/drivers/usb/host/fhci.h
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale QUICC Engine USB Host Controller Driver
+ *
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
+ *               Shlomi Gridish <gridish@freescale.com>
+ *               Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) Logic Product Development, Inc. 2007
+ *               Peter Barada <peterb@logicpd.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *               Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#ifndef __FHCI_H
+#define __FHCI_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+
+#define USB_CLOCK	48000000
+
+#define FHCI_PRAM_SIZE 0x100
+
+#define MAX_EDS		32
+#define MAX_TDS		32
+
+
+/* CRC16 field size */
+#define CRC_SIZE 2
+
+/* USB protocol overhead for each frame transmitted from the host */
+#define PROTOCOL_OVERHEAD 7
+
+/* Packet structure, info field */
+#define PKT_PID_DATA0		0x80000000 /* PID - Data toggle zero */
+#define PKT_PID_DATA1		0x40000000 /* PID - Data toggle one  */
+#define PKT_PID_SETUP		0x20000000 /* PID - Setup bit */
+#define PKT_SETUP_STATUS	0x10000000 /* Setup status bit */
+#define PKT_SETADDR_STATUS	0x08000000 /* Set address status bit */
+#define PKT_SET_HOST_LAST	0x04000000 /* Last data packet */
+#define PKT_HOST_DATA		0x02000000 /* Data packet */
+#define PKT_FIRST_IN_FRAME	0x01000000 /* First packet in the frame */
+#define PKT_TOKEN_FRAME		0x00800000 /* Token packet */
+#define PKT_ZLP			0x00400000 /* Zero length packet */
+#define PKT_IN_TOKEN_FRAME	0x00200000 /* IN token packet */
+#define PKT_OUT_TOKEN_FRAME	0x00100000 /* OUT token packet */
+#define PKT_SETUP_TOKEN_FRAME	0x00080000 /* SETUP token packet */
+#define PKT_STALL_FRAME		0x00040000 /* STALL packet */
+#define PKT_NACK_FRAME		0x00020000 /* NACK packet */
+#define PKT_NO_PID		0x00010000 /* No PID */
+#define PKT_NO_CRC		0x00008000 /* don't append CRC */
+#define PKT_HOST_COMMAND	0x00004000 /* Host command packet */
+#define PKT_DUMMY_PACKET	0x00002000 /* Dummy packet */
+#define PKT_LOW_SPEED_PACKET	0x00001000 /* Low-Speed packet */
+
+#define TRANS_OK		(0)
+#define TRANS_INPROGRESS	(-1)
+#define TRANS_DISCARD		(-2)
+#define TRANS_FAIL		(-3)
+
+#define PS_INT		0
+#define PS_DISCONNECTED	1
+#define PS_CONNECTED	2
+#define PS_READY	3
+#define PS_MISSING	4
+
+/* Transfer Descriptor status field */
+#define USB_TD_OK		0x00000000 /* TD transmitted or received ok */
+#define USB_TD_INPROGRESS	0x80000000 /* TD is being transmitted */
+#define USB_TD_RX_ER_NONOCT	0x40000000 /* Rx non-octet-aligned packet */
+#define USB_TD_RX_ER_BITSTUFF	0x20000000 /* bit-stuff error - frame aborted */
+#define USB_TD_RX_ER_CRC	0x10000000 /* CRC error */
+#define USB_TD_RX_ER_OVERUN	0x08000000 /* overrun occurred */
+#define USB_TD_RX_ER_PID	0x04000000 /* wrong PID received */
+#define USB_TD_RX_DATA_UNDERUN	0x02000000 /* shorter than expected */
+#define USB_TD_RX_DATA_OVERUN	0x01000000 /* longer than expected */
+#define USB_TD_TX_ER_NAK	0x00800000 /* NAK handshake */
+#define USB_TD_TX_ER_STALL	0x00400000 /* STALL handshake */
+#define USB_TD_TX_ER_TIMEOUT	0x00200000 /* transmit time out */
+#define USB_TD_TX_ER_UNDERUN	0x00100000 /* transmit underrun */
+
+#define USB_TD_ERROR (USB_TD_RX_ER_NONOCT | USB_TD_RX_ER_BITSTUFF | \
+		USB_TD_RX_ER_CRC | USB_TD_RX_ER_OVERUN | USB_TD_RX_ER_PID | \
+		USB_TD_RX_DATA_UNDERUN | USB_TD_RX_DATA_OVERUN | \
+		USB_TD_TX_ER_NAK | USB_TD_TX_ER_STALL | \
+		USB_TD_TX_ER_TIMEOUT | USB_TD_TX_ER_UNDERUN)
+
+/* Transfer Descriptor toggle field */
+#define USB_TD_TOGGLE_DATA0	0
+#define USB_TD_TOGGLE_DATA1	1
+#define USB_TD_TOGGLE_CARRY	2
+
+/* #define MULTI_DATA_BUS */
+
+/* Bus mode register RBMR/TBMR */
+#define BUS_MODE_GBL	0x20	/* Global snooping */
+#define BUS_MODE_BO	0x18	/* Byte ordering */
+#define BUS_MODE_BO_BE	0x10	/* Byte ordering - Big-endian */
+#define BUS_MODE_DTB	0x02	/* Data bus */
+
+/* FHCI QE USB Register Description */
+
+/* USB Mode Register bit define */
+#define USB_MODE_EN		0x01
+#define USB_MODE_HOST		0x02
+#define USB_MODE_TEST		0x04
+#define USB_MODE_SFTE		0x08
+#define USB_MODE_RESUME		0x40
+#define USB_MODE_LSS		0x80
+
+/* USB Slave Address Register Mask */
+#define USB_SLVADDR_MASK	0x7F
+
+/* USB Endpoint register define */
+#define USB_EPNUM_MASK		0xF000
+#define USB_EPNUM_SHIFT		12
+
+#define USB_TRANS_MODE_SHIFT	8
+#define USB_TRANS_CTR		0x0000
+#define USB_TRANS_INT		0x0100
+#define USB_TRANS_BULK		0x0200
+#define USB_TRANS_ISO		0x0300
+
+#define USB_EP_MF		0x0020
+#define USB_EP_RTE		0x0010
+
+#define USB_THS_SHIFT		2
+#define USB_THS_MASK		0x000c
+#define USB_THS_NORMAL		0x0
+#define USB_THS_IGNORE_IN	0x0004
+#define USB_THS_NACK		0x0008
+#define USB_THS_STALL		0x000c
+
+#define USB_RHS_SHIFT   	0
+#define USB_RHS_MASK		0x0003
+#define USB_RHS_NORMAL  	0x0
+#define USB_RHS_IGNORE_OUT	0x0001
+#define USB_RHS_NACK		0x0002
+#define USB_RHS_STALL		0x0003
+
+#define USB_RTHS_MASK		0x000f
+
+/* USB Command Register define */
+#define USB_CMD_STR_FIFO	0x80
+#define USB_CMD_FLUSH_FIFO	0x40
+#define USB_CMD_ISFT		0x20
+#define USB_CMD_DSFT		0x10
+#define USB_CMD_EP_MASK		0x03
+
+/* USB Event and Mask Register define */
+#define USB_E_MSF_MASK		0x0800
+#define USB_E_SFT_MASK		0x0400
+#define USB_E_RESET_MASK	0x0200
+#define USB_E_IDLE_MASK		0x0100
+#define USB_E_TXE4_MASK		0x0080
+#define USB_E_TXE3_MASK		0x0040
+#define USB_E_TXE2_MASK		0x0020
+#define USB_E_TXE1_MASK		0x0010
+#define USB_E_SOF_MASK		0x0008
+#define USB_E_BSY_MASK		0x0004
+#define USB_E_TXB_MASK		0x0002
+#define USB_E_RXB_MASK		0x0001
+
+/* Freescale USB HOST */
+struct fhci_pram {
+	__be16 ep_ptr[4];	/* Endpoint pointer reg */
+	__be32 rx_state;	/* Rx internal state */
+	__be32 rx_ptr;		/* Rx internal data pointer */
+	__be16 frame_num;	/* Frame number */
+	__be16 rx_cnt;		/* Rx byte count */
+	__be32 rx_temp;		/* Rx temp */
+	__be32 rx_data_temp;	/* Rx data temp */
+	__be16 rx_u_ptr;	/* Rx microcode return address temp */
+	u8 reserved1[2];	/* reserved area */
+	__be32 sof_tbl;		/* SOF lookup table pointer */
+	u8 sof_u_crc_temp;	/* SOF microcode CRC5 temp reg */
+	u8 reserved2[0xdb];
+};
+
+/* Freescale USB Endpoint */
+struct fhci_ep_pram {
+	__be16 rx_base;		/* Rx BD base address */
+	__be16 tx_base;		/* Tx BD base address */
+	u8 rx_func_code;	/* Rx function code */
+	u8 tx_func_code;	/* Tx function code */
+	__be16 rx_buff_len;	/* Rx buffer length */
+	__be16 rx_bd_ptr;	/* Rx BD pointer */
+	__be16 tx_bd_ptr;	/* Tx BD pointer */
+	__be32 tx_state;	/* Tx internal state */
+	__be32 tx_ptr;		/* Tx internal data pointer */
+	__be16 tx_crc;		/* temp transmit CRC */
+	__be16 tx_cnt;		/* Tx byte count */
+	__be32 tx_temp;		/* Tx temp */
+	__be16 tx_u_ptr;	/* Tx microcode return address temp */
+	__be16 reserved;
+};
+
+struct fhci_controller_list {
+	struct list_head ctrl_list;	/* control endpoints */
+	struct list_head bulk_list;	/* bulk endpoints */
+	struct list_head iso_list;	/* isochronous endpoints */
+	struct list_head intr_list;	/* interrupt endpoints */
+	struct list_head done_list;	/* done transfers */
+};
+
+struct virtual_root_hub {
+	int dev_num;	/* USB address of the root hub */
+	u32 feature;	/* indicates what feature has been set */
+	struct usb_hub_status hub;
+	struct usb_port_status port;
+};
+
+enum fhci_gpios {
+	GPIO_USBOE = 0,
+	GPIO_USBTP,
+	GPIO_USBTN,
+	GPIO_USBRP,
+	GPIO_USBRN,
+	/* these are optional */
+	GPIO_SPEED,
+	GPIO_POWER,
+	NUM_GPIOS,
+};
+
+enum fhci_pins {
+	PIN_USBOE = 0,
+	PIN_USBTP,
+	PIN_USBTN,
+	NUM_PINS,
+};
+
+struct fhci_hcd {
+	enum qe_clock fullspeed_clk;
+	enum qe_clock lowspeed_clk;
+	struct qe_pin *pins[NUM_PINS];
+	int gpios[NUM_GPIOS];
+	bool alow_gpios[NUM_GPIOS];	/* active-low GPIO polarity */
+
+	struct qe_usb_ctlr __iomem *regs; /* I/O memory used to communicate */
+	struct fhci_pram __iomem *pram;	/* Parameter RAM */
+	struct gtm_timer *timer;
+
+	spinlock_t lock;
+	struct fhci_usb *usb_lld; /* Low-level driver */
+	struct virtual_root_hub *vroot_hub; /* the virtual root hub */
+	int active_urbs;
+	struct fhci_controller_list *hc_list;
+	struct tasklet_struct *process_done_task; /* tasklet for done list */
+
+	struct list_head empty_eds;
+	struct list_head empty_tds;
+
+#ifdef CONFIG_FHCI_DEBUG
+	int usb_irq_stat[13];
+	struct dentry *dfs_root;
+#endif
+};
+
+#define USB_FRAME_USAGE 90
+#define FRAME_TIME_USAGE (USB_FRAME_USAGE*10)	/* frame time usage, in us */
+#define SW_FIX_TIME_BETWEEN_TRANSACTION 150	/* sw overhead between transactions */
+#define MAX_BYTES_PER_FRAME (USB_FRAME_USAGE*15)
+#define MAX_PERIODIC_FRAME_USAGE 90
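+
+/*
+ * Sanity of the numbers above, assuming the usual 12 Mbit/s full-speed
+ * budget: a 1 ms frame carries at most 12000000 / 8 / 1000 = 1500 bytes,
+ * so MAX_BYTES_PER_FRAME = USB_FRAME_USAGE * 15 = 1350 bytes is 90% of a
+ * frame, matching FRAME_TIME_USAGE = 900 (microseconds of a 1 ms frame).
+ */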
+
+/* transaction type */
+enum fhci_ta_type {
+	FHCI_TA_IN = 0,	/* input transaction */
+	FHCI_TA_OUT,	/* output transaction */
+	FHCI_TA_SETUP,	/* setup transaction */
+};
+
+/* transfer mode */
+enum fhci_tf_mode {
+	FHCI_TF_CTRL = 0,
+	FHCI_TF_ISO,
+	FHCI_TF_BULK,
+	FHCI_TF_INTR,
+};
+
+enum fhci_speed {
+	FHCI_FULL_SPEED,
+	FHCI_LOW_SPEED,
+};
+
+/* endpoint state */
+enum fhci_ed_state {
+	FHCI_ED_NEW = 0, /* pipe is new */
+	FHCI_ED_OPER,    /* pipe is operating */
+	FHCI_ED_URB_DEL, /* pipe is in hold because urb is being deleted */
+	FHCI_ED_SKIP,    /* skip this pipe */
+	FHCI_ED_HALTED,  /* pipe is halted */
+};
+
+enum fhci_port_status {
+	FHCI_PORT_POWER_OFF = 0,
+	FHCI_PORT_DISABLED,
+	FHCI_PORT_DISCONNECTING,
+	FHCI_PORT_WAITING,	/* waiting for connection */
+	FHCI_PORT_FULL,		/* full speed connected */
+	FHCI_PORT_LOW,		/* low speed connected */
+};
+
+enum fhci_mem_alloc {
+	MEM_CACHABLE_SYS = 0x00000001,	/* primary DDR, cacheable */
+	MEM_NOCACHE_SYS = 0x00000004,	/* primary DDR, non-cacheable */
+	MEM_SECONDARY = 0x00000002,	/* either secondary DDR or SDRAM */
+	MEM_PRAM = 0x00000008,		/* multi-user RAM identifier */
+};
+
+/* USB default parameters */
+#define DEFAULT_RING_LEN	8
+#define DEFAULT_DATA_MEM	MEM_CACHABLE_SYS
+
+struct ed {
+	u8 dev_addr;		/* device address */
+	u8 ep_addr;		/* endpoint address */
+	enum fhci_tf_mode mode;	/* USB transfer mode */
+	enum fhci_speed speed;
+	unsigned int max_pkt_size;
+	enum fhci_ed_state state;
+	struct list_head td_list; /* a list of all queued TD to this pipe */
+	struct list_head node;
+
+	/* read-only parameters, should be cleared upon initialization */
+	u8 toggle_carry;	/* toggle carry from the last TD submitted */
+	u16 next_iso;		/* time stamp of next queued ISO transfer */
+	struct td *td_head;	/* a pointer to the current TD handled */
+};
+
+struct td {
+	void *data;		 /* a pointer to the data buffer */
+	unsigned int len;	 /* length of the data to be submitted */
+	unsigned int actual_len; /* actual bytes transferred on this td */
+	enum fhci_ta_type type;	 /* transaction type */
+	u8 toggle;		 /* toggle for next trans. within this TD */
+	u16 iso_index;		 /* ISO transaction index */
+	u16 start_frame;	 /* start frame time stamp */
+	u16 interval;		 /* interval between trans. (for ISO/Intr) */
+	u32 status;		 /* status of the TD */
+	struct ed *ed;		 /* a handle to the corresponding ED */
+	struct urb *urb;	 /* a handle to the corresponding URB */
+	bool ioc;		 /* Inform On Completion */
+	struct list_head node;
+
+	/* read-only parameters, should be cleared upon initialization */
+	struct packet *pkt;
+	int nak_cnt;
+	int error_cnt;
+	struct list_head frame_lh;
+};
+
+struct packet {
+	u8 *data;	/* packet data */
+	u32 len;	/* packet length */
+	u32 status;	/* status of the packet - equivalent to the status
+			 * field for the corresponding structure td */
+	u32 info;	/* packet information */
+	void __iomem *priv_data; /* private data of the driver (TDs or BDs) */
+};
+
+/* struct for each URB */
+#define URB_INPROGRESS	0
+#define URB_DEL		1
+
+/* URB states (state field) */
+#define US_BULK		0
+#define US_BULK0	1
+
+/* three setup states */
+#define US_CTRL_SETUP	2
+#define US_CTRL_DATA	1
+#define US_CTRL_ACK	0
+
+#define EP_ZERO	0
+
+struct urb_priv {
+	int num_of_tds;
+	int tds_cnt;
+	int state;
+
+	struct td **tds;
+	struct ed *ed;
+	struct timer_list time_out;
+};
+
+struct endpoint {
+	/* Pointer to ep parameter RAM */
+	struct fhci_ep_pram __iomem *ep_pram_ptr;
+
+	/* Host transactions */
+	struct usb_td __iomem *td_base; /* first TD in the ring */
+	struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
+	struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
+	struct kfifo empty_frame_Q;  /* Empty frames list to use */
+	struct kfifo conf_frame_Q;   /* frames passed to TDs,waiting for tx */
+	struct kfifo dummy_packets_Q;	/* dummy packets for the CRC overrun */
+
+	bool already_pushed_dummy_bd;
+};
+
+/* struct for each 1 ms frame time */
+#define FRAME_IS_TRANSMITTED		0x00
+#define FRAME_TIMER_END_TRANSMISSION	0x01
+#define FRAME_DATA_END_TRANSMISSION	0x02
+#define FRAME_END_TRANSMISSION		0x03
+#define FRAME_IS_PREPARED		0x04
+
+struct fhci_time_frame {
+	u16 frame_num;	 /* frame number */
+	u16 total_bytes; /* total bytes submitted within this frame */
+	u8 frame_status; /* flag indicating to stop filling this frame */
+	struct list_head tds_list; /* all tds of this frame */
+};
+
+/* internal driver structure */
+struct fhci_usb {
+	u16 saved_msk;		 /* saved copy of the USB mask register */
+	struct endpoint *ep0;	 /* pointer to the endpoint0 structure */
+	int intr_nesting_cnt;	 /* interrupt nesting counter */
+	u16 max_frame_usage;	 /* max frame time usage, in microseconds */
+	u16 max_bytes_per_frame; /* max bytes transmittable in one frame */
+	u32 sw_transaction_time; /* sw transaction completion time, in us */
+	struct fhci_time_frame *actual_frame;
+	struct fhci_controller_list *hc_list;	/* main structure for hc */
+	struct virtual_root_hub *vroot_hub;
+	enum fhci_port_status port_status;	/* v_rh port status */
+
+	u32 (*transfer_confirm)(struct fhci_hcd *fhci);
+
+	struct fhci_hcd *fhci;
+};
+
+/*
+ * Various helpers and prototypes below.
+ */
+
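+/* USB frame numbers are 11 bits wide, hence the 0x07ff mask below. */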
+static inline u16 get_frame_num(struct fhci_hcd *fhci)
+{
+	return in_be16(&fhci->pram->frame_num) & 0x07ff;
+}
+
+#define fhci_dbg(fhci, fmt, args...) \
+		dev_dbg(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
+#define fhci_vdbg(fhci, fmt, args...) \
+		dev_vdbg(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
+#define fhci_err(fhci, fmt, args...) \
+		dev_err(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
+#define fhci_info(fhci, fmt, args...) \
+		dev_info(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
+#define fhci_warn(fhci, fmt, args...) \
+		dev_warn(fhci_to_hcd(fhci)->self.controller, fmt, ##args)
+
+static inline struct fhci_hcd *hcd_to_fhci(struct usb_hcd *hcd)
+{
+	return (struct fhci_hcd *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
+{
+	return container_of((void *)fhci, struct usb_hcd, hcd_priv);
+}
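+
+/*
+ * struct fhci_hcd is allocated in the hcd_priv tail of struct usb_hcd
+ * (sized via the driver's hcd_priv_size), so the two helpers above are
+ * exact inverses of each other.
+ */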
+
+/* fifo of pointers */
+static inline int cq_new(struct kfifo *fifo, int size)
+{
+	return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
+}
+
+static inline void cq_delete(struct kfifo *kfifo)
+{
+	kfifo_free(kfifo);
+}
+
+static inline unsigned int cq_howmany(struct kfifo *kfifo)
+{
+	return kfifo_len(kfifo) / sizeof(void *);
+}
+
+static inline int cq_put(struct kfifo *kfifo, void *p)
+{
+	return kfifo_in(kfifo, (void *)&p, sizeof(p));
+}
+
+static inline void *cq_get(struct kfifo *kfifo)
+{
+	unsigned int sz;
+	void *p;
+
+	sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
+	if (sz != sizeof(p))
+		return NULL;
+
+	return p;
+}
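+
+/*
+ * The cq_* helpers above implement a fifo of bare pointers: cq_put()
+ * stores the pointer value itself and cq_get() returns NULL once the
+ * fifo is empty.  A minimal usage sketch (illustrative only, not part
+ * of the driver):
+ *
+ *	struct kfifo q;
+ *	struct packet *pkt;
+ *
+ *	if (cq_new(&q, 8))
+ *		return -ENOMEM;
+ *	cq_put(&q, some_pkt);
+ *	pkt = cq_get(&q);
+ *	cq_delete(&q);
+ */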
+
+/* fhci-hcd.c */
+void fhci_start_sof_timer(struct fhci_hcd *fhci);
+void fhci_stop_sof_timer(struct fhci_hcd *fhci);
+u16 fhci_get_sof_timer_count(struct fhci_usb *usb);
+void fhci_usb_enable_interrupt(struct fhci_usb *usb);
+void fhci_usb_disable_interrupt(struct fhci_usb *usb);
+int fhci_ioports_check_bus_state(struct fhci_hcd *fhci);
+
+/* fhci-mem.c */
+void fhci_recycle_empty_td(struct fhci_hcd *fhci, struct td *td);
+void fhci_recycle_empty_ed(struct fhci_hcd *fhci, struct ed *ed);
+struct ed *fhci_get_empty_ed(struct fhci_hcd *fhci);
+struct td *fhci_td_fill(struct fhci_hcd *fhci, struct urb *urb,
+			struct urb_priv *urb_priv, struct ed *ed, u16 index,
+			enum fhci_ta_type type, int toggle, u8 *data, u32 len,
+			u16 interval, u16 start_frame, bool ioc);
+void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number);
+
+/* fhci-hub.c */
+void fhci_config_transceiver(struct fhci_hcd *fhci,
+			enum fhci_port_status status);
+void fhci_port_disable(struct fhci_hcd *fhci);
+void fhci_port_enable(void *lld);
+void fhci_io_port_generate_reset(struct fhci_hcd *fhci);
+void fhci_port_reset(void *lld);
+int fhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int fhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		     u16 wIndex, char *buf, u16 wLength);
+
+/* fhci-tds.c */
+void fhci_flush_bds(struct fhci_usb *usb);
+void fhci_flush_actual_frame(struct fhci_usb *usb);
+u32 fhci_host_transaction(struct fhci_usb *usb, struct packet *pkt,
+			  enum fhci_ta_type trans_type, u8 dest_addr,
+			  u8 dest_ep, enum fhci_tf_mode trans_mode,
+			  enum fhci_speed dest_speed, u8 data_toggle);
+void fhci_host_transmit_actual_frame(struct fhci_usb *usb);
+void fhci_tx_conf_interrupt(struct fhci_usb *usb);
+void fhci_push_dummy_bd(struct endpoint *ep);
+u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
+		   u32 ring_len);
+void fhci_init_ep_registers(struct fhci_usb *usb,
+			    struct endpoint *ep,
+			    enum fhci_mem_alloc data_mem);
+void fhci_ep0_free(struct fhci_usb *usb);
+
+/* fhci-sched.c */
+extern struct tasklet_struct fhci_tasklet;
+void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt);
+void fhci_flush_all_transmissions(struct fhci_usb *usb);
+void fhci_schedule_transactions(struct fhci_usb *usb);
+void fhci_device_connected_interrupt(struct fhci_hcd *fhci);
+void fhci_device_disconnected_interrupt(struct fhci_hcd *fhci);
+void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb);
+u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci);
+irqreturn_t fhci_irq(struct usb_hcd *hcd);
+irqreturn_t fhci_frame_limit_timer_irq(int irq, void *_hcd);
+
+/* fhci-q.c */
+void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb);
+struct td *fhci_remove_td_from_ed(struct ed *ed);
+struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame);
+void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed);
+struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame);
+void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td);
+struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list);
+void fhci_done_td(struct urb *urb, struct td *td);
+void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed);
+
+#ifdef CONFIG_FHCI_DEBUG
+
+void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er);
+void fhci_dfs_destroy(struct fhci_hcd *fhci);
+void fhci_dfs_create(struct fhci_hcd *fhci);
+
+#else
+
+static inline void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er) {}
+static inline void fhci_dfs_destroy(struct fhci_hcd *fhci) {}
+static inline void fhci_dfs_create(struct fhci_hcd *fhci) {}
+
+#endif /* CONFIG_FHCI_DEBUG */
+
+#endif /* __FHCI_H */
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
new file mode 100644
index 0000000..e64eb47
--- /dev/null
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -0,0 +1,5698 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Faraday FOTG210 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ *	   Feng-Hsin Chiang <john453@faraday-tech.com>
+ *	   Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of the code is borrowed from the Linux-3.7 EHCI driver
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
+static const char hcd_name[] = "fotg210_hcd";
+
+#undef FOTG210_URB_TRACE
+#define FOTG210_STATS
+
+/* magic numbers that can affect system performance */
+#define FOTG210_TUNE_CERR	3 /* 0-3 qtd retries; 0 == don't stop */
+#define FOTG210_TUNE_RL_HS	4 /* nak throttle; see 4.9 */
+#define FOTG210_TUNE_RL_TT	0
+#define FOTG210_TUNE_MULT_HS	1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FOTG210_TUNE_MULT_TT	1
+
+/* Some drivers think it's safe to schedule isochronous transfers more than 256
+ * ms into the future (partly as a result of an old bug in the scheduling
+ * code).  In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency:  faster than hw default */
+static int log2_irq_thresh; /* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting:  slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* for the link power management (LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fotg210.h"
+
+#define fotg210_dbg(fotg210, fmt, args...) \
+	dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
+#define fotg210_err(fotg210, fmt, args...) \
+	dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
+#define fotg210_info(fotg210, fmt, args...) \
+	dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
+#define fotg210_warn(fotg210, fmt, args...) \
+	dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
+
+/* check the values in the HCSPARAMS register (host controller _Structural_
+ * parameters) see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
+{
+	u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+
+	fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params,
+			HCS_N_PORTS(params));
+}
+
+/* check the values in the HCCPARAMS register (host controller _Capability_
+ * parameters) see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
+{
+	u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+	fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label,
+			params,
+			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+			HCC_CANPARK(params) ? " park" : "");
+}
+
+static void __maybe_unused
+dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
+{
+	fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+			hc32_to_cpup(fotg210, &qtd->hw_next),
+			hc32_to_cpup(fotg210, &qtd->hw_alt_next),
+			hc32_to_cpup(fotg210, &qtd->hw_token),
+			hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
+	if (qtd->hw_buf[1])
+		fotg210_dbg(fotg210, "  p1=%08x p2=%08x p3=%08x p4=%08x\n",
+				hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
+				hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
+				hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
+				hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	struct fotg210_qh_hw *hw = qh->hw;
+
+	fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
+			hw->hw_next, hw->hw_info1, hw->hw_info2,
+			hw->hw_current);
+
+	dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+	fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label,
+			itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
+			itd->urb);
+
+	fotg210_dbg(fotg210,
+			"  trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			hc32_to_cpu(fotg210, itd->hw_transaction[0]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[1]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[2]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[3]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[4]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[5]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[6]),
+			hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+
+	fotg210_dbg(fotg210,
+			"  buf:   %08x %08x %08x %08x %08x %08x %08x\n",
+			hc32_to_cpu(fotg210, itd->hw_bufp[0]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[1]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[2]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[3]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[4]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[5]),
+			hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+
+	fotg210_dbg(fotg210, "  index: %d %d %d %d %d %d %d %d\n",
+			itd->index[0], itd->index[1], itd->index[2],
+			itd->index[3], itd->index[4], itd->index[5],
+			itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+			label, label[0] ? " " : "", status,
+			(status & STS_ASS) ? " Async" : "",
+			(status & STS_PSS) ? " Periodic" : "",
+			(status & STS_RECL) ? " Recl" : "",
+			(status & STS_HALT) ? " Halt" : "",
+			(status & STS_IAA) ? " IAA" : "",
+			(status & STS_FATAL) ? " FATAL" : "",
+			(status & STS_FLR) ? " FLR" : "",
+			(status & STS_PCD) ? " PCD" : "",
+			(status & STS_ERR) ? " ERR" : "",
+			(status & STS_INT) ? " INT" : "");
+}
+
+static int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
+			label, label[0] ? " " : "", enable,
+			(enable & STS_IAA) ? " IAA" : "",
+			(enable & STS_FATAL) ? " FATAL" : "",
+			(enable & STS_FLR) ? " FLR" : "",
+			(enable & STS_PCD) ? " PCD" : "",
+			(enable & STS_ERR) ? " ERR" : "",
+			(enable & STS_INT) ? " INT" : "");
+}
+
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
+
+static int dbg_command_buf(char *buf, unsigned len, const char *label,
+		u32 command)
+{
+	return scnprintf(buf, len,
+			"%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s",
+			label, label[0] ? " " : "", command,
+			(command & CMD_PARK) ? " park" : "(park)",
+			CMD_PARK_CNT(command),
+			(command >> 16) & 0x3f,
+			(command & CMD_IAAD) ? " IAAD" : "",
+			(command & CMD_ASE) ? " Async" : "",
+			(command & CMD_PSE) ? " Periodic" : "",
+			fls_strings[(command >> 2) & 0x3],
+			(command & CMD_RESET) ? " Reset" : "",
+			(command & CMD_RUN) ? "RUN" : "HALT");
+}
+
+static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port,
+		u32 status)
+{
+	char *sig;
+
+	/* signaling state */
+	switch (status & (3 << 10)) {
+	case 0 << 10:
+		sig = "se0";
+		break;
+	case 1 << 10:
+		sig = "k";
+		break; /* low speed */
+	case 2 << 10:
+		sig = "j";
+		break;
+	default:
+		sig = "?";
+		break;
+	}
+
+	scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s",
+			label, label[0] ? " " : "", port, status,
+			status >> 25, /* device address */
+			sig,
+			(status & PORT_RESET) ? " RESET" : "",
+			(status & PORT_SUSPEND) ? " SUSPEND" : "",
+			(status & PORT_RESUME) ? " RESUME" : "",
+			(status & PORT_PEC) ? " PEC" : "",
+			(status & PORT_PE) ? " PE" : "",
+			(status & PORT_CSC) ? " CSC" : "",
+			(status & PORT_CONNECT) ? " CONNECT" : "");
+
+	return buf;
+}
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fotg210, label, status) {			\
+	char _buf[80];						\
+	dbg_status_buf(_buf, sizeof(_buf), label, status);	\
+	fotg210_dbg(fotg210, "%s\n", _buf);			\
+}
+
+#define dbg_cmd(fotg210, label, command) {			\
+	char _buf[80];						\
+	dbg_command_buf(_buf, sizeof(_buf), label, command);	\
+	fotg210_dbg(fotg210, "%s\n", _buf);			\
+}
+
+#define dbg_port(fotg210, label, port, status) {			       \
+	char _buf[80];							       \
+	fotg210_dbg(fotg210, "%s\n",					       \
+			dbg_port_buf(_buf, sizeof(_buf), label, port, status));\
+}
+
+/* troubleshooting help: expose state in debugfs */
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file *, char __user *, size_t, loff_t *);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_async_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_periodic_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_registers_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static struct dentry *fotg210_debug_root;
+
+struct debug_buffer {
+	ssize_t (*fill_func)(struct debug_buffer *);	/* fill method */
+	struct usb_bus *bus;
+	struct mutex mutex;	/* protect filling of buffer */
+	size_t count;		/* number of characters filled into buffer */
+	char *output_buf;
+	size_t alloc_size;
+};
+
+static inline char speed_char(u32 scratch)
+{
+	switch (scratch & (3 << 12)) {
+	case QH_FULL_SPEED:
+		return 'f';
+
+	case QH_LOW_SPEED:
+		return 'l';
+
+	case QH_HIGH_SPEED:
+		return 'h';
+
+	default:
+		return '?';
+	}
+}
+
+static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
+{
+	__u32 v = hc32_to_cpu(fotg210, token);
+
+	if (v & QTD_STS_ACTIVE)
+		return '*';
+	if (v & QTD_STS_HALT)
+		return '-';
+	if (!IS_SHORT_READ(v))
+		return ' ';
+	/* tries to advance through hw_alt_next */
+	return '/';
+}
+
+static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+		char **nextp, unsigned *sizep)
+{
+	u32 scratch;
+	u32 hw_curr;
+	struct fotg210_qtd *td;
+	unsigned temp;
+	unsigned size = *sizep;
+	char *next = *nextp;
+	char mark;
+	__le32 list_end = FOTG210_LIST_END(fotg210);
+	struct fotg210_qh_hw *hw = qh->hw;
+
+	if (hw->hw_qtd_next == list_end) /* NEC does this */
+		mark = '@';
+	else
+		mark = token_mark(fotg210, hw->hw_token);
+	if (mark == '/') { /* qh_alt_next controls qh advance? */
+		if ((hw->hw_alt_next & QTD_MASK(fotg210)) ==
+		    fotg210->async->hw->hw_alt_next)
+			mark = '#'; /* blocked */
+		else if (hw->hw_alt_next == list_end)
+			mark = '.'; /* use hw_qtd_next */
+		/* else alt_next points to some other qtd */
+	}
+	scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+	hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
+	temp = scnprintf(next, size,
+			"qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
+			qh, scratch & 0x007f,
+			speed_char(scratch),
+			(scratch >> 8) & 0x000f,
+			scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
+			hc32_to_cpup(fotg210, &hw->hw_token), mark,
+			(cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
+				? "data1" : "data0",
+			(hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
+	size -= temp;
+	next += temp;
+
+	/* hc may be modifying the list as we read it ... */
+	list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+		scratch = hc32_to_cpup(fotg210, &td->hw_token);
+		mark = ' ';
+		if (hw_curr == td->qtd_dma)
+			mark = '*';
+		else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
+			mark = '+';
+		else if (QTD_LENGTH(scratch)) {
+			if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
+				mark = '#';
+			else if (td->hw_alt_next != list_end)
+				mark = '/';
+		}
+		temp = snprintf(next, size,
+				"\n\t%p%c%s len=%d %08x urb %p",
+				td, mark, ({ char *tmp;
+				 switch ((scratch>>8)&0x03) {
+				 case 0:
+					tmp = "out";
+					break;
+				 case 1:
+					tmp = "in";
+					break;
+				 case 2:
+					tmp = "setup";
+					break;
+				 default:
+					tmp = "?";
+					break;
+				 } tmp; }),
+				(scratch >> 16) & 0x7fff,
+				scratch,
+				td->urb);
+		if (size < temp)
+			temp = size;
+		size -= temp;
+		next += temp;
+		if (temp == size)
+			goto done;
+	}
+
+	temp = snprintf(next, size, "\n");
+	if (size < temp)
+		temp = size;
+
+	size -= temp;
+	next += temp;
+
+done:
+	*sizep = size;
+	*nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd *hcd;
+	struct fotg210_hcd *fotg210;
+	unsigned long flags;
+	unsigned temp, size;
+	char *next;
+	struct fotg210_qh *qh;
+
+	hcd = bus_to_hcd(buf->bus);
+	fotg210 = hcd_to_fotg210(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	*next = 0;
+
+	/* dumps a snapshot of the async schedule.
+	 * usually empty except for long-term bulk reads, or head.
+	 * one QH per line, and TDs we know about
+	 */
+	spin_lock_irqsave(&fotg210->lock, flags);
+	for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
+			qh = qh->qh_next.qh)
+		qh_lines(fotg210, qh, &next, &size);
+	if (fotg210->async_unlink && size > 0) {
+		temp = scnprintf(next, size, "\nunlink =\n");
+		size -= temp;
+		next += temp;
+
+		for (qh = fotg210->async_unlink; size > 0 && qh;
+				qh = qh->unlink_next)
+			qh_lines(fotg210, qh, &next, &size);
+	}
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+
+	return strlen(buf->output_buf);
+}
+
+/* count tds, get ep direction */
+static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210,
+		struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size)
+{
+	u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+	struct fotg210_qtd *qtd;
+	char *type = "";
+	unsigned temp = 0;
+
+	/* count tds, get ep direction */
+	list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+		temp++;
+		switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
+		case 0:
+			type = "out";
+			continue;
+		case 1:
+			type = "in";
+			continue;
+		}
+	}
+
+	return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)",
+			speed_char(scratch), scratch & 0x007f,
+			(scratch >> 8) & 0x000f, type, qh->usecs,
+			qh->c_usecs, temp, (scratch >> 16) & 0x7ff);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd *hcd;
+	struct fotg210_hcd *fotg210;
+	unsigned long flags;
+	union fotg210_shadow p, *seen;
+	unsigned temp, size, seen_count;
+	char *next;
+	unsigned i;
+	__hc32 tag;
+
+	seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
+	if (!seen)
+		return 0;
+
+	seen_count = 0;
+
+	hcd = bus_to_hcd(buf->bus);
+	fotg210 = hcd_to_fotg210(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
+	size -= temp;
+	next += temp;
+
+	/* dump a snapshot of the periodic schedule.
+	 * iso changes, interrupt usually doesn't.
+	 */
+	spin_lock_irqsave(&fotg210->lock, flags);
+	for (i = 0; i < fotg210->periodic_size; i++) {
+		p = fotg210->pshadow[i];
+		if (likely(!p.ptr))
+			continue;
+
+		tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
+
+		temp = scnprintf(next, size, "%4d: ", i);
+		size -= temp;
+		next += temp;
+
+		do {
+			struct fotg210_qh_hw *hw;
+
+			switch (hc32_to_cpu(fotg210, tag)) {
+			case Q_TYPE_QH:
+				hw = p.qh->hw;
+				temp = scnprintf(next, size, " qh%d-%04x/%p",
+						p.qh->period,
+						hc32_to_cpup(fotg210,
+							&hw->hw_info2)
+							/* uframe masks */
+							& (QH_CMASK | QH_SMASK),
+						p.qh);
+				size -= temp;
+				next += temp;
+				/* don't repeat what follows this qh */
+				for (temp = 0; temp < seen_count; temp++) {
+					if (seen[temp].ptr != p.ptr)
+						continue;
+					if (p.qh->qh_next.ptr) {
+						temp = scnprintf(next, size,
+								" ...");
+						size -= temp;
+						next += temp;
+					}
+					break;
+				}
+				/* show more info the first time around */
+				if (temp == seen_count) {
+					temp = output_buf_tds_dir(next,
+							fotg210, hw,
+							p.qh, size);
+
+					if (seen_count < DBG_SCHED_LIMIT)
+						seen[seen_count++].qh = p.qh;
+				} else
+					temp = 0;
+				tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
+				p = p.qh->qh_next;
+				break;
+			case Q_TYPE_FSTN:
+				temp = scnprintf(next, size,
+						" fstn-%8x/%p",
+						p.fstn->hw_prev, p.fstn);
+				tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
+				p = p.fstn->fstn_next;
+				break;
+			case Q_TYPE_ITD:
+				temp = scnprintf(next, size,
+						" itd/%p", p.itd);
+				tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
+				p = p.itd->itd_next;
+				break;
+			}
+			size -= temp;
+			next += temp;
+		} while (p.ptr);
+
+		temp = scnprintf(next, size, "\n");
+		size -= temp;
+		next += temp;
+	}
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	kfree(seen);
+
+	return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fotg210_hcd *fotg210)
+{
+	switch (fotg210->rh_state) {
+	case FOTG210_RH_HALTED:
+		return "halted";
+	case FOTG210_RH_SUSPENDED:
+		return "suspended";
+	case FOTG210_RH_RUNNING:
+		return "running";
+	case FOTG210_RH_STOPPING:
+		return "stopping";
+	}
+	return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd *hcd;
+	struct fotg210_hcd *fotg210;
+	unsigned long flags;
+	unsigned temp, size, i;
+	char *next, scratch[80];
+	static const char fmt[] = "%*s\n";
+	static const char label[] = "";
+
+	hcd = bus_to_hcd(buf->bus);
+	fotg210 = hcd_to_fotg210(hcd);
+	next = buf->output_buf;
+	size = buf->alloc_size;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		size = scnprintf(next, size,
+				"bus %s, device %s\n"
+				"%s\n"
+				"SUSPENDED(no register access)\n",
+				hcd->self.controller->bus->name,
+				dev_name(hcd->self.controller),
+				hcd->product_desc);
+		goto done;
+	}
+
+	/* Capability Registers */
+	i = HC_VERSION(fotg210, fotg210_readl(fotg210,
+			&fotg210->caps->hc_capbase));
+	temp = scnprintf(next, size,
+			"bus %s, device %s\n"
+			"%s\n"
+			"EHCI %x.%02x, rh state %s\n",
+			hcd->self.controller->bus->name,
+			dev_name(hcd->self.controller),
+			hcd->product_desc,
+			i >> 8, i & 0x0ff, rh_state_string(fotg210));
+	size -= temp;
+	next += temp;
+
+	/* FIXME interpret both types of params */
+	i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+	temp = scnprintf(next, size, "structural params 0x%08x\n", i);
+	size -= temp;
+	next += temp;
+
+	i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+	temp = scnprintf(next, size, "capability params 0x%08x\n", i);
+	size -= temp;
+	next += temp;
+
+	/* Operational Registers */
+	temp = dbg_status_buf(scratch, sizeof(scratch), label,
+			fotg210_readl(fotg210, &fotg210->regs->status));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = dbg_command_buf(scratch, sizeof(scratch), label,
+			fotg210_readl(fotg210, &fotg210->regs->command));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = dbg_intr_buf(scratch, sizeof(scratch), label,
+			fotg210_readl(fotg210, &fotg210->regs->intr_enable));
+	temp = scnprintf(next, size, fmt, temp, scratch);
+	size -= temp;
+	next += temp;
+
+	temp = scnprintf(next, size, "uframe %04x\n",
+			fotg210_read_frame_index(fotg210));
+	size -= temp;
+	next += temp;
+
+	if (fotg210->async_unlink) {
+		temp = scnprintf(next, size, "async unlink qh %p\n",
+				fotg210->async_unlink);
+		size -= temp;
+		next += temp;
+	}
+
+#ifdef FOTG210_STATS
+	temp = scnprintf(next, size,
+			"irq normal %ld err %ld iaa %ld(lost %ld)\n",
+			fotg210->stats.normal, fotg210->stats.error,
+			fotg210->stats.iaa, fotg210->stats.lost_iaa);
+	size -= temp;
+	next += temp;
+
+	temp = scnprintf(next, size, "complete %ld unlink %ld\n",
+			fotg210->stats.complete, fotg210->stats.unlink);
+	size -= temp;
+	next += temp;
+#endif
+
+done:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+
+	return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+		ssize_t (*fill_func)(struct debug_buffer *))
+{
+	struct debug_buffer *buf;
+
+	buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+	if (buf) {
+		buf->bus = bus;
+		buf->fill_func = fill_func;
+		mutex_init(&buf->mutex);
+		buf->alloc_size = PAGE_SIZE;
+	}
+
+	return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+	int ret = 0;
+
+	if (!buf->output_buf)
+		buf->output_buf = vmalloc(buf->alloc_size);
+
+	if (!buf->output_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = buf->fill_func(buf);
+
+	if (ret >= 0) {
+		buf->count = ret;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+		size_t len, loff_t *offset)
+{
+	struct debug_buffer *buf = file->private_data;
+	int ret = 0;
+
+	mutex_lock(&buf->mutex);
+	if (buf->count == 0) {
+		ret = fill_buffer(buf);
+		if (ret != 0) {
+			mutex_unlock(&buf->mutex);
+			goto out;
+		}
+	}
+	mutex_unlock(&buf->mutex);
+
+	ret = simple_read_from_buffer(user_buf, len, offset,
+			buf->output_buf, buf->count);
+
+out:
+	return ret;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *buf = file->private_data;
+
+	if (buf) {
+		vfree(buf->output_buf);
+		kfree(buf);
+	}
+
+	return 0;
+}
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *buf;
+
+	buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+	if (!buf)
+		return -ENOMEM;
+
+	buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+	file->private_data = buf;
+	return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private,
+			fill_registers_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct fotg210_hcd *fotg210)
+{
+	struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+	struct dentry *root;
+
+	root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
+	fotg210->debug_dir = root;
+
+	debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
+	debugfs_create_file("periodic", S_IRUGO, root, bus,
+			    &debug_periodic_fops);
+	debugfs_create_file("registers", S_IRUGO, root, bus,
+			    &debug_registers_fops);
+}
+
+static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+{
+	debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+/* handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes:  "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown:  shutting down the bridge before the devices using it.
+ */
+static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
+		u32 mask, u32 done, int usec)
+{
+	u32 result;
+
+	do {
+		result = fotg210_readl(fotg210, ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
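+
+/*
+ * Example: fotg210_halt() below calls handshake() with usec = 16 * 125,
+ * i.e. it polls for up to 16 microframes (2 ms) waiting for STS_HALT.
+ */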
+
+/* Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_halt(struct fotg210_hcd *fotg210)
+{
+	u32 temp;
+
+	spin_lock_irq(&fotg210->lock);
+
+	/* disable any irqs left enabled by previous code */
+	fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+
+	/*
+	 * This routine gets called during probe before fotg210->command
+	 * has been initialized, so we can't rely on its value.
+	 */
+	fotg210->command &= ~CMD_RUN;
+	temp = fotg210_readl(fotg210, &fotg210->regs->command);
+	temp &= ~(CMD_RUN | CMD_IAAD);
+	fotg210_writel(fotg210, temp, &fotg210->regs->command);
+
+	spin_unlock_irq(&fotg210->lock);
+	synchronize_irq(fotg210_to_hcd(fotg210)->irq);
+
+	return handshake(fotg210, &fotg210->regs->status,
+			STS_HALT, STS_HALT, 16 * 125);
+}
+
+/* Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_reset(struct fotg210_hcd *fotg210)
+{
+	int retval;
+	u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
+
+	/* If the EHCI debug controller is active, special care must be
+	 * taken before and after a host controller reset
+	 */
+	if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
+		fotg210->debug = NULL;
+
+	command |= CMD_RESET;
+	dbg_cmd(fotg210, "reset", command);
+	fotg210_writel(fotg210, command, &fotg210->regs->command);
+	fotg210->rh_state = FOTG210_RH_HALTED;
+	fotg210->next_statechange = jiffies;
+	retval = handshake(fotg210, &fotg210->regs->command,
+			CMD_RESET, 0, 250 * 1000);
+
+	if (retval)
+		return retval;
+
+	if (fotg210->debug)
+		dbgp_external_startup(fotg210_to_hcd(fotg210));
+
+	fotg210->port_c_suspend = fotg210->suspended_ports =
+			fotg210->resuming_ports = 0;
+	return retval;
+}
+
+/* Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_quiesce(struct fotg210_hcd *fotg210)
+{
+	u32 temp;
+
+	if (fotg210->rh_state != FOTG210_RH_RUNNING)
+		return;
+
+	/* wait for any schedule enables/disables to take effect */
+	temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
+	handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
+			16 * 125);
+
+	/* then disable anything that's still active */
+	spin_lock_irq(&fotg210->lock);
+	fotg210->command &= ~(CMD_ASE | CMD_PSE);
+	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+	spin_unlock_irq(&fotg210->lock);
+
+	/* hardware can take 16 microframes to turn off ... */
+	handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
+			16 * 125);
+}
+
+static void end_unlink_async(struct fotg210_hcd *fotg210);
+static void unlink_empty_async(struct fotg210_hcd *fotg210);
+static void fotg210_work(struct fotg210_hcd *fotg210);
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+			      struct fotg210_qh *qh);
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/* Set a bit in the USBCMD register */
+static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+	fotg210->command |= bit;
+	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+	/* unblock posted write */
+	fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+	fotg210->command &= ~bit;
+	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+	/* unblock posted write */
+	fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/* EHCI timer support...  Now using hrtimers.
+ *
+ * Lots of different events are triggered from fotg210->hrtimer.  Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fotg210->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fotg210->next_hrtimer_event.  Whenever fotg210->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay.  This doesn't
+ * matter, because none of the events are especially time-critical.  The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay.  In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/* Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
+ */
+static unsigned event_delays_ns[] = {
+	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_ASS */
+	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_PSS */
+	1 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_POLL_DEAD */
+	1125 * NSEC_PER_USEC,	/* FOTG210_HRTIMER_UNLINK_INTR */
+	2 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_FREE_ITDS */
+	6 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_ASYNC_UNLINKS */
+	10 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_IAA_WATCHDOG */
+	10 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_DISABLE_PERIODIC */
+	15 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_DISABLE_ASYNC */
+	100 * NSEC_PER_MSEC,	/* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
+		bool resched)
+{
+	ktime_t *timeout = &fotg210->hr_timeouts[event];
+
+	if (resched)
+		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
+	fotg210->enabled_hrtimer_events |= (1 << event);
+
+	/* Track only the lowest-numbered pending event */
+	if (event < fotg210->next_hrtimer_event) {
+		fotg210->next_hrtimer_event = event;
+		hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
+				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+	}
+}
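+
+/*
+ * Worked example of the scheme above: if only the 100 ms IO_WATCHDOG
+ * event is pending and FOTG210_HRTIMER_POLL_ASS (1 ms) is then enabled,
+ * POLL_ASS is the lower-numbered event, so the hrtimer is reprogrammed
+ * to the 1 ms timeout; the watchdog stays set in enabled_hrtimer_events
+ * and is handled on a later run of the timer routine.
+ */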
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
+{
+	unsigned actual, want;
+
+	/* Don't enable anything if the controller isn't running (e.g., died) */
+	if (fotg210->rh_state != FOTG210_RH_RUNNING)
+		return;
+
+	want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
+	actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
+
+	if (want != actual) {
+
+		/* Poll again later, but give up after about 20 ms */
+		if (fotg210->ASS_poll_count++ < 20) {
+			fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
+					true);
+			return;
+		}
+		fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
+				want, actual);
+	}
+	fotg210->ASS_poll_count = 0;
+
+	/* The status is up-to-date; restart or stop the schedule as needed */
+	if (want == 0) {	/* Stopped */
+		if (fotg210->async_count > 0)
+			fotg210_set_command_bit(fotg210, CMD_ASE);
+
+	} else {		/* Running */
+		if (fotg210->async_count == 0) {
+
+			/* Turn off the schedule after a while */
+			fotg210_enable_event(fotg210,
+					FOTG210_HRTIMER_DISABLE_ASYNC,
+					true);
+		}
+	}
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
+{
+	fotg210_clear_command_bit(fotg210, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
+{
+	unsigned actual, want;
+
+	/* Don't do anything if the controller isn't running (e.g., died) */
+	if (fotg210->rh_state != FOTG210_RH_RUNNING)
+		return;
+
+	want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
+	actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
+
+	if (want != actual) {
+
+		/* Poll again later, but give up after about 20 ms */
+		if (fotg210->PSS_poll_count++ < 20) {
+			fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
+					true);
+			return;
+		}
+		fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+				want, actual);
+	}
+	fotg210->PSS_poll_count = 0;
+
+	/* The status is up-to-date; restart or stop the schedule as needed */
+	if (want == 0) {	/* Stopped */
+		if (fotg210->periodic_count > 0)
+			fotg210_set_command_bit(fotg210, CMD_PSE);
+
+	} else {		/* Running */
+		if (fotg210->periodic_count == 0) {
+
+			/* Turn off the schedule after a while */
+			fotg210_enable_event(fotg210,
+					FOTG210_HRTIMER_DISABLE_PERIODIC,
+					true);
+		}
+	}
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
+{
+	fotg210_clear_command_bit(fotg210, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
+{
+	if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
+
+		/* Give up after a few milliseconds */
+		if (fotg210->died_poll_count++ < 5) {
+			/* Try again later */
+			fotg210_enable_event(fotg210,
+					FOTG210_HRTIMER_POLL_DEAD, true);
+			return;
+		}
+		fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
+	}
+
+	/* Clean up the mess */
+	fotg210->rh_state = FOTG210_RH_HALTED;
+	fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+	fotg210_work(fotg210);
+	end_unlink_async(fotg210);
+
+	/* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
+{
+	bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+
+	/*
+	 * Process all the QHs on the intr_unlink list that were added
+	 * before the current unlink cycle began.  The list is in
+	 * temporal order, so stop when we reach the first entry in the
+	 * current cycle.  But if the root hub isn't running then
+	 * process all the QHs on the list.
+	 */
+	fotg210->intr_unlinking = true;
+	while (fotg210->intr_unlink) {
+		struct fotg210_qh *qh = fotg210->intr_unlink;
+
+		if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
+			break;
+		fotg210->intr_unlink = qh->unlink_next;
+		qh->unlink_next = NULL;
+		end_unlink_intr(fotg210, qh);
+	}
+
+	/* Handle remaining entries later */
+	if (fotg210->intr_unlink) {
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+				true);
+		++fotg210->intr_unlink_cycle;
+	}
+	fotg210->intr_unlinking = false;
+}
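+
+/*
+ * Example of the cycle counting above (illustration only): a QH added
+ * to intr_unlink before this handler ran is processed immediately,
+ * while one added after intr_unlink_cycle was bumped waits for the
+ * next FOTG210_HRTIMER_UNLINK_INTR expiry, giving the hardware a full
+ * timer period to stop using it.
+ */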
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fotg210_hcd *fotg210)
+{
+	if (!(fotg210->enabled_hrtimer_events &
+			BIT(FOTG210_HRTIMER_FREE_ITDS))) {
+		fotg210->last_itd_to_free = list_entry(
+				fotg210->cached_itd_list.prev,
+				struct fotg210_itd, itd_list);
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
+	}
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fotg210_hcd *fotg210)
+{
+	struct fotg210_itd *itd, *n;
+
+	if (fotg210->rh_state < FOTG210_RH_RUNNING)
+		fotg210->last_itd_to_free = NULL;
+
+	list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
+		list_del(&itd->itd_list);
+		dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
+		if (itd == fotg210->last_itd_to_free)
+			break;
+	}
+
+	if (!list_empty(&fotg210->cached_itd_list))
+		start_free_itds(fotg210);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
+{
+	if (fotg210->rh_state != FOTG210_RH_RUNNING)
+		return;
+
+	/*
+	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
+	 * So we need this watchdog, but must protect it against both
+	 * (a) SMP races against real IAA firing and retriggering, and
+	 * (b) clean HC shutdown, when IAA watchdog was pending.
+	 */
+	if (fotg210->async_iaa) {
+		u32 cmd, status;
+
+		/* If we get here, IAA is *REALLY* late.  It's barely
+		 * conceivable that the system is so busy that CMD_IAAD
+		 * is still legitimately set, so let's be sure it's
+		 * clear before we read STS_IAA.  (The HC should clear
+		 * CMD_IAAD when it sets STS_IAA.)
+		 */
+		cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+
+		/*
+		 * If IAA is set here it either legitimately triggered
+		 * after the watchdog timer expired (_way_ late, so we'll
+		 * still count it as lost) ... or a silicon erratum:
+		 * - VIA seems to set IAA without triggering the IRQ;
+		 * - IAAD potentially cleared without setting IAA.
+		 */
+		status = fotg210_readl(fotg210, &fotg210->regs->status);
+		if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+			COUNT(fotg210->stats.lost_iaa);
+			fotg210_writel(fotg210, STS_IAA,
+					&fotg210->regs->status);
+		}
+
+		fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
+				status, cmd);
+		end_unlink_async(fotg210);
+	}
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
+{
+	/* Not needed if the controller isn't running or it's already enabled */
+	if (fotg210->rh_state != FOTG210_RH_RUNNING ||
+			(fotg210->enabled_hrtimer_events &
+			BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
+		return;
+
+	/*
+	 * Isochronous transfers always need the watchdog.
+	 * For other sorts we use it only if the flag is set.
+	 */
+	if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
+			fotg210->async_count + fotg210->intr_count > 0))
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
+				true);
+}
+
+
+/* Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fotg210_hrtimer_event in fotg210.h.
+ */
+static void (*event_handlers[])(struct fotg210_hcd *) = {
+	fotg210_poll_ASS,			/* FOTG210_HRTIMER_POLL_ASS */
+	fotg210_poll_PSS,			/* FOTG210_HRTIMER_POLL_PSS */
+	fotg210_handle_controller_death,	/* FOTG210_HRTIMER_POLL_DEAD */
+	fotg210_handle_intr_unlinks,	/* FOTG210_HRTIMER_UNLINK_INTR */
+	end_free_itds,			/* FOTG210_HRTIMER_FREE_ITDS */
+	unlink_empty_async,		/* FOTG210_HRTIMER_ASYNC_UNLINKS */
+	fotg210_iaa_watchdog,		/* FOTG210_HRTIMER_IAA_WATCHDOG */
+	fotg210_disable_PSE,		/* FOTG210_HRTIMER_DISABLE_PERIODIC */
+	fotg210_disable_ASE,		/* FOTG210_HRTIMER_DISABLE_ASYNC */
+	fotg210_work,			/* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
+{
+	struct fotg210_hcd *fotg210 =
+			container_of(t, struct fotg210_hcd, hrtimer);
+	ktime_t now;
+	unsigned long events;
+	unsigned long flags;
+	unsigned e;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+
+	events = fotg210->enabled_hrtimer_events;
+	fotg210->enabled_hrtimer_events = 0;
+	fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+	/*
+	 * Check each pending event.  If its time has expired, handle
+	 * the event; otherwise re-enable it.
+	 */
+	now = ktime_get();
+	for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
+		if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
+			event_handlers[e](fotg210);
+		else
+			fotg210_enable_event(fotg210, e, false);
+	}
+
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return HRTIMER_NORESTART;
+}
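+
+/*
+ * Sketch of the dispatch above (illustration only): with events ==
+ * 0x5, i.e. POLL_ASS (bit 0) and POLL_DEAD (bit 2) pending, the loop
+ * visits bits 0 and 2 in order; an expired event runs its handler, an
+ * unexpired one is re-enabled with its original deadline (resched ==
+ * false), and anything a handler re-arms is picked up at the next
+ * expiry.
+ */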
+
+#define fotg210_bus_suspend NULL
+#define fotg210_bus_resume NULL
+
+static int check_reset_complete(struct fotg210_hcd *fotg210, int index,
+		u32 __iomem *status_reg, int port_status)
+{
+	if (!(port_status & PORT_CONNECT))
+		return port_status;
+
+	/* if reset finished and it's still not enabled -- handoff */
+	if (!(port_status & PORT_PE))
+		/* with integrated TT, there's nobody to hand it to! */
+		fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n",
+				index + 1);
+	else
+		fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
+				index + 1);
+
+	return port_status;
+}
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	u32 temp, status;
+	u32 mask;
+	int retval = 1;
+	unsigned long flags;
+
+	/* init status to no-changes */
+	buf[0] = 0;
+
+	/* Inform the core about resumes-in-progress by returning
+	 * a non-zero value even if there are no status changes.
+	 */
+	status = fotg210->resuming_ports;
+
+	mask = PORT_CSC | PORT_PEC;
+	/* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
+
+	/* no hub change reports (bit 0) for now (power, ...) */
+
+	/* port N changes (bit N)? */
+	spin_lock_irqsave(&fotg210->lock, flags);
+
+	temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
+
+	/*
+	 * Return status information even for ports with OWNER set.
+	 * Otherwise hub_wq wouldn't see the disconnect event when a
+	 * high-speed device is switched over to the companion
+	 * controller by the user.
+	 */
+
+	if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) ||
+			(fotg210->reset_done[0] &&
+			time_after_eq(jiffies, fotg210->reset_done[0]))) {
+		buf[0] |= 1 << 1;
+		status = STS_PCD;
+	}
+	/* FIXME autosuspend idle root hubs */
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return status ? retval : 0;
+}
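+
+/*
+ * Worked example (illustration only): the status-change bitmap uses
+ * bit 0 for the hub itself and bit N for port N, so a change on this
+ * root hub's single port yields buf[0] == 0x02 and a nonzero return,
+ * prompting hub_wq to follow up with GetPortStatus.
+ */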
+
+static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210,
+		struct usb_hub_descriptor *desc)
+{
+	int ports = HCS_N_PORTS(fotg210->hcs_params);
+	u16 temp;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bPwrOn2PwrGood = 10;	/* fotg210 1.0, 2.3.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+	temp = HUB_CHAR_INDV_PORT_OCPM;	/* per-port overcurrent reporting */
+	temp |= HUB_CHAR_NO_LPSM;	/* no power switching */
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+}
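+
+/*
+ * Worked example (illustration only): for a single-port root hub the
+ * arithmetic above gives temp = 1 + 1 / 8 = 1 bitmap byte and
+ * bDescLength = 7 + 2 * 1 = 9 bytes: a 7-byte header, one all-zero
+ * DeviceRemovable byte and one all-ones legacy PortPwrCtrlMask byte.
+ */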
+
+static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	int ports = HCS_N_PORTS(fotg210->hcs_params);
+	u32 __iomem *status_reg = &fotg210->regs->port_status;
+	u32 temp, temp1, status;
+	unsigned long flags;
+	int retval = 0;
+	unsigned selector;
+
+	/*
+	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+	 * (track current state ourselves) ... blink for diagnostics,
+	 * power, "this is the one", etc.  EHCI spec supports this.
+	 */
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = fotg210_readl(fotg210, status_reg);
+		temp &= ~PORT_RWC_BITS;
+
+		/*
+		 * Even if OWNER is set, meaning the port is owned by the
+		 * companion controller, hub_wq needs to be able to clear
+		 * the port-change status bits (especially
+		 * USB_PORT_STAT_C_CONNECTION).
+		 */
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (temp & PORT_RESET)
+				goto error;
+			if (!(temp & PORT_SUSPEND))
+				break;
+			if ((temp & PORT_PE) == 0)
+				goto error;
+
+			/* resume signaling for 20 msec */
+			fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+			fotg210->reset_done[wIndex] = jiffies
+					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			clear_bit(wIndex, &fotg210->port_c_suspend);
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			fotg210_writel(fotg210, temp | OTGISR_OVC,
+					&fotg210->regs->otgisr);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* GetPortStatus clears reset */
+			break;
+		default:
+			goto error;
+		}
+		fotg210_readl(fotg210, &fotg210->regs->command);
+		break;
+	case GetHubDescriptor:
+		fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
+				buf);
+		break;
+	case GetHubStatus:
+		/* no hub-wide feature/status flags */
+		memset(buf, 0, 4);
+		/*cpu_to_le32s ((u32 *) buf); */
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		status = 0;
+		temp = fotg210_readl(fotg210, status_reg);
+
+		/* wPortChange bits */
+		if (temp & PORT_CSC)
+			status |= USB_PORT_STAT_C_CONNECTION << 16;
+		if (temp & PORT_PEC)
+			status |= USB_PORT_STAT_C_ENABLE << 16;
+
+		temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+		if (temp1 & OTGISR_OVC)
+			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+		/* whoever resumes must GetPortStatus to complete it!! */
+		if (temp & PORT_RESUME) {
+
+			/* Remote Wakeup received? */
+			if (!fotg210->reset_done[wIndex]) {
+				/* resume signaling for 20 msec */
+				fotg210->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+				/* check the port again */
+				mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
+						fotg210->reset_done[wIndex]);
+			}
+
+			/* resume completed? */
+			else if (time_after_eq(jiffies,
+					fotg210->reset_done[wIndex])) {
+				clear_bit(wIndex, &fotg210->suspended_ports);
+				set_bit(wIndex, &fotg210->port_c_suspend);
+				fotg210->reset_done[wIndex] = 0;
+
+				/* stop resume signaling */
+				temp = fotg210_readl(fotg210, status_reg);
+				fotg210_writel(fotg210, temp &
+						~(PORT_RWC_BITS | PORT_RESUME),
+						status_reg);
+				clear_bit(wIndex, &fotg210->resuming_ports);
+				retval = handshake(fotg210, status_reg,
+						PORT_RESUME, 0, 2000);/* 2ms */
+				if (retval != 0) {
+					fotg210_err(fotg210,
+							"port %d resume error %d\n",
+							wIndex + 1, retval);
+					goto error;
+				}
+				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+			}
+		}
+
+		/* whoever resets must GetPortStatus to complete it!! */
+		if ((temp & PORT_RESET) && time_after_eq(jiffies,
+				fotg210->reset_done[wIndex])) {
+			status |= USB_PORT_STAT_C_RESET << 16;
+			fotg210->reset_done[wIndex] = 0;
+			clear_bit(wIndex, &fotg210->resuming_ports);
+
+			/* force reset to complete */
+			fotg210_writel(fotg210,
+					temp & ~(PORT_RWC_BITS | PORT_RESET),
+					status_reg);
+			/* REVISIT:  some hardware needs 550+ usec to clear
+			 * this bit; seems too long to spin routinely...
+			 */
+			retval = handshake(fotg210, status_reg,
+					PORT_RESET, 0, 1000);
+			if (retval != 0) {
+				fotg210_err(fotg210, "port %d reset error %d\n",
+						wIndex + 1, retval);
+				goto error;
+			}
+
+			/* see what we found out */
+			temp = check_reset_complete(fotg210, wIndex, status_reg,
+					fotg210_readl(fotg210, status_reg));
+		}
+
+		if (!(temp & (PORT_RESUME|PORT_RESET))) {
+			fotg210->reset_done[wIndex] = 0;
+			clear_bit(wIndex, &fotg210->resuming_ports);
+		}
+
+		/* transfer dedicated ports to the companion hc */
+		if ((temp & PORT_CONNECT) &&
+				test_bit(wIndex, &fotg210->companion_ports)) {
+			temp &= ~PORT_RWC_BITS;
+			fotg210_writel(fotg210, temp, status_reg);
+			fotg210_dbg(fotg210, "port %d --> companion\n",
+					wIndex + 1);
+			temp = fotg210_readl(fotg210, status_reg);
+		}
+
+		/*
+		 * Even if OWNER is set, there's no harm letting hub_wq
+		 * see the wPortStatus values (they should all be 0 except
+		 * for PORT_POWER anyway).
+		 */
+
+		if (temp & PORT_CONNECT) {
+			status |= USB_PORT_STAT_CONNECTION;
+			status |= fotg210_port_speed(fotg210, temp);
+		}
+		if (temp & PORT_PE)
+			status |= USB_PORT_STAT_ENABLE;
+
+		/* maybe the port was unsuspended without our knowledge */
+		if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+			status |= USB_PORT_STAT_SUSPEND;
+		} else if (test_bit(wIndex, &fotg210->suspended_ports)) {
+			clear_bit(wIndex, &fotg210->suspended_ports);
+			clear_bit(wIndex, &fotg210->resuming_ports);
+			fotg210->reset_done[wIndex] = 0;
+			if (temp & PORT_PE)
+				set_bit(wIndex, &fotg210->port_c_suspend);
+		}
+
+		temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+		if (temp1 & OTGISR_OVC)
+			status |= USB_PORT_STAT_OVERCURRENT;
+		if (temp & PORT_RESET)
+			status |= USB_PORT_STAT_RESET;
+		if (test_bit(wIndex, &fotg210->port_c_suspend))
+			status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+		if (status & ~0xffff)	/* only if wPortChange is interesting */
+			dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
+		put_unaligned_le32(status, buf);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		selector = wIndex >> 8;
+		wIndex &= 0xff;
+
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = fotg210_readl(fotg210, status_reg);
+		temp &= ~PORT_RWC_BITS;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if ((temp & PORT_PE) == 0
+					|| (temp & PORT_RESET) != 0)
+				goto error;
+
+			/* After the above check the port must be connected.
+			 * Setting the appropriate bit could put the PHY into
+			 * low-power mode if the hostpc feature is available.
+			 */
+			fotg210_writel(fotg210, temp | PORT_SUSPEND,
+					status_reg);
+			set_bit(wIndex, &fotg210->suspended_ports);
+			break;
+		case USB_PORT_FEAT_RESET:
+			if (temp & PORT_RESUME)
+				goto error;
+			/* line status bits may report this as low speed,
+			 * which can be fine if this root hub has a
+			 * transaction translator built in.
+			 */
+			fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
+			temp |= PORT_RESET;
+			temp &= ~PORT_PE;
+
+			/*
+			 * caller must wait, then call GetPortStatus
+			 * usb 2.0 spec says 50 ms resets on root
+			 */
+			fotg210->reset_done[wIndex] = jiffies
+					+ msecs_to_jiffies(50);
+			fotg210_writel(fotg210, temp, status_reg);
+			break;
+
+		/* For downstream facing ports (these):  one hub port is put
+		 * into test mode according to USB2 11.24.2.13, then the hub
+		 * must be reset (which for root hub now means rmmod+modprobe,
+		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+		 * about the EHCI-specific stuff.
+		 */
+		case USB_PORT_FEAT_TEST:
+			if (!selector || selector > 5)
+				goto error;
+			spin_unlock_irqrestore(&fotg210->lock, flags);
+			fotg210_quiesce(fotg210);
+			spin_lock_irqsave(&fotg210->lock, flags);
+
+			/* Put all enabled ports into suspend */
+			temp = fotg210_readl(fotg210, status_reg) &
+				~PORT_RWC_BITS;
+			if (temp & PORT_PE)
+				fotg210_writel(fotg210, temp | PORT_SUSPEND,
+						status_reg);
+
+			spin_unlock_irqrestore(&fotg210->lock, flags);
+			fotg210_halt(fotg210);
+			spin_lock_irqsave(&fotg210->lock, flags);
+
+			temp = fotg210_readl(fotg210, status_reg);
+			temp |= selector << 16;
+			fotg210_writel(fotg210, temp, status_reg);
+			break;
+
+		default:
+			goto error;
+		}
+		fotg210_readl(fotg210, &fotg210->regs->command);
+		break;
+
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return retval;
+}
+
+static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
+		int portnum)
+{
+	return;
+}
+
+static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
+		int portnum)
+{
+	return 0;
+}
+
+/* There are basically three types of memory:
+ *	- data used only by the HCD ... kmalloc is fine
+ *	- async and periodic schedules, shared by HC and HCD ... these
+ *	  need to use dma_pool or dma_alloc_coherent
+ *	- driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
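+
+/*
+ * A minimal sketch of the dma_pool lifecycle used for the second
+ * category (hardware-visible records), mirroring the helpers below;
+ * the names are generic, not part of this driver:
+ *
+ *	pool = dma_pool_create("name", dev, size, align, boundary);
+ *	vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
+ *	...	// hand "dma" to the controller, use "vaddr" from the CPU
+ *	dma_pool_free(pool, vaddr, dma);
+ *	dma_pool_destroy(pool);
+ */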
+
+/* Allocate the key transfer structures from the previously allocated pool */
+static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
+		struct fotg210_qtd *qtd, dma_addr_t dma)
+{
+	memset(qtd, 0, sizeof(*qtd));
+	qtd->qtd_dma = dma;
+	qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+	qtd->hw_next = FOTG210_LIST_END(fotg210);
+	qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+	INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
+		gfp_t flags)
+{
+	struct fotg210_qtd *qtd;
+	dma_addr_t dma;
+
+	qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
+	if (qtd != NULL)
+		fotg210_qtd_init(fotg210, qtd, dma);
+
+	return qtd;
+}
+
+static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
+		struct fotg210_qtd *qtd)
+{
+	dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	/* clean qtds first, and know this is not linked */
+	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+		fotg210_dbg(fotg210, "unused qh not empty!\n");
+		BUG();
+	}
+	if (qh->dummy)
+		fotg210_qtd_free(fotg210, qh->dummy);
+	dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+	kfree(qh);
+}
+
+static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
+		gfp_t flags)
+{
+	struct fotg210_qh *qh;
+	dma_addr_t dma;
+
+	qh = kzalloc(sizeof(*qh), flags);	/* use the caller's gfp flags */
+	if (!qh)
+		goto done;
+	qh->hw = dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
+	if (!qh->hw)
+		goto fail;
+	qh->qh_dma = dma;
+	INIT_LIST_HEAD(&qh->qtd_list);
+
+	/* dummy td enables safe urb queuing */
+	qh->dummy = fotg210_qtd_alloc(fotg210, flags);
+	if (qh->dummy == NULL) {
+		fotg210_dbg(fotg210, "no dummy td\n");
+		goto fail1;
+	}
+done:
+	return qh;
+fail1:
+	dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+fail:
+	kfree(qh);
+	return NULL;
+}
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialization and cleanup code.
+ */
+
+static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
+{
+	if (fotg210->async)
+		qh_destroy(fotg210, fotg210->async);
+	fotg210->async = NULL;
+
+	if (fotg210->dummy)
+		qh_destroy(fotg210, fotg210->dummy);
+	fotg210->dummy = NULL;
+
+	/* DMA consistent memory and pools */
+	dma_pool_destroy(fotg210->qtd_pool);
+	fotg210->qtd_pool = NULL;
+
+	dma_pool_destroy(fotg210->qh_pool);
+	fotg210->qh_pool = NULL;
+
+	dma_pool_destroy(fotg210->itd_pool);
+	fotg210->itd_pool = NULL;
+
+	if (fotg210->periodic)
+		dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
+				fotg210->periodic_size * sizeof(u32),
+				fotg210->periodic, fotg210->periodic_dma);
+	fotg210->periodic = NULL;
+
+	/* shadow periodic table */
+	kfree(fotg210->pshadow);
+	fotg210->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
+{
+	int i;
+
+	/* QTDs for control/bulk/intr transfers */
+	fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
+			fotg210_to_hcd(fotg210)->self.controller,
+			sizeof(struct fotg210_qtd),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!fotg210->qtd_pool)
+		goto fail;
+
+	/* QHs for control/bulk/intr transfers */
+	fotg210->qh_pool = dma_pool_create("fotg210_qh",
+			fotg210_to_hcd(fotg210)->self.controller,
+			sizeof(struct fotg210_qh_hw),
+			32 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!fotg210->qh_pool)
+		goto fail;
+
+	fotg210->async = fotg210_qh_alloc(fotg210, flags);
+	if (!fotg210->async)
+		goto fail;
+
+	/* ITD for high speed ISO transfers */
+	fotg210->itd_pool = dma_pool_create("fotg210_itd",
+			fotg210_to_hcd(fotg210)->self.controller,
+			sizeof(struct fotg210_itd),
+			64 /* byte alignment (for hw parts) */,
+			4096 /* can't cross 4K */);
+	if (!fotg210->itd_pool)
+		goto fail;
+
+	/* Hardware periodic table */
+	fotg210->periodic = (__le32 *)
+		dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
+				fotg210->periodic_size * sizeof(__le32),
+				&fotg210->periodic_dma, 0);
+	if (fotg210->periodic == NULL)
+		goto fail;
+
+	for (i = 0; i < fotg210->periodic_size; i++)
+		fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
+
+	/* software shadow of hardware table */
+	fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
+			flags);
+	if (fotg210->pshadow != NULL)
+		return 0;
+
+fail:
+	fotg210_dbg(fotg210, "couldn't init memory\n");
+	fotg210_mem_cleanup(fotg210);
+	return -ENOMEM;
+}
+
+/* EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number).  We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling.  Performance improvements can be
+ * an ongoing challenge.  That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries.  TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
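+
+/*
+ * Rough shape of the async schedule described above (sketch only):
+ *
+ *	fotg210->async --> QH (ep A) --> QH (ep B) --> back to async head
+ *	                    |
+ *	                    +--> qtd --> qtd --> dummy qtd
+ *
+ * The controller walks the circular QH ring; each QH works through its
+ * own qtd list, which always ends in the per-QH dummy that
+ * qh_append_tds() later swaps with real work.
+ */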
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
+		dma_addr_t buf, size_t len, int token, int maxpacket)
+{
+	int i, count;
+	u64 addr = buf;
+
+	/* one buffer entry per 4K ... first might be short or unaligned */
+	qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
+	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
+	if (likely(len < count))		/* ... iff needed */
+		count = len;
+	else {
+		buf +=  0x1000;
+		buf &= ~0x0fff;
+
+		/* per-qtd limit: from 16K to 20K (best alignment) */
+		for (i = 1; count < len && i < 5; i++) {
+			addr = buf;
+			qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
+			qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
+					(u32)(addr >> 32));
+			buf += 0x1000;
+			if ((count + 0x1000) < len)
+				count += 0x1000;
+			else
+				count = len;
+		}
+
+		/* short packets may only terminate transfers */
+		if (count != len)
+			count -= (count % maxpacket);
+	}
+	qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
+	qtd->length = count;
+
+	return count;
+}
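+
+/*
+ * Worked example for qtd_fill() (illustration only): with buf ==
+ * 0x12345800 and len == 20480, the first buffer entry covers
+ * 0x1000 - 0x800 = 2048 bytes and the remaining four entries cover
+ * 4096 bytes each, so count == 18432.  That is a multiple of a
+ * 512-byte maxpacket, nothing is trimmed, and the caller queues the
+ * final 2048 bytes in another qtd.
+ */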
+
+static inline void qh_update(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh, struct fotg210_qtd *qtd)
+{
+	struct fotg210_qh_hw *hw = qh->hw;
+
+	/* writes to an active overlay are unsafe */
+	BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+	hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+	hw->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+	/* Except for control endpoints, we make hardware maintain data
+	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+	 * ever clear it.
+	 */
+	if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
+		unsigned is_out, epnum;
+
+		is_out = qh->is_out;
+		epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
+		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+			hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
+			usb_settoggle(qh->dev, epnum, is_out, 1);
+		}
+	}
+
+	hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	struct fotg210_qtd *qtd;
+
+	if (list_empty(&qh->qtd_list))
+		qtd = qh->dummy;
+	else {
+		qtd = list_entry(qh->qtd_list.next,
+				struct fotg210_qtd, qtd_list);
+		/*
+		 * first qtd may already be partially processed.
+		 * If we come here during unlink, the QH overlay region
+		 * might have reference to the just unlinked qtd. The
+		 * qtd is updated in qh_completions(). Update the QH
+		 * overlay here.
+		 */
+		if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
+			qh->hw->hw_qtd_next = qtd->hw_next;
+			qtd = NULL;
+		}
+	}
+
+	if (qtd)
+		qh_update(fotg210, qh, qtd);
+}
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	struct fotg210_qh *qh = ep->hcpriv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	qh->clearing_tt = 0;
+	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+			&& fotg210->rh_state == FOTG210_RH_RUNNING)
+		qh_link_async(fotg210, qh);
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh, struct urb *urb, u32 token)
+{
+
+	/* If an async split transaction gets an error or is unlinked,
+	 * the TT buffer may be left in an indeterminate state.  We
+	 * have to clear the TT buffer.
+	 *
+	 * Note: this routine is never called for Isochronous transfers.
+	 */
+	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+		struct usb_device *tt = urb->dev->tt->hub;
+
+		dev_dbg(&tt->dev,
+				"clear tt buffer port %d, a%d ep%d t%08x\n",
+				urb->dev->ttport, urb->dev->devnum,
+				usb_pipeendpoint(urb->pipe), token);
+
+		if (urb->dev->tt->hub !=
+				fotg210_to_hcd(fotg210)->self.root_hub) {
+			if (usb_hub_clear_tt_buffer(urb) == 0)
+				qh->clearing_tt = 1;
+		}
+	}
+}
+
+static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb,
+		size_t length, u32 token)
+{
+	int status = -EINPROGRESS;
+
+	/* count IN/OUT bytes, not SETUP (even short packets) */
+	if (likely(QTD_PID(token) != 2))
+		urb->actual_length += length - QTD_LENGTH(token);
+
+	/* don't modify error codes */
+	if (unlikely(urb->unlinked))
+		return status;
+
+	/* force cleanup after short read; not always an error */
+	if (unlikely(IS_SHORT_READ(token)))
+		status = -EREMOTEIO;
+
+	/* serious "can't proceed" faults reported by the hardware */
+	if (token & QTD_STS_HALT) {
+		if (token & QTD_STS_BABBLE) {
+			/* FIXME "must" disable babbling device's port too */
+			status = -EOVERFLOW;
+		/* CERR nonzero + halt --> stall */
+		} else if (QTD_CERR(token)) {
+			status = -EPIPE;
+
+		/* In theory, more than one of the following bits can be set
+		 * since they are sticky and the transaction is retried.
+		 * Which to test first is rather arbitrary.
+		 */
+		} else if (token & QTD_STS_MMF) {
+			/* fs/ls interrupt xfer missed the complete-split */
+			status = -EPROTO;
+		} else if (token & QTD_STS_DBE) {
+			status = (QTD_PID(token) == 1) /* IN ? */
+				? -ENOSR  /* hc couldn't read data */
+				: -ECOMM; /* hc couldn't write data */
+		} else if (token & QTD_STS_XACT) {
+			/* timeout, bad CRC, wrong PID, etc */
+			fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
+					urb->dev->devpath,
+					usb_pipeendpoint(urb->pipe),
+					usb_pipein(urb->pipe) ? "in" : "out");
+			status = -EPROTO;
+		} else {	/* unknown */
+			status = -EPROTO;
+		}
+
+		fotg210_dbg(fotg210,
+				"dev%d ep%d%s qtd token %08x --> status %d\n",
+				usb_pipedevice(urb->pipe),
+				usb_pipeendpoint(urb->pipe),
+				usb_pipein(urb->pipe) ? "in" : "out",
+				token, status);
+	}
+
+	return status;
+}
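+
+/*
+ * Summary of the mapping above (sketch): babble -> -EOVERFLOW; halt
+ * with CERR != 0 -> -EPIPE (stall); missed microframe -> -EPROTO;
+ * data buffer error -> -ENOSR (IN) or -ECOMM (OUT); transaction error
+ * after retries -> -EPROTO.  A short read that isn't a fault still
+ * yields -EREMOTEIO so callers can stop the queue when
+ * URB_SHORT_NOT_OK demands it.
+ */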
+
+static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb,
+		int status)
+__releases(fotg210->lock)
+__acquires(fotg210->lock)
+{
+	if (likely(urb->hcpriv != NULL)) {
+		struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
+
+		/* S-mask in a QH means it's an interrupt urb */
+		if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
+
+			/* ... update hc-wide periodic stats (for usbfs) */
+			fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
+		}
+	}
+
+	if (unlikely(urb->unlinked)) {
+		COUNT(fotg210->stats.unlink);
+	} else {
+		/* report non-error and short read status as zero */
+		if (status == -EINPROGRESS || status == -EREMOTEIO)
+			status = 0;
+		COUNT(fotg210->stats.complete);
+	}
+
+#ifdef FOTG210_URB_TRACE
+	fotg210_dbg(fotg210,
+			"%s %s urb %p ep%d%s status %d len %d/%d\n",
+			__func__, urb->dev->devpath, urb,
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			status,
+			urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+	/* complete() can reenter this HCD */
+	usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+	spin_unlock(&fotg210->lock);
+	usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
+	spin_lock(&fotg210->lock);
+}
+
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/* Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current.  Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned qh_completions(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh)
+{
+	struct fotg210_qtd *last, *end = qh->dummy;
+	struct fotg210_qtd *qtd, *tmp;
+	int last_status;
+	int stopped;
+	unsigned count = 0;
+	u8 state;
+	struct fotg210_qh_hw *hw = qh->hw;
+
+	if (unlikely(list_empty(&qh->qtd_list)))
+		return count;
+
+	/* completions (or tasks on other cpus) must never clobber HALT
+	 * till we've gone through and cleaned everything up, even when
+	 * they add urbs to this qh's queue or mark them for unlinking.
+	 *
+	 * NOTE:  unlinking expects to be done in queue order.
+	 *
+	 * It's a bug for qh->qh_state to be anything other than
+	 * QH_STATE_IDLE, unless our caller is scan_async() or
+	 * scan_intr().
+	 */
+	state = qh->qh_state;
+	qh->qh_state = QH_STATE_COMPLETING;
+	stopped = (state == QH_STATE_IDLE);
+
+rescan:
+	last = NULL;
+	last_status = -EINPROGRESS;
+	qh->needs_rescan = 0;
+
+	/* remove de-activated QTDs from front of queue.
+	 * after faults (including short reads), cleanup this urb
+	 * then let the queue advance.
+	 * if queue is stopped, handles unlinks.
+	 */
+	list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
+		struct urb *urb;
+		u32 token = 0;
+
+		urb = qtd->urb;
+
+		/* clean up any state from previous QTD ...*/
+		if (last) {
+			if (likely(last->urb != urb)) {
+				fotg210_urb_done(fotg210, last->urb,
+						last_status);
+				count++;
+				last_status = -EINPROGRESS;
+			}
+			fotg210_qtd_free(fotg210, last);
+			last = NULL;
+		}
+
+		/* ignore urbs submitted during completions we reported */
+		if (qtd == end)
+			break;
+
+		/* hardware copies qtd out of qh overlay */
+		rmb();
+		token = hc32_to_cpu(fotg210, qtd->hw_token);
+
+		/* always clean up qtds the hc de-activated */
+retry_xacterr:
+		if ((token & QTD_STS_ACTIVE) == 0) {
+
+			/* Report Data Buffer Error: non-fatal but useful */
+			if (token & QTD_STS_DBE)
+				fotg210_dbg(fotg210,
+					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+					urb, usb_endpoint_num(&urb->ep->desc),
+					usb_endpoint_dir_in(&urb->ep->desc)
+						? "in" : "out",
+					urb->transfer_buffer_length, qtd, qh);
+
+			/* on STALL, error, and short reads this urb must
+			 * complete and all its qtds must be recycled.
+			 */
+			if ((token & QTD_STS_HALT) != 0) {
+
+				/* retry transaction errors until we
+				 * reach the software xacterr limit
+				 */
+				if ((token & QTD_STS_XACT) &&
+						QTD_CERR(token) == 0 &&
+						++qh->xacterrs < QH_XACTERR_MAX &&
+						!urb->unlinked) {
+					fotg210_dbg(fotg210,
+						"detected XactErr len %zu/%zu retry %d\n",
+						qtd->length - QTD_LENGTH(token),
+						qtd->length,
+						qh->xacterrs);
+
+					/* reset the token in the qtd and the
+					 * qh overlay (which still contains
+					 * the qtd) so that we pick up from
+					 * where we left off
+					 */
+					token &= ~QTD_STS_HALT;
+					token |= QTD_STS_ACTIVE |
+						 (FOTG210_TUNE_CERR << 10);
+					qtd->hw_token = cpu_to_hc32(fotg210,
+							token);
+					wmb();
+					hw->hw_token = cpu_to_hc32(fotg210,
+							token);
+					goto retry_xacterr;
+				}
+				stopped = 1;
+
+			/* magic dummy for some short reads; qh won't advance.
+			 * that silicon quirk can kick in with this dummy too.
+			 *
+			 * other short reads won't stop the queue, including
+			 * control transfers (status stage handles that) or
+			 * most other single-qtd reads ... the queue stops if
+			 * URB_SHORT_NOT_OK was set so the driver submitting
+			 * the urbs could clean it up.
+			 */
+			} else if (IS_SHORT_READ(token) &&
+					!(qtd->hw_alt_next &
+					FOTG210_LIST_END(fotg210))) {
+				stopped = 1;
+			}
+
+		/* stop scanning when we reach qtds the hc is using */
+		} else if (likely(!stopped
+				&& fotg210->rh_state >= FOTG210_RH_RUNNING)) {
+			break;
+
+		/* scan the whole queue for unlinks whenever it stops */
+		} else {
+			stopped = 1;
+
+			/* cancel everything if we halt, suspend, etc */
+			if (fotg210->rh_state < FOTG210_RH_RUNNING)
+				last_status = -ESHUTDOWN;
+
+			/* this qtd is active; skip it unless a previous qtd
+			 * for its urb faulted, or its urb was canceled.
+			 */
+			else if (last_status == -EINPROGRESS && !urb->unlinked)
+				continue;
+
+			/* qh unlinked; token in overlay may be most current */
+			if (state == QH_STATE_IDLE &&
+					cpu_to_hc32(fotg210, qtd->qtd_dma)
+					== hw->hw_current) {
+				token = hc32_to_cpu(fotg210, hw->hw_token);
+
+				/* An unlink may leave an incomplete
+				 * async transaction in the TT buffer.
+				 * We have to clear it.
+				 */
+				fotg210_clear_tt_buffer(fotg210, qh, urb,
+						token);
+			}
+		}
+
+		/* unless we already know the urb's status, collect qtd status
+		 * and update count of bytes transferred.  in common short read
+		 * cases with only one data qtd (including control transfers),
+		 * queue processing won't halt.  but with two or more qtds (for
+		 * example, with a 32 KB transfer), when the first qtd gets a
+		 * short read the second must be removed by hand.
+		 */
+		if (last_status == -EINPROGRESS) {
+			last_status = qtd_copy_status(fotg210, urb,
+					qtd->length, token);
+			if (last_status == -EREMOTEIO &&
+					(qtd->hw_alt_next &
+					FOTG210_LIST_END(fotg210)))
+				last_status = -EINPROGRESS;
+
+			/* As part of low/full-speed endpoint-halt processing
+			 * we must clear the TT buffer (11.17.5).
+			 */
+			if (unlikely(last_status != -EINPROGRESS &&
+					last_status != -EREMOTEIO)) {
+				/* The TT's in some hubs malfunction when they
+				 * receive this request following a STALL (they
+				 * stop sending isochronous packets).  Since a
+				 * STALL can't leave the TT buffer in a busy
+				 * state (if you believe Figures 11-48 - 11-51
+				 * in the USB 2.0 spec), we won't clear the TT
+				 * buffer in this case.  Strictly speaking this
+				 * is a violation of the spec.
+				 */
+				if (last_status != -EPIPE)
+					fotg210_clear_tt_buffer(fotg210, qh,
+							urb, token);
+			}
+		}
+
+		/* if we're removing something not at the queue head,
+		 * patch the hardware queue pointer.
+		 */
+		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+			last = list_entry(qtd->qtd_list.prev,
+					struct fotg210_qtd, qtd_list);
+			last->hw_next = qtd->hw_next;
+		}
+
+		/* remove qtd; it's recycled after possible urb completion */
+		list_del(&qtd->qtd_list);
+		last = qtd;
+
+		/* reinit the xacterr counter for the next qtd */
+		qh->xacterrs = 0;
+	}
+
+	/* last urb's completion might still need calling */
+	if (likely(last != NULL)) {
+		fotg210_urb_done(fotg210, last->urb, last_status);
+		count++;
+		fotg210_qtd_free(fotg210, last);
+	}
+
+	/* Do we need to rescan for URBs dequeued during a giveback? */
+	if (unlikely(qh->needs_rescan)) {
+		/* If the QH is already unlinked, do the rescan now. */
+		if (state == QH_STATE_IDLE)
+			goto rescan;
+
+		/* Otherwise we have to wait until the QH is fully unlinked.
+		 * Our caller will start an unlink if qh->needs_rescan is
+		 * set.  But if an unlink has already started, nothing needs
+		 * to be done.
+		 */
+		if (state != QH_STATE_LINKED)
+			qh->needs_rescan = 0;
+	}
+
+	/* restore original state; caller must unlink or relink */
+	qh->qh_state = state;
+
+	/* be sure the hardware's done with the qh before refreshing
+	 * it after fault cleanup, or recovering from silicon wrongly
+	 * overlaying the dummy qtd (which reduces DMA chatter).
+	 */
+	if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
+		switch (state) {
+		case QH_STATE_IDLE:
+			qh_refresh(fotg210, qh);
+			break;
+		case QH_STATE_LINKED:
+			/* We won't refresh a QH that's linked (after the HC
+			 * stopped the queue).  That avoids a race:
+			 *  - HC reads first part of QH;
+			 *  - CPU updates that first part and the token;
+			 *  - HC reads rest of that QH, including token
+			 * Result:  HC gets an inconsistent image, and then
+			 * DMAs to/from the wrong memory (corrupting it).
+			 *
+			 * That should be rare for interrupt transfers,
+			 * except maybe high bandwidth ...
+			 */
+
+			/* Tell the caller to start an unlink */
+			qh->needs_rescan = 1;
+			break;
+		/* otherwise, unlink already started */
+		}
+	}
+
+	return count;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
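+
+/*
+ * Worked example (illustration only): a high-bandwidth endpoint
+ * advertising wMaxPacketSize == 0x1400 yields hb_mult() == 3
+ * transactions per microframe of max_packet() == 1024 bytes each.
+ */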
+
+/* reverse of qh_urb_transaction:  free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
+		struct list_head *head)
+{
+	struct fotg210_qtd *qtd, *temp;
+
+	list_for_each_entry_safe(qtd, temp, head, qtd_list) {
+		list_del(&qtd->qtd_list);
+		fotg210_qtd_free(fotg210, qtd);
+	}
+}
+
+/* create a list of filled qtds for this URB; won't link into qh */
+static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
+		struct urb *urb, struct list_head *head, gfp_t flags)
+{
+	struct fotg210_qtd *qtd, *qtd_prev;
+	dma_addr_t buf;
+	int len, this_sg_len, maxpacket;
+	int is_input;
+	u32 token;
+	int i;
+	struct scatterlist *sg;
+
+	/*
+	 * URBs map to sequences of QTDs:  one logical transaction
+	 */
+	qtd = fotg210_qtd_alloc(fotg210, flags);
+	if (unlikely(!qtd))
+		return NULL;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (FOTG210_TUNE_CERR << 10);
+	/* for split transactions, SplitXState initialized to zero */
+
+	len = urb->transfer_buffer_length;
+	is_input = usb_pipein(urb->pipe);
+	if (usb_pipecontrol(urb->pipe)) {
+		/* SETUP pid */
+		qtd_fill(fotg210, qtd, urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+
+		/* ... and always at least one more pid */
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = fotg210_qtd_alloc(fotg210, flags);
+		if (unlikely(!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+
+		/* for zero length DATA stages, STATUS is always IN */
+		if (len == 0)
+			token |= (1 /* "in" */ << 8);
+	}
+
+	/*
+	 * data transfer stage:  buffer setup
+	 */
+	i = urb->num_mapped_sgs;
+	if (len > 0 && i > 0) {
+		sg = urb->sg;
+		buf = sg_dma_address(sg);
+
+		/* urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), len);
+	} else {
+		sg = NULL;
+		buf = urb->transfer_dma;
+		this_sg_len = len;
+	}
+
+	if (is_input)
+		token |= (1 /* "in" */ << 8);
+	/* else it's already initted to "out" pid (0 << 8) */
+
+	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last one may be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+	for (;;) {
+		int this_qtd_len;
+
+		this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
+				maxpacket);
+		this_sg_len -= this_qtd_len;
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+
+		/*
+		 * short reads advance to a "magic" dummy instead of the next
+		 * qtd ... that forces the queue to stop, for manual cleanup.
+		 * (this will usually be overridden later.)
+		 */
+		if (is_input)
+			qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
+
+		/* qh makes control packets use qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+			token ^= QTD_TOGGLE;
+
+		if (likely(this_sg_len <= 0)) {
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+			buf = sg_dma_address(sg);
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+		}
+
+		qtd_prev = qtd;
+		qtd = fotg210_qtd_alloc(fotg210, flags);
+		if (unlikely(!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/*
+	 * unless the caller requires manual cleanup after short reads,
+	 * have the alt_next mechanism keep the queue running after the
+	 * last data qtd (the only one, for control and most other cases).
+	 */
+	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 ||
+			usb_pipecontrol(urb->pipe)))
+		qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+	/*
+	 * control requests may need a terminating data "status" ack;
+	 * other OUT ones may need a terminating short packet
+	 * (zero length).
+	 */
+	if (likely(urb->transfer_buffer_length != 0)) {
+		int one_more = 0;
+
+		if (usb_pipecontrol(urb->pipe)) {
+			one_more = 1;
+			token ^= 0x0100;	/* "in" <--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+		} else if (usb_pipeout(urb->pipe)
+				&& (urb->transfer_flags & URB_ZERO_PACKET)
+				&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = fotg210_qtd_alloc(fotg210, flags);
+			if (unlikely(!qtd))
+				goto cleanup;
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+			list_add_tail(&qtd->qtd_list, head);
+
+			/* never any data in such packets */
+			qtd_fill(fotg210, qtd, 0, 0, token, 0);
+		}
+	}
+
+	/* by default, enable interrupt on urb completion */
+	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+		qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
+	return head;
+
+cleanup:
+	qtd_list_free(fotg210, urb, head);
+	return NULL;
+}
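+
+/*
+ * Sketch of the resulting chain for an 8-byte control IN transfer
+ * (illustration only):
+ *
+ *	qtd 1: SETUP pid, 8 bytes, DATA0
+ *	qtd 2: IN pid, 8 bytes, DATA1	(data stage)
+ *	qtd 3: OUT pid, 0 bytes, DATA1	(status ack, IOC set)
+ *
+ * Bulk and interrupt URBs skip the SETUP and status qtds and simply
+ * wrap the buffer in as many data qtds as qtd_fill() requires.
+ */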
+
+/* Would be best to create all qh's from config descriptors,
+ * when each interface/altsetting is established.  Unlink
+ * any previous qh and cancel its urbs first; endpoints are
+ * implicitly reset then (data toggle too).
+ * That'd mean updating how usbcore talks to HCDs. (2.7?)
+ */
+
+
+/* Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled.  For highspeed, that's
+ * just one microframe in the s-mask.  For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
+		gfp_t flags)
+{
+	struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
+	u32 info1 = 0, info2 = 0;
+	int is_input, type;
+	int maxp = 0;
+	struct usb_tt *tt = urb->dev->tt;
+	struct fotg210_qh_hw *hw;
+
+	if (!qh)
+		return NULL;
+
+	/*
+	 * init endpoint/device data for this QH
+	 */
+	info1 |= usb_pipeendpoint(urb->pipe) << 8;
+	info1 |= usb_pipedevice(urb->pipe) << 0;
+
+	is_input = usb_pipein(urb->pipe);
+	type = usb_pipetype(urb->pipe);
+	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
+	 * acts like up to 3KB, but is built from smaller packets.
+	 */
+	if (max_packet(maxp) > 1024) {
+		fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
+				max_packet(maxp));
+		goto done;
+	}
+
+	/* Compute interrupt scheduling parameters just once, and save.
+	 * - allowing for high bandwidth, how many nsec/uframe are used?
+	 * - split transactions need a second CSPLIT uframe; same question
+	 * - splits also need a schedule gap (for full/low speed I/O)
+	 * - qh has a polling interval
+	 *
+	 * For control/bulk requests, the HC or TT handles these.
+	 */
+	if (type == PIPE_INTERRUPT) {
+		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+				is_input, 0,
+				hb_mult(maxp) * max_packet(maxp)));
+		qh->start = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->c_usecs = 0;
+			qh->gap_uf = 0;
+
+			qh->period = urb->interval >> 3;
+			if (qh->period == 0 && urb->interval != 1) {
+				/* NOTE interval 2 or 4 uframes could work.
+				 * But interval 1 scheduling is simpler, and
+				 * includes high bandwidth.
+				 */
+				urb->interval = 1;
+			} else if (qh->period > fotg210->periodic_size) {
+				qh->period = fotg210->periodic_size;
+				urb->interval = qh->period << 3;
+			}
+		} else {
+			int think_time;
+
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, maxp) / (125 * 1000);
+
+			/* FIXME this just approximates SPLIT/CSPLIT times */
+			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
+				qh->c_usecs = qh->usecs + HS_USECS(0);
+				qh->usecs = HS_USECS(1);
+			} else {		/* SPLIT+DATA, gap, CSPLIT */
+				qh->usecs += HS_USECS(1);
+				qh->c_usecs = HS_USECS(0);
+			}
+
+			think_time = tt ? tt->think_time : 0;
+			qh->tt_usecs = NS_TO_US(think_time +
+					usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, max_packet(maxp)));
+			qh->period = urb->interval;
+			if (qh->period > fotg210->periodic_size) {
+				qh->period = fotg210->periodic_size;
+				urb->interval = qh->period;
+			}
+		}
+	}
+
+	/* support for tt scheduling, and access to toggles */
+	qh->dev = urb->dev;
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= QH_LOW_SPEED;
+		/* FALL THROUGH */
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT)
+			info1 |= (FOTG210_TUNE_RL_TT << 28);
+		if (type == PIPE_CONTROL) {
+			info1 |= QH_CONTROL_EP;		/* for TT */
+			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (FOTG210_TUNE_MULT_TT << 30);
+
+		/* Some Freescale processors have an erratum in which the
+		 * port number in the queue head was 0..N-1 instead of 1..N.
+		 */
+		if (fotg210_has_fsl_portno_bug(fotg210))
+			info2 |= (urb->dev->ttport-1) << 23;
+		else
+			info2 |= urb->dev->ttport << 23;
+
+		/* set the address of the TT; for TDI's integrated
+		 * root hub tt, leave it zeroed.
+		 */
+		if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
+			info2 |= tt->hub->devnum << 16;
+
+		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+		break;
+
+	case USB_SPEED_HIGH:		/* no TT involved */
+		info1 |= QH_HIGH_SPEED;
+		if (type == PIPE_CONTROL) {
+			info1 |= (FOTG210_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
+			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
+			info2 |= (FOTG210_TUNE_MULT_HS << 30);
+		} else if (type == PIPE_BULK) {
+			info1 |= (FOTG210_TUNE_RL_HS << 28);
+			/* The USB spec says that high speed bulk endpoints
+			 * always use 512 byte maxpacket.  But some device
+			 * vendors decided to ignore that, and MSFT is happy
+			 * to help them do so.  So now people expect to use
+			 * such nonconformant devices with Linux too; sigh.
+			 */
+			info1 |= max_packet(maxp) << 16;
+			info2 |= (FOTG210_TUNE_MULT_HS << 30);
+		} else {		/* PIPE_INTERRUPT */
+			info1 |= max_packet(maxp) << 16;
+			info2 |= hb_mult(maxp) << 30;
+		}
+		break;
+	default:
+		fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
+				urb->dev->speed);
+done:
+		qh_destroy(fotg210, qh);
+		return NULL;
+	}
+
+	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+	/* init as live, toggle clear, advance to dummy */
+	qh->qh_state = QH_STATE_IDLE;
+	hw = qh->hw;
+	hw->hw_info1 = cpu_to_hc32(fotg210, info1);
+	hw->hw_info2 = cpu_to_hc32(fotg210, info2);
+	qh->is_out = !is_input;
+	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+	qh_refresh(fotg210, qh);
+	return qh;
+}
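+
+/*
+ * Worked example (illustration only): a high-speed interrupt endpoint
+ * with urb->interval == 16 microframes gets qh->period == 16 >> 3 == 2
+ * frames, while a full/low-speed endpoint keeps its interval in frames
+ * and is additionally charged SPLIT/CSPLIT bus time plus the TT's
+ * think time.
+ */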
+
+static void enable_async(struct fotg210_hcd *fotg210)
+{
+	if (fotg210->async_count++)
+		return;
+
+	/* Stop waiting to turn off the async schedule */
+	fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
+
+	/* Don't start the schedule until ASS is 0 */
+	fotg210_poll_ASS(fotg210);
+	turn_on_io_watchdog(fotg210);
+}
+
+static void disable_async(struct fotg210_hcd *fotg210)
+{
+	if (--fotg210->async_count)
+		return;
+
+	/* The async schedule and async_unlink list are supposed to be empty */
+	WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
+
+	/* Don't turn off the schedule until ASS is 1 */
+	fotg210_poll_ASS(fotg210);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue.  */
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	__hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
+	struct fotg210_qh *head;
+
+	/* Don't link a QH if there's a Clear-TT-Buffer pending */
+	if (unlikely(qh->clearing_tt))
+		return;
+
+	WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	qh_refresh(fotg210, qh);
+
+	/* splice right after start */
+	head = fotg210->async;
+	qh->qh_next = head->qh_next;
+	qh->hw->hw_next = head->hw->hw_next;
+	wmb();
+
+	head->qh_next.qh = qh;
+	head->hw->hw_next = dma;
+
+	qh->xacterrs = 0;
+	qh->qh_state = QH_STATE_LINKED;
+	/* qtd completions reported later by interrupt */
+
+	enable_async(fotg210);
+}
+
+/* For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns NULL if it can't allocate a QH when it needs one.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210,
+		struct urb *urb, struct list_head *qtd_list,
+		int epnum, void **ptr)
+{
+	struct fotg210_qh *qh = NULL;
+	__hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
+
+	qh = (struct fotg210_qh *) *ptr;
+	if (unlikely(qh == NULL)) {
+		/* can't sleep here, we have fotg210->lock... */
+		qh = qh_make(fotg210, urb, GFP_ATOMIC);
+		*ptr = qh;
+	}
+	if (likely(qh != NULL)) {
+		struct fotg210_qtd *qtd;
+
+		if (unlikely(list_empty(qtd_list)))
+			qtd = NULL;
+		else
+			qtd = list_entry(qtd_list->next, struct fotg210_qtd,
+					qtd_list);
+
+		/* control qh may need patching ... */
+		if (unlikely(epnum == 0)) {
+			/* usb_reset_device() briefly reverts to address 0 */
+			if (usb_pipedevice(urb->pipe) == 0)
+				qh->hw->hw_info1 &= ~qh_addr_mask;
+		}
+
+		/* just one way to queue requests: swap with the dummy qtd.
+		 * only hc or qh_refresh() ever modify the overlay.
+		 */
+		if (likely(qtd != NULL)) {
+			struct fotg210_qtd *dummy;
+			dma_addr_t dma;
+			__hc32 token;
+
+			/* to avoid racing the HC, use the dummy td instead of
+			 * the first td of our list (becomes new dummy).  both
+			 * tds stay deactivated until we're done, when the
+			 * HC is allowed to fetch the old dummy (4.10.2).
+			 */
+			token = qtd->hw_token;
+			qtd->hw_token = HALT_BIT(fotg210);
+
+			dummy = qh->dummy;
+
+			dma = dummy->qtd_dma;
+			*dummy = *qtd;
+			dummy->qtd_dma = dma;
+
+			list_del(&qtd->qtd_list);
+			list_add(&dummy->qtd_list, qtd_list);
+			list_splice_tail(qtd_list, &qh->qtd_list);
+
+			fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
+			qh->dummy = qtd;
+
+			/* hc must see the new dummy at list end */
+			dma = qtd->qtd_dma;
+			qtd = list_entry(qh->qtd_list.prev,
+					struct fotg210_qtd, qtd_list);
+			qtd->hw_next = QTD_NEXT(fotg210, dma);
+
+			/* let the hc process these next qtds */
+			wmb();
+			dummy->hw_token = token;
+
+			urb->hcpriv = qh;
+		}
+	}
+	return qh;
+}
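+
+/*
+ * The dummy-qtd swap above, step by step (sketch): the URB's first qtd
+ * is copied into the QH's current dummy (which keeps its own DMA
+ * address), the emptied first qtd is re-initialized as the new dummy
+ * at the tail of the list, and only then is the saved token restored.
+ * The controller therefore never fetches a half-built queue (EHCI
+ * 4.10.2).
+ */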
+
+static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb,
+		struct list_head *qtd_list, gfp_t mem_flags)
+{
+	int epnum;
+	unsigned long flags;
+	struct fotg210_qh *qh = NULL;
+	int rc;
+
+	epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FOTG210_URB_TRACE
+	{
+		struct fotg210_qtd *qtd;
+
+		qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
+		fotg210_dbg(fotg210,
+				"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+				__func__, urb->dev->devpath, urb,
+				epnum & 0x0f, (epnum & USB_DIR_IN)
+					? "in" : "out",
+				urb->transfer_buffer_length,
+				qtd, urb->ep->hcpriv);
+	}
+#endif
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+		rc = -ESHUTDOWN;
+		goto done;
+	}
+	rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+	if (unlikely(rc))
+		goto done;
+
+	qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	if (unlikely(qh == NULL)) {
+		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Control/bulk operations through TTs don't need scheduling,
+	 * the HC and TT handle it when the TT has a buffer ready.
+	 */
+	if (likely(qh->qh_state == QH_STATE_IDLE))
+		qh_link_async(fotg210, qh);
+done:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	if (unlikely(qh == NULL))
+		qtd_list_free(fotg210, urb, qtd_list);
+	return rc;
+}
+
+static void single_unlink_async(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh)
+{
+	struct fotg210_qh *prev;
+
+	/* Add to the end of the list of QHs waiting for the next IAAD */
+	qh->qh_state = QH_STATE_UNLINK;
+	if (fotg210->async_unlink)
+		fotg210->async_unlink_last->unlink_next = qh;
+	else
+		fotg210->async_unlink = qh;
+	fotg210->async_unlink_last = qh;
+
+	/* Unlink it from the schedule */
+	prev = fotg210->async;
+	while (prev->qh_next.qh != qh)
+		prev = prev->qh_next.qh;
+
+	prev->hw->hw_next = qh->hw->hw_next;
+	prev->qh_next = qh->qh_next;
+	if (fotg210->qh_scan_next == qh)
+		fotg210->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
+{
+	/*
+	 * Do nothing if an IAA cycle is already running or
+	 * if one will be started shortly.
+	 */
+	if (fotg210->async_iaa || fotg210->async_unlinking)
+		return;
+
+	/* Do all the waiting QHs at once */
+	fotg210->async_iaa = fotg210->async_unlink;
+	fotg210->async_unlink = NULL;
+
+	/* If the controller isn't running, we don't have to wait for it */
+	if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
+		if (!nested)		/* Avoid recursion */
+			end_unlink_async(fotg210);
+
+	/* Otherwise start a new IAA cycle */
+	} else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
+		/* Make sure the unlinks are all visible to the hardware */
+		wmb();
+
+		fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
+				&fotg210->regs->command);
+		fotg210_readl(fotg210, &fotg210->regs->command);
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
+				true);
+	}
+}
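+
+/* The handshake above follows EHCI 4.8.2: setting CMD_IAAD asks the
+ * controller to raise an "Interrupt on Async Advance" once it has
+ * released any cached pointers to the unlinked QHs.  The IAA watchdog
+ * event armed here covers controllers that never deliver that IRQ.
+ */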
+
+/* the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct fotg210_hcd *fotg210)
+{
+	struct fotg210_qh *qh;
+
+	/* Process the idle QHs */
+restart:
+	fotg210->async_unlinking = true;
+	while (fotg210->async_iaa) {
+		qh = fotg210->async_iaa;
+		fotg210->async_iaa = qh->unlink_next;
+		qh->unlink_next = NULL;
+
+		qh->qh_state = QH_STATE_IDLE;
+		qh->qh_next.qh = NULL;
+
+		qh_completions(fotg210, qh);
+		if (!list_empty(&qh->qtd_list) &&
+				fotg210->rh_state == FOTG210_RH_RUNNING)
+			qh_link_async(fotg210, qh);
+		disable_async(fotg210);
+	}
+	fotg210->async_unlinking = false;
+
+	/* Start a new IAA cycle if any QHs are waiting for it */
+	if (fotg210->async_unlink) {
+		start_iaa_cycle(fotg210, true);
+		if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
+			goto restart;
+	}
+}
+
+static void unlink_empty_async(struct fotg210_hcd *fotg210)
+{
+	struct fotg210_qh *qh, *next;
+	bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+	bool check_unlinks_later = false;
+
+	/* Unlink all the async QHs that have been empty for a timer cycle */
+	next = fotg210->async->qh_next.qh;
+	while (next) {
+		qh = next;
+		next = qh->qh_next.qh;
+
+		if (list_empty(&qh->qtd_list) &&
+				qh->qh_state == QH_STATE_LINKED) {
+			if (!stopped && qh->unlink_cycle ==
+					fotg210->async_unlink_cycle)
+				check_unlinks_later = true;
+			else
+				single_unlink_async(fotg210, qh);
+		}
+	}
+
+	/* Start a new IAA cycle if any QHs are waiting for it */
+	if (fotg210->async_unlink)
+		start_iaa_cycle(fotg210, false);
+
+	/* QHs that haven't been empty for long enough will be handled later */
+	if (check_unlinks_later) {
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
+				true);
+		++fotg210->async_unlink_cycle;
+	}
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fotg210->lock */
+
+static void start_unlink_async(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh)
+{
+	/*
+	 * If the QH isn't linked then there's nothing we can do
+	 * unless we were called during a giveback, in which case
+	 * qh_completions() has to deal with it.
+	 */
+	if (qh->qh_state != QH_STATE_LINKED) {
+		if (qh->qh_state == QH_STATE_COMPLETING)
+			qh->needs_rescan = 1;
+		return;
+	}
+
+	single_unlink_async(fotg210, qh);
+	start_iaa_cycle(fotg210, false);
+}
+
+static void scan_async(struct fotg210_hcd *fotg210)
+{
+	struct fotg210_qh *qh;
+	bool check_unlinks_later = false;
+
+	fotg210->qh_scan_next = fotg210->async->qh_next.qh;
+	while (fotg210->qh_scan_next) {
+		qh = fotg210->qh_scan_next;
+		fotg210->qh_scan_next = qh->qh_next.qh;
+rescan:
+		/* clean any finished work for this qh */
+		if (!list_empty(&qh->qtd_list)) {
+			int temp;
+
+			/*
+			 * Unlinks could happen here; completion reporting
+			 * drops the lock.  That's why fotg210->qh_scan_next
+			 * always holds the next qh to scan; if the next qh
+			 * gets unlinked then fotg210->qh_scan_next is adjusted
+			 * in single_unlink_async().
+			 */
+			temp = qh_completions(fotg210, qh);
+			if (qh->needs_rescan) {
+				start_unlink_async(fotg210, qh);
+			} else if (list_empty(&qh->qtd_list)
+					&& qh->qh_state == QH_STATE_LINKED) {
+				qh->unlink_cycle = fotg210->async_unlink_cycle;
+				check_unlinks_later = true;
+			} else if (temp != 0)
+				goto rescan;
+		}
+	}
+
+	/*
+	 * Unlink empty entries, reducing DMA usage as well
+	 * as HCD schedule-scanning costs.  Delay for any qh
+ * we just scanned; it's not unusual for such a qh to
+ * become busy again quickly.
+	 */
+	if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
+			!(fotg210->enabled_hrtimer_events &
+			BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
+		fotg210_enable_event(fotg210,
+				FOTG210_HRTIMER_ASYNC_UNLINKS, true);
+		++fotg210->async_unlink_cycle;
+	}
+}
+
+/* EHCI scheduled transaction support:  interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+static int fotg210_get_frame(struct usb_hcd *hcd);
+
+/* periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210,
+		union fotg210_shadow *periodic, __hc32 tag)
+{
+	switch (hc32_to_cpu(fotg210, tag)) {
+	case Q_TYPE_QH:
+		return &periodic->qh->qh_next;
+	case Q_TYPE_FSTN:
+		return &periodic->fstn->fstn_next;
+	default:
+		return &periodic->itd->itd_next;
+	}
+}
+
+static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210,
+		union fotg210_shadow *periodic, __hc32 tag)
+{
+	switch (hc32_to_cpu(fotg210, tag)) {
+	/* our fotg210_shadow.qh is actually the software part */
+	case Q_TYPE_QH:
+		return &periodic->qh->hw->hw_next;
+	/* others are hw parts */
+	default:
+		return periodic->hw_next;
+	}
+}
+
+/* caller must hold fotg210->lock */
+static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
+		void *ptr)
+{
+	union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
+	__hc32 *hw_p = &fotg210->periodic[frame];
+	union fotg210_shadow here = *prev_p;
+
+	/* find predecessor of "ptr"; hw and shadow lists are in sync */
+	while (here.ptr && here.ptr != ptr) {
+		prev_p = periodic_next_shadow(fotg210, prev_p,
+				Q_NEXT_TYPE(fotg210, *hw_p));
+		hw_p = shadow_next_periodic(fotg210, &here,
+				Q_NEXT_TYPE(fotg210, *hw_p));
+		here = *prev_p;
+	}
+	/* an interrupt entry (at list end) could have been shared */
+	if (!here.ptr)
+		return;
+
+	/* update shadow and hardware lists ... the old "next" pointers
+	 * from ptr may still be in use, the caller updates them.
+	 */
+	*prev_p = *periodic_next_shadow(fotg210, &here,
+			Q_NEXT_TYPE(fotg210, *hw_p));
+
+	*hw_p = *shadow_next_periodic(fotg210, &here,
+			Q_NEXT_TYPE(fotg210, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short periodic_usecs(struct fotg210_hcd *fotg210,
+		unsigned frame, unsigned uframe)
+{
+	__hc32 *hw_p = &fotg210->periodic[frame];
+	union fotg210_shadow *q = &fotg210->pshadow[frame];
+	unsigned usecs = 0;
+	struct fotg210_qh_hw *hw;
+
+	while (q->ptr) {
+		switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
+		case Q_TYPE_QH:
+			hw = q->qh->hw;
+			/* is it in the S-mask? */
+			if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
+				usecs += q->qh->usecs;
+			/* ... or C-mask? */
+			if (hw->hw_info2 & cpu_to_hc32(fotg210,
+					1 << (8 + uframe)))
+				usecs += q->qh->c_usecs;
+			hw_p = &hw->hw_next;
+			q = &q->qh->qh_next;
+			break;
+		/* case Q_TYPE_FSTN: */
+		default:
+			/* for "save place" FSTNs, count the relevant INTR
+			 * bandwidth from the previous frame
+			 */
+			if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
+				fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
+
+			hw_p = &q->fstn->hw_next;
+			q = &q->fstn->fstn_next;
+			break;
+		case Q_TYPE_ITD:
+			if (q->itd->hw_transaction[uframe])
+				usecs += q->itd->stream->usecs;
+			hw_p = &q->itd->hw_next;
+			q = &q->itd->itd_next;
+			break;
+		}
+	}
+	if (usecs > fotg210->uframe_periodic_max)
+		fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
+				frame * 8 + uframe, usecs);
+	return usecs;
+}
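+
+/* For reference: in hw_info2, bits 0-7 hold the interrupt schedule
+ * mask (S-mask) and bits 8-15 the split-completion mask (C-mask), so
+ * the loop above charges qh->usecs for start-split uframes and
+ * qh->c_usecs for complete-split uframes against each 125 us budget.
+ */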
+
+static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
+{
+	if (!dev1->tt || !dev2->tt)
+		return 0;
+	if (dev1->tt != dev2->tt)
+		return 0;
+	if (dev1->tt->multi)
+		return dev1->ttport == dev2->ttport;
+	else
+		return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period,
+		struct usb_device *dev, unsigned frame, u32 uf_mask)
+{
+	if (period == 0)	/* error */
+		return 0;
+
+	/* note bandwidth wastage:  split never follows csplit
+	 * (different dev or endpoint) until the next uframe.
+	 * calling convention doesn't make that distinction.
+	 */
+	for (; frame < fotg210->periodic_size; frame += period) {
+		union fotg210_shadow here;
+		__hc32 type;
+		struct fotg210_qh_hw *hw;
+
+		here = fotg210->pshadow[frame];
+		type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
+		while (here.ptr) {
+			switch (hc32_to_cpu(fotg210, type)) {
+			case Q_TYPE_ITD:
+				type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
+				here = here.itd->itd_next;
+				continue;
+			case Q_TYPE_QH:
+				hw = here.qh->hw;
+				if (same_tt(dev, here.qh->dev)) {
+					u32 mask;
+
+					mask = hc32_to_cpu(fotg210,
+							hw->hw_info2);
+					/* "knows" no gap is needed */
+					mask |= mask >> 8;
+					if (mask & uf_mask)
+						break;
+				}
+				type = Q_NEXT_TYPE(fotg210, hw->hw_next);
+				here = here.qh->qh_next;
+				continue;
+			/* case Q_TYPE_FSTN: */
+			default:
+				fotg210_dbg(fotg210,
+						"periodic frame %d bogus type %d\n",
+						frame, type);
+			}
+
+			/* collision or error */
+			return 0;
+		}
+	}
+
+	/* no collision */
+	return 1;
+}
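+
+/* A "collision" above means some already-linked qh on the same TT has
+ * an S-mask or C-mask bit (folded together by "mask |= mask >> 8")
+ * overlapping one of the uframes in uf_mask, i.e. the transaction
+ * translator would be double-booked in that microframe.
+ */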
+
+static void enable_periodic(struct fotg210_hcd *fotg210)
+{
+	if (fotg210->periodic_count++)
+		return;
+
+	/* Stop waiting to turn off the periodic schedule */
+	fotg210->enabled_hrtimer_events &=
+		~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
+
+	/* Don't start the schedule until PSS is 0 */
+	fotg210_poll_PSS(fotg210);
+	turn_on_io_watchdog(fotg210);
+}
+
+static void disable_periodic(struct fotg210_hcd *fotg210)
+{
+	if (--fotg210->periodic_count)
+		return;
+
+	/* Don't turn off the schedule until PSS is 1 */
+	fotg210_poll_PSS(fotg210);
+}
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fotg210 0.96+)
+ */
+static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	unsigned i;
+	unsigned period = qh->period;
+
+	dev_dbg(&qh->dev->dev,
+			"link qh%d-%04x/%p start %d [%d/%d us]\n", period,
+			hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+			(QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
+			qh->c_usecs);
+
+	/* high bandwidth, or otherwise every microframe */
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->start; i < fotg210->periodic_size; i += period) {
+		union fotg210_shadow *prev = &fotg210->pshadow[i];
+		__hc32 *hw_p = &fotg210->periodic[i];
+		union fotg210_shadow here = *prev;
+		__hc32 type = 0;
+
+		/* skip the iso nodes at list head */
+		while (here.ptr) {
+			type = Q_NEXT_TYPE(fotg210, *hw_p);
+			if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+				break;
+			prev = periodic_next_shadow(fotg210, prev, type);
+			hw_p = shadow_next_periodic(fotg210, &here, type);
+			here = *prev;
+		}
+
+		/* sorting each branch by period (slow-->fast)
+		 * enables sharing interior tree nodes
+		 */
+		while (here.ptr && qh != here.qh) {
+			if (qh->period > here.qh->period)
+				break;
+			prev = &here.qh->qh_next;
+			hw_p = &here.qh->hw->hw_next;
+			here = *prev;
+		}
+		/* link in this qh, unless some earlier pass did that */
+		if (qh != here.qh) {
+			qh->qh_next = here;
+			if (here.qh)
+				qh->hw->hw_next = *hw_p;
+			wmb();
+			prev->qh = qh;
+			*hw_p = QH_NEXT(fotg210, qh->qh_dma);
+		}
+	}
+	qh->qh_state = QH_STATE_LINKED;
+	qh->xacterrs = 0;
+
+	/* update per-qh bandwidth for usbfs */
+	fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	list_add(&qh->intr_node, &fotg210->intr_qh_list);
+
+	/* maybe enable periodic schedule processing */
+	++fotg210->intr_count;
+	enable_periodic(fotg210);
+}
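+
+/* Worked example of the usbfs accounting above: an interrupt qh with
+ * period 8 and usecs/c_usecs of 30/0 is charged (30 + 0) / 8 = 3 us
+ * per frame (integer division), while a period-0 qh (polled in every
+ * microframe) is charged usecs * 8, once for each uframe of a frame.
+ */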
+
+static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh)
+{
+	unsigned i;
+	unsigned period;
+
+	/*
+	 * If qh is for a low/full-speed device, simply unlinking it
+	 * could interfere with an ongoing split transaction.  To unlink
+	 * it safely would require setting the QH_INACTIVATE bit and
+	 * waiting at least one frame, as described in EHCI 4.12.2.5.
+	 *
+	 * We won't bother with any of this.  Instead, we assume that the
+	 * only reason for unlinking an interrupt QH while the current URB
+	 * is still active is to dequeue all the URBs (flush the whole
+	 * endpoint queue).
+	 *
+	 * If rebalancing the periodic schedule is ever implemented, this
+	 * approach will no longer be valid.
+	 */
+
+	/* high bandwidth, or otherwise part of every microframe */
+	period = qh->period;
+	if (!period)
+		period = 1;
+
+	for (i = qh->start; i < fotg210->periodic_size; i += period)
+		periodic_unlink(fotg210, i, qh);
+
+	/* update per-qh bandwidth for usbfs */
+	fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	dev_dbg(&qh->dev->dev,
+			"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+			qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+			(QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
+			qh->c_usecs);
+
+	/* qh->qh_next still "live" to HC */
+	qh->qh_state = QH_STATE_UNLINK;
+	qh->qh_next.ptr = NULL;
+
+	if (fotg210->qh_scan_next == qh)
+		fotg210->qh_scan_next = list_entry(qh->intr_node.next,
+				struct fotg210_qh, intr_node);
+	list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+		struct fotg210_qh *qh)
+{
+	/* If the QH isn't linked then there's nothing we can do
+	 * unless we were called during a giveback, in which case
+	 * qh_completions() has to deal with it.
+	 */
+	if (qh->qh_state != QH_STATE_LINKED) {
+		if (qh->qh_state == QH_STATE_COMPLETING)
+			qh->needs_rescan = 1;
+		return;
+	}
+
+	qh_unlink_periodic(fotg210, qh);
+
+	/* Make sure the unlinks are visible before starting the timer */
+	wmb();
+
+	/*
+	 * The EHCI spec doesn't say how long it takes the controller to
+	 * stop accessing an unlinked interrupt QH.  The timer delay is
+	 * 9 uframes; presumably that will be long enough.
+	 */
+	qh->unlink_cycle = fotg210->intr_unlink_cycle;
+
+	/* New entries go at the end of the intr_unlink list */
+	if (fotg210->intr_unlink)
+		fotg210->intr_unlink_last->unlink_next = qh;
+	else
+		fotg210->intr_unlink = qh;
+	fotg210->intr_unlink_last = qh;
+
+	if (fotg210->intr_unlinking)
+		;	/* Avoid recursive calls */
+	else if (fotg210->rh_state < FOTG210_RH_RUNNING)
+		fotg210_handle_intr_unlinks(fotg210);
+	else if (fotg210->intr_unlink == qh) {
+		fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+				true);
+		++fotg210->intr_unlink_cycle;
+	}
+}
+
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	struct fotg210_qh_hw *hw = qh->hw;
+	int rc;
+
+	qh->qh_state = QH_STATE_IDLE;
+	hw->hw_next = FOTG210_LIST_END(fotg210);
+
+	qh_completions(fotg210, qh);
+
+	/* reschedule QH iff another request is queued */
+	if (!list_empty(&qh->qtd_list) &&
+			fotg210->rh_state == FOTG210_RH_RUNNING) {
+		rc = qh_schedule(fotg210, qh);
+
+		/* An error here likely indicates handshake failure
+		 * or no space left in the schedule.  Neither fault
+		 * should happen often ...
+		 *
+		 * FIXME kill the now-dysfunctional queued urbs
+		 */
+		if (rc != 0)
+			fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
+					qh, rc);
+	}
+
+	/* maybe turn off periodic schedule */
+	--fotg210->intr_count;
+	disable_periodic(fotg210);
+}
+
+static int check_period(struct fotg210_hcd *fotg210, unsigned frame,
+		unsigned uframe, unsigned period, unsigned usecs)
+{
+	int claimed;
+
+	/* complete split running into next frame?
+	 * given FSTN support, we could sometimes check...
+	 */
+	if (uframe >= 8)
+		return 0;
+
+	/* convert "usecs we need" to "max already claimed" */
+	usecs = fotg210->uframe_periodic_max - usecs;
+
+	/* we "know" 2 and 4 uframe intervals were rejected; so
+	 * for period 0, check _every_ microframe in the schedule.
+	 */
+	if (unlikely(period == 0)) {
+		do {
+			for (uframe = 0; uframe < 7; uframe++) {
+				claimed = periodic_usecs(fotg210, frame,
+						uframe);
+				if (claimed > usecs)
+					return 0;
+			}
+		} while ((frame += 1) < fotg210->periodic_size);
+
+	/* just check the specified uframe, at that period */
+	} else {
+		do {
+			claimed = periodic_usecs(fotg210, frame, uframe);
+			if (claimed > usecs)
+				return 0;
+		} while ((frame += period) < fotg210->periodic_size);
+	}
+
+	/* success! */
+	return 1;
+}
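+
+/* Note the inversion above: rather than summing "claimed + needed",
+ * check_period() compares what is already claimed against
+ * uframe_periodic_max - usecs.  E.g. with the default 100 us cap, a
+ * 30 us transfer fits only where at most 70 us are already claimed.
+ * The period-0 path probes uframes 0-6 of each remaining frame.
+ */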
+
+static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame,
+		unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp)
+{
+	int retval = -ENOSPC;
+	u8 mask = 0;
+
+	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
+		goto done;
+
+	if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = 0;
+		goto done;
+	}
+
+	/* Make sure this tt's buffer is also available for CSPLITs.
+	 * We pessimize a bit; probably the typical full speed case
+	 * doesn't need the second CSPLIT.
+	 *
+	 * NOTE:  both SPLIT and CSPLIT could be checked in just
+	 * one smart pass...
+	 */
+	mask = 0x03 << (uframe + qh->gap_uf);
+	*c_maskp = cpu_to_hc32(fotg210, mask << 8);
+
+	mask |= 1 << uframe;
+	if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
+		if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
+				qh->period, qh->c_usecs))
+			goto done;
+		if (!check_period(fotg210, frame, uframe + qh->gap_uf,
+				qh->period, qh->c_usecs))
+			goto done;
+		retval = 0;
+	}
+done:
+	return retval;
+}
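+
+/* CSPLIT budgeting above: qh->gap_uf is (roughly) the uframe gap
+ * between the start-split and the first complete-split, so
+ * "0x03 << (uframe + qh->gap_uf)" reserves two candidate CSPLIT
+ * uframes, which land in the C-mask (bits 8-15) after "mask << 8".
+ */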
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+	int status;
+	unsigned uframe;
+	__hc32 c_mask;
+	unsigned frame;	/* 0..(qh->period - 1), or NO_FRAME */
+	struct fotg210_qh_hw *hw = qh->hw;
+
+	qh_refresh(fotg210, qh);
+	hw->hw_next = FOTG210_LIST_END(fotg210);
+	frame = qh->start;
+
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
+		uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
+		status = check_intr_schedule(fotg210, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
+		status = -ENOSPC;
+	}
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		/* "normal" case, uframing flexible except with splits */
+		if (qh->period) {
+			int i;
+
+			for (i = qh->period; status && i > 0; --i) {
+				frame = ++fotg210->random_frame % qh->period;
+				for (uframe = 0; uframe < 8; uframe++) {
+					status = check_intr_schedule(fotg210,
+							frame, uframe, qh,
+							&c_mask);
+					if (status == 0)
+						break;
+				}
+			}
+
+		/* qh->period == 0 means every uframe */
+		} else {
+			frame = 0;
+			status = check_intr_schedule(fotg210, 0, 0, qh,
+					&c_mask);
+		}
+		if (status)
+			goto done;
+		qh->start = frame;
+
+		/* reset S-frame and (maybe) C-frame masks */
+		hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
+		hw->hw_info2 |= qh->period
+			? cpu_to_hc32(fotg210, 1 << uframe)
+			: cpu_to_hc32(fotg210, QH_SMASK);
+		hw->hw_info2 |= c_mask;
+	} else
+		fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
+
+	/* stuff into the periodic schedule */
+	qh_link_periodic(fotg210, qh);
+done:
+	return status;
+}
+
+static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+		struct list_head *qtd_list, gfp_t mem_flags)
+{
+	unsigned epnum;
+	unsigned long flags;
+	struct fotg210_qh *qh;
+	int status;
+	struct list_head empty;
+
+	/* get endpoint and transfer/schedule data */
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+		status = -ESHUTDOWN;
+		goto done_not_linked;
+	}
+	status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+	if (unlikely(status))
+		goto done_not_linked;
+
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD(&empty);
+	qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
+	if (qh == NULL) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		status = qh_schedule(fotg210, qh);
+		if (status)
+			goto done;
+	}
+
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	BUG_ON(qh == NULL);
+
+	/* ... update usbfs periodic stats */
+	fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
+
+done:
+	if (unlikely(status))
+		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+done_not_linked:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	if (status)
+		qtd_list_free(fotg210, urb, qtd_list);
+
+	return status;
+}
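+
+/* Note that intr_submit() calls qh_append_tds() twice on purpose: the
+ * first call, with an empty list, only materializes and schedules the
+ * qh so that any failure surfaces before the urb's qtds are
+ * committed; the second call then queues them, and is guaranteed to
+ * find the qh cached in urb->ep->hcpriv (hence the BUG_ON).
+ */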
+
+static void scan_intr(struct fotg210_hcd *fotg210)
+{
+	struct fotg210_qh *qh;
+
+	list_for_each_entry_safe(qh, fotg210->qh_scan_next,
+			&fotg210->intr_qh_list, intr_node) {
+rescan:
+		/* clean any finished work for this qh */
+		if (!list_empty(&qh->qtd_list)) {
+			int temp;
+
+			/*
+			 * Unlinks could happen here; completion reporting
+			 * drops the lock.  That's why fotg210->qh_scan_next
+			 * always holds the next qh to scan; if the next qh
+			 * gets unlinked then fotg210->qh_scan_next is adjusted
+			 * in qh_unlink_periodic().
+			 */
+			temp = qh_completions(fotg210, qh);
+			if (unlikely(qh->needs_rescan ||
+					(list_empty(&qh->qtd_list) &&
+					qh->qh_state == QH_STATE_LINKED)))
+				start_unlink_intr(fotg210, qh);
+			else if (temp != 0)
+				goto rescan;
+		}
+	}
+}
+
+/* fotg210_iso_stream ops work with both ITD and SITD */
+
+static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags)
+{
+	struct fotg210_iso_stream *stream;
+
+	stream = kzalloc(sizeof(*stream), mem_flags);
+	if (likely(stream != NULL)) {
+		INIT_LIST_HEAD(&stream->td_list);
+		INIT_LIST_HEAD(&stream->free_list);
+		stream->next_uframe = -1;
+	}
+	return stream;
+}
+
+static void iso_stream_init(struct fotg210_hcd *fotg210,
+		struct fotg210_iso_stream *stream, struct usb_device *dev,
+		int pipe, unsigned interval)
+{
+	u32 buf1;
+	unsigned epnum, maxp;
+	int is_input;
+	long bandwidth;
+	unsigned multi;
+
+	/*
+	 * this might be a "high bandwidth" highspeed endpoint,
+	 * as encoded in the ep descriptor's wMaxPacketSize field
+	 */
+	epnum = usb_pipeendpoint(pipe);
+	is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
+	maxp = usb_maxpacket(dev, pipe, !is_input);
+	if (is_input)
+		buf1 = (1 << 11);
+	else
+		buf1 = 0;
+
+	maxp = max_packet(maxp);
+	multi = hb_mult(maxp);
+	buf1 |= maxp;
+	maxp *= multi;
+
+	stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
+	stream->buf1 = cpu_to_hc32(fotg210, buf1);
+	stream->buf2 = cpu_to_hc32(fotg210, multi);
+
+	/* usbfs wants to report the average usecs per frame tied up
+	 * when transfers on this endpoint are scheduled ...
+	 */
+	if (dev->speed == USB_SPEED_FULL) {
+		interval <<= 3;
+		stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+				is_input, 1, maxp));
+		stream->usecs /= 8;
+	} else {
+		stream->highspeed = 1;
+		stream->usecs = HS_USECS_ISO(maxp);
+	}
+	bandwidth = stream->usecs * 8;
+	bandwidth /= interval;
+
+	stream->bandwidth = bandwidth;
+	stream->udev = dev;
+	stream->bEndpointAddress = is_input | epnum;
+	stream->interval = interval;
+	stream->maxp = maxp;
+}
+
+static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210,
+		struct urb *urb)
+{
+	unsigned epnum;
+	struct fotg210_iso_stream *stream;
+	struct usb_host_endpoint *ep;
+	unsigned long flags;
+
+	epnum = usb_pipeendpoint(urb->pipe);
+	if (usb_pipein(urb->pipe))
+		ep = urb->dev->ep_in[epnum];
+	else
+		ep = urb->dev->ep_out[epnum];
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	stream = ep->hcpriv;
+
+	if (unlikely(stream == NULL)) {
+		stream = iso_stream_alloc(GFP_ATOMIC);
+		if (likely(stream != NULL)) {
+			ep->hcpriv = stream;
+			stream->ep = ep;
+			iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
+					urb->interval);
+		}
+
+	/* if dev->ep[epnum] is a QH, hw is set */
+	} else if (unlikely(stream->hw != NULL)) {
+		fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
+				urb->dev->devpath, epnum,
+				usb_pipein(urb->pipe) ? "in" : "out");
+		stream = NULL;
+	}
+
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return stream;
+}
+
+/* fotg210_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets,
+		gfp_t mem_flags)
+{
+	struct fotg210_iso_sched *iso_sched;
+	int size = sizeof(*iso_sched);
+
+	size += packets * sizeof(struct fotg210_iso_packet);
+	iso_sched = kzalloc(size, mem_flags);
+	if (likely(iso_sched != NULL))
+		INIT_LIST_HEAD(&iso_sched->td_list);
+
+	return iso_sched;
+}
+
+static inline void itd_sched_init(struct fotg210_hcd *fotg210,
+		struct fotg210_iso_sched *iso_sched,
+		struct fotg210_iso_stream *stream, struct urb *urb)
+{
+	unsigned i;
+	dma_addr_t dma = urb->transfer_dma;
+
+	/* how many uframes are needed for these transfers */
+	iso_sched->span = urb->number_of_packets * stream->interval;
+
+	/* figure out per-uframe itd fields that we'll need later
+	 * when we fit new itds into the schedule.
+	 */
+	for (i = 0; i < urb->number_of_packets; i++) {
+		struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
+		unsigned length;
+		dma_addr_t buf;
+		u32 trans;
+
+		length = urb->iso_frame_desc[i].length;
+		buf = dma + urb->iso_frame_desc[i].offset;
+
+		trans = FOTG210_ISOC_ACTIVE;
+		trans |= buf & 0x0fff;
+		if (unlikely(((i + 1) == urb->number_of_packets))
+				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
+			trans |= FOTG210_ITD_IOC;
+		trans |= length << 16;
+		uframe->transaction = cpu_to_hc32(fotg210, trans);
+
+		/* might need to cross a buffer page within a uframe */
+		uframe->bufp = (buf & ~(u64)0x0fff);
+		buf += length;
+		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
+			uframe->cross = 1;
+	}
+}
+
+static void iso_sched_free(struct fotg210_iso_stream *stream,
+		struct fotg210_iso_sched *iso_sched)
+{
+	if (!iso_sched)
+		return;
+	/* caller must hold fotg210->lock! */
+	list_splice(&iso_sched->td_list, &stream->free_list);
+	kfree(iso_sched);
+}
+
+static int itd_urb_transaction(struct fotg210_iso_stream *stream,
+		struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags)
+{
+	struct fotg210_itd *itd;
+	dma_addr_t itd_dma;
+	int i;
+	unsigned num_itds;
+	struct fotg210_iso_sched *sched;
+	unsigned long flags;
+
+	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+	if (unlikely(sched == NULL))
+		return -ENOMEM;
+
+	itd_sched_init(fotg210, sched, stream, urb);
+
+	if (urb->interval < 8)
+		num_itds = 1 + (sched->span + 7) / 8;
+	else
+		num_itds = urb->number_of_packets;
+
+	/* allocate/init ITDs */
+	spin_lock_irqsave(&fotg210->lock, flags);
+	for (i = 0; i < num_itds; i++) {
+
+		/*
+		 * Use iTDs from the free list, but not iTDs that may
+		 * still be in use by the hardware.
+		 */
+		if (likely(!list_empty(&stream->free_list))) {
+			itd = list_first_entry(&stream->free_list,
+					struct fotg210_itd, itd_list);
+			if (itd->frame == fotg210->now_frame)
+				goto alloc_itd;
+			list_del(&itd->itd_list);
+			itd_dma = itd->itd_dma;
+		} else {
+alloc_itd:
+			spin_unlock_irqrestore(&fotg210->lock, flags);
+			itd = dma_pool_zalloc(fotg210->itd_pool, mem_flags,
+					&itd_dma);
+			spin_lock_irqsave(&fotg210->lock, flags);
+			if (!itd) {
+				iso_sched_free(stream, sched);
+				spin_unlock_irqrestore(&fotg210->lock, flags);
+				return -ENOMEM;
+			}
+		}
+
+		itd->itd_dma = itd_dma;
+		list_add(&itd->itd_list, &sched->td_list);
+	}
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+
+	/* temporarily store schedule info in hcpriv */
+	urb->hcpriv = sched;
+	urb->error_count = 0;
+	return 0;
+}
+
+static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe,
+		u8 usecs, u32 period)
+{
+	uframe %= period;
+	do {
+		/* can't commit more than uframe_periodic_max usec */
+		if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
+				> (fotg210->uframe_periodic_max - usecs))
+			return 0;
+
+		/* we know urb->interval is 2^N uframes */
+		uframe += period;
+	} while (uframe < mod);
+	return 1;
+}
+
+/* This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.)  That limits the size
+ * transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fotg210's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FOTG210_TUNE_FLS and the slop).  Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes */
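+
+/* Rough numbers, assuming the default 1024-entry periodic list: the
+ * schedule spans 1024 frames * 8 uframes = 8192 uframes (about one
+ * second), and the 80-uframe SCHEDULE_SLOP is 10 ms of slack.
+ */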
+
+static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb,
+		struct fotg210_iso_stream *stream)
+{
+	u32 now, next, start, period, span;
+	int status;
+	unsigned mod = fotg210->periodic_size << 3;
+	struct fotg210_iso_sched *sched = urb->hcpriv;
+
+	period = urb->interval;
+	span = sched->span;
+
+	if (span > mod - SCHEDULE_SLOP) {
+		fotg210_dbg(fotg210, "iso request %p too long\n", urb);
+		status = -EFBIG;
+		goto fail;
+	}
+
+	now = fotg210_read_frame_index(fotg210) & (mod - 1);
+
+	/* Typical case: reuse current schedule, stream is still active.
+	 * Hopefully there are no gaps from the host falling behind
+	 * (irq delays etc), but if there are we'll take the next
+	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+	 */
+	if (likely(!list_empty(&stream->td_list))) {
+		u32 excess;
+
+		/* For high speed devices, allow scheduling within the
+		 * isochronous scheduling threshold.  For full speed devices
+		 * and Intel PCI-based controllers, don't (work around for
+		 * Intel ICH9 bug).
+		 */
+		if (!stream->highspeed && fotg210->fs_i_thresh)
+			next = now + fotg210->i_thresh;
+		else
+			next = now;
+
+		/* Fell behind (by up to twice the slop amount)?
+		 * We decide based on the time of the last currently-scheduled
+		 * slot, not the time of the next available slot.
+		 */
+		excess = (stream->next_uframe - period - next) & (mod - 1);
+		if (excess >= mod - 2 * SCHEDULE_SLOP)
+			start = next + excess - mod + period *
+					DIV_ROUND_UP(mod - excess, period);
+		else
+			start = next + excess + period;
+		if (start - now >= mod) {
+			fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+					urb, start - now - period, period,
+					mod);
+			status = -EFBIG;
+			goto fail;
+		}
+	}
+
+	/* need to schedule; when's the next (u)frame we could start?
+	 * this is bigger than fotg210->i_thresh allows; scheduling itself
+	 * isn't free; the slop should handle reasonably slow cpus.  it
+	 * can also help high bandwidth if the dma and irq loads don't
+	 * jump until after the queue is primed.
+	 */
+	else {
+		int done = 0;
+
+		start = SCHEDULE_SLOP + (now & ~0x07);
+
+		/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+		/* find a uframe slot with enough bandwidth.
+		 * Early uframes are more precious because full-speed
+		 * iso IN transfers can't use late uframes,
+		 * and therefore they should be allocated last.
+		 */
+		next = start;
+		start += period;
+		do {
+			start--;
+			/* check schedule: enough space? */
+			if (itd_slot_ok(fotg210, mod, start,
+					stream->usecs, period))
+				done = 1;
+		} while (start > next && !done);
+
+		/* no room in the schedule */
+		if (!done) {
+			fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
+					urb, now, now + mod);
+			status = -ENOSPC;
+			goto fail;
+		}
+	}
+
+	/* Tried to schedule too far into the future? */
+	if (unlikely(start - now + span - period >=
+			mod - 2 * SCHEDULE_SLOP)) {
+		fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+				urb, start - now, span - period,
+				mod - 2 * SCHEDULE_SLOP);
+		status = -EFBIG;
+		goto fail;
+	}
+
+	stream->next_uframe = start & (mod - 1);
+
+	/* report high speed start in uframes; full speed, in frames */
+	urb->start_frame = stream->next_uframe;
+	if (!stream->highspeed)
+		urb->start_frame >>= 3;
+
+	/* Make sure scan_isoc() sees these */
+	if (fotg210->isoc_count == 0)
+		fotg210->next_frame = now >> 3;
+	return 0;
+
+fail:
+	iso_sched_free(stream, sched);
+	urb->hcpriv = NULL;
+	return status;
+}
+
+static inline void itd_init(struct fotg210_hcd *fotg210,
+		struct fotg210_iso_stream *stream, struct fotg210_itd *itd)
+{
+	int i;
+
+	/* it's been recently zeroed */
+	itd->hw_next = FOTG210_LIST_END(fotg210);
+	itd->hw_bufp[0] = stream->buf0;
+	itd->hw_bufp[1] = stream->buf1;
+	itd->hw_bufp[2] = stream->buf2;
+
+	for (i = 0; i < 8; i++)
+		itd->index[i] = -1;
+
+	/* All other fields are filled when scheduling */
+}
+
+static inline void itd_patch(struct fotg210_hcd *fotg210,
+		struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched,
+		unsigned index, u16 uframe)
+{
+	struct fotg210_iso_packet *uf = &iso_sched->packet[index];
+	unsigned pg = itd->pg;
+
+	uframe &= 0x07;
+	itd->index[uframe] = index;
+
+	itd->hw_transaction[uframe] = uf->transaction;
+	itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
+	itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
+	itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
+
+	/* iso_frame_desc[].offset must be strictly increasing */
+	if (unlikely(uf->cross)) {
+		u64 bufp = uf->bufp + 4096;
+
+		itd->pg = ++pg;
+		itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
+		itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
+	}
+}
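+
+/* An iTD carries seven 4 KiB buffer-page pointers; itd_patch() puts
+ * the current page index into bits 14:12 of the transaction word, and
+ * when a packet crosses a 4 KiB boundary (uf->cross) it also primes
+ * the next page slot with bufp + 4096.
+ */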
+
+static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame,
+		struct fotg210_itd *itd)
+{
+	union fotg210_shadow *prev = &fotg210->pshadow[frame];
+	__hc32 *hw_p = &fotg210->periodic[frame];
+	union fotg210_shadow here = *prev;
+	__hc32 type = 0;
+
+	/* skip any iso nodes which might belong to previous microframes */
+	while (here.ptr) {
+		type = Q_NEXT_TYPE(fotg210, *hw_p);
+		if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+			break;
+		prev = periodic_next_shadow(fotg210, prev, type);
+		hw_p = shadow_next_periodic(fotg210, &here, type);
+		here = *prev;
+	}
+
+	itd->itd_next = here;
+	itd->hw_next = *hw_p;
+	prev->itd = itd;
+	itd->frame = frame;
+	wmb();
+	*hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb,
+		unsigned mod, struct fotg210_iso_stream *stream)
+{
+	int packet;
+	unsigned next_uframe, uframe, frame;
+	struct fotg210_iso_sched *iso_sched = urb->hcpriv;
+	struct fotg210_itd *itd;
+
+	next_uframe = stream->next_uframe & (mod - 1);
+
+	if (unlikely(list_empty(&stream->td_list))) {
+		fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+				+= stream->bandwidth;
+		fotg210_dbg(fotg210,
+			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+			urb->interval,
+			next_uframe >> 3, next_uframe & 0x7);
+	}
+
+	/* fill iTDs uframe by uframe */
+	for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
+		if (itd == NULL) {
+			/* ASSERT:  we have all necessary itds */
+
+			/* ASSERT:  no itds for this endpoint in this uframe */
+
+			itd = list_entry(iso_sched->td_list.next,
+					struct fotg210_itd, itd_list);
+			list_move_tail(&itd->itd_list, &stream->td_list);
+			itd->stream = stream;
+			itd->urb = urb;
+			itd_init(fotg210, stream, itd);
+		}
+
+		uframe = next_uframe & 0x07;
+		frame = next_uframe >> 3;
+
+		itd_patch(fotg210, itd, iso_sched, packet, uframe);
+
+		next_uframe += stream->interval;
+		next_uframe &= mod - 1;
+		packet++;
+
+		/* link completed itds into the schedule */
+		if (((next_uframe >> 3) != frame)
+				|| packet == urb->number_of_packets) {
+			itd_link(fotg210, frame & (fotg210->periodic_size - 1),
+					itd);
+			itd = NULL;
+		}
+	}
+	stream->next_uframe = next_uframe;
+
+	/* don't need that schedule data any more */
+	iso_sched_free(stream, iso_sched);
+	urb->hcpriv = NULL;
+
+	++fotg210->isoc_count;
+	enable_periodic(fotg210);
+}
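+
+/* Sketch of the loop above: packets are patched into the current iTD
+ * one uframe at a time; whenever next_uframe rolls over into a new
+ * frame (or the urb runs out of packets), the filled iTD is linked
+ * into the periodic schedule and a fresh one is taken from the
+ * iso_sched->td_list allocated by itd_urb_transaction().
+ */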
+
+#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
+		FOTG210_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD.  Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly.  That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs.  It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+	struct urb *urb = itd->urb;
+	struct usb_iso_packet_descriptor *desc;
+	u32 t;
+	unsigned uframe;
+	int urb_index = -1;
+	struct fotg210_iso_stream *stream = itd->stream;
+	struct usb_device *dev;
+	bool retval = false;
+
+	/* for each uframe with a packet */
+	for (uframe = 0; uframe < 8; uframe++) {
+		if (likely(itd->index[uframe] == -1))
+			continue;
+		urb_index = itd->index[uframe];
+		desc = &urb->iso_frame_desc[urb_index];
+
+		t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
+		itd->hw_transaction[uframe] = 0;
+
+		/* report transfer status */
+		if (unlikely(t & ISO_ERRS)) {
+			urb->error_count++;
+			if (t & FOTG210_ISOC_BUF_ERR)
+				desc->status = usb_pipein(urb->pipe)
+					? -ENOSR  /* hc couldn't read */
+					: -ECOMM; /* hc couldn't write */
+			else if (t & FOTG210_ISOC_BABBLE)
+				desc->status = -EOVERFLOW;
+			else /* (t & FOTG210_ISOC_XACTERR) */
+				desc->status = -EPROTO;
+
+			/* HC need not update length with this error */
+			if (!(t & FOTG210_ISOC_BABBLE)) {
+				desc->actual_length =
+					fotg210_itdlen(urb, desc, t);
+				urb->actual_length += desc->actual_length;
+			}
+		} else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
+			desc->status = 0;
+			desc->actual_length = fotg210_itdlen(urb, desc, t);
+			urb->actual_length += desc->actual_length;
+		} else {
+			/* URB was too late */
+			desc->status = -EXDEV;
+		}
+	}
+
+	/* handle completion now? */
+	if (likely((urb_index + 1) != urb->number_of_packets))
+		goto done;
+
+	/* ASSERT: it's really the last itd for this urb
+	 * list_for_each_entry (itd, &stream->td_list, itd_list)
+	 *	BUG_ON (itd->urb == urb);
+	 */
+
+	/* give urb back to the driver; completion often (re)submits */
+	dev = urb->dev;
+	fotg210_urb_done(fotg210, urb, 0);
+	retval = true;
+	urb = NULL;
+
+	--fotg210->isoc_count;
+	disable_periodic(fotg210);
+
+	if (unlikely(list_is_singular(&stream->td_list))) {
+		fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+				-= stream->bandwidth;
+		fotg210_dbg(fotg210,
+			"deschedule devp %s ep%d%s-iso\n",
+			dev->devpath, stream->bEndpointAddress & 0x0f,
+			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+	}
+
+done:
+	itd->urb = NULL;
+
+	/* Add to the end of the free list for later reuse */
+	list_move_tail(&itd->itd_list, &stream->free_list);
+
+	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+	if (list_empty(&stream->td_list)) {
+		list_splice_tail_init(&stream->free_list,
+				&fotg210->cached_itd_list);
+		start_free_itds(fotg210);
+	}
+
+	return retval;
+}
+
+static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+		gfp_t mem_flags)
+{
+	int status = -EINVAL;
+	unsigned long flags;
+	struct fotg210_iso_stream *stream;
+
+	/* Get iso_stream head */
+	stream = iso_stream_find(fotg210, urb);
+	if (unlikely(stream == NULL)) {
+		fotg210_dbg(fotg210, "can't get iso stream\n");
+		return -ENOMEM;
+	}
+	if (unlikely(urb->interval != stream->interval &&
+			fotg210_port_speed(fotg210, 0) ==
+			USB_PORT_STAT_HIGH_SPEED)) {
+		fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
+				stream->interval, urb->interval);
+		goto done;
+	}
+
+#ifdef FOTG210_URB_TRACE
+	fotg210_dbg(fotg210,
+			"%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
+			__func__, urb->dev->devpath, urb,
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			urb->transfer_buffer_length,
+			urb->number_of_packets, urb->interval,
+			stream);
+#endif
+
+	/* allocate ITDs w/o locking anything */
+	status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
+	if (unlikely(status < 0)) {
+		fotg210_dbg(fotg210, "can't init itds\n");
+		goto done;
+	}
+
+	/* schedule ... need to lock */
+	spin_lock_irqsave(&fotg210->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+		status = -ESHUTDOWN;
+		goto done_not_linked;
+	}
+	status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+	if (unlikely(status))
+		goto done_not_linked;
+	status = iso_stream_schedule(fotg210, urb, stream);
+	if (likely(status == 0))
+		itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
+	else
+		usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+done_not_linked:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+done:
+	return status;
+}
+
+static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame,
+		unsigned now_frame, bool live)
+{
+	unsigned uf;
+	bool modified;
+	union fotg210_shadow q, *q_p;
+	__hc32 type, *hw_p;
+
+	/* scan each element in frame's queue for completions */
+	q_p = &fotg210->pshadow[frame];
+	hw_p = &fotg210->periodic[frame];
+	q.ptr = q_p->ptr;
+	type = Q_NEXT_TYPE(fotg210, *hw_p);
+	modified = false;
+
+	while (q.ptr) {
+		switch (hc32_to_cpu(fotg210, type)) {
+		case Q_TYPE_ITD:
+			/* If this ITD is still active, leave it for
+			 * later processing ... check the next entry.
+			 * No need to check for activity unless the
+			 * frame is current.
+			 */
+			if (frame == now_frame && live) {
+				rmb();
+				for (uf = 0; uf < 8; uf++) {
+					if (q.itd->hw_transaction[uf] &
+							ITD_ACTIVE(fotg210))
+						break;
+				}
+				if (uf < 8) {
+					q_p = &q.itd->itd_next;
+					hw_p = &q.itd->hw_next;
+					type = Q_NEXT_TYPE(fotg210,
+							q.itd->hw_next);
+					q = *q_p;
+					break;
+				}
+			}
+
+			/* Take finished ITDs out of the schedule
+			 * and process them:  recycle, maybe report
+			 * URB completion.  HC won't cache the
+			 * pointer for much longer, if at all.
+			 */
+			*q_p = q.itd->itd_next;
+			*hw_p = q.itd->hw_next;
+			type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
+			wmb();
+			modified = itd_complete(fotg210, q.itd);
+			q = *q_p;
+			break;
+		default:
+			fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
+					type, frame, q.ptr);
+			/* FALL THROUGH */
+		case Q_TYPE_QH:
+		case Q_TYPE_FSTN:
+			/* End of the iTDs and siTDs */
+			q.ptr = NULL;
+			break;
+		}
+
+		/* assume completion callbacks modify the queue */
+		if (unlikely(modified && fotg210->isoc_count > 0))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void scan_isoc(struct fotg210_hcd *fotg210)
+{
+	unsigned uf, now_frame, frame, ret;
+	unsigned fmask = fotg210->periodic_size - 1;
+	bool live;
+
+	/*
+	 * When running, scan from last scan point up to "now"
+	 * else clean up by scanning everything that's left.
+	 * Touches as few pages as possible:  cache-friendly.
+	 */
+	if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
+		uf = fotg210_read_frame_index(fotg210);
+		now_frame = (uf >> 3) & fmask;
+		live = true;
+	} else {
+		now_frame = (fotg210->next_frame - 1) & fmask;
+		live = false;
+	}
+	fotg210->now_frame = now_frame;
+
+	frame = fotg210->next_frame;
+	for (;;) {
+		ret = 1;
+		while (ret != 0)
+			ret = scan_frame_queue(fotg210, frame,
+					now_frame, live);
+
+		/* Stop when we have reached the current frame */
+		if (frame == now_frame)
+			break;
+		frame = (frame + 1) & fmask;
+	}
+	fotg210->next_frame = now_frame;
+}
+
+/* Display / Set uframe_periodic_max
+ */
+static ssize_t uframe_periodic_max_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fotg210_hcd *fotg210;
+	int n;
+
+	fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+	n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
+	return n;
+}
+
+static ssize_t uframe_periodic_max_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fotg210_hcd *fotg210;
+	unsigned uframe_periodic_max;
+	unsigned frame, uframe;
+	unsigned short allocated_max;
+	unsigned long flags;
+	ssize_t ret;
+
+	fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+		return -EINVAL;
+
+	if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+		fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
+				uframe_periodic_max);
+		return -EINVAL;
+	}
+
+	ret = -EINVAL;
+
+	/*
+	 * take the lock so that our check does not race with periodic
+	 * bandwidth allocation from newly submitted urbs.
+	 */
+	spin_lock_irqsave(&fotg210->lock, flags);
+
+	/*
+	 * for a request to decrease the max periodic bandwidth, we have to
+	 * check every microframe in the schedule to see whether the decrease
+	 * is possible.
+	 */
+	if (uframe_periodic_max < fotg210->uframe_periodic_max) {
+		allocated_max = 0;
+
+		for (frame = 0; frame < fotg210->periodic_size; ++frame)
+			for (uframe = 0; uframe < 7; ++uframe)
+				allocated_max = max(allocated_max,
+						periodic_usecs(fotg210, frame,
+						uframe));
+
+		if (allocated_max > uframe_periodic_max) {
+			fotg210_info(fotg210,
+					"cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n",
+					allocated_max, uframe_periodic_max);
+			goto out_unlock;
+		}
+	}
+
+	/* increasing is always ok */
+
+	fotg210_info(fotg210,
+			"setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
+			100 * uframe_periodic_max / 125, uframe_periodic_max);
+
+	if (uframe_periodic_max != 100)
+		fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
+
+	fotg210->uframe_periodic_max = uframe_periodic_max;
+	ret = count;
+
+out_unlock:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return ret;
+}
+
+static DEVICE_ATTR_RW(uframe_periodic_max);
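+
+/* The attribute appears under the controller's sysfs directory; the
+ * exact path depends on how the platform device is named, e.g.
+ *
+ *	echo 110 > /sys/devices/platform/.../uframe_periodic_max
+ *
+ * Only values in the 100-124 us range checked above are accepted.
+ */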
+
+static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
+{
+	struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+	return device_create_file(controller, &dev_attr_uframe_periodic_max);
+}
+
+static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
+{
+	struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+	device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
+{
+	u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+	fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
+}
+
+/* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
+{
+	fotg210_halt(fotg210);
+
+	spin_lock_irq(&fotg210->lock);
+	fotg210->rh_state = FOTG210_RH_HALTED;
+	fotg210_turn_off_all_ports(fotg210);
+	spin_unlock_irq(&fotg210->lock);
+}
+
+/* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void fotg210_shutdown(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+	spin_lock_irq(&fotg210->lock);
+	fotg210->shutdown = true;
+	fotg210->rh_state = FOTG210_RH_STOPPING;
+	fotg210->enabled_hrtimer_events = 0;
+	spin_unlock_irq(&fotg210->lock);
+
+	fotg210_silence_controller(fotg210);
+
+	hrtimer_cancel(&fotg210->hrtimer);
+}
+
+/* fotg210_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fotg210->lock.
+ */
+static void fotg210_work(struct fotg210_hcd *fotg210)
+{
+	/* another CPU may drop fotg210->lock during a schedule scan while
+	 * it reports urb completions.  this flag guards against bogus
+	 * attempts at re-entrant schedule scanning.
+	 */
+	if (fotg210->scanning) {
+		fotg210->need_rescan = true;
+		return;
+	}
+	fotg210->scanning = true;
+
+rescan:
+	fotg210->need_rescan = false;
+	if (fotg210->async_count)
+		scan_async(fotg210);
+	if (fotg210->intr_count > 0)
+		scan_intr(fotg210);
+	if (fotg210->isoc_count > 0)
+		scan_isoc(fotg210);
+	if (fotg210->need_rescan)
+		goto rescan;
+	fotg210->scanning = false;
+
+	/* the IO watchdog guards against hardware or driver bugs that
+	 * misplace IRQs, and should let us run completely without IRQs.
+	 * such lossage has been observed on both VT6202 and VT8235.
+	 */
+	turn_on_io_watchdog(fotg210);
+}
+
+/* Called when the fotg210_hcd module is removed.
+ */
+static void fotg210_stop(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+	fotg210_dbg(fotg210, "stop\n");
+
+	/* no more interrupts ... */
+
+	spin_lock_irq(&fotg210->lock);
+	fotg210->enabled_hrtimer_events = 0;
+	spin_unlock_irq(&fotg210->lock);
+
+	fotg210_quiesce(fotg210);
+	fotg210_silence_controller(fotg210);
+	fotg210_reset(fotg210);
+
+	hrtimer_cancel(&fotg210->hrtimer);
+	remove_sysfs_files(fotg210);
+	remove_debug_files(fotg210);
+
+	/* root hub is shut down separately (first, when possible) */
+	spin_lock_irq(&fotg210->lock);
+	end_free_itds(fotg210);
+	spin_unlock_irq(&fotg210->lock);
+	fotg210_mem_cleanup(fotg210);
+
+#ifdef FOTG210_STATS
+	fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+			fotg210->stats.normal, fotg210->stats.error,
+			fotg210->stats.iaa, fotg210->stats.lost_iaa);
+	fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
+			fotg210->stats.complete, fotg210->stats.unlink);
+#endif
+
+	dbg_status(fotg210, "fotg210_stop completed",
+			fotg210_readl(fotg210, &fotg210->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fotg210_init(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	u32 temp;
+	int retval;
+	u32 hcc_params;
+	struct fotg210_qh_hw *hw;
+
+	spin_lock_init(&fotg210->lock);
+
+	/*
+	 * keep the io watchdog enabled by default; well-behaved HCDs can
+	 * turn it off later
+	 */
+	fotg210->need_io_watchdog = 1;
+
+	hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	fotg210->hrtimer.function = fotg210_hrtimer_func;
+	fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+	hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+	/*
+	 * by default set standard 80% (== 100 usec/uframe) max periodic
+	 * bandwidth as required by USB 2.0
+	 */
+	fotg210->uframe_periodic_max = 100;
+
+	/*
+	 * hw default: 1K periodic list heads, one per frame.
+	 * periodic_size can shrink by USBCMD update if hcc_params allows.
+	 */
+	fotg210->periodic_size = DEFAULT_I_TDPS;
+	INIT_LIST_HEAD(&fotg210->intr_qh_list);
+	INIT_LIST_HEAD(&fotg210->cached_itd_list);
+
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		switch (FOTG210_TUNE_FLS) {
+		case 0:
+			fotg210->periodic_size = 1024;
+			break;
+		case 1:
+			fotg210->periodic_size = 512;
+			break;
+		case 2:
+			fotg210->periodic_size = 256;
+			break;
+		default:
+			BUG();
+		}
+	}
+	retval = fotg210_mem_init(fotg210, GFP_KERNEL);
+	if (retval < 0)
+		return retval;
+
+	/* controllers may cache some of the periodic schedule ... */
+	fotg210->i_thresh = 2;
+
+	/*
+	 * dedicate a qh for the async ring head, since we couldn't unlink
+	 * a 'real' qh without stopping the async schedule [4.8].  use it
+	 * as the 'reclamation list head' too.
+	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
+	 * from automatically advancing to the next td after short reads.
+	 */
+	fotg210->async->qh_next.qh = NULL;
+	hw = fotg210->async->hw;
+	hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
+	hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
+	hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+	hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
+	fotg210->async->qh_state = QH_STATE_LINKED;
+	hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
+
+	/* clear interrupt enables, set irq latency */
+	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+		log2_irq_thresh = 0;
+	temp = 1 << (16 + log2_irq_thresh);
+	if (HCC_CANPARK(hcc_params)) {
+		/* HW default park == 3, on hardware that supports it (like
+		 * NVidia and ALI silicon), maximizes throughput on the async
+		 * schedule by avoiding QH fetches between transfers.
+		 *
+		 * With fast usb storage devices and NForce2, "park" seems to
+		 * make problems:  throughput reduction (!), data errors...
+		 */
+		if (park) {
+			park = min_t(unsigned, park, 3);
+			temp |= CMD_PARK;
+			temp |= park << 8;
+		}
+		fotg210_dbg(fotg210, "park %d\n", park);
+	}
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		temp &= ~(3 << 2);
+		temp |= (FOTG210_TUNE_FLS << 2);
+	}
+	fotg210->command = temp;
+
+	/* Accept arbitrarily long scatter-gather lists */
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
+	return 0;
+}
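+
+/* Interrupt latency note: "1 << (16 + log2_irq_thresh)" programs the
+ * USBCMD Interrupt Threshold Control field (bits 23:16) with a power
+ * of two between 1 and 64 micro-frames, i.e. up to 8 ms between
+ * completion interrupts.
+ */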
+
+/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
+static int fotg210_run(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	u32 temp;
+	u32 hcc_params;
+
+	hcd->uses_new_polling = 1;
+
+	/* EHCI spec section 4.1 */
+
+	fotg210_writel(fotg210, fotg210->periodic_dma,
+			&fotg210->regs->frame_list);
+	fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
+			&fotg210->regs->async_next);
+
+	/*
+	 * hcc_params controls whether fotg210->regs->segment must (!!!)
+	 * be used; it constrains QH/ITD/SITD and QTD locations.
+	 * dma_pool consistent memory always uses segment zero.
+	 * streaming mappings for I/O buffers, like pci_map_single(),
+	 * can return segments above 4GB, if the device allows.
+	 *
+	 * NOTE:  the dma mask is visible through dev->dma_mask, so
+	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+	 * host side drivers though.
+	 */
+	hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+	/*
+	 * Philips, Intel, and maybe others need CMD_RUN before the
+	 * root hub will detect new devices (why?); NEC doesn't
+	 */
+	fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+	fotg210->command |= CMD_RUN;
+	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+	dbg_cmd(fotg210, "init", fotg210->command);
+
+	/*
+	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+	 * are explicitly handed to companion controller(s), so no TT is
+	 * involved with the root hub.  (Except where one is integrated,
+	 * and there's no companion controller unless maybe for USB OTG.)
+	 *
+	 * Turning on the CF flag will transfer ownership of all ports
+	 * from the companions to the EHCI controller.  If any of the
+	 * companions are in the middle of a port reset at the time, it
+	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
+	 * guarantees that no resets are in progress.  After we set CF,
+	 * a short delay lets the hardware catch up; new resets shouldn't
+	 * be started before the port switching actions could complete.
+	 */
+	down_write(&ehci_cf_port_reset_rwsem);
+	fotg210->rh_state = FOTG210_RH_RUNNING;
+	/* unblock posted writes */
+	fotg210_readl(fotg210, &fotg210->regs->command);
+	usleep_range(5000, 10000);
+	up_write(&ehci_cf_port_reset_rwsem);
+	fotg210->last_periodic_enable = ktime_get_real();
+
+	temp = HC_VERSION(fotg210,
+			fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+	fotg210_info(fotg210,
+			"USB %x.%x started, EHCI %x.%02x\n",
+			((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f),
+			temp >> 8, temp & 0xff);
+
+	fotg210_writel(fotg210, INTR_MASK,
+			&fotg210->regs->intr_enable); /* Turn On Interrupts */
+
+	/* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it in init()
+	 * since the class device isn't created that early.
+	 */
+	create_debug_files(fotg210);
+	create_sysfs_files(fotg210);
+
+	return 0;
+}
+
+static int fotg210_setup(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	int retval;
+
+	fotg210->regs = (void __iomem *)fotg210->caps +
+			HC_LENGTH(fotg210,
+			fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+	dbg_hcs_params(fotg210, "reset");
+	dbg_hcc_params(fotg210, "reset");
+
+	/* cache this readonly data; minimize chip reads */
+	fotg210->hcs_params = fotg210_readl(fotg210,
+			&fotg210->caps->hcs_params);
+
+	fotg210->sbrn = HCD_USB2;
+
+	/* data structure init */
+	retval = hcd_fotg210_init(hcd);
+	if (retval)
+		return retval;
+
+	retval = fotg210_halt(fotg210);
+	if (retval)
+		return retval;
+
+	fotg210_reset(fotg210);
+
+	return 0;
+}
+
+static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	u32 status, masked_status, pcd_status = 0, cmd;
+	int bh;
+
+	spin_lock(&fotg210->lock);
+
+	status = fotg210_readl(fotg210, &fotg210->regs->status);
+
+	/* e.g. cardbus physical eject */
+	if (status == ~(u32) 0) {
+		fotg210_dbg(fotg210, "device removed\n");
+		goto dead;
+	}
+
+	/*
+	 * We don't use STS_FLR, but some controllers don't like it to
+	 * remain on, so mask it out along with the other status bits.
+	 */
+	masked_status = status & (INTR_MASK | STS_FLR);
+
+	/* Shared IRQ? */
+	if (!masked_status ||
+			unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
+		spin_unlock(&fotg210->lock);
+		return IRQ_NONE;
+	}
+
+	/* clear (just) interrupts */
+	fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
+	cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+	bh = 0;
+
+	/* unrequested/ignored: Frame List Rollover */
+	dbg_status(fotg210, "irq", status);
+
+	/* INT, ERR, and IAA interrupt rates can be throttled */
+
+	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+	if (likely((status & (STS_INT|STS_ERR)) != 0)) {
+		if (likely((status & STS_ERR) == 0))
+			COUNT(fotg210->stats.normal);
+		else
+			COUNT(fotg210->stats.error);
+		bh = 1;
+	}
+
+	/* complete the unlinking of some qh [4.15.2.3] */
+	if (status & STS_IAA) {
+
+		/* Turn off the IAA watchdog */
+		fotg210->enabled_hrtimer_events &=
+			~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
+
+		/*
+		 * Mild optimization: Allow another IAAD to reset the
+		 * hrtimer, if one occurs before the next expiration.
+		 * In theory we could always cancel the hrtimer, but
+		 * tests show that about half the time it will be reset
+		 * for some other event anyway.
+		 */
+		if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
+			++fotg210->next_hrtimer_event;
+
+		/* guard against (alleged) silicon errata */
+		if (cmd & CMD_IAAD)
+			fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
+		if (fotg210->async_iaa) {
+			COUNT(fotg210->stats.iaa);
+			end_unlink_async(fotg210);
+		} else
+			fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
+	}
+
+	/* remote wakeup [4.3.1] */
+	if (status & STS_PCD) {
+		int pstatus;
+		u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+		/* kick root hub later */
+		pcd_status = status;
+
+		/* resume root hub? */
+		if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
+			usb_hcd_resume_root_hub(hcd);
+
+		pstatus = fotg210_readl(fotg210, status_reg);
+
+		if (test_bit(0, &fotg210->suspended_ports) &&
+				((pstatus & PORT_RESUME) ||
+				!(pstatus & PORT_SUSPEND)) &&
+				(pstatus & PORT_PE) &&
+				fotg210->reset_done[0] == 0) {
+
+			/* start 20 msec resume signaling from this port,
+			 * and make hub_wq collect PORT_STAT_C_SUSPEND to
+			 * stop that signaling.  Use 5 ms extra for safety,
+			 * like usb_port_resume() does.
+			 */
+			fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
+			set_bit(0, &fotg210->resuming_ports);
+			fotg210_dbg(fotg210, "port 1 remote wakeup\n");
+			mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
+		}
+	}
+
+	/* PCI errors [4.15.2.4] */
+	if (unlikely((status & STS_FATAL) != 0)) {
+		fotg210_err(fotg210, "fatal error\n");
+		dbg_cmd(fotg210, "fatal", cmd);
+		dbg_status(fotg210, "fatal", status);
+dead:
+		usb_hc_died(hcd);
+
+		/* Don't let the controller do anything more */
+		fotg210->shutdown = true;
+		fotg210->rh_state = FOTG210_RH_STOPPING;
+		fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+		fotg210_writel(fotg210, fotg210->command,
+				&fotg210->regs->command);
+		fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+		fotg210_handle_controller_death(fotg210);
+
+		/* Handle completions when the controller stops */
+		bh = 0;
+	}
+
+	if (bh)
+		fotg210_work(fotg210);
+	spin_unlock(&fotg210->lock);
+	if (pcd_status)
+		usb_hcd_poll_rh_status(hcd);
+	return IRQ_HANDLED;
+}
+
+/* non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+		gfp_t mem_flags)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	struct list_head qtd_list;
+
+	INIT_LIST_HEAD(&qtd_list);
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		/* qh_completions() code doesn't handle all the fault cases
+		 * in multi-TD control transfers.  Even 1KB is rare anyway.
+		 */
+		if (urb->transfer_buffer_length > (16 * 1024))
+			return -EMSGSIZE;
+		/* FALLTHROUGH */
+	/* case PIPE_BULK: */
+	default:
+		if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return submit_async(fotg210, urb, &qtd_list, mem_flags);
+
+	case PIPE_INTERRUPT:
+		if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return intr_submit(fotg210, urb, &qtd_list, mem_flags);
+
+	case PIPE_ISOCHRONOUS:
+		return itd_submit(fotg210, urb, mem_flags);
+	}
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	struct fotg210_qh *qh;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto done;
+
+	switch (usb_pipetype(urb->pipe)) {
+	/* case PIPE_CONTROL: */
+	/* case PIPE_BULK:*/
+	default:
+		qh = (struct fotg210_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		switch (qh->qh_state) {
+		case QH_STATE_LINKED:
+		case QH_STATE_COMPLETING:
+			start_unlink_async(fotg210, qh);
+			break;
+		case QH_STATE_UNLINK:
+		case QH_STATE_UNLINK_WAIT:
+			/* already started */
+			break;
+		case QH_STATE_IDLE:
+			/* QH might be waiting for a Clear-TT-Buffer */
+			qh_completions(fotg210, qh);
+			break;
+		}
+		break;
+
+	case PIPE_INTERRUPT:
+		qh = (struct fotg210_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		switch (qh->qh_state) {
+		case QH_STATE_LINKED:
+		case QH_STATE_COMPLETING:
+			start_unlink_intr(fotg210, qh);
+			break;
+		case QH_STATE_IDLE:
+			qh_completions(fotg210, qh);
+			break;
+		default:
+			fotg210_dbg(fotg210, "bogus qh %p state %d\n",
+					qh, qh->qh_state);
+			goto done;
+		}
+		break;
+
+	case PIPE_ISOCHRONOUS:
+		/* itd... */
+
+		/* wait till next completion, do it then. */
+		/* completion irqs can wait up to 1024 msec, */
+		break;
+	}
+done:
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+	return rc;
+}
+
+/* bulk qh holds the data toggle */
+
+static void fotg210_endpoint_disable(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	unsigned long flags;
+	struct fotg210_qh *qh, *tmp;
+
+	/* ASSERT:  any requests/urbs are being unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+
+rescan:
+	spin_lock_irqsave(&fotg210->lock, flags);
+	qh = ep->hcpriv;
+	if (!qh)
+		goto done;
+
+	/* endpoints can be iso streams.  for now, we don't
+	 * accelerate iso completions ... so spin a while.
+	 */
+	if (qh->hw == NULL) {
+		struct fotg210_iso_stream *stream = ep->hcpriv;
+
+		if (!list_empty(&stream->td_list))
+			goto idle_timeout;
+
+		/* BUG_ON(!list_empty(&stream->free_list)); */
+		kfree(stream);
+		goto done;
+	}
+
+	if (fotg210->rh_state < FOTG210_RH_RUNNING)
+		qh->qh_state = QH_STATE_IDLE;
+	switch (qh->qh_state) {
+	case QH_STATE_LINKED:
+	case QH_STATE_COMPLETING:
+		for (tmp = fotg210->async->qh_next.qh;
+				tmp && tmp != qh;
+				tmp = tmp->qh_next.qh)
+			continue;
+		/* periodic qh self-unlinks on empty, and a COMPLETING qh
+		 * may already be unlinked.
+		 */
+		if (tmp)
+			start_unlink_async(fotg210, qh);
+		/* FALL THROUGH */
+	case QH_STATE_UNLINK:		/* wait for hw to finish? */
+	case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+		spin_unlock_irqrestore(&fotg210->lock, flags);
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case QH_STATE_IDLE:		/* fully unlinked */
+		if (qh->clearing_tt)
+			goto idle_timeout;
+		if (list_empty(&qh->qtd_list)) {
+			qh_destroy(fotg210, qh);
+			break;
+		}
+		/* fall through */
+	default:
+		/* caller was supposed to have unlinked any requests;
+		 * that's not our job.  just leak this memory.
+		 */
+		fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
+				qh, ep->desc.bEndpointAddress, qh->qh_state,
+				list_empty(&qh->qtd_list) ? "" : "(has tds)");
+		break;
+	}
+done:
+	ep->hcpriv = NULL;
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void fotg210_endpoint_reset(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+	struct fotg210_qh *qh;
+	int eptype = usb_endpoint_type(&ep->desc);
+	int epnum = usb_endpoint_num(&ep->desc);
+	int is_out = usb_endpoint_dir_out(&ep->desc);
+	unsigned long flags;
+
+	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+		return;
+
+	spin_lock_irqsave(&fotg210->lock, flags);
+	qh = ep->hcpriv;
+
+	/* For Bulk and Interrupt endpoints we maintain the toggle state
+	 * in the hardware; the toggle bits in udev aren't used at all.
+	 * When an endpoint is reset by usb_clear_halt() we must reset
+	 * the toggle bit in the QH.
+	 */
+	if (qh) {
+		usb_settoggle(qh->dev, epnum, is_out, 0);
+		if (!list_empty(&qh->qtd_list)) {
+			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+		} else if (qh->qh_state == QH_STATE_LINKED ||
+				qh->qh_state == QH_STATE_COMPLETING) {
+
+			/* The toggle value in the QH can't be updated
+			 * while the QH is active.  Unlink it now;
+			 * re-linking will call qh_refresh().
+			 */
+			if (eptype == USB_ENDPOINT_XFER_BULK)
+				start_unlink_async(fotg210, qh);
+			else
+				start_unlink_intr(fotg210, qh);
+		}
+	}
+	spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static int fotg210_get_frame(struct usb_hcd *hcd)
+{
+	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+	return (fotg210_read_frame_index(fotg210) >> 3) %
+		fotg210->periodic_size;
+}
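+
+/*
+ * For illustration: FRINDEX counts microframes, so the ">> 3" above
+ * converts it to a frame number; e.g. a (made-up) index of 0x12345 is
+ * microframe 5 of frame 0x2468, which is then reduced modulo
+ * periodic_size.
+ */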
+
+/* The EHCI portion of the FOTG210 cannot be a separate module or device,
+ * because its registers (and irq) are shared between host/gadget/otg
+ * functions, and in order to facilitate role switching we cannot
+ * give the fotg210 driver exclusive access to those.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct hc_driver fotg210_fotg210_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "Faraday USB2.0 Host Controller",
+	.hcd_priv_size		= sizeof(struct fotg210_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= fotg210_irq,
+	.flags			= HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= hcd_fotg210_init,
+	.start			= fotg210_run,
+	.stop			= fotg210_stop,
+	.shutdown		= fotg210_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= fotg210_urb_enqueue,
+	.urb_dequeue		= fotg210_urb_dequeue,
+	.endpoint_disable	= fotg210_endpoint_disable,
+	.endpoint_reset		= fotg210_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= fotg210_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= fotg210_hub_status_data,
+	.hub_control		= fotg210_hub_control,
+	.bus_suspend		= fotg210_bus_suspend,
+	.bus_resume		= fotg210_bus_resume,
+
+	.relinquish_port	= fotg210_relinquish_port,
+	.port_handed_over	= fotg210_port_handed_over,
+
+	.clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
+};
+
+static void fotg210_init(struct fotg210_hcd *fotg210)
+{
+	u32 value;
+
+	iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+			&fotg210->regs->gmir);
+
+	value = ioread32(&fotg210->regs->otgcsr);
+	value &= ~OTGCSR_A_BUS_DROP;
+	value |= OTGCSR_A_BUS_REQ;
+	iowrite32(value, &fotg210->regs->otgcsr);
+}
+
+/**
+ * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
+ * @pdev: platform device of the FOTG210 host controller
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it.
+ */
+static int fotg210_hcd_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int irq;
+	int retval = -ENODEV;
+	struct fotg210_hcd *fotg210;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pdev->dev.power.power_state = PMSG_ON;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "Found HC with no IRQ. Check %s setup!\n",
+				dev_name(dev));
+		return -ENODEV;
+	}
+
+	irq = res->start;
+
+	hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+			dev_name(dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		dev_err(dev, "failed to create hcd with err %d\n", retval);
+		goto fail_create_hcd;
+	}
+
+	hcd->has_tt = 1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto failed;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	fotg210 = hcd_to_fotg210(hcd);
+
+	fotg210->caps = hcd->regs;
+
+	retval = fotg210_setup(hcd);
+	if (retval)
+		goto failed;
+
+	fotg210_init(fotg210);
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval) {
+		dev_err(dev, "failed to add hcd with err %d\n", retval);
+		goto failed;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	return retval;
+
+failed:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+	return retval;
+}
+
+/**
+ * fotg210_hcd_remove - shutdown processing for FOTG210 HCDs
+ * @pdev: platform device being removed
+ */
+static int fotg210_hcd_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	if (!hcd)
+		return 0;
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static struct platform_driver fotg210_hcd_driver = {
+	.driver = {
+		.name   = "fotg210-hcd",
+	},
+	.probe  = fotg210_hcd_probe,
+	.remove = fotg210_hcd_remove,
+};
+
+static int __init fotg210_hcd_init(void)
+{
+	int retval = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+		pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
+
+	pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n",
+			hcd_name, sizeof(struct fotg210_qh),
+			sizeof(struct fotg210_qtd),
+			sizeof(struct fotg210_itd));
+
+	fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
+
+	retval = platform_driver_register(&fotg210_hcd_driver);
+	if (retval < 0)
+		goto clean;
+	return retval;
+
+clean:
+	debugfs_remove(fotg210_debug_root);
+	fotg210_debug_root = NULL;
+
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+	return retval;
+}
+module_init(fotg210_hcd_init);
+
+static void __exit fotg210_hcd_cleanup(void)
+{
+	platform_driver_unregister(&fotg210_hcd_driver);
+	debugfs_remove(fotg210_debug_root);
+	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fotg210_hcd_cleanup);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
new file mode 100644
index 0000000..7fcd785
--- /dev/null
+++ b/drivers/usb/host/fotg210.h
@@ -0,0 +1,693 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FOTG210_H
+#define __LINUX_FOTG210_H
+
+#include <linux/usb/ehci-dbgp.h>
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32	__le32
+#define __hc16	__le16
+
+/* statistics can be kept for tuning/monitoring */
+struct fotg210_stats {
+	/* irq usage */
+	unsigned long		normal;
+	unsigned long		error;
+	unsigned long		iaa;
+	unsigned long		lost_iaa;
+
+	/* termination of urbs from core */
+	unsigned long		complete;
+	unsigned long		unlink;
+};
+
+/* fotg210_hcd->lock guards shared data against other CPUs:
+ *   fotg210_hcd:	async, unlink, periodic (and shadow), ...
+ *   usb_host_endpoint: hcpriv
+ *   fotg210_qh:	qh_next, qtd_list
+ *   fotg210_qtd:	qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define	FOTG210_MAX_ROOT_PORTS	1		/* see HCS_N_PORTS */
+
+/*
+ * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
+ * controller may be doing DMA.  Lower values mean there's no DMA.
+ */
+enum fotg210_rh_state {
+	FOTG210_RH_HALTED,
+	FOTG210_RH_SUSPENDED,
+	FOTG210_RH_RUNNING,
+	FOTG210_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum fotg210_hrtimer_event {
+	FOTG210_HRTIMER_POLL_ASS,	/* Poll for async schedule off */
+	FOTG210_HRTIMER_POLL_PSS,	/* Poll for periodic schedule off */
+	FOTG210_HRTIMER_POLL_DEAD,	/* Wait for dead controller to stop */
+	FOTG210_HRTIMER_UNLINK_INTR,	/* Wait for interrupt QH unlink */
+	FOTG210_HRTIMER_FREE_ITDS,	/* Wait for unused iTDs and siTDs */
+	FOTG210_HRTIMER_ASYNC_UNLINKS,	/* Unlink empty async QHs */
+	FOTG210_HRTIMER_IAA_WATCHDOG,	/* Handle lost IAA interrupts */
+	FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+	FOTG210_HRTIMER_DISABLE_ASYNC,	/* Wait to disable async sched */
+	FOTG210_HRTIMER_IO_WATCHDOG,	/* Check for missing IRQs */
+	FOTG210_HRTIMER_NUM_EVENTS	/* Must come last */
+};
+#define FOTG210_HRTIMER_NO_EVENT	99
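+
+/*
+ * For illustration (a sketch, mirroring how the driver uses these
+ * values elsewhere): enabled_hrtimer_events is a bitmask indexed by
+ * this enum, and next_hrtimer_event tracks the nearest pending event:
+ *
+ *	fotg210->enabled_hrtimer_events |= BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
+ *	if (FOTG210_HRTIMER_IAA_WATCHDOG < fotg210->next_hrtimer_event)
+ *		fotg210->next_hrtimer_event = FOTG210_HRTIMER_IAA_WATCHDOG;
+ */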
+
+struct fotg210_hcd {			/* one per controller */
+	/* timing support */
+	enum fotg210_hrtimer_event	next_hrtimer_event;
+	unsigned		enabled_hrtimer_events;
+	ktime_t			hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
+	struct hrtimer		hrtimer;
+
+	int			PSS_poll_count;
+	int			ASS_poll_count;
+	int			died_poll_count;
+
+	/* glue to PCI and HCD framework */
+	struct fotg210_caps __iomem *caps;
+	struct fotg210_regs __iomem *regs;
+	struct ehci_dbg_port __iomem *debug;
+
+	__u32			hcs_params;	/* cached register copy */
+	spinlock_t		lock;
+	enum fotg210_rh_state	rh_state;
+
+	/* general schedule support */
+	bool			scanning:1;
+	bool			need_rescan:1;
+	bool			intr_unlinking:1;
+	bool			async_unlinking:1;
+	bool			shutdown:1;
+	struct fotg210_qh		*qh_scan_next;
+
+	/* async schedule support */
+	struct fotg210_qh		*async;
+	struct fotg210_qh		*dummy;		/* For AMD quirk use */
+	struct fotg210_qh		*async_unlink;
+	struct fotg210_qh		*async_unlink_last;
+	struct fotg210_qh		*async_iaa;
+	unsigned		async_unlink_cycle;
+	unsigned		async_count;	/* async activity count */
+
+	/* periodic schedule support */
+#define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
+	unsigned		periodic_size;
+	__hc32			*periodic;	/* hw periodic table */
+	dma_addr_t		periodic_dma;
+	struct list_head	intr_qh_list;
+	unsigned		i_thresh;	/* uframes HC might cache */
+
+	union fotg210_shadow	*pshadow;	/* mirror hw periodic table */
+	struct fotg210_qh		*intr_unlink;
+	struct fotg210_qh		*intr_unlink_last;
+	unsigned		intr_unlink_cycle;
+	unsigned		now_frame;	/* frame from HC hardware */
+	unsigned		next_frame;	/* scan periodic, start here */
+	unsigned		intr_count;	/* intr activity count */
+	unsigned		isoc_count;	/* isoc activity count */
+	unsigned		periodic_count;	/* periodic activity count */
+	/* max periodic time per uframe */
+	unsigned		uframe_periodic_max;
+
+
+	/* list of itds completed while now_frame was still active */
+	struct list_head	cached_itd_list;
+	struct fotg210_itd	*last_itd_to_free;
+
+	/* per root hub port */
+	unsigned long		reset_done[FOTG210_MAX_ROOT_PORTS];
+
+	/* bit vectors (one bit per port)
+	 * which ports were already suspended at the start of a bus suspend
+	 */
+	unsigned long		bus_suspended;
+
+	/* which ports are dedicated to the companion controller */
+	unsigned long		companion_ports;
+
+	/* which ports are owned by the companion during a bus suspend */
+	unsigned long		owned_ports;
+
+	/* which ports have the change-suspend feature turned on */
+	unsigned long		port_c_suspend;
+
+	/* which ports are suspended */
+	unsigned long		suspended_ports;
+
+	/* which ports have started to resume */
+	unsigned long		resuming_ports;
+
+	/* per-HC memory pools (could be per-bus, but ...) */
+	struct dma_pool		*qh_pool;	/* qh per active urb */
+	struct dma_pool		*qtd_pool;	/* one or more per qh */
+	struct dma_pool		*itd_pool;	/* itd per iso urb */
+
+	unsigned		random_frame;
+	unsigned long		next_statechange;
+	ktime_t			last_periodic_enable;
+	u32			command;
+
+	/* SILICON QUIRKS */
+	unsigned		need_io_watchdog:1;
+	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
+
+	u8			sbrn;		/* packed release number */
+
+	/* irq statistics */
+#ifdef FOTG210_STATS
+	struct fotg210_stats	stats;
+#	define COUNT(x) ((x)++)
+#else
+#	define COUNT(x)
+#endif
+
+	/* debug files */
+	struct dentry		*debug_dir;
+};
+
+/* convert between an HCD pointer and the corresponding FOTG210_HCD */
+static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
+{
+	return (struct fotg210_hcd *)(hcd->hcd_priv);
+}
+static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
+{
+	return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
+}
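+
+/*
+ * For illustration, the two conversions above are inverses (a sketch):
+ *
+ *	struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ *	WARN_ON(fotg210_to_hcd(fotg210) != hcd);
+ */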
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fotg210_caps {
+	/* these fields are specified as 8 and 16 bit registers,
+	 * but some hosts can't perform 8 or 16 bit PCI accesses.
+	 * Some hosts treat caplength and hciversion as parts of a 32-bit
+	 * register, others treat them as two separate registers; this
+	 * affects the memory map for big endian controllers.
+	 */
+	u32		hc_capbase;
+#define HC_LENGTH(fotg210, p)	(0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+				(fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
+#define HC_VERSION(fotg210, p)	(0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+				(fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
+	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
+
+	u32		hcc_params;	/* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
+	u8		portroute[8];	 /* nibbles for routing - offset 0xC */
+};
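+
+/*
+ * For illustration, decoding a little-endian capbase value of
+ * 0x01000010 with the macros above (the value is made up):
+ *
+ *	HC_LENGTH(fotg210, 0x01000010)  == 0x10   (operational regs offset)
+ *	HC_VERSION(fotg210, 0x01000010) == 0x0100 (EHCI 1.0)
+ */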
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fotg210_regs {
+
+	/* USBCMD: offset 0x00 */
+	u32		command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
+#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
+#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
+#define CMD_ASE		(1<<5)		/* async schedule enable */
+#define CMD_PSE		(1<<4)		/* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET	(1<<1)		/* reset HC not bus */
+#define CMD_RUN		(1<<0)		/* start/stop HC */
+
+	/* USBSTS: offset 0x04 */
+	u32		status;
+#define STS_ASS		(1<<15)		/* Async Schedule Status */
+#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
+#define STS_RECL	(1<<13)		/* Reclamation */
+#define STS_HALT	(1<<12)		/* Not running (any reason) */
+/* some bits reserved */
+	/* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA		(1<<5)		/* Interrupted on async advance */
+#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
+#define STS_FLR		(1<<3)		/* frame list rolled over */
+#define STS_PCD		(1<<2)		/* port change detect */
+#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
+#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */
+
+	/* USBINTR: offset 0x08 */
+	u32		intr_enable;
+
+	/* FRINDEX: offset 0x0C */
+	u32		frame_index;	/* current microframe number */
+	/* CTRLDSSEGMENT: offset 0x10 */
+	u32		segment;	/* address bits 63:32 if needed */
+	/* PERIODICLISTBASE: offset 0x14 */
+	u32		frame_list;	/* points to periodic list */
+	/* ASYNCLISTADDR: offset 0x18 */
+	u32		async_next;	/* address of next async queue head */
+
+	u32	reserved1;
+	/* PORTSC: offset 0x20 */
+	u32	port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
+#define PORT_RESET	(1<<8)		/* reset port */
+#define PORT_SUSPEND	(1<<7)		/* suspend port */
+#define PORT_RESUME	(1<<6)		/* resume it */
+#define PORT_PEC	(1<<3)		/* port enable change */
+#define PORT_PE		(1<<2)		/* port enable */
+#define PORT_CSC	(1<<1)		/* connect status change */
+#define PORT_CONNECT	(1<<0)		/* device connected */
+#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC)
+	u32     reserved2[19];
+
+	/* OTGCSR: offset 0x70 */
+	u32     otgcsr;
+#define OTGCSR_HOST_SPD_TYP     (3 << 22)
+#define OTGCSR_A_BUS_DROP	(1 << 5)
+#define OTGCSR_A_BUS_REQ	(1 << 4)
+
+	/* OTGISR: offset 0x74 */
+	u32     otgisr;
+#define OTGISR_OVC	(1 << 10)
+
+	u32     reserved3[15];
+
+	/* GMIR: offset 0xB4 */
+	u32     gmir;
+#define GMIR_INT_POLARITY	(1 << 3) /* Active High */
+#define GMIR_MHC_INT		(1 << 2)
+#define GMIR_MOTG_INT		(1 << 1)
+#define GMIR_MDEV_INT	(1 << 0)
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define	QTD_NEXT(fotg210, dma)	cpu_to_hc32(fotg210, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fotg210_qtd {
+	/* first part defined by EHCI spec */
+	__hc32			hw_next;	/* see EHCI 3.5.1 */
+	__hc32			hw_alt_next;    /* see EHCI 3.5.2 */
+	__hc32			hw_token;	/* see EHCI 3.5.3 */
+#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
+#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
+#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
+#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
+#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
+#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
+
+#define ACTIVE_BIT(fotg210)	cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
+#define HALT_BIT(fotg210)		cpu_to_hc32(fotg210, QTD_STS_HALT)
+#define STATUS_BIT(fotg210)	cpu_to_hc32(fotg210, QTD_STS_STS)
+
+	__hc32			hw_buf[5];	/* see EHCI 3.5.4 */
+	__hc32			hw_buf_hi[5];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		qtd_dma;		/* qtd address */
+	struct list_head	qtd_list;		/* sw qtd list */
+	struct urb		*urb;			/* qtd's urb */
+	size_t			length;			/* length of buffer */
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fotg210)	cpu_to_hc32(fotg210, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
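+
+/*
+ * For illustration, decoding a (made-up) completed qTD token of
+ * 0x02000100 with the macros above:
+ *
+ *	QTD_LENGTH(0x02000100) == 0x200	(512 bytes not transferred)
+ *	QTD_PID(0x02000100)    == 1	(IN)
+ *	IS_SHORT_READ(0x02000100)	(true: an IN that came up short)
+ */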
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fotg210, dma)	((dma) & cpu_to_hc32(fotg210, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD	(0 << 1)
+#define Q_TYPE_QH	(1 << 1)
+#define Q_TYPE_SITD	(2 << 1)
+#define Q_TYPE_FSTN	(3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fotg210, dma) \
+	(cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FOTG210_LIST_END(fotg210) \
+	cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure.  That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule.  Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fotg210_shadow {
+	struct fotg210_qh	*qh;		/* Q_TYPE_QH */
+	struct fotg210_itd	*itd;		/* Q_TYPE_ITD */
+	struct fotg210_fstn	*fstn;		/* Q_TYPE_FSTN */
+	__hc32			*hw_next;	/* (all types) */
+	void			*ptr;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fotg210_qh_hw {
+	__hc32			hw_next;	/* see EHCI 3.6.1 */
+	__hc32			hw_info1;	/* see EHCI 3.6.2 */
+#define	QH_CONTROL_EP	(1 << 27)	/* FS/LS control endpoint */
+#define	QH_HEAD		(1 << 15)	/* Head of async reclamation list */
+#define	QH_TOGGLE_CTL	(1 << 14)	/* Data toggle control */
+#define	QH_HIGH_SPEED	(2 << 12)	/* Endpoint speed */
+#define	QH_LOW_SPEED	(1 << 12)
+#define	QH_FULL_SPEED	(0 << 12)
+#define	QH_INACTIVATE	(1 << 7)	/* Inactivate on next transaction */
+	__hc32			hw_info2;	/* see EHCI 3.6.2 */
+#define	QH_SMASK	0x000000ff
+#define	QH_CMASK	0x0000ff00
+#define	QH_HUBADDR	0x007f0000
+#define	QH_HUBPORT	0x3f800000
+#define	QH_MULT		0xc0000000
+	__hc32			hw_current;	/* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct fotg210_qtd) */
+	__hc32			hw_qtd_next;
+	__hc32			hw_alt_next;
+	__hc32			hw_token;
+	__hc32			hw_buf[5];
+	__hc32			hw_buf_hi[5];
+} __aligned(32);
+
+struct fotg210_qh {
+	struct fotg210_qh_hw	*hw;		/* Must come first */
+	/* the rest is HCD-private */
+	dma_addr_t		qh_dma;		/* address of qh */
+	union fotg210_shadow	qh_next;	/* ptr to qh; or periodic */
+	struct list_head	qtd_list;	/* sw qtd list */
+	struct list_head	intr_node;	/* list of intr QHs */
+	struct fotg210_qtd	*dummy;
+	struct fotg210_qh	*unlink_next;	/* next on unlink list */
+
+	unsigned		unlink_cycle;
+
+	u8			needs_rescan;	/* Dequeue during giveback */
+	u8			qh_state;
+#define	QH_STATE_LINKED		1		/* HC sees this */
+#define	QH_STATE_UNLINK		2		/* HC may still see this */
+#define	QH_STATE_IDLE		3		/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on unlink q */
+#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */
+
+	u8			xacterrs;	/* XactErr retry counter */
+#define	QH_XACTERR_MAX		32		/* XactErr retry limit */
+
+	/* periodic schedule info */
+	u8			usecs;		/* intr bandwidth */
+	u8			gap_uf;		/* uframes split/csplit gap */
+	u8			c_usecs;	/* ... split completion bw */
+	u16			tt_usecs;	/* tt downstream bandwidth */
+	unsigned short		period;		/* polling interval */
+	unsigned short		start;		/* where polling starts */
+#define NO_FRAME ((unsigned short)~0)			/* pick new start */
+
+	struct usb_device	*dev;		/* access to TT */
+	unsigned		is_out:1;	/* bulk or intr OUT */
+	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fotg210_iso_packet {
+	/* These will be copied to iTD when scheduling */
+	u64			bufp;		/* itd->hw_bufp{,_hi}[pg] |= */
+	__hc32			transaction;	/* itd->hw_transaction[i] |= */
+	u8			cross;		/* buf crosses pages */
+	/* for full speed OUT splits */
+	u32			buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds);
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fotg210_iso_sched {
+	struct list_head	td_list;
+	unsigned		span;
+	struct fotg210_iso_packet	packet[0];
+};
+
+/*
+ * fotg210_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fotg210_iso_stream {
+	/* first field matches fotg210_qh, but is NULL */
+	struct fotg210_qh_hw	*hw;
+
+	u8			bEndpointAddress;
+	u8			highspeed;
+	struct list_head	td_list;	/* queued itds */
+	struct list_head	free_list;	/* list of unused itds */
+	struct usb_device	*udev;
+	struct usb_host_endpoint *ep;
+
+	/* output of (re)scheduling */
+	int			next_uframe;
+	__hc32			splits;
+
+	/* the rest is derived from the endpoint descriptor,
+	 * trusting urb->interval == f(epdesc->bInterval) and
+	 * including the extra info for hw_bufp[0..2]
+	 */
+	u8			usecs, c_usecs;
+	u16			interval;
+	u16			tt_usecs;
+	u16			maxp;
+	u16			raw_mask;
+	unsigned		bandwidth;
+
+	/* This is used to initialize iTD's hw_bufp fields */
+	__hc32			buf0;
+	__hc32			buf1;
+	__hc32			buf2;
+
+	/* this is used to initialize sITD's tt info */
+	__hc32			address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fotg210_itd {
+	/* first part defined by EHCI spec */
+	__hc32			hw_next;	/* see EHCI 3.3.1 */
+	__hc32			hw_transaction[8]; /* see EHCI 3.3.2 */
+#define FOTG210_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
+#define FOTG210_ISOC_BUF_ERR	(1<<30)	/* Data buffer error */
+#define FOTG210_ISOC_BABBLE	(1<<29)	/* babble detected */
+#define FOTG210_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
+#define	FOTG210_ITD_LENGTH(tok)	(((tok)>>16) & 0x0fff)
+#define	FOTG210_ITD_IOC		(1 << 15)	/* interrupt on complete */
+
+#define ITD_ACTIVE(fotg210)	cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
+
+	__hc32			hw_bufp[7];	/* see EHCI 3.3.3 */
+	__hc32			hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		itd_dma;	/* for this itd */
+	union fotg210_shadow	itd_next;	/* ptr to periodic q entry */
+
+	struct urb		*urb;
+	struct fotg210_iso_stream	*stream;	/* endpoint's queue */
+	struct list_head	itd_list;	/* list of stream's itds */
+
+	/* any/all hw_transactions here may be used by that urb */
+	unsigned		frame;		/* where scheduled */
+	unsigned		pg;
+	unsigned		index[8];	/* in urb->iso_frame_desc */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fotg210_fstn {
+	__hc32			hw_next;	/* any periodic q entry */
+	__hc32			hw_prev;	/* qh or FOTG210_LIST_END */
+
+	/* the rest is HCD-private */
+	dma_addr_t		fstn_dma;
+	union fotg210_shadow	fstn_next;	/* ptr to periodic q entry */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
+		fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup)
+
+#define fotg210_prepare_ports_for_controller_resume(fotg210)		\
+		fotg210_adjust_port_wakeup_flags(fotg210, false, false)
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature.  Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+	return (readl(&fotg210->regs->otgcsr)
+		& OTGCSR_HOST_SPD_TYP) >> 22;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+	switch (fotg210_get_speed(fotg210, portsc)) {
+	case 0:
+		return 0;
+	case 1:
+		return USB_PORT_STAT_LOW_SPEED;
+	case 2:
+	default:
+		return USB_PORT_STAT_HIGH_SPEED;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define	fotg210_has_fsl_portno_bug(e)		(0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (celleb companion chip) implement
+ * them in big endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ */
+
+#define fotg210_big_endian_mmio(e)	0
+#define fotg210_big_endian_capbase(e)	0
+
+static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
+		__u32 __iomem *regs)
+{
+	return readl(regs);
+}
+
+static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
+		const unsigned int val, __u32 __iomem *regs)
+{
+	writel(val, regs);
+}
+
+/* cpu to fotg210 */
+static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
+{
+	return cpu_to_le32(x);
+}
+
+/* fotg210 to cpu */
+static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
+{
+	return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
+			       const __hc32 *x)
+{
+	return le32_to_cpup(x);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
+{
+	return fotg210_readl(fotg210, &fotg210->regs->frame_index);
+}
+
+#define fotg210_itdlen(urb, desc, t) ({			\
+	usb_pipein((urb)->pipe) ?				\
+	(desc)->length - FOTG210_ITD_LENGTH(t) :			\
+	FOTG210_ITD_LENGTH(t);					\
+})
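+
+/*
+ * For illustration (a sketch; semantics inferred from the macro above,
+ * which treats the hw length field as a residue for IN transfers):
+ * with desc->length == 1024 and FOTG210_ITD_LENGTH(t) == 24, an IN
+ * packet yields 1024 - 24 == 1000 bytes, while an OUT packet yields
+ * the decoded 24 directly.
+ */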
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FOTG210_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
new file mode 100644
index 0000000..677f9d5
--- /dev/null
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Setup platform devices needed by the Freescale multi-port host
+ * and/or dual-role USB controller modules based on the description
+ * in flat device tree.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+struct fsl_usb2_dev_data {
+	char *dr_mode;		/* controller mode */
+	char *drivers[3];	/* drivers to instantiate for this mode */
+	enum fsl_usb2_operating_modes op_mode;	/* operating mode */
+};
+
+static struct fsl_usb2_dev_data dr_mode_data[] = {
+	{
+		.dr_mode = "host",
+		.drivers = { "fsl-ehci", NULL, NULL, },
+		.op_mode = FSL_USB2_DR_HOST,
+	},
+	{
+		.dr_mode = "otg",
+		.drivers = { "fsl-usb2-otg", "fsl-ehci", "fsl-usb2-udc", },
+		.op_mode = FSL_USB2_DR_OTG,
+	},
+	{
+		.dr_mode = "peripheral",
+		.drivers = { "fsl-usb2-udc", NULL, NULL, },
+		.op_mode = FSL_USB2_DR_DEVICE,
+	},
+};
+
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+{
+	const unsigned char *prop;
+	int i;
+
+	prop = of_get_property(np, "dr_mode", NULL);
+	if (prop) {
+		for (i = 0; i < ARRAY_SIZE(dr_mode_data); i++) {
+			if (!strcmp(prop, dr_mode_data[i].dr_mode))
+				return &dr_mode_data[i];
+		}
+	}
+	pr_warn("%pOF: Invalid 'dr_mode' property, fallback to host mode\n",
+		np);
+	return &dr_mode_data[0]; /* mode not specified or invalid, use host */
+}
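+
+/*
+ * For illustration: with dr_mode = "otg" in the device tree, the table
+ * above makes the probe routine below instantiate "fsl-usb2-otg",
+ * "fsl-ehci" and "fsl-usb2-udc" child platform devices.
+ */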
+
+static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
+{
+	if (!phy_type)
+		return FSL_USB2_PHY_NONE;
+	if (!strcasecmp(phy_type, "ulpi"))
+		return FSL_USB2_PHY_ULPI;
+	if (!strcasecmp(phy_type, "utmi"))
+		return FSL_USB2_PHY_UTMI;
+	if (!strcasecmp(phy_type, "utmi_wide"))
+		return FSL_USB2_PHY_UTMI_WIDE;
+	if (!strcasecmp(phy_type, "utmi_dual"))
+		return FSL_USB2_PHY_UTMI_DUAL;
+	if (!strcasecmp(phy_type, "serial"))
+		return FSL_USB2_PHY_SERIAL;
+
+	return FSL_USB2_PHY_NONE;
+}
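+
+/*
+ * For illustration, a device-tree fragment selecting a PHY mode
+ * (a sketch; the node name and values are examples only):
+ *
+ *	usb@22000 {
+ *		compatible = "fsl-usb2-mph";
+ *		phy_type = "ulpi";	-> maps to FSL_USB2_PHY_ULPI
+ *	};
+ */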
+
+static struct platform_device *fsl_usb2_device_register(
+					struct platform_device *ofdev,
+					struct fsl_usb2_platform_data *pdata,
+					const char *name, int id)
+{
+	struct platform_device *pdev;
+	const struct resource *res = ofdev->resource;
+	unsigned int num = ofdev->num_resources;
+	int retval;
+
+	pdev = platform_device_alloc(name, id);
+	if (!pdev) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	pdev->dev.parent = &ofdev->dev;
+
+	pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
+
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
+	else
+		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+	retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+	if (retval)
+		goto error;
+
+	if (num) {
+		retval = platform_device_add_resources(pdev, res, num);
+		if (retval)
+			goto error;
+	}
+
+	retval = platform_device_add(pdev);
+	if (retval)
+		goto error;
+
+	return pdev;
+
+error:
+	platform_device_put(pdev);
+	return ERR_PTR(retval);
+}
+
+static const struct of_device_id fsl_usb2_mph_dr_of_match[];
+
+static enum fsl_usb2_controller_ver usb_get_ver_info(struct device_node *np)
+{
+	enum fsl_usb2_controller_ver ver = FSL_USB_VER_NONE;
+
+	/*
+	 * returns FSL_USB_VER_1_6 for usb controller version 1.6
+	 * returns FSL_USB_VER_2_2 for usb controller version 2.2
+	 * returns FSL_USB_VER_2_4 for usb controller version 2.4
+	 * returns FSL_USB_VER_2_5 for usb controller version 2.5
+	 * returns FSL_USB_VER_OLD for other recognized controllers,
+	 * and FSL_USB_VER_NONE otherwise
+	 */
+	if (of_device_is_compatible(np, "fsl-usb2-dr")) {
+		if (of_device_is_compatible(np, "fsl-usb2-dr-v1.6"))
+			ver = FSL_USB_VER_1_6;
+		else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.2"))
+			ver = FSL_USB_VER_2_2;
+		else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.4"))
+			ver = FSL_USB_VER_2_4;
+		else if (of_device_is_compatible(np, "fsl-usb2-dr-v2.5"))
+			ver = FSL_USB_VER_2_5;
+		else /* for previous controller versions */
+			ver = FSL_USB_VER_OLD;
+
+		if (ver > FSL_USB_VER_NONE)
+			return ver;
+	}
+
+	if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+		return FSL_USB_VER_OLD;
+
+	if (of_device_is_compatible(np, "fsl-usb2-mph")) {
+		if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
+			ver = FSL_USB_VER_1_6;
+		else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.2"))
+			ver = FSL_USB_VER_2_2;
+		else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.4"))
+			ver = FSL_USB_VER_2_4;
+		else if (of_device_is_compatible(np, "fsl-usb2-mph-v2.5"))
+			ver = FSL_USB_VER_2_5;
+		else /* for previous controller versions */
+			ver = FSL_USB_VER_OLD;
+	}
+
+	return ver;
+}
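+
+/*
+ * For illustration (a sketch): a node with
+ *
+ *	compatible = "fsl-usb2-dr-v2.2", "fsl-usb2-dr";
+ *
+ * resolves to FSL_USB_VER_2_2 in the logic above.
+ */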
+
+static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
+{
+	struct device_node *np = ofdev->dev.of_node;
+	struct platform_device *usb_dev;
+	struct fsl_usb2_platform_data data, *pdata;
+	struct fsl_usb2_dev_data *dev_data;
+	const struct of_device_id *match;
+	const unsigned char *prop;
+	static unsigned int idx;
+	int i;
+
+	if (!of_device_is_available(np))
+		return -ENODEV;
+
+	match = of_match_device(fsl_usb2_mph_dr_of_match, &ofdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	pdata = &data;
+	if (match->data)
+		memcpy(pdata, match->data, sizeof(data));
+	else
+		memset(pdata, 0, sizeof(data));
+
+	dev_data = get_dr_mode_data(np);
+
+	if (of_device_is_compatible(np, "fsl-usb2-mph")) {
+		if (of_get_property(np, "port0", NULL))
+			pdata->port_enables |= FSL_USB2_PORT0_ENABLED;
+
+		if (of_get_property(np, "port1", NULL))
+			pdata->port_enables |= FSL_USB2_PORT1_ENABLED;
+
+		pdata->operating_mode = FSL_USB2_MPH_HOST;
+	} else {
+		if (of_get_property(np, "fsl,invert-drvvbus", NULL))
+			pdata->invert_drvvbus = 1;
+
+		if (of_get_property(np, "fsl,invert-pwr-fault", NULL))
+			pdata->invert_pwr_fault = 1;
+
+		/* setup mode selected in the device tree */
+		pdata->operating_mode = dev_data->op_mode;
+	}
+
+	prop = of_get_property(np, "phy_type", NULL);
+	pdata->phy_mode = determine_usb_phy(prop);
+	pdata->controller_ver = usb_get_ver_info(np);
+
+	/* Activate errata workarounds advertised in the device tree */
+	pdata->has_fsl_erratum_a007792 =
+		of_property_read_bool(np, "fsl,usb-erratum-a007792");
+	pdata->has_fsl_erratum_a005275 =
+		of_property_read_bool(np, "fsl,usb-erratum-a005275");
+	pdata->has_fsl_erratum_a005697 =
+		of_property_read_bool(np, "fsl,usb_erratum-a005697");
+
+	/*
+	 * Determine whether phy_clk_valid needs to be checked
+	 * by reading a property in the device tree
+	 */
+	pdata->check_phy_clk_valid =
+		of_property_read_bool(np, "phy-clk-valid");
+
+	if (pdata->have_sysif_regs) {
+		if (pdata->controller_ver == FSL_USB_VER_NONE) {
+			dev_warn(&ofdev->dev, "Could not get controller version\n");
+			return -ENODEV;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
+		if (!dev_data->drivers[i])
+			continue;
+		usb_dev = fsl_usb2_device_register(ofdev, pdata,
+					dev_data->drivers[i], idx);
+		if (IS_ERR(usb_dev)) {
+			dev_err(&ofdev->dev, "Can't register usb device\n");
+			return PTR_ERR(usb_dev);
+		}
+	}
+	idx++;
+	return 0;
+}
+
+static int __unregister_subdev(struct device *dev, void *d)
+{
+	platform_device_unregister(to_platform_device(dev));
+	return 0;
+}
+
+static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
+{
+	device_for_each_child(&ofdev->dev, NULL, __unregister_subdev);
+	return 0;
+}
+
+#ifdef CONFIG_PPC_MPC512x
+
+#define USBGENCTRL		0x200		/* NOTE: big endian */
+#define GC_WU_INT_CLR		(1 << 5)	/* Wakeup int clear */
+#define GC_ULPI_SEL		(1 << 4)	/* ULPI i/f select (usb0 only) */
+#define GC_PPP			(1 << 3)	/* Inv. Port Power Polarity */
+#define GC_PFP			(1 << 2)	/* Inv. Power Fault Polarity */
+#define GC_WU_ULPI_EN		(1 << 1)	/* Wakeup on ULPI event */
+#define GC_WU_IE		(1 << 1)	/* Wakeup interrupt enable */
+
+#define ISIPHYCTRL		0x204		/* NOTE: big endian */
+#define PHYCTRL_PHYE		(1 << 4)	/* On-chip UTMI PHY enable */
+#define PHYCTRL_BSENH		(1 << 3)	/* Bit Stuff Enable High */
+#define PHYCTRL_BSEN		(1 << 2)	/* Bit Stuff Enable */
+#define PHYCTRL_LSFE		(1 << 1)	/* Line State Filter Enable */
+#define PHYCTRL_PXE		(1 << 0)	/* PHY oscillator enable */
+
+int fsl_usb2_mpc5121_init(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct clk *clk;
+	int err;
+
+	clk = devm_clk_get(pdev->dev.parent, "ipg");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clk\n");
+		return PTR_ERR(clk);
+	}
+	err = clk_prepare_enable(clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable clk\n");
+		return err;
+	}
+	pdata->clk = clk;
+
+	if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
+		u32 reg = 0;
+
+		if (pdata->invert_drvvbus)
+			reg |= GC_PPP;
+
+		if (pdata->invert_pwr_fault)
+			reg |= GC_PFP;
+
+		out_be32(pdata->regs + ISIPHYCTRL, PHYCTRL_PHYE | PHYCTRL_PXE);
+		out_be32(pdata->regs + USBGENCTRL, reg);
+	}
+	return 0;
+}
+
+static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
+{
+	struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+	pdata->regs = NULL;
+
+	if (pdata->clk)
+		clk_disable_unprepare(pdata->clk);
+}
+
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+	.big_endian_desc = 1,
+	.big_endian_mmio = 1,
+	.es = 1,
+	.have_sysif_regs = 0,
+	.le_setup_buf = 1,
+	.init = fsl_usb2_mpc5121_init,
+	.exit = fsl_usb2_mpc5121_exit,
+};
+#endif /* CONFIG_PPC_MPC512x */
+
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+	.have_sysif_regs = 1,
+};
+
+static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
+	{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+	{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
+#ifdef CONFIG_PPC_MPC512x
+	{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
+#endif
+	{},
+};
+MODULE_DEVICE_TABLE(of, fsl_usb2_mph_dr_of_match);
+
+static struct platform_driver fsl_usb2_mph_dr_driver = {
+	.driver = {
+		.name = "fsl-usb2-mph-dr",
+		.of_match_table = fsl_usb2_mph_dr_of_match,
+	},
+	.probe	= fsl_usb2_mph_dr_of_probe,
+	.remove	= fsl_usb2_mph_dr_of_remove,
+};
+
+module_platform_driver(fsl_usb2_mph_dr_driver);
+
+MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
new file mode 100644
index 0000000..09a8ebd
--- /dev/null
+++ b/drivers/usb/host/hwa-hc.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Host Wire Adapter:
+ * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * The HWA driver is a simple layer that forwards requests to the WAHC
+ * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
+ * Controller) layers.
+ *
+ * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
+ * Host Controller that is connected to your system via USB (a USB
+ * dongle that implements a USB host...). There is also a Device Wire
+ * Adapter, DWA (a Wireless USB hub), that uses the same mechanism for
+ * transferring data (it is after all a USB host connected via
+ * Wireless USB), so we have a common layer called Wire Adapter Host
+ * Controller that does all the hard work. The WUSBHC (Wireless USB
+ * Host Controller) is the part common to WUSB Host Controllers, the
+ * HWA and the PCI-based one, that is implemented following the WHCI
+ * spec. All these layers are implemented in ../wusbcore.
+ *
+ * The main functions are hwahc_op_urb_{en,de}queue(), which pass the
+ * job of converting and transporting a URB down to the Wire Adapter
+ * layer.
+ *
+ * Entry points:
+ *
+ *   hwahc_driver_*()   Driver initialization, registration and
+ *                      teardown.
+ *
+ *   hwahc_probe()	New device came up, create an instance for
+ *                      it [from device enumeration].
+ *
+ *   hwahc_disconnect()	Remove device instance [from device
+ *                      enumeration].
+ *
+ *   [__]hwahc_op_*()   Host Wire Adapter specific functions for
+ *                      starting/stopping/etc (some might be made also
+ *                      DWA).
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include "../wusbcore/wa-hc.h"
+#include "../wusbcore/wusbhc.h"
+
+struct hwahc {
+	struct wusbhc wusbhc;	/* has to be 1st */
+	struct wahc wa;
+};
+
+/*
+ * FIXME should be wusbhc
+ *
+ * NOTE: we need to cache the Cluster ID because later...there is no
+ *       way to get it :)
+ */
+static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
+{
+	int result;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct wahc *wa = &hwahc->wa;
+	struct device *dev = &wa->usb_iface->dev;
+
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_CLUSTER_ID,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			cluster_id,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (result < 0) {
+		dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
+			cluster_id, result);
+	} else {
+		wusbhc->cluster_id = cluster_id;
+		dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n",
+			 cluster_id);
+	}
+	return result;
+}
+
+static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_NUM_DNTS,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			interval << 8 | slots,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
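+
+/*
+ * For illustration: the wValue above packs both parameters into one
+ * 16-bit field, e.g. interval = 4 and slots = 8 encode as
+ * (4 << 8 | 8) == 0x0408.
+ */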
+
+/*
+ * Reset a WUSB host controller and wait for it to complete doing it.
+ *
+ * @usb_hcd:	Pointer to WUSB Host Controller instance.
+ *
+ */
+static int hwahc_op_reset(struct usb_hcd *usb_hcd)
+{
+	int result;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	mutex_lock(&wusbhc->mutex);
+	wa_nep_disarm(&hwahc->wa);
+	result = __wa_set_feature(&hwahc->wa, WA_RESET);
+	if (result < 0) {
+		dev_err(dev, "error commanding HC to reset: %d\n", result);
+		goto error_unlock;
+	}
+	result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
+	if (result < 0) {
+		dev_err(dev, "error waiting for HC to reset: %d\n", result);
+		goto error_unlock;
+	}
+error_unlock:
+	mutex_unlock(&wusbhc->mutex);
+	return result;
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int hwahc_op_start(struct usb_hcd *usb_hcd)
+{
+	u8 addr;
+	int result;
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	result = -ENOSPC;
+	mutex_lock(&wusbhc->mutex);
+	addr = wusb_cluster_id_get();
+	if (addr == 0)
+		goto error_cluster_id_get;
+	result = __hwahc_set_cluster_id(hwahc, addr);
+	if (result < 0)
+		goto error_set_cluster_id;
+
+	usb_hcd->uses_new_polling = 1;
+	set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
+	usb_hcd->state = HC_STATE_RUNNING;
+
+	/*
+	 * prevent USB core from suspending the root hub since
+	 * bus_suspend and bus_resume are not yet supported.
+	 */
+	pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
+
+	result = 0;
+out:
+	mutex_unlock(&wusbhc->mutex);
+	return result;
+
+error_set_cluster_id:
+	wusb_cluster_id_put(wusbhc->cluster_id);
+error_cluster_id_get:
+	goto out;
+
+}
+
+/*
+ * No need to abort pipes: by the time this is called, all the children
+ * have been disconnected, and that took care of it [through
+ * usb_disable_interface() -> usb_disable_endpoint() ->
+ * hwahc_op_ep_disable() -> rpipe_ep_disable()].
+ */
+static void hwahc_op_stop(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+
+	mutex_lock(&wusbhc->mutex);
+	wusb_cluster_id_put(wusbhc->cluster_id);
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+
+	/*
+	 * We cannot query the HWA for the WUSB time since that requires
+	 * sending a synchronous URB, and this function may be called in
+	 * interrupt context.  Instead, query the USB frame number of our
+	 * parent and use that.
+	 */
+	return usb_get_current_frame_number(wa->usb_dev);
+}
+
+static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+				gfp_t gfp)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
+}
+
+static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
+				int status)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	return wa_urb_dequeue(&hwahc->wa, urb, status);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an associated rpipe to this endpoint, go ahead and put it.
+ */
+static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
+				      struct usb_host_endpoint *ep)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	rpipe_ep_disable(&hwahc->wa, ep);
+}
+
+static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
+{
+	int result;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct device *dev = &hwahc->wa.usb_iface->dev;
+
+	result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
+	if (result < 0) {
+		dev_err(dev, "error commanding HC to start: %d\n", result);
+		goto error_stop;
+	}
+	result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
+	if (result < 0) {
+		dev_err(dev, "error waiting for HC to start: %d\n", result);
+		goto error_stop;
+	}
+	result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
+	if (result < 0) {
+		dev_err(dev, "cannot listen to notifications: %d\n", result);
+		goto error_stop;
+	}
+	/*
+	 * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+	 * disable transfer notifications.
+	 */
+	if (hwahc->wa.quirks &
+		WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+		struct usb_host_interface *cur_altsetting =
+			hwahc->wa.usb_iface->cur_altsetting;
+
+		result = usb_control_msg(hwahc->wa.usb_dev,
+				usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+				WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+				USB_DIR_OUT | USB_TYPE_VENDOR |
+					USB_RECIP_INTERFACE,
+				WA_REQ_ALEREON_FEATURE_SET,
+				cur_altsetting->desc.bInterfaceNumber,
+				NULL, 0,
+				USB_CTRL_SET_TIMEOUT);
+		/*
+		 * If we successfully sent the control message, start DTI
+		 * here, because no transfer notifications (which normally
+		 * trigger the DTI start) will be received.
+		 */
+		if (result == 0)
+			result = wa_dti_start(&hwahc->wa);
+		else
+			result = 0;	/* OK.  Continue normally. */
+
+		if (result < 0) {
+			dev_err(dev, "cannot start DTI: %d\n", result);
+			goto error_dti_start;
+		}
+	}
+
+	return result;
+
+error_dti_start:
+	wa_nep_disarm(&hwahc->wa);
+error_stop:
+	__wa_clear_feature(&hwahc->wa, WA_ENABLE);
+	return result;
+}
+
+static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	int ret;
+
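+	/*
+	 * Ask the HC to stop the WUSB channel.  Note delay is in
+	 * milliseconds (see the msleep() below); the request appears to
+	 * take its time argument in microseconds, hence delay * 1000.
+	 */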
+	ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			      WUSB_REQ_CHAN_STOP,
+			      USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			      delay * 1000,
+			      iface_no,
+			      NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (ret == 0)
+		msleep(delay);
+
+	wa_nep_disarm(&hwahc->wa);
+	__wa_stop(&hwahc->wa);
+}
+
+/*
+ * Set the UWB MAS allocation for the WUSB cluster
+ *
+ * @stream_index: stream to use (-1 to cancel the allocation)
+ * @mas: MAS bitmap to use
+ */
+static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+			      const struct uwb_mas_bm *mas)
+{
+	int result;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	struct device *dev = &wa->usb_iface->dev;
+	u8 mas_le[UWB_NUM_MAS/8];
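+	/* UWB_NUM_MAS/8 is 32 bytes; must match the wLength used below */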
+
+	/* Set the stream index */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_STREAM_IDX,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			stream_index,
+			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (result < 0) {
+		dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
+		goto out;
+	}
+	uwb_mas_bm_copy_le(mas_le, mas);
+	/* Set the MAS allocation */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_WUSB_MAS,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+			mas_le, 32, USB_CTRL_SET_TIMEOUT);
+	if (result < 0)
+		dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
+out:
+	return result;
+}
+
+/*
+ * Add an IE to the host's MMC
+ *
+ * @interval:    See WUSB1.0[8.5.3.1]
+ * @repeat_cnt:  See WUSB1.0[8.5.3.1]
+ * @handle:      See WUSB1.0[8.5.3.1]
+ * @wuie:        Pointer to the header of the WUSB IE data to add.
+ *               MUST BE allocated in a kmalloc buffer (no stack or
+ *               vmalloc).
+ *
+ * NOTE: the format of WUSB IEs in MMCs is different from that of
+ *       normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
+ *       Id in WUSB IEs).  Standards...you gotta love 'em.
+ */
+static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
+				u8 repeat_cnt, u8 handle,
+				struct wuie_hdr *wuie)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+
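+	/*
+	 * wValue: interval in the high byte, repeat count in the low
+	 * byte.  wIndex: IE handle in the high byte, interface number
+	 * in the low byte.
+	 */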
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_ADD_MMC_IE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			interval << 8 | repeat_cnt,
+			handle << 8 | iface_no,
+			wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
+}
+
+/*
+ * Remove an IE from the host's MMC
+ *
+ * @handle:      See WUSB1.0[8.5.3.1]
+ */
+static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+
+	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_REMOVE_MMC_IE,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, handle << 8 | iface_no,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+/*
+ * Update the device information for a given fake port
+ *
+ * @wusb_dev: device whose info to send; wusb_dev->port_idx is the fake
+ *            port it is connected to (wusbhc index, not USB port
+ *            number).
+ */
+static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
+				   struct wusb_dev *wusb_dev)
+{
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	struct hwa_dev_info *dev_info;
+	int ret;
+
+	/* fill out the Device Info buffer and send it */
+	dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
+	if (!dev_info)
+		return -ENOMEM;
+	uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
+			   &wusb_dev->availability);
+	dev_info->bDeviceAddress = wusb_dev->addr;
+
+	/*
+	 * If the descriptors haven't been read yet, use a default PHY
+	 * rate of 53.3 Mbit/s only.  The correct value will be used
+	 * when this function is called again as part of the
+	 * authentication process (which occurs after the descriptors
+	 * have been read).
+	 */
+	if (wusb_dev->wusb_cap_descr)
+		dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
+	else
+		dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
+
+	ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			WUSB_REQ_SET_DEV_INFO,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			0, wusb_dev->port_idx << 8 | iface_no,
+			dev_info, sizeof(struct hwa_dev_info),
+			USB_CTRL_SET_TIMEOUT);
+	kfree(dev_info);
+	return ret;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+			       const void *key, size_t key_size,
+			       u8 key_idx)
+{
+	int result = -ENOMEM;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	struct usb_key_descriptor *keyd;
+	size_t keyd_len;
+
+	keyd_len = sizeof(*keyd) + key_size;
+	keyd = kzalloc(keyd_len, GFP_KERNEL);
+	if (keyd == NULL)
+		return -ENOMEM;
+
+	keyd->bLength = keyd_len;
+	keyd->bDescriptorType = USB_DT_KEY;
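+	/* tTKID is a 24-bit little-endian field */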
+	keyd->tTKID[0] = (tkid >>  0) & 0xff;
+	keyd->tTKID[1] = (tkid >>  8) & 0xff;
+	keyd->tTKID[2] = (tkid >> 16) & 0xff;
+	memcpy(keyd->bKeyData, key, key_size);
+
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			USB_REQ_SET_DESCRIPTOR,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			USB_DT_KEY << 8 | key_idx,
+			port_idx << 8 | iface_no,
+			keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
+
+	kzfree(keyd); /* clear keys etc. */
+	return result;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+			      const void *key, size_t key_size)
+{
+	int result = -ENOMEM;
+	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	struct wahc *wa = &hwahc->wa;
+	u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+	u8 encryption_value;
+
+	/* Tell the host which key to use to talk to the device */
+	if (key) {
+		u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
+					    WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+		result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
+					     key, key_size, key_idx);
+		if (result < 0)
+			goto error_set_key;
+		encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
+	} else {
+		/* FIXME: this should come from wusbhc->etd[UNSECURE].value */
+		encryption_value = 0;
+	}
+
+	/* Set the encryption type for communicating with the device */
+	result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+			USB_REQ_SET_ENCRYPTION,
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			encryption_value, port_idx << 8 | iface_no,
+			NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (result < 0)
+		dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
+			"port index %u to %s (value %d): %d\n", port_idx,
+			wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
+			wusbhc->ccm1_etd->bEncryptionValue, result);
+error_set_key:
+	return result;
+}
+
+/*
+ * Set host's GTK key
+ */
+static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+			      const void *key, size_t key_size)
+{
+	u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+				    WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+	return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
+}
+
+/*
+ * Get the Wire Adapter class-specific descriptor
+ *
+ * NOTE: this descriptor comes within the big bundled configuration
+ *       descriptor that includes the interface and endpoint
+ *       descriptors, so we just look for it in the cached copy kept
+ *       by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int wa_fill_descr(struct wahc *wa)
+{
+	int result;
+	struct device *dev = &wa->usb_iface->dev;
+	char *itr;
+	struct usb_device *usb_dev = wa->usb_dev;
+	struct usb_descriptor_header *hdr;
+	struct usb_wa_descriptor *wa_descr;
+	size_t itr_size, actconfig_idx;
+
+	actconfig_idx = usb_dev->actconfig - usb_dev->config;
+	itr = usb_dev->rawdescriptors[actconfig_idx];
+	itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+	while (itr_size >= sizeof(*hdr)) {
+		hdr = (struct usb_descriptor_header *) itr;
+		dev_dbg(dev, "Extra device descriptor: "
+			"type %02x/%u bytes @ %zu (%zu left)\n",
+			hdr->bDescriptorType, hdr->bLength,
+			(itr - usb_dev->rawdescriptors[actconfig_idx]),
+			itr_size);
+		if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
+			goto found;
+		itr += hdr->bLength;
+		itr_size -= hdr->bLength;
+	}
+	dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
+	return -ENODEV;
+
+found:
+	result = -EINVAL;
+	if (hdr->bLength > itr_size) {	/* is it available? */
+		dev_err(dev, "incomplete Wire Adapter Class descriptor "
+			"(%zu bytes left, %u needed)\n",
+			itr_size, hdr->bLength);
+		goto error;
+	}
+	if (hdr->bLength < sizeof(*wa->wa_descr)) {
+		dev_err(dev, "short Wire Adapter Class descriptor\n");
+		goto error;
+	}
+	wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
+	if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
+		dev_warn(dev, "Wire Adapter v%d.%d newer than grokked v1.0\n",
+			 (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
+			 le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
+	result = 0;
+error:
+	return result;
+}
+
+static const struct hc_driver hwahc_hc_driver = {
+	.description = "hwa-hcd",
+	.product_desc = "Wireless USB HWA host controller",
+	.hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
+	.irq = NULL,			/* FIXME */
+	.flags = HCD_USB25,
+	.reset = hwahc_op_reset,
+	.start = hwahc_op_start,
+	.stop = hwahc_op_stop,
+	.get_frame_number = hwahc_op_get_frame_number,
+	.urb_enqueue = hwahc_op_urb_enqueue,
+	.urb_dequeue = hwahc_op_urb_dequeue,
+	.endpoint_disable = hwahc_op_endpoint_disable,
+
+	.hub_status_data = wusbhc_rh_status_data,
+	.hub_control = wusbhc_rh_control,
+	.start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int hwahc_security_create(struct hwahc *hwahc)
+{
+	int result;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct usb_device *usb_dev = hwahc->wa.usb_dev;
+	struct device *dev = &usb_dev->dev;
+	struct usb_security_descriptor *secd;
+	struct usb_encryption_descriptor *etd;
+	void *itr, *top;
+	size_t itr_size, needed, bytes;
+	u8 index;
+	char buf[64];
+
+	/* Find the host's security descriptors in the config descr bundle */
+	index = usb_dev->actconfig - usb_dev->config;
+	itr = usb_dev->rawdescriptors[index];
+	itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+	top = itr + itr_size;
+	result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
+			le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
+			USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
+	if (result == -1) {
+		dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
+		return 0;
+	}
+	needed = sizeof(*secd);
+	if (top - (void *)secd < needed) {
+		dev_err(dev, "BUG? Not enough data to process security "
+			"descriptor header (%zu bytes left vs %zu needed)\n",
+			top - (void *) secd, needed);
+		return 0;
+	}
+	needed = le16_to_cpu(secd->wTotalLength);
+	if (top - (void *)secd < needed) {
+		dev_err(dev, "BUG? Not enough data to process security "
+			"descriptors (%zu bytes left vs %zu needed)\n",
+			top - (void *) secd, needed);
+		return 0;
+	}
+	/* Walk over the sec descriptors and store CCM1's on wusbhc */
+	itr = (void *) secd + sizeof(*secd);
+	top = (void *) secd + le16_to_cpu(secd->wTotalLength);
+	index = 0;
+	bytes = 0;
+	while (itr < top) {
+		etd = itr;
+		if (top - itr < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad host security descriptor; "
+				"not enough data (%zu vs %zu left)\n",
+				top - itr, sizeof(*etd));
+			break;
+		}
+		if (etd->bLength < sizeof(*etd)) {
+			dev_err(dev, "BUG: bad host encryption descriptor; "
+				"descriptor is too short "
+				"(%zu vs %zu needed)\n",
+				(size_t)etd->bLength, sizeof(*etd));
+			break;
+		}
+		itr += etd->bLength;
+		bytes += scnprintf(buf + bytes, sizeof(buf) - bytes,
+				  "%s (0x%02x) ",
+				  wusb_et_name(etd->bEncryptionType),
+				  etd->bEncryptionValue);
+		wusbhc->ccm1_etd = etd;
+	}
+	/* Pretty print what we support */
+	dev_info(dev, "supported encryption types: %s\n", buf);
+	if (wusbhc->ccm1_etd == NULL) {
+		dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
+		return 0;
+	}
+	return 0;
+}
+
+static void hwahc_security_release(struct hwahc *hwahc)
+{
+	/* nothing to do here so far... */
+}
+
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
+	kernel_ulong_t quirks)
+{
+	int result;
+	struct device *dev = &iface->dev;
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+	struct wahc *wa = &hwahc->wa;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+	wa->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
+	wa->usb_iface = usb_get_intf(iface);
+	wusbhc->dev = dev;
+	/*
+	 * Defer getting the uwb_rc handle until it is needed since it
+	 * may not have been registered by the hwa_rc driver yet.
+	 */
+	wusbhc->uwb_rc = NULL;
+	result = wa_fill_descr(wa);	/* Get the device descriptor */
+	if (result < 0)
+		goto error_fill_descriptor;
+	if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
+		dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
+			"adapter (%u ports)\n", wa->wa_descr->bNumPorts);
+		wusbhc->ports_max = USB_MAXCHILDREN;
+	} else {
+		wusbhc->ports_max = wa->wa_descr->bNumPorts;
+	}
+	wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
+	wusbhc->start = __hwahc_op_wusbhc_start;
+	wusbhc->stop = __hwahc_op_wusbhc_stop;
+	wusbhc->mmcie_add = __hwahc_op_mmcie_add;
+	wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
+	wusbhc->dev_info_set = __hwahc_op_dev_info_set;
+	wusbhc->bwa_set = __hwahc_op_bwa_set;
+	wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
+	wusbhc->set_ptk = __hwahc_op_set_ptk;
+	wusbhc->set_gtk = __hwahc_op_set_gtk;
+	result = hwahc_security_create(hwahc);
+	if (result < 0) {
+		dev_err(dev, "Can't initialize security: %d\n", result);
+		goto error_security_create;
+	}
+	wa->wusb = wusbhc;	/* FIXME: ugly, need to fix */
+	result = wusbhc_create(&hwahc->wusbhc);
+	if (result < 0) {
+		dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
+		goto error_wusbhc_create;
+	}
+	result = wa_create(&hwahc->wa, iface, quirks);
+	if (result < 0)
+		goto error_wa_create;
+	return 0;
+
+error_wa_create:
+	wusbhc_destroy(&hwahc->wusbhc);
+error_wusbhc_create:
+	/* WA Descr fill allocs no resources */
+error_security_create:
+error_fill_descriptor:
+	usb_put_intf(iface);
+	usb_put_dev(usb_dev);
+	return result;
+}
+
+static void hwahc_destroy(struct hwahc *hwahc)
+{
+	struct wusbhc *wusbhc = &hwahc->wusbhc;
+
+	mutex_lock(&wusbhc->mutex);
+	__wa_destroy(&hwahc->wa);
+	wusbhc_destroy(&hwahc->wusbhc);
+	hwahc_security_release(hwahc);
+	hwahc->wusbhc.dev = NULL;
+	uwb_rc_put(wusbhc->uwb_rc);
+	usb_put_intf(hwahc->wa.usb_iface);
+	usb_put_dev(hwahc->wa.usb_dev);
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static void hwahc_init(struct hwahc *hwahc)
+{
+	wa_init(&hwahc->wa);
+}
+
+static int hwahc_probe(struct usb_interface *usb_iface,
+		       const struct usb_device_id *id)
+{
+	int result;
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc;
+	struct hwahc *hwahc;
+	struct device *dev = &usb_iface->dev;
+
+	result = -ENOMEM;
+	usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
+	if (usb_hcd == NULL) {
+		dev_err(dev, "unable to allocate instance\n");
+		goto error_alloc;
+	}
+	usb_hcd->wireless = 1;
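+	/* ~0 puts no limit on the number of scatter-gather entries */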
+	usb_hcd->self.sg_tablesize = ~0;
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+	hwahc_init(hwahc);
+	result = hwahc_create(hwahc, usb_iface, id->driver_info);
+	if (result < 0) {
+		dev_err(dev, "Cannot initialize internals: %d\n", result);
+		goto error_hwahc_create;
+	}
+	result = usb_add_hcd(usb_hcd, 0, 0);
+	if (result < 0) {
+		dev_err(dev, "Cannot add HCD: %d\n", result);
+		goto error_add_hcd;
+	}
+	device_wakeup_enable(usb_hcd->self.controller);
+	result = wusbhc_b_create(&hwahc->wusbhc);
+	if (result < 0) {
+		dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
+		goto error_wusbhc_b_create;
+	}
+	return 0;
+
+error_wusbhc_b_create:
+	usb_remove_hcd(usb_hcd);
+error_add_hcd:
+	hwahc_destroy(hwahc);
+error_hwahc_create:
+	usb_put_hcd(usb_hcd);
+error_alloc:
+	return result;
+}
+
+static void hwahc_disconnect(struct usb_interface *usb_iface)
+{
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc;
+	struct hwahc *hwahc;
+
+	usb_hcd = usb_get_intfdata(usb_iface);
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+	wusbhc_b_destroy(&hwahc->wusbhc);
+	usb_remove_hcd(usb_hcd);
+	hwahc_destroy(hwahc);
+	usb_put_hcd(usb_hcd);
+}
+
+static const struct usb_device_id hwahc_id_table[] = {
+	/* Alereon 5310 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
+	  .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+		WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+	/* Alereon 5611 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
+	  .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+		WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+	/* FIXME: use class labels for this */
+	{ USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
+	{},
+};
+MODULE_DEVICE_TABLE(usb, hwahc_id_table);
+
+static struct usb_driver hwahc_driver = {
+	.name =		"hwa-hc",
+	.probe =	hwahc_probe,
+	.disconnect =	hwahc_disconnect,
+	.id_table =	hwahc_id_table,
+};
+
+module_usb_driver(hwahc_driver);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wire Adapter USB Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 0000000..7fcf1d9
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2009 by Martin Fuzzey
+ */
+
+/* this file is part of imx21-hcd.c */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#ifndef DEBUG
+
+static inline void create_debug_files(struct imx21 *imx21) { }
+static inline void remove_debug_files(struct imx21 *imx21) { }
+static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
+	int status) {}
+static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
+	struct urb *urb) {}
+static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
+	struct urb *urb) {}
+static inline void debug_etd_allocated(struct imx21 *imx21) {}
+static inline void debug_etd_freed(struct imx21 *imx21) {}
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
+static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
+static inline void debug_isoc_submitted(struct imx21 *imx21,
+	int frame, struct td *td) {}
+static inline void debug_isoc_completed(struct imx21 *imx21,
+	int frame, struct td *td, int cc, int len) {}
+
+#else
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static const char *dir_labels[] = {
+	"TD 0",
+	"OUT",
+	"IN",
+	"TD 1"
+};
+
+static const char *speed_labels[] = {
+	"Full",
+	"Low"
+};
+
+static const char *format_labels[] = {
+	"Control",
+	"ISO",
+	"Bulk",
+	"Interrupt"
+};
+
+static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
+	struct urb *urb)
+{
+	return usb_pipeisoc(urb->pipe) ?
+		&imx21->isoc_stats : &imx21->nonisoc_stats;
+}
+
+static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
+{
+	stats_for_urb(imx21, urb)->submitted++;
+}
+
+static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
+{
+	if (st)
+		stats_for_urb(imx21, urb)->completed_failed++;
+	else
+		stats_for_urb(imx21, urb)->completed_ok++;
+}
+
+static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
+{
+	stats_for_urb(imx21, urb)->unlinked++;
+}
+
+static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
+{
+	stats_for_urb(imx21, urb)->queue_etd++;
+}
+
+static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
+{
+	stats_for_urb(imx21, urb)->queue_dmem++;
+}
+
+static inline void debug_etd_allocated(struct imx21 *imx21)
+{
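+	/* track the high-water mark of simultaneously allocated ETDs */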
+	imx21->etd_usage.maximum = max(
+			++(imx21->etd_usage.value),
+			imx21->etd_usage.maximum);
+}
+
+static inline void debug_etd_freed(struct imx21 *imx21)
+{
+	imx21->etd_usage.value--;
+}
+
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
+{
+	imx21->dmem_usage.value += size;
+	imx21->dmem_usage.maximum = max(
+			imx21->dmem_usage.value,
+			imx21->dmem_usage.maximum);
+}
+
+static inline void debug_dmem_freed(struct imx21 *imx21, int size)
+{
+	imx21->dmem_usage.value -= size;
+}
+
+
+static void debug_isoc_submitted(struct imx21 *imx21,
+	int frame, struct td *td)
+{
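+	/* record this submission in a small ring buffer of recent isoc
+	 * transfers; completion fills in the remaining fields */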
+	struct debug_isoc_trace *trace = &imx21->isoc_trace[
+		imx21->isoc_trace_index++];
+
+	imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
+	trace->schedule_frame = td->frame;
+	trace->submit_frame = frame;
+	trace->request_len = td->len;
+	trace->td = td;
+}
+
+static inline void debug_isoc_completed(struct imx21 *imx21,
+	int frame, struct td *td, int cc, int len)
+{
+	struct debug_isoc_trace *trace, *trace_failed;
+	int i;
+	int found = 0;
+
+	trace = imx21->isoc_trace;
+	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
+		if (trace->td == td) {
+			trace->done_frame = frame;
+			trace->done_len = len;
+			trace->cc = cc;
+			trace->td = NULL;
+			found = 1;
+			break;
+		}
+	}
+
+	if (found && cc) {
+		trace_failed = &imx21->isoc_trace_failed[
+					imx21->isoc_trace_index_failed++];
+
+		imx21->isoc_trace_index_failed %= ARRAY_SIZE(
+						imx21->isoc_trace_failed);
+		*trace_failed = *trace;
+	}
+}
+
+
+static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
+{
+	if (ep)
+		snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
+			ep->desc.bEndpointAddress,
+			usb_endpoint_type(&ep->desc),
+			ep);
+	else
+		snprintf(buf, bufsize, "none");
+	return buf;
+}
+
+static char *format_etd_dword0(u32 value, char *buf, int bufsize)
+{
+	snprintf(buf, bufsize,
+		"addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
+		value & 0x7F,
+		(value >> DW0_ENDPNT) & 0x0F,
+		dir_labels[(value >> DW0_DIRECT) & 0x03],
+		speed_labels[(value >> DW0_SPEED) & 0x01],
+		format_labels[(value >> DW0_FORMAT) & 0x03],
+		(value >> DW0_HALTED) & 0x01);
+	return buf;
+}
+
+static int debug_status_show(struct seq_file *s, void *v)
+{
+	struct imx21 *imx21 = s->private;
+	int etds_allocated = 0;
+	int etds_sw_busy = 0;
+	int etds_hw_busy = 0;
+	int dmem_blocks = 0;
+	int queued_for_etd = 0;
+	int queued_for_dmem = 0;
+	unsigned int dmem_bytes = 0;
+	int i;
+	struct etd_priv *etd;
+	u32 etd_enable_mask;
+	unsigned long flags;
+	struct imx21_dmem_area *dmem;
+	struct ep_priv *ep_priv;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
+	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+		if (etd->alloc)
+			etds_allocated++;
+		if (etd->urb)
+			etds_sw_busy++;
+		if (etd_enable_mask & (1<<i))
+			etds_hw_busy++;
+	}
+
+	list_for_each_entry(dmem, &imx21->dmem_list, list) {
+		dmem_bytes += dmem->size;
+		dmem_blocks++;
+	}
+
+	list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
+		queued_for_etd++;
+
+	list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
+		queued_for_dmem++;
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	seq_printf(s,
+		"Frame: %d\n"
+		"ETDs allocated: %d/%d (max=%d)\n"
+		"ETDs in use sw: %d\n"
+		"ETDs in use hw: %d\n"
+		"DMEM allocated: %d/%d (max=%d)\n"
+		"DMEM blocks: %d\n"
+		"Queued waiting for ETD: %d\n"
+		"Queued waiting for DMEM: %d\n",
+		readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
+		etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
+		etds_sw_busy,
+		etds_hw_busy,
+		dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
+		dmem_blocks,
+		queued_for_etd,
+		queued_for_dmem);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_status);
+
+static int debug_dmem_show(struct seq_file *s, void *v)
+{
+	struct imx21 *imx21 = s->private;
+	struct imx21_dmem_area *dmem;
+	unsigned long flags;
+	char ep_text[40];
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	list_for_each_entry(dmem, &imx21->dmem_list, list)
+		seq_printf(s,
+			"%04X: size=0x%X "
+			"ep=%s\n",
+			dmem->offset, dmem->size,
+			format_ep(dmem->ep, ep_text, sizeof(ep_text)));
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_dmem);
+
+static int debug_etd_show(struct seq_file *s, void *v)
+{
+	struct imx21 *imx21 = s->private;
+	struct etd_priv *etd;
+	char buf[60];
+	u32 dword;
+	int i, j;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+		int state = -1;
+		struct urb_priv *urb_priv;
+		if (etd->urb) {
+			urb_priv = etd->urb->hcpriv;
+			if (urb_priv)
+				state = urb_priv->state;
+		}
+
+		seq_printf(s,
+			"etd_num: %d\n"
+			"ep: %s\n"
+			"alloc: %d\n"
+			"len: %d\n"
+			"busy sw: %d\n"
+			"busy hw: %d\n"
+			"urb state: %d\n"
+			"current urb: %p\n",
+
+			i,
+			format_ep(etd->ep, buf, sizeof(buf)),
+			etd->alloc,
+			etd->len,
+			etd->urb != NULL,
+			(readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
+			state,
+			etd->urb);
+
+		for (j = 0; j < 4; j++) {
+			dword = etd_readl(imx21, i, j);
+			switch (j) {
+			case 0:
+				format_etd_dword0(dword, buf, sizeof(buf));
+				break;
+			case 2:
+				snprintf(buf, sizeof(buf),
+					"cc=0X%02X", dword >> DW2_COMPCODE);
+				break;
+			default:
+				*buf = 0;
+				break;
+			}
+			seq_printf(s,
+				"dword %d: submitted=%08X cur=%08X [%s]\n",
+				j,
+				etd->submitted_dwords[j],
+				dword,
+				buf);
+		}
+		seq_printf(s, "\n");
+	}
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_etd);
+
+static void debug_statistics_show_one(struct seq_file *s,
+	const char *name, struct debug_stats *stats)
+{
+	seq_printf(s, "%s:\n"
+		"submitted URBs: %lu\n"
+		"completed OK: %lu\n"
+		"completed failed: %lu\n"
+		"unlinked: %lu\n"
+		"queued for ETD: %lu\n"
+		"queued for DMEM: %lu\n\n",
+		name,
+		stats->submitted,
+		stats->completed_ok,
+		stats->completed_failed,
+		stats->unlinked,
+		stats->queue_etd,
+		stats->queue_dmem);
+}
+
+static int debug_statistics_show(struct seq_file *s, void *v)
+{
+	struct imx21 *imx21 = s->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
+	debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
+	seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_statistics);
+
+static void debug_isoc_show_one(struct seq_file *s,
+	const char *name, int index, struct debug_isoc_trace *trace)
+{
+	seq_printf(s, "%s %d:\n"
+		"cc=0X%02X\n"
+		"scheduled frame %d (%d)\n"
+		"submitted frame %d (%d)\n"
+		"completed frame %d (%d)\n"
+		"requested length=%d\n"
+		"completed length=%d\n\n",
+		name, index,
+		trace->cc,
+		trace->schedule_frame, trace->schedule_frame & 0xFFFF,
+		trace->submit_frame, trace->submit_frame & 0xFFFF,
+		trace->done_frame, trace->done_frame & 0xFFFF,
+		trace->request_len,
+		trace->done_len);
+}
+
+static int debug_isoc_show(struct seq_file *s, void *v)
+{
+	struct imx21 *imx21 = s->private;
+	struct debug_isoc_trace *trace;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	trace = imx21->isoc_trace_failed;
+	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
+		debug_isoc_show_one(s, "isoc failed", i, trace);
+
+	trace = imx21->isoc_trace;
+	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
+		debug_isoc_show_one(s, "isoc", i, trace);
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_isoc);
+
+static void create_debug_files(struct imx21 *imx21)
+{
+	struct dentry *root;
+
+	root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+	imx21->debug_root = root;
+
+	debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
+	debugfs_create_file("dmem", S_IRUGO, root, imx21, &debug_dmem_fops);
+	debugfs_create_file("etd", S_IRUGO, root, imx21, &debug_etd_fops);
+	debugfs_create_file("statistics", S_IRUGO, root, imx21,
+			    &debug_statistics_fops);
+	debugfs_create_file("isoc", S_IRUGO, root, imx21, &debug_isoc_fops);
+}
+
+static void remove_debug_files(struct imx21 *imx21)
+{
+	debugfs_remove_recursive(imx21->debug_root);
+}
+
+#endif
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 0000000..6e3dad1
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1935 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ */
+
+
+ /*
+  * The i.MX21 USB hardware contains
+  *    * 32 transfer descriptors (called ETDs)
+  *    * 4KB of data memory
+  *
+  * The data memory is shared between the host and function controllers
+  * (but this driver only supports the host controller)
+  *
+  * So setting up a transfer involves:
+  *    * Allocating an ETD
+  *    * Filling in the ETD with the appropriate information
+  *    * Allocating data memory (and putting the offset in the ETD)
+  *    * Activating the ETD
+  *    * Getting an interrupt when it completes
+  *
+  * An ETD is assigned to each active endpoint.
+  *
+  * Low-resource (ETD and data memory) situations are handled
+  * differently for isochronous and non-isochronous transactions:
+  *
+  * Non-ISOC transfers are queued if either ETDs or data memory are
+  * unavailable.
+  *
+  * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+  * They allocate both ETDs and data memory during URB submission
+  * (and fail if either is unavailable).
+  */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "imx21-hcd.h"
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#ifdef DEBUG
+#define DEBUG_LOG_FRAME(imx21, etd, event) \
+	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
+#else
+#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
+#endif
+
+static const char hcd_name[] = "imx21-hcd";
+
+static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
+{
+	return (struct imx21 *)hcd->hcd_priv;
+}
+
+
+/* =========================================== */
+/* Hardware access helpers			*/
+/* =========================================== */
+
+static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
+{
+	void __iomem *reg = imx21->regs + offset;
+	writel(readl(reg) | mask, reg);
+}
+
+static inline void clear_register_bits(struct imx21 *imx21,
+	u32 offset, u32 mask)
+{
+	void __iomem *reg = imx21->regs + offset;
+	writel(readl(reg) & ~mask, reg);
+}
+
+static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+	void __iomem *reg = imx21->regs + offset;
+
+	if (readl(reg) & mask)
+		writel(mask, reg);
+}
+
+static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+	void __iomem *reg = imx21->regs + offset;
+
+	if (!(readl(reg) & mask))
+		writel(mask, reg);
+}
+
+static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
+{
+	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
+{
+	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static inline int wrap_frame(int counter)
+{
+	return counter & 0xFFFF;
+}
+
+static inline int frame_after(int frame, int after)
+{
+	/* handle wrapping like jiffies time_after() */
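+	/* e.g. frame_after(1, 0xFFFF) is true: frame 1 follows the wrap */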
+	return (s16)((s16)after - (s16)frame) < 0;
+}
+
+static int imx21_hc_get_frame(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+
+	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
+}
+
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+	return (addr & 3) != 0;
+}
+
+#include "imx21-dbg.c"
+
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
+/* =========================================== */
+/* ETD management				*/
+/* ===========================================	*/
+
+static int alloc_etd(struct imx21 *imx21)
+{
+	int i;
+	struct etd_priv *etd = imx21->etd;
+
+	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
+		if (etd->alloc == 0) {
+			memset(etd, 0, sizeof(imx21->etd[0]));
+			etd->alloc = 1;
+			debug_etd_allocated(imx21);
+			return i;
+		}
+	}
+	return -1;
+}
+
+static void disactivate_etd(struct imx21 *imx21, int num)
+{
+	int etd_mask = (1 << num);
+	struct etd_priv *etd = &imx21->etd[num];
+
+	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
+	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
+	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+
+	etd->active_count = 0;
+
+	DEBUG_LOG_FRAME(imx21, etd, disactivated);
+}
+
+static void reset_etd(struct imx21 *imx21, int num)
+{
+	struct etd_priv *etd = imx21->etd + num;
+	int i;
+
+	disactivate_etd(imx21, num);
+
+	for (i = 0; i < 4; i++)
+		etd_writel(imx21, num, i, 0);
+	etd->urb = NULL;
+	etd->ep = NULL;
+	etd->td = NULL;
+	etd->bounce_buffer = NULL;
+}
+
+static void free_etd(struct imx21 *imx21, int num)
+{
+	if (num < 0)
+		return;
+
+	if (num >= USB_NUM_ETD) {
+		dev_err(imx21->dev, "BAD etd=%d!\n", num);
+		return;
+	}
+	if (imx21->etd[num].alloc == 0) {
+		dev_err(imx21->dev, "ETD %d already free!\n", num);
+		return;
+	}
+
+	debug_etd_freed(imx21);
+	reset_etd(imx21, num);
+	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
+}
+
+
+static void setup_etd_dword0(struct imx21 *imx21,
+	int etd_num, struct urb *urb,  u8 dir, u16 maxpacket)
+{
+	etd_writel(imx21, etd_num, 0,
+		((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
+		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
+		((u32) dir << DW0_DIRECT) |
+		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
+			1 : 0) << DW0_SPEED) |
+		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
+		((u32) maxpacket << DW0_MAXPKTSIZ));
+}
+
+/*
+ * Copy a buffer into the data controller's data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32-bit
+ * writes.
+ */
+static void copy_to_dmem(
+	struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+	u32 word = 0;
+	u8 *p = src;
+	int byte = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		byte = i % 4;
+		word += (*p++ << (byte * 8));
+		if (byte == 3) {
+			writel(word, dmem);
+			dmem += 4;
+			word = 0;
+		}
+	}
+
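+	/* flush any trailing partial word (1-3 leftover bytes) */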
+	if (count && byte != 3)
+		writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
+{
+	u32 etd_mask = 1 << etd_num;
+	struct etd_priv *etd = &imx21->etd[etd_num];
+
+	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+		/* For non-aligned isoc the condition below is always true */
+		if (etd->len <= etd->dmem_size) {
+			/* Fits into data memory, use PIO */
+			if (dir != TD_DIR_IN) {
+				copy_to_dmem(imx21,
+						etd->dmem_offset,
+						etd->cpu_buffer, etd->len);
+			}
+			etd->dma_handle = 0;
+
+		} else {
+			/* Too big for data memory, use bounce buffer */
+			enum dma_data_direction dmadir;
+
+			if (dir == TD_DIR_IN) {
+				dmadir = DMA_FROM_DEVICE;
+				etd->bounce_buffer = kmalloc(etd->len,
+								GFP_ATOMIC);
+			} else {
+				dmadir = DMA_TO_DEVICE;
+				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+								etd->len,
+								GFP_ATOMIC);
+			}
+			if (!etd->bounce_buffer) {
+				dev_err(imx21->dev, "failed bounce alloc\n");
+				goto err_bounce_alloc;
+			}
+
+			etd->dma_handle =
+				dma_map_single(imx21->dev,
+						etd->bounce_buffer,
+						etd->len,
+						dmadir);
+			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+				dev_err(imx21->dev, "failed bounce map\n");
+				goto err_bounce_map;
+			}
+		}
+	}
+
+	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+	if (etd->dma_handle) {
+		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
+		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
+		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
+		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
+		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
+	} else {
+		if (dir != TD_DIR_IN) {
+			/* need to set for ZLP and PIO */
+			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+		}
+	}
+
+	DEBUG_LOG_FRAME(imx21, etd, activated);
+
+#ifdef DEBUG
+	if (!etd->active_count) {
+		int i;
+		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
+		etd->disactivated_frame = -1;
+		etd->last_int_frame = -1;
+		etd->last_req_frame = -1;
+
+		for (i = 0; i < 4; i++)
+			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
+	}
+#endif
+
+	etd->active_count = 1;
+	writel(etd_mask, imx21->regs + USBH_ETDENSET);
+	return;
+
+err_bounce_map:
+	kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+	free_dmem(imx21, etd);
+	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
+}
+
+/* ===========================================	*/
+/* Data memory management			*/
+/* ===========================================	*/
+
+static int alloc_dmem(struct imx21 *imx21, unsigned int size,
+		      struct usb_host_endpoint *ep)
+{
+	unsigned int offset = 0;
+	struct imx21_dmem_area *area;
+	struct imx21_dmem_area *tmp;
+
+	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
+
+	if (size > DMEM_SIZE) {
+		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
+			size, DMEM_SIZE);
+		return -EINVAL;
+	}
+
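+	/*
+	 * First-fit search: dmem_list is kept sorted by offset, so walk
+	 * the gaps between the existing areas for one big enough.
+	 */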
+	list_for_each_entry(tmp, &imx21->dmem_list, list) {
+		if ((size + offset) < offset)
+			goto fail;
+		if ((size + offset) <= tmp->offset)
+			break;
+		offset = tmp->size + tmp->offset;
+		if ((offset + size) > DMEM_SIZE)
+			goto fail;
+	}
+
+	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
+	if (area == NULL)
+		return -ENOMEM;
+
+	area->ep = ep;
+	area->offset = offset;
+	area->size = size;
+	list_add_tail(&area->list, &tmp->list);
+	debug_dmem_allocated(imx21, size);
+	return offset;
+
+fail:
+	return -ENOMEM;
+}
+
+/* Memory now available for a queued ETD - activate it */
+static void activate_queued_etd(struct imx21 *imx21,
+	struct etd_priv *etd, u32 dmem_offset)
+{
+	struct urb_priv *urb_priv = etd->urb->hcpriv;
+	int etd_num = etd - &imx21->etd[0];
+	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
+	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
+
+	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
+		etd_num);
+	etd_writel(imx21, etd_num, 1,
+	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
+
+	etd->dmem_offset = dmem_offset;
+	urb_priv->active = 1;
+	activate_etd(imx21, etd_num, dir);
+}
+
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
+{
+	struct imx21_dmem_area *area;
+	struct etd_priv *tmp;
+	int found = 0;
+	int offset;
+
+	if (!etd->dmem_size)
+		return;
+	etd->dmem_size = 0;
+
+	offset = etd->dmem_offset;
+	list_for_each_entry(area, &imx21->dmem_list, list) {
+		if (area->offset == offset) {
+			debug_dmem_freed(imx21, area->size);
+			list_del(&area->list);
+			kfree(area);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)  {
+		dev_err(imx21->dev,
+			"Trying to free unallocated DMEM %d\n", offset);
+		return;
+	}
+
+	/* Try again to allocate memory for anything we've queued */
+	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+		if (offset >= 0) {
+			list_del(&etd->queue);
+			activate_queued_etd(imx21, etd, (u32)offset);
+		}
+	}
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+	struct imx21_dmem_area *area, *tmp;
+
+	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+		if (area->ep == ep) {
+			dev_err(imx21->dev,
+				"Active DMEM %d for disabled ep=%p\n",
+				area->offset, ep);
+			list_del(&area->list);
+			kfree(area);
+		}
+	}
+}
+
+
+/* ===========================================	*/
+/* Endpoint handling				*/
+/* ===========================================	*/
+
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+	int i;
+
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+		int etd_num = ep_priv->etd[i];
+		struct etd_priv *etd;
+		if (etd_num < 0)
+			continue;
+
+		etd = &imx21->etd[etd_num];
+		ep_priv->etd[i] = -1;
+
+		free_dmem(imx21, etd); /* for isoc */
+
+		if (list_empty(&imx21->queue_for_etd)) {
+			free_etd(imx21, etd_num);
+			continue;
+		}
+
+		dev_dbg(imx21->dev,
+			"assigning idle etd %d for queued request\n", etd_num);
+		ep_priv = list_first_entry(&imx21->queue_for_etd,
+			struct ep_priv, queue);
+		list_del(&ep_priv->queue);
+		reset_etd(imx21, etd_num);
+		ep_priv->waiting_etd = 0;
+		ep_priv->etd[i] = etd_num;
+
+		if (list_empty(&ep_priv->ep->urb_list)) {
+			dev_err(imx21->dev, "No urb for queued ep!\n");
+			continue;
+		}
+		schedule_nonisoc_etd(imx21, list_first_entry(
+			&ep_priv->ep->urb_list, struct urb, urb_list));
+	}
+}
+
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct ep_priv *ep_priv = urb->ep->hcpriv;
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	debug_urb_completed(imx21, urb, status);
+	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+	kfree(urb_priv->isoc_td);
+	kfree(urb->hcpriv);
+	urb->hcpriv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock(&imx21->lock);
+	usb_hcd_giveback_urb(hcd, urb, status);
+	spin_lock(&imx21->lock);
+	if (list_empty(&ep_priv->ep->urb_list))
+		ep_idle(imx21, ep_priv);
+}
+
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+	struct usb_host_endpoint *ep = etd->ep;
+
+	urb_done(imx21->hcd, etd->urb, status);
+	etd->urb = NULL;
+
+	if (!list_empty(&ep->urb_list)) {
+		struct urb *urb = list_first_entry(
+					&ep->urb_list, struct urb, urb_list);
+
+		dev_vdbg(imx21->dev, "next URB %p\n", urb);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
+
+/* ===========================================	*/
+/* ISOC Handling ... 				*/
+/* ===========================================	*/
+
+static void schedule_isoc_etds(struct usb_hcd *hcd,
+	struct usb_host_endpoint *ep)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct ep_priv *ep_priv = ep->hcpriv;
+	struct etd_priv *etd;
+	struct urb_priv *urb_priv;
+	struct td *td;
+	int etd_num;
+	int i;
+	int cur_frame;
+	u8 dir;
+
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+too_late:
+		if (list_empty(&ep_priv->td_list))
+			break;
+
+		etd_num = ep_priv->etd[i];
+		if (etd_num < 0)
+			break;
+
+		etd = &imx21->etd[etd_num];
+		if (etd->urb)
+			continue;
+
+		td = list_entry(ep_priv->td_list.next, struct td, list);
+		list_del(&td->list);
+		urb_priv = td->urb->hcpriv;
+
+		cur_frame = imx21_hc_get_frame(hcd);
+		if (frame_after(cur_frame, td->frame)) {
+			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
+				cur_frame, td->frame);
+			urb_priv->isoc_status = -EXDEV;
+			td->urb->iso_frame_desc[
+				td->isoc_index].actual_length = 0;
+			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
+			if (--urb_priv->isoc_remaining == 0)
+				urb_done(hcd, td->urb, urb_priv->isoc_status);
+			goto too_late;
+		}
+
+		urb_priv->active = 1;
+		etd->td = td;
+		etd->ep = td->ep;
+		etd->urb = td->urb;
+		etd->len = td->len;
+		etd->dma_handle = td->dma_handle;
+		etd->cpu_buffer = td->cpu_buffer;
+
+		debug_isoc_submitted(imx21, cur_frame, td);
+
+		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
+		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
+		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
+		etd_writel(imx21, etd_num, 2,
+			(TD_NOTACCESSED << DW2_COMPCODE) |
+			((td->frame & 0xFFFF) << DW2_STARTFRM));
+		etd_writel(imx21, etd_num, 3,
+			(TD_NOTACCESSED << DW3_COMPCODE0) |
+			(td->len << DW3_PKTLEN0));
+
+		activate_etd(imx21, etd_num, dir);
+	}
+}
+
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	int etd_mask = 1 << etd_num;
+	struct etd_priv *etd = imx21->etd + etd_num;
+	struct urb *urb = etd->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	struct td *td = etd->td;
+	struct usb_host_endpoint *ep = etd->ep;
+	int isoc_index = td->isoc_index;
+	unsigned int pipe = urb->pipe;
+	int dir_in = usb_pipein(pipe);
+	int cc;
+	int bytes_xfrd;
+
+	disactivate_etd(imx21, etd_num);
+
+	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
+	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
+
+	/*
+	 * Input doesn't always fill the buffer; don't generate an error
+	 * when this happens.
+	 */
+	if (dir_in && (cc == TD_DATAUNDERRUN))
+		cc = TD_CC_NOERROR;
+
+	if (cc == TD_NOTACCESSED)
+		bytes_xfrd = 0;
+
+	debug_isoc_completed(imx21,
+		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
+	if (cc) {
+		urb_priv->isoc_status = -EXDEV;
+		dev_dbg(imx21->dev,
+			"bad iso cc=0x%X frame=%d sched frame=%d "
+			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
+			cc,  imx21_hc_get_frame(hcd), td->frame,
+			bytes_xfrd, td->len, urb, etd_num, isoc_index);
+	}
+
+	if (dir_in) {
+		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		if (!etd->dma_handle)
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+	}
+
+	urb->actual_length += bytes_xfrd;
+	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
+	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
+
+	etd->td = NULL;
+	etd->urb = NULL;
+	etd->ep = NULL;
+
+	if (--urb_priv->isoc_remaining == 0)
+		urb_done(hcd, urb, urb_priv->isoc_status);
+
+	schedule_isoc_etds(hcd, ep);
+}
+
+static struct ep_priv *alloc_isoc_ep(
+	struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+	struct ep_priv *ep_priv;
+	int i;
+
+	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+	if (!ep_priv)
+		return NULL;
+
+	for (i = 0; i < NUM_ISO_ETDS; i++)
+		ep_priv->etd[i] = -1;
+
+	INIT_LIST_HEAD(&ep_priv->td_list);
+	ep_priv->ep = ep;
+	ep->hcpriv = ep_priv;
+	return ep_priv;
+}
+
+static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+	int i, j;
+	int etd_num;
+
+	/* Allocate the ETDs if required */
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+		if (ep_priv->etd[i] < 0) {
+			etd_num = alloc_etd(imx21);
+			if (etd_num < 0)
+				goto alloc_etd_failed;
+
+			ep_priv->etd[i] = etd_num;
+			imx21->etd[etd_num].ep = ep_priv->ep;
+		}
+	}
+	return 0;
+
+alloc_etd_failed:
+	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+	for (j = 0; j < i; j++) {
+		free_etd(imx21, ep_priv->etd[j]);
+		ep_priv->etd[j] = -1;
+	}
+	return -ENOMEM;
+}
+
+static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
+				     struct usb_host_endpoint *ep,
+				     struct urb *urb, gfp_t mem_flags)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct urb_priv *urb_priv;
+	unsigned long flags;
+	struct ep_priv *ep_priv;
+	struct td *td = NULL;
+	int i;
+	int ret;
+	int cur_frame;
+	u16 maxpacket;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+	if (urb_priv == NULL)
+		return -ENOMEM;
+
+	urb_priv->isoc_td = kcalloc(urb->number_of_packets, sizeof(struct td),
+				    mem_flags);
+	if (urb_priv->isoc_td == NULL) {
+		ret = -ENOMEM;
+		goto alloc_td_failed;
+	}
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	if (ep->hcpriv == NULL) {
+		ep_priv = alloc_isoc_ep(imx21, ep);
+		if (ep_priv == NULL) {
+			ret = -ENOMEM;
+			goto alloc_ep_failed;
+		}
+	} else {
+		ep_priv = ep->hcpriv;
+	}
+
+	ret = alloc_isoc_etds(imx21, ep_priv);
+	if (ret)
+		goto alloc_etd_failed;
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto link_failed;
+
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->error_count = 0;
+	urb->hcpriv = urb_priv;
+	urb_priv->ep = ep;
+
+	/* allocate data memory for largest packets if not already done */
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+	for (i = 0; i < NUM_ISO_ETDS; i++) {
+		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
+
+		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
+			/* not sure if this can really occur.... */
+			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
+				etd->dmem_size, maxpacket);
+			ret = -EMSGSIZE;
+			goto alloc_dmem_failed;
+		}
+
+		if (etd->dmem_size == 0) {
+			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
+			if (etd->dmem_offset < 0) {
+				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
+				ret = -EAGAIN;
+				goto alloc_dmem_failed;
+			}
+			etd->dmem_size = maxpacket;
+		}
+	}
+
+	/* calculate frame */
+	cur_frame = imx21_hc_get_frame(hcd);
+	i = 0;
+	if (list_empty(&ep_priv->td_list)) {
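+		/* no TDs queued yet: start a few frames out for setup time */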
+		urb->start_frame = wrap_frame(cur_frame + 5);
+	} else {
+		urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
+				struct td, list)->frame + urb->interval);
+
+		if (frame_after(cur_frame, urb->start_frame)) {
+			dev_dbg(imx21->dev,
+				"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+				urb->start_frame, cur_frame,
+				(urb->transfer_flags & URB_ISO_ASAP) != 0);
+			i = DIV_ROUND_UP(wrap_frame(
+					cur_frame - urb->start_frame),
+					urb->interval);
+
+			/* Treat underruns as if URB_ISO_ASAP was set */
+			if ((urb->transfer_flags & URB_ISO_ASAP) ||
+					i >= urb->number_of_packets) {
+				urb->start_frame = wrap_frame(urb->start_frame
+						+ i * urb->interval);
+				i = 0;
+			}
+		}
+	}
+
+	/* set up transfers */
+	urb_priv->isoc_remaining = urb->number_of_packets - i;
+	td = urb_priv->isoc_td;
+	for (; i < urb->number_of_packets; i++, td++) {
+		unsigned int offset = urb->iso_frame_desc[i].offset;
+		td->ep = ep;
+		td->urb = urb;
+		td->len = urb->iso_frame_desc[i].length;
+		td->isoc_index = i;
+		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
+		td->dma_handle = urb->transfer_dma + offset;
+		td->cpu_buffer = urb->transfer_buffer + offset;
+		list_add_tail(&td->list, &ep_priv->td_list);
+	}
+
+	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
+		urb->number_of_packets, urb->start_frame, td->frame);
+
+	debug_urb_submitted(imx21, urb);
+	schedule_isoc_etds(hcd, ep);
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return 0;
+
+alloc_dmem_failed:
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+link_failed:
+alloc_etd_failed:
+alloc_ep_failed:
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	kfree(urb_priv->isoc_td);
+
+alloc_td_failed:
+	kfree(urb_priv);
+	return ret;
+}
+
+static void dequeue_isoc_urb(struct imx21 *imx21,
+	struct urb *urb, struct ep_priv *ep_priv)
+{
+	struct urb_priv *urb_priv = urb->hcpriv;
+	struct td *td, *tmp;
+	int i;
+
+	if (urb_priv->active) {
+		for (i = 0; i < NUM_ISO_ETDS; i++) {
+			int etd_num = ep_priv->etd[i];
+			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
+				struct etd_priv *etd = imx21->etd + etd_num;
+
+				reset_etd(imx21, etd_num);
+				free_dmem(imx21, etd);
+			}
+		}
+	}
+
+	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
+		if (td->urb == urb) {
+			dev_vdbg(imx21->dev, "removing td %p\n", td);
+			list_del(&td->list);
+		}
+	}
+}
+
+/* =========================================== */
+/* NON ISOC Handling ... 			*/
+/* =========================================== */
+
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
+{
+	unsigned int pipe = urb->pipe;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
+	int state = urb_priv->state;
+	int etd_num = ep_priv->etd[0];
+	struct etd_priv *etd;
+	u32 count;
+	u16 etd_buf_size;
+	u16 maxpacket;
+	u8 dir;
+	u8 bufround;
+	u8 datatoggle;
+	u8 interval = 0;
+	u8 relpolpos = 0;
+
+	if (etd_num < 0) {
+		dev_err(imx21->dev, "No valid ETD\n");
+		return;
+	}
+	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
+		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
+
+	etd = &imx21->etd[etd_num];
+	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+	if (!maxpacket)
+		maxpacket = 8;	/* fallback for not-yet-enumerated devices */
+
+	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
+		if (state == US_CTRL_SETUP) {
+			dir = TD_DIR_SETUP;
+			if (unsuitable_for_dma(urb->setup_dma))
+				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
+					urb);
+			etd->dma_handle = urb->setup_dma;
+			etd->cpu_buffer = urb->setup_packet;
+			bufround = 0;
+			count = 8;
+			datatoggle = TD_TOGGLE_DATA0;
+		} else {	/* US_CTRL_ACK */
+			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
+			bufround = 0;
+			count = 0;
+			datatoggle = TD_TOGGLE_DATA1;
+		}
+	} else {
+		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
+		bufround = (dir == TD_DIR_IN) ? 1 : 0;
+		if (unsuitable_for_dma(urb->transfer_dma))
+			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
+
+		etd->dma_handle = urb->transfer_dma;
+		etd->cpu_buffer = urb->transfer_buffer;
+		if (usb_pipebulk(pipe) && (state == US_BULK0))
+			count = 0;
+		else
+			count = urb->transfer_buffer_length;
+
+		if (usb_pipecontrol(pipe)) {
+			datatoggle = TD_TOGGLE_DATA1;
+		} else {
+			if (usb_gettoggle(
+					urb->dev,
+					usb_pipeendpoint(urb->pipe),
+					usb_pipeout(urb->pipe)))
+				datatoggle = TD_TOGGLE_DATA1;
+			else
+				datatoggle = TD_TOGGLE_DATA0;
+		}
+	}
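+	/* Note: per the USB spec, SETUP packets always carry DATA0,
+	 * while the first data-stage packet and the status stage of a
+	 * control transfer carry DATA1; other pipes use the toggle
+	 * state tracked via usb_gettoggle(). */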
+
+	etd->urb = urb;
+	etd->ep = urb_priv->ep;
+	etd->len = count;
+
+	if (usb_pipeint(pipe)) {
+		interval = urb->interval;
+		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
+	}
+
+	/* Write ETD to device memory */
+	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+
+	etd_writel(imx21, etd_num, 2,
+		(u32) interval << DW2_POLINTERV |
+		((u32) relpolpos << DW2_RELPOLPOS) |
+		((u32) dir << DW2_DIRPID) |
+		((u32) bufround << DW2_BUFROUND) |
+		((u32) datatoggle << DW2_DATATOG) |
+		((u32) TD_NOTACCESSED << DW2_COMPCODE));
+
+	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
+	   is smaller. Make sure we don't overrun the buffer!
+	 */
+	if (count && count < maxpacket)
+		etd_buf_size = count;
+	else
+		etd_buf_size = maxpacket;
+
+	etd_writel(imx21, etd_num, 3,
+		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
+
+	if (!count)
+		etd->dma_handle = 0;
+
+	/* allocate x and y buffer space at once */
+	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
+	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+	if (etd->dmem_offset < 0) {
+		/* Setup everything we can in HW and update when we get DMEM */
+		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
+
+		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
+		debug_urb_queued_for_dmem(imx21, urb);
+		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
+		return;
+	}
+
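+	/* DWORD1: X buffer at dmem_offset, Y buffer one maxpacket
+	 * further on, giving the controller two alternating packet
+	 * buffers when dmem_size is 2 * maxpacket. */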
+	etd_writel(imx21, etd_num, 1,
+		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+		(u32) etd->dmem_offset);
+
+	urb_priv->active = 1;
+
+	/* enable the ETD to kick off transfer */
+	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
+		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
+	activate_etd(imx21, etd_num, dir);
+
+}
+
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct etd_priv *etd = &imx21->etd[etd_num];
+	struct urb *urb = etd->urb;
+	u32 etd_mask = 1 << etd_num;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	int dir;
+	int cc;
+	u32 bytes_xfrd;
+	int etd_done;
+
+	disactivate_etd(imx21, etd_num);
+
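+	/* Recover direction, completion code and residue from the ETD;
+	 * the low 21 bits of DWORD3 hold the remaining byte count, so
+	 * bytes transferred = requested length - residue. */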
+	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
+	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
+	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
+
+	/* save toggle carry */
+	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+		      usb_pipeout(urb->pipe),
+		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
+
+	if (dir == TD_DIR_IN) {
+		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+		if (etd->bounce_buffer) {
+			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+			dma_unmap_single(imx21->dev,
+				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+		}
+	}
+
+	kfree(etd->bounce_buffer);
+	etd->bounce_buffer = NULL;
+	free_dmem(imx21, etd);
+
+	urb->error_count = 0;
+	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
+			&& (cc == TD_DATAUNDERRUN))
+		cc = TD_CC_NOERROR;
+
+	if (cc != 0)
+		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
+
+	etd_done = (cc_to_error[cc] != 0);	/* stop if error */
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		switch (urb_priv->state) {
+		case US_CTRL_SETUP:
+			if (urb->transfer_buffer_length > 0)
+				urb_priv->state = US_CTRL_DATA;
+			else
+				urb_priv->state = US_CTRL_ACK;
+			break;
+		case US_CTRL_DATA:
+			urb->actual_length += bytes_xfrd;
+			urb_priv->state = US_CTRL_ACK;
+			break;
+		case US_CTRL_ACK:
+			etd_done = 1;
+			break;
+		default:
+			dev_err(imx21->dev,
+				"Invalid pipe state %d\n", urb_priv->state);
+			etd_done = 1;
+			break;
+		}
+		break;
+
+	case PIPE_BULK:
+		urb->actual_length += bytes_xfrd;
+		if ((urb_priv->state == US_BULK)
+		    && (urb->transfer_flags & URB_ZERO_PACKET)
+		    && urb->transfer_buffer_length > 0
+		    && ((urb->transfer_buffer_length %
+			 usb_maxpacket(urb->dev, urb->pipe,
+				       usb_pipeout(urb->pipe))) == 0)) {
+			/* need a 0-packet */
+			urb_priv->state = US_BULK0;
+		} else {
+			etd_done = 1;
+		}
+		break;
+
+	case PIPE_INTERRUPT:
+		urb->actual_length += bytes_xfrd;
+		etd_done = 1;
+		break;
+	}
+
+	if (etd_done)
+		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+	else {
+		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
+
+static struct ep_priv *alloc_ep(void)
+{
+	int i;
+	struct ep_priv *ep_priv;
+
+	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+	if (!ep_priv)
+		return NULL;
+
+	for (i = 0; i < NUM_ISO_ETDS; ++i)
+		ep_priv->etd[i] = -1;
+
+	return ep_priv;
+}
+
+static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
+				struct urb *urb, gfp_t mem_flags)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+	struct ep_priv *ep_priv;
+	struct etd_priv *etd;
+	int ret;
+	unsigned long flags;
+
+	dev_vdbg(imx21->dev,
+		"enqueue urb=%p ep=%p len=%d "
+		"buffer=%p dma=%pad setupBuf=%p setupDma=%pad\n",
+		urb, ep,
+		urb->transfer_buffer_length,
+		urb->transfer_buffer, &urb->transfer_dma,
+		urb->setup_packet, &urb->setup_dma);
+
+	if (usb_pipeisoc(urb->pipe))
+		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
+
+	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	ep_priv = ep->hcpriv;
+	if (ep_priv == NULL) {
+		ep_priv = alloc_ep();
+		if (!ep_priv) {
+			ret = -ENOMEM;
+			goto failed_alloc_ep;
+		}
+		ep->hcpriv = ep_priv;
+		ep_priv->ep = ep;
+	}
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto failed_link;
+
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->error_count = 0;
+	urb->hcpriv = urb_priv;
+	urb_priv->ep = ep;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		urb_priv->state = US_CTRL_SETUP;
+		break;
+	case PIPE_BULK:
+		urb_priv->state = US_BULK;
+		break;
+	}
+
+	debug_urb_submitted(imx21, urb);
+	if (ep_priv->etd[0] < 0) {
+		if (ep_priv->waiting_etd) {
+			dev_dbg(imx21->dev,
+				"no ETD available already queued %p\n",
+				ep_priv);
+			debug_urb_queued_for_etd(imx21, urb);
+			goto out;
+		}
+		ep_priv->etd[0] = alloc_etd(imx21);
+		if (ep_priv->etd[0] < 0) {
+			dev_dbg(imx21->dev,
+				"no ETD available queueing %p\n", ep_priv);
+			debug_urb_queued_for_etd(imx21, urb);
+			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
+			ep_priv->waiting_etd = 1;
+			goto out;
+		}
+	}
+
+	/* Schedule if no URB already active for this endpoint */
+	etd = &imx21->etd[ep_priv->etd[0]];
+	if (etd->urb == NULL) {
+		DEBUG_LOG_FRAME(imx21, etd, last_req);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+
+out:
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return 0;
+
+failed_link:
+failed_alloc_ep:
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	kfree(urb_priv);
+	return ret;
+}
+
+static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+				int status)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+	struct usb_host_endpoint *ep;
+	struct ep_priv *ep_priv;
+	struct urb_priv *urb_priv = urb->hcpriv;
+	int ret = -EINVAL;
+
+	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
+		urb, usb_pipeisoc(urb->pipe), status);
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto fail;
+	ep = urb_priv->ep;
+	ep_priv = ep->hcpriv;
+
+	debug_urb_unlinked(imx21, urb);
+
+	if (usb_pipeisoc(urb->pipe)) {
+		dequeue_isoc_urb(imx21, urb, ep_priv);
+		schedule_isoc_etds(hcd, ep);
+	} else if (urb_priv->active) {
+		int etd_num = ep_priv->etd[0];
+		if (etd_num != -1) {
+			struct etd_priv *etd = &imx21->etd[etd_num];
+
+			disactivate_etd(imx21, etd_num);
+			free_dmem(imx21, etd);
+			etd->urb = NULL;
+			kfree(etd->bounce_buffer);
+			etd->bounce_buffer = NULL;
+		}
+	}
+
+	urb_done(hcd, urb, status);
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return 0;
+
+fail:
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return ret;
+}
+
+/* =========================================== */
+/* Interrupt dispatch	 			*/
+/* =========================================== */
+
+static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
+{
+	int etd_num;
+	int enable_sof_int = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
+		u32 etd_mask = 1 << etd_num;
+		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
+		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
+		struct etd_priv *etd = &imx21->etd[etd_num];
+
+
+		if (done) {
+			DEBUG_LOG_FRAME(imx21, etd, last_int);
+		} else {
+/*
+ * Kludge warning!
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10 frame wait)
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (eg mass storage) even just test1 will hang without
+ * the kludge.
+ */
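+/*
+ * Detection below: the ETD's enable bit is clear and a completion code
+ * has been written (CC != TD_NOTACCESSED), yet no done interrupt was
+ * raised; after roughly 10 SOFs in that state the ETD is processed
+ * anyway.
+ */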
+			u32 dword0;
+			int cc;
+
+			if (etd->active_count && !enabled) /* suspicious... */
+				enable_sof_int = 1;
+
+			if (!sof || enabled || !etd->active_count)
+				continue;
+
+			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+			if (cc == TD_NOTACCESSED)
+				continue;
+
+			if (++etd->active_count < 10)
+				continue;
+
+			dword0 = etd_readl(imx21, etd_num, 0);
+			dev_dbg(imx21->dev,
+				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+				etd_num, dword0 & 0x7F,
+				(dword0 >> DW0_ENDPNT) & 0x0F,
+				cc);
+
+#ifdef DEBUG
+			dev_dbg(imx21->dev,
+				"frame: act=%d disact=%d"
+				" int=%d req=%d cur=%d\n",
+				etd->activated_frame,
+				etd->disactivated_frame,
+				etd->last_int_frame,
+				etd->last_req_frame,
+				readl(imx21->regs + USBH_FRMNUB));
+			imx21->debug_unblocks++;
+#endif
+			etd->active_count = 0;
+/* End of kludge */
+		}
+
+		if (etd->ep == NULL || etd->urb == NULL) {
+			dev_dbg(imx21->dev,
+				"Interrupt for unexpected etd %d"
+				" ep=%p urb=%p\n",
+				etd_num, etd->ep, etd->urb);
+			disactivate_etd(imx21, etd_num);
+			continue;
+		}
+
+		if (usb_pipeisoc(etd->urb->pipe))
+			isoc_etd_done(hcd, etd_num);
+		else
+			nonisoc_etd_done(hcd, etd_num);
+	}
+
+	/* only enable SOF interrupt if it may be needed for the kludge */
+	if (enable_sof_int)
+		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+	else
+		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+	if (ints & USBH_SYSIEN_HERRINT)
+		dev_dbg(imx21->dev, "Scheduling error\n");
+
+	if (ints & USBH_SYSIEN_SORINT)
+		dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
+	writel(ints, imx21->regs + USBH_SYSISR);
+	return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+				      struct usb_host_endpoint *ep)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+	struct ep_priv *ep_priv;
+	int i;
+
+	if (ep == NULL)
+		return;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+	ep_priv = ep->hcpriv;
+	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+	if (!list_empty(&ep->urb_list))
+		dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+	if (ep_priv != NULL) {
+		for (i = 0; i < NUM_ISO_ETDS; i++) {
+			if (ep_priv->etd[i] > -1)
+				dev_dbg(imx21->dev, "free etd %d for disable\n",
+					ep_priv->etd[i]);
+
+			free_etd(imx21, ep_priv->etd[i]);
+		}
+		kfree(ep_priv);
+		ep->hcpriv = NULL;
+	}
+
+	for (i = 0; i < USB_NUM_ETD; i++) {
+		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+			dev_err(imx21->dev,
+				"Active etd %d for disabled ep=%p!\n", i, ep);
+			free_etd(imx21, i);
+		}
+	}
+	free_epdmem(imx21, ep);
+	spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Hub handling		 			*/
+/* =========================================== */
+
+static int get_hub_descriptor(struct usb_hcd *hcd,
+			      struct usb_hub_descriptor *desc)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	desc->bDescriptorType = USB_DT_HUB; /* HUB descriptor */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
+		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
+	desc->bDescLength = 9;
+	desc->bPwrOn2PwrGood = 0;
+	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
+		HUB_CHAR_NO_LPSM |	/* No power switching */
+		HUB_CHAR_NO_OCPM);	/* No over current protection */
+
+	desc->u.hs.DeviceRemovable[0] = 1 << 1;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+	return 0;
+}
+
+static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	int ports;
+	int changed = 0;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+	ports = readl(imx21->regs + USBH_ROOTHUBA)
+		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
+	if (ports > 7) {
+		dev_err(imx21->dev, "ports %d > 7\n", ports);
+		ports = 7;
+	}
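+	/* Hub status bitmap: bit 0 refers to the hub itself and bit
+	   (i + 1) to port i, per the USB hub class convention. */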
+	for (i = 0; i < ports; i++) {
+		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
+			(USBH_PORTSTAT_CONNECTSC |
+			USBH_PORTSTAT_PRTENBLSC |
+			USBH_PORTSTAT_PRTSTATSC |
+			USBH_PORTSTAT_OVRCURIC |
+			USBH_PORTSTAT_PRTRSTSC)) {
+
+			changed = 1;
+			buf[0] |= 1 << (i + 1);
+		}
+	}
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	if (changed)
+		dev_info(imx21->dev, "Hub status changed\n");
+	return changed;
+}
+
+static int imx21_hc_hub_control(struct usb_hcd *hcd,
+				u16 typeReq,
+				u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	int rc = 0;
+	u32 status_write = 0;
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		dev_dbg(imx21->dev, "ClearHubFeature\n");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
+			break;
+		case C_HUB_LOCAL_POWER:
+			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
+			break;
+		default:
+			dev_dbg(imx21->dev, "    unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+		break;
+
+	case ClearPortFeature:
+		dev_dbg(imx21->dev, "ClearPortFeature\n");
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			dev_dbg(imx21->dev, "    ENABLE\n");
+			status_write = USBH_PORTSTAT_CURCONST;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(imx21->dev, "    SUSPEND\n");
+			status_write = USBH_PORTSTAT_PRTOVRCURI;
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(imx21->dev, "    POWER\n");
+			status_write = USBH_PORTSTAT_LSDEVCON;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			dev_dbg(imx21->dev, "    C_ENABLE\n");
+			status_write = USBH_PORTSTAT_PRTENBLSC;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			dev_dbg(imx21->dev, "    C_SUSPEND\n");
+			status_write = USBH_PORTSTAT_PRTSTATSC;
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			dev_dbg(imx21->dev, "    C_CONNECTION\n");
+			status_write = USBH_PORTSTAT_CONNECTSC;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
+			status_write = USBH_PORTSTAT_OVRCURIC;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			dev_dbg(imx21->dev, "    C_RESET\n");
+			status_write = USBH_PORTSTAT_PRTRSTSC;
+			break;
+		default:
+			dev_dbg(imx21->dev, "    unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+
+		break;
+
+	case GetHubDescriptor:
+		dev_dbg(imx21->dev, "GetHubDescriptor\n");
+		rc = get_hub_descriptor(hcd, (void *)buf);
+		break;
+
+	case GetHubStatus:
+		dev_dbg(imx21->dev, "  GetHubStatus\n");
+		*(__le32 *) buf = 0;
+		break;
+
+	case GetPortStatus:
+		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
+		    wIndex, USBH_PORTSTAT(wIndex - 1));
+		*(__le32 *) buf = readl(imx21->regs +
+			USBH_PORTSTAT(wIndex - 1));
+		break;
+
+	case SetHubFeature:
+		dev_dbg(imx21->dev, "SetHubFeature\n");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
+			break;
+
+		case C_HUB_LOCAL_POWER:
+			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
+			break;
+		default:
+			dev_dbg(imx21->dev, "    unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+
+		break;
+
+	case SetPortFeature:
+		dev_dbg(imx21->dev, "SetPortFeature\n");
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(imx21->dev, "    SUSPEND\n");
+			status_write = USBH_PORTSTAT_PRTSUSPST;
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(imx21->dev, "    POWER\n");
+			status_write = USBH_PORTSTAT_PRTPWRST;
+			break;
+		case USB_PORT_FEAT_RESET:
+			dev_dbg(imx21->dev, "    RESET\n");
+			status_write = USBH_PORTSTAT_PRTRSTST;
+			break;
+		default:
+			dev_dbg(imx21->dev, "    unknown\n");
+			rc = -EINVAL;
+			break;
+		}
+		break;
+
+	default:
+		dev_dbg(imx21->dev, "  unknown\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	if (status_write)
+		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+	return rc;
+}
+
+/* =========================================== */
+/* Host controller management 			*/
+/* =========================================== */
+
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long timeout;
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	/* Reset the Host controller modules */
+	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+		imx21->regs + USBOTG_RST_CTRL);
+
+	/* Wait for reset to finish */
+	timeout = jiffies + HZ;
+	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+		if (time_after(jiffies, timeout)) {
+			spin_unlock_irqrestore(&imx21->lock, flags);
+			dev_err(imx21->dev, "timeout waiting for reset\n");
+			return -ETIMEDOUT;
+		}
+		spin_unlock_irq(&imx21->lock);
+		schedule_timeout_uninterruptible(1);
+		spin_lock_irq(&imx21->lock);
+	}
+	spin_unlock_irqrestore(&imx21->lock, flags);
+	return 0;
+}
+
+static int imx21_hc_start(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+	int i, j;
+	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+	u32 usb_control = 0;
+
+	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+			USBOTG_HWMODE_HOSTXCVR_MASK);
+	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+			USBOTG_HWMODE_OTGXCVR_MASK);
+
+	if (imx21->pdata->host1_txenoe)
+		usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+	if (!imx21->pdata->host1_xcverless)
+		usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+	if (imx21->pdata->otg_ext_xcvr)
+		usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+		imx21->regs + USBOTG_CLK_CTRL);
+	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+	writel(usb_control, imx21->regs + USBCTRL);
+	writel(USB_MISCCONTROL_SKPRTRY  | USB_MISCCONTROL_ARBMODE,
+		imx21->regs + USB_MISCCONTROL);
+
+	/* Clear the ETDs */
+	for (i = 0; i < USB_NUM_ETD; i++)
+		for (j = 0; j < 4; j++)
+			etd_writel(imx21, i, j, 0);
+
+	/* Take the HC out of reset */
+	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+		imx21->regs + USBH_HOST_CTRL);
+
+	/* Enable ports */
+	if (imx21->pdata->enable_otg_host)
+		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+			imx21->regs + USBH_PORTSTAT(0));
+
+	if (imx21->pdata->enable_host1)
+		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+			imx21->regs + USBH_PORTSTAT(1));
+
+	if (imx21->pdata->enable_host2)
+		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+			imx21->regs + USBH_PORTSTAT(2));
+
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Enable host controller interrupts */
+	set_register_bits(imx21, USBH_SYSIEN,
+		USBH_SYSIEN_HERRINT |
+		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
+	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+
+	spin_unlock_irqrestore(&imx21->lock, flags);
+
+	return 0;
+}
+
+static void imx21_hc_stop(struct usb_hcd *hcd)
+{
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&imx21->lock, flags);
+
+	writel(0, imx21->regs + USBH_SYSIEN);
+	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+	clear_register_bits(imx21, USBOTG_CLK_CTRL,
+			USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
+	spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Driver glue		 			*/
+/* =========================================== */
+
+static const struct hc_driver imx21_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "IMX21 USB Host Controller",
+	.hcd_priv_size = sizeof(struct imx21),
+
+	.flags = HCD_USB11,
+	.irq = imx21_irq,
+
+	.reset = imx21_hc_reset,
+	.start = imx21_hc_start,
+	.stop = imx21_hc_stop,
+
+	/* I/O requests */
+	.urb_enqueue = imx21_hc_urb_enqueue,
+	.urb_dequeue = imx21_hc_urb_dequeue,
+	.endpoint_disable = imx21_hc_endpoint_disable,
+
+	/* scheduling support */
+	.get_frame_number = imx21_hc_get_frame,
+
+	/* Root hub support */
+	.hub_status_data = imx21_hc_hub_status_data,
+	.hub_control = imx21_hc_hub_control,
+
+};
+
+static struct mx21_usbh_platform_data default_pdata = {
+	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+	.enable_host1 = 1,
+	.enable_host2 = 1,
+	.enable_otg_host = 1,
+
+};
+
+static int imx21_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct imx21 *imx21 = hcd_to_imx21(hcd);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	remove_debug_files(imx21);
+	usb_remove_hcd(hcd);
+
+	if (res != NULL) {
+		clk_disable_unprepare(imx21->clk);
+		clk_put(imx21->clk);
+		iounmap(imx21->regs);
+		release_mem_region(res->start, resource_size(res));
+	}
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+
+static int imx21_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct imx21 *imx21;
+	struct resource *res;
+	int ret;
+	int irq;
+
+	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		return irq;
+	}
+
+	hcd = usb_create_hcd(&imx21_hc_driver,
+		&pdev->dev, dev_name(&pdev->dev));
+	if (hcd == NULL) {
+		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
+		    dev_name(&pdev->dev));
+		return -ENOMEM;
+	}
+
+	imx21 = hcd_to_imx21(hcd);
+	imx21->hcd = hcd;
+	imx21->dev = &pdev->dev;
+	imx21->pdata = dev_get_platdata(&pdev->dev);
+	if (!imx21->pdata)
+		imx21->pdata = &default_pdata;
+
+	spin_lock_init(&imx21->lock);
+	INIT_LIST_HEAD(&imx21->dmem_list);
+	INIT_LIST_HEAD(&imx21->queue_for_etd);
+	INIT_LIST_HEAD(&imx21->queue_for_dmem);
+	create_debug_files(imx21);
+
+	res = request_mem_region(res->start, resource_size(res), hcd_name);
+	if (!res) {
+		ret = -EBUSY;
+		goto failed_request_mem;
+	}
+
+	imx21->regs = ioremap(res->start, resource_size(res));
+	if (imx21->regs == NULL) {
+		dev_err(imx21->dev, "Cannot map registers\n");
+		ret = -ENOMEM;
+		goto failed_ioremap;
+	}
+
+	/* Enable clock source */
+	imx21->clk = clk_get(imx21->dev, NULL);
+	if (IS_ERR(imx21->clk)) {
+		dev_err(imx21->dev, "no clock found\n");
+		ret = PTR_ERR(imx21->clk);
+		goto failed_clock_get;
+	}
+
+	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
+	if (ret)
+		goto failed_clock_set;
+	ret = clk_prepare_enable(imx21->clk);
+	if (ret)
+		goto failed_clock_enable;
+
+	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
+		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
+
+	ret = usb_add_hcd(hcd, irq, 0);
+	if (ret != 0) {
+		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
+		goto failed_add_hcd;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	return 0;
+
+failed_add_hcd:
+	clk_disable_unprepare(imx21->clk);
+failed_clock_enable:
+failed_clock_set:
+	clk_put(imx21->clk);
+failed_clock_get:
+	iounmap(imx21->regs);
+failed_ioremap:
+	release_mem_region(res->start, resource_size(res));
+failed_request_mem:
+	remove_debug_files(imx21);
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+static struct platform_driver imx21_hcd_driver = {
+	.driver = {
+		   .name = hcd_name,
+		   },
+	.probe = imx21_probe,
+	.remove = imx21_remove,
+	.suspend = NULL,
+	.resume = NULL,
+};
+
+module_platform_driver(imx21_hcd_driver);
+
+MODULE_DESCRIPTION("i.MX21 USB Host controller");
+MODULE_AUTHOR("Martin Fuzzey");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 0000000..7b9cf0a
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Macros and prototypes for i.MX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ */
+
+#ifndef __LINUX_IMX21_HCD_H__
+#define __LINUX_IMX21_HCD_H__
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/platform_data/usb-mx2.h>
+
+#define NUM_ISO_ETDS 	2
+#define USB_NUM_ETD	32
+#define DMEM_SIZE   	4096
+
+/* Register definitions */
+#define USBOTG_HWMODE		0x00
+#define USBOTG_HWMODE_ANASDBEN		(1 << 14)
+#define USBOTG_HWMODE_OTGXCVR_SHIFT	6
+#define USBOTG_HWMODE_OTGXCVR_MASK	(3 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RD	(0 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RD	(2 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RS	(1 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RS	(3 << 6)
+#define USBOTG_HWMODE_HOSTXCVR_SHIFT	4
+#define USBOTG_HWMODE_HOSTXCVR_MASK	(3 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RD	(0 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RD	(2 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RS	(1 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RS	(3 << 4)
+#define USBOTG_HWMODE_CRECFG_MASK	(3 << 0)
+#define USBOTG_HWMODE_CRECFG_HOST	(1 << 0)
+#define USBOTG_HWMODE_CRECFG_FUNC	(2 << 0)
+#define USBOTG_HWMODE_CRECFG_HNP	(3 << 0)
+
+#define USBOTG_CINT_STAT	0x04
+#define USBOTG_CINT_STEN	0x08
+#define USBOTG_ASHNPINT			(1 << 5)
+#define USBOTG_ASFCINT			(1 << 4)
+#define USBOTG_ASHCINT			(1 << 3)
+#define USBOTG_SHNPINT			(1 << 2)
+#define USBOTG_FCINT			(1 << 1)
+#define USBOTG_HCINT			(1 << 0)
+
+#define USBOTG_CLK_CTRL		0x0c
+#define USBOTG_CLK_CTRL_FUNC		(1 << 2)
+#define USBOTG_CLK_CTRL_HST		(1 << 1)
+#define USBOTG_CLK_CTRL_MAIN		(1 << 0)
+
+#define USBOTG_RST_CTRL		0x10
+#define USBOTG_RST_RSTI2C		(1 << 15)
+#define USBOTG_RST_RSTCTRL		(1 << 5)
+#define USBOTG_RST_RSTFC		(1 << 4)
+#define USBOTG_RST_RSTFSKE		(1 << 3)
+#define USBOTG_RST_RSTRH		(1 << 2)
+#define USBOTG_RST_RSTHSIE		(1 << 1)
+#define USBOTG_RST_RSTHC		(1 << 0)
+
+#define USBOTG_FRM_INTVL    	0x14
+#define USBOTG_FRM_REMAIN   	0x18
+#define USBOTG_HNP_CSR	    	0x1c
+#define USBOTG_HNP_ISR	    	0x2c
+#define USBOTG_HNP_IEN	    	0x30
+
+#define USBOTG_I2C_TXCVR_REG(x)	(0x100 + (x))
+#define USBOTG_I2C_XCVR_DEVAD		0x118
+#define USBOTG_I2C_SEQ_OP_REG		0x119
+#define USBOTG_I2C_SEQ_RD_STARTAD	0x11a
+#define USBOTG_I2C_OP_CTRL_REG	     	0x11b
+#define USBOTG_I2C_SCLK_TO_SCK_HPER  	0x11e
+#define USBOTG_I2C_MASTER_INT_REG    	0x11f
+
+#define USBH_HOST_CTRL		0x80
+#define USBH_HOST_CTRL_HCRESET			(1 << 31)
+#define USBH_HOST_CTRL_SCHDOVR(x)		((x) << 16)
+#define USBH_HOST_CTRL_RMTWUEN			(1 << 4)
+#define USBH_HOST_CTRL_HCUSBSTE_RESET		(0 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_RESUME		(1 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL	(2 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND	(3 << 2)
+#define USBH_HOST_CTRL_CTLBLKSR_1		(0 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_2		(1 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_3		(2 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_4		(3 << 0)
+
+#define USBH_SYSISR		0x88
+#define USBH_SYSISR_PSCINT		(1 << 6)
+#define USBH_SYSISR_FMOFINT		(1 << 5)
+#define USBH_SYSISR_HERRINT		(1 << 4)
+#define USBH_SYSISR_RESDETINT		(1 << 3)
+#define USBH_SYSISR_SOFINT		(1 << 2)
+#define USBH_SYSISR_DONEINT		(1 << 1)
+#define USBH_SYSISR_SORINT		(1 << 0)
+
+#define USBH_SYSIEN	    	0x8c
+#define USBH_SYSIEN_PSCINT		(1 << 6)
+#define USBH_SYSIEN_FMOFINT		(1 << 5)
+#define USBH_SYSIEN_HERRINT		(1 << 4)
+#define USBH_SYSIEN_RESDETINT		(1 << 3)
+#define USBH_SYSIEN_SOFINT		(1 << 2)
+#define USBH_SYSIEN_DONEINT		(1 << 1)
+#define USBH_SYSIEN_SORINT		(1 << 0)
+
+#define USBH_XBUFSTAT	    	0x98
+#define USBH_YBUFSTAT	    	0x9c
+#define USBH_XYINTEN	    	0xa0
+#define USBH_XFILLSTAT	    	0xa8
+#define USBH_YFILLSTAT	    	0xac
+#define USBH_ETDENSET	    	0xc0
+#define USBH_ETDENCLR	    	0xc4
+#define USBH_IMMEDINT	    	0xcc
+#define USBH_ETDDONESTAT    	0xd0
+#define USBH_ETDDONEEN	    	0xd4
+#define USBH_FRMNUB	    	0xe0
+#define USBH_LSTHRESH	    	0xe4
+
+#define USBH_ROOTHUBA	    	0xe8
+#define USBH_ROOTHUBA_PWRTOGOOD_MASK	(0xff)
+#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT	(24)
+#define USBH_ROOTHUBA_NOOVRCURP	(1 << 12)
+#define USBH_ROOTHUBA_OVRCURPM		(1 << 11)
+#define USBH_ROOTHUBA_DEVTYPE		(1 << 10)
+#define USBH_ROOTHUBA_PWRSWTMD		(1 << 9)
+#define USBH_ROOTHUBA_NOPWRSWT		(1 << 8)
+#define USBH_ROOTHUBA_NDNSTMPRT_MASK	(0xff)
+
+#define USBH_ROOTHUBB		0xec
+#define USBH_ROOTHUBB_PRTPWRCM(x)	(1 << ((x) + 16))
+#define USBH_ROOTHUBB_DEVREMOVE(x)	(1 << (x))
+
+#define USBH_ROOTSTAT		0xf0
+#define USBH_ROOTSTAT_CLRRMTWUE	(1 << 31)
+#define USBH_ROOTSTAT_OVRCURCHG	(1 << 17)
+#define USBH_ROOTSTAT_DEVCONWUE	(1 << 15)
+#define USBH_ROOTSTAT_OVRCURI		(1 << 1)
+#define USBH_ROOTSTAT_LOCPWRS		(1 << 0)
+
+#define USBH_PORTSTAT(x)	(0xf4 + ((x) * 4))
+#define USBH_PORTSTAT_PRTRSTSC		(1 << 20)
+#define USBH_PORTSTAT_OVRCURIC		(1 << 19)
+#define USBH_PORTSTAT_PRTSTATSC	(1 << 18)
+#define USBH_PORTSTAT_PRTENBLSC	(1 << 17)
+#define USBH_PORTSTAT_CONNECTSC	(1 << 16)
+#define USBH_PORTSTAT_LSDEVCON		(1 << 9)
+#define USBH_PORTSTAT_PRTPWRST		(1 << 8)
+#define USBH_PORTSTAT_PRTRSTST		(1 << 4)
+#define USBH_PORTSTAT_PRTOVRCURI	(1 << 3)
+#define USBH_PORTSTAT_PRTSUSPST	(1 << 2)
+#define USBH_PORTSTAT_PRTENABST	(1 << 1)
+#define USBH_PORTSTAT_CURCONST		(1 << 0)
+
+#define USB_DMAREV		0x800
+#define USB_DMAINTSTAT	    	0x804
+#define USB_DMAINTSTAT_EPERR		(1 << 1)
+#define USB_DMAINTSTAT_ETDERR		(1 << 0)
+
+#define USB_DMAINTEN	    	0x808
+#define USB_DMAINTEN_EPERRINTEN	(1 << 1)
+#define USB_DMAINTEN_ETDERRINTEN	(1 << 0)
+
+#define USB_ETDDMAERSTAT    	0x80c
+#define USB_EPDMAERSTAT	    	0x810
+#define USB_ETDDMAEN	    	0x820
+#define USB_EPDMAEN	    	0x824
+#define USB_ETDDMAXTEN	    	0x828
+#define USB_EPDMAXTEN	    	0x82c
+#define USB_ETDDMAENXYT	    	0x830
+#define USB_EPDMAENXYT	    	0x834
+#define USB_ETDDMABST4EN    	0x838
+#define USB_EPDMABST4EN	    	0x83c
+
+#define USB_MISCCONTROL	    	0x840
+#define USB_MISCCONTROL_ISOPREVFRM	(1 << 3)
+#define USB_MISCCONTROL_SKPRTRY	(1 << 2)
+#define USB_MISCCONTROL_ARBMODE	(1 << 1)
+#define USB_MISCCONTROL_FILTCC		(1 << 0)
+
+#define USB_ETDDMACHANLCLR  	0x848
+#define USB_EPDMACHANLCLR   	0x84c
+#define USB_ETDSMSA(x)	    	(0x900 + ((x) * 4))
+#define USB_EPSMSA(x)	    	(0x980 + ((x) * 4))
+#define USB_ETDDMABUFPTR(x) 	(0xa00 + ((x) * 4))
+#define USB_EPDMABUFPTR(x)  	(0xa80 + ((x) * 4))
+
+#define USB_ETD_DWORD(x, w)	(0x200 + ((x) * 16) + ((w) * 4))
+#define DW0_ADDRESS	0
+#define	DW0_ENDPNT	7
+#define	DW0_DIRECT	11
+#define	DW0_SPEED	13
+#define DW0_FORMAT	14
+#define DW0_MAXPKTSIZ	16
+#define DW0_HALTED	27
+#define	DW0_TOGCRY	28
+#define	DW0_SNDNAK	30
+
+#define DW1_XBUFSRTAD	0
+#define DW1_YBUFSRTAD	16
+
+#define DW2_RTRYDELAY	0
+#define DW2_POLINTERV	0
+#define DW2_STARTFRM	0
+#define DW2_RELPOLPOS	8
+#define DW2_DIRPID	16
+#define	DW2_BUFROUND	18
+#define DW2_DELAYINT	19
+#define DW2_DATATOG	22
+#define DW2_ERRORCNT	24
+#define	DW2_COMPCODE	28
+
+#define DW3_TOTBYECNT	0
+#define DW3_PKTLEN0	0
+#define DW3_COMPCODE0	12
+#define DW3_PKTLEN1	16
+#define DW3_BUFSIZE	21
+#define DW3_COMPCODE1	28
+
+#define USBCTRL		    	0x600
+#define USBCTRL_I2C_WU_INT_STAT	(1 << 27)
+#define USBCTRL_OTG_WU_INT_STAT	(1 << 26)
+#define USBCTRL_HOST_WU_INT_STAT	(1 << 25)
+#define USBCTRL_FNT_WU_INT_STAT	(1 << 24)
+#define USBCTRL_I2C_WU_INT_EN		(1 << 19)
+#define USBCTRL_OTG_WU_INT_EN		(1 << 18)
+#define USBCTRL_HOST_WU_INT_EN		(1 << 17)
+#define USBCTRL_FNT_WU_INT_EN		(1 << 16)
+#define USBCTRL_OTC_RCV_RXDP		(1 << 13)
+#define USBCTRL_HOST1_BYP_TLL		(1 << 12)
+#define USBCTRL_OTG_BYP_VAL(x)		((x) << 10)
+#define USBCTRL_HOST1_BYP_VAL(x)	((x) << 8)
+#define USBCTRL_OTG_PWR_MASK		(1 << 6)
+#define USBCTRL_HOST1_PWR_MASK		(1 << 5)
+#define USBCTRL_HOST2_PWR_MASK		(1 << 4)
+#define USBCTRL_USB_BYP			(1 << 2)
+#define USBCTRL_HOST1_TXEN_OE		(1 << 1)
+
+#define USBOTG_DMEM		0x1000
+
+/* Values in TD blocks */
+#define TD_DIR_SETUP	    0
+#define TD_DIR_OUT	    1
+#define TD_DIR_IN	    2
+#define TD_FORMAT_CONTROL   0
+#define TD_FORMAT_ISO	    1
+#define TD_FORMAT_BULK	    2
+#define TD_FORMAT_INT	    3
+#define TD_TOGGLE_CARRY	    0
+#define TD_TOGGLE_DATA0	    2
+#define TD_TOGGLE_DATA1	    3
+
+/* control transfer states */
+#define US_CTRL_SETUP	2
+#define US_CTRL_DATA	1
+#define US_CTRL_ACK	0
+
+/* bulk transfer main state and 0-length packet */
+#define US_BULK		1
+#define US_BULK0	0
+
+/*ETD format description*/
+#define IMX_FMT_CTRL   0x0
+#define IMX_FMT_ISO    0x1
+#define IMX_FMT_BULK   0x2
+#define IMX_FMT_INT    0x3
+
+static char fmt_urb_to_etd[4] = {
+/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
+/*PIPE_INTERRUPT*/ IMX_FMT_INT,
+/*PIPE_CONTROL*/ IMX_FMT_CTRL,
+/*PIPE_BULK*/ IMX_FMT_BULK
+};
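+/* Indexed by usb_pipetype(), whose values follow the order above;
+   e.g. fmt_urb_to_etd[usb_pipetype(urb->pipe)] gives the ETD format. */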
+
+/* condition (error) CC codes and mapping (OHCI like) */
+
+#define TD_CC_NOERROR		0x00
+#define TD_CC_CRC		0x01
+#define TD_CC_BITSTUFFING	0x02
+#define TD_CC_DATATOGGLEM	0x03
+#define TD_CC_STALL		0x04
+#define TD_DEVNOTRESP		0x05
+#define TD_PIDCHECKFAIL		0x06
+/*#define TD_UNEXPECTEDPID	0x07 - reserved, not active on MX2*/
+#define TD_DATAOVERRUN		0x08
+#define TD_DATAUNDERRUN		0x09
+#define TD_BUFFEROVERRUN	0x0C
+#define TD_BUFFERUNDERRUN	0x0D
+#define	TD_SCHEDULEOVERRUN	0x0E
+#define TD_NOTACCESSED		0x0F
+
+static const int cc_to_error[16] = {
+	/* No  Error  */ 0,
+	/* CRC Error  */ -EILSEQ,
+	/* Bit Stuff  */ -EPROTO,
+	/* Data Togg  */ -EILSEQ,
+	/* Stall      */ -EPIPE,
+	/* DevNotResp */ -ETIMEDOUT,
+	/* PIDCheck   */ -EPROTO,
+	/* UnExpPID   */ -EPROTO,
+	/* DataOver   */ -EOVERFLOW,
+	/* DataUnder  */ -EREMOTEIO,
+	/* (for hw)   */ -EIO,
+	/* (for hw)   */ -EIO,
+	/* BufferOver */ -ECOMM,
+	/* BuffUnder  */ -ENOSR,
+	/* (for HCD)  */ -ENOSPC,
+	/* (for HCD)  */ -EALREADY
+};
+
+/* HCD data associated with a usb core URB */
+struct urb_priv {
+	struct urb *urb;
+	struct usb_host_endpoint *ep;
+	int active;
+	int state;
+	struct td *isoc_td;
+	int isoc_remaining;
+	int isoc_status;
+};
+
+/* HCD data associated with a usb core endpoint */
+struct ep_priv {
+	struct usb_host_endpoint *ep;
+	struct list_head td_list;
+	struct list_head queue;
+	int etd[NUM_ISO_ETDS];
+	int waiting_etd;
+};
+
+/* isoc packet */
+struct td {
+	struct list_head list;
+	struct urb *urb;
+	struct usb_host_endpoint *ep;
+	dma_addr_t dma_handle;
+	void *cpu_buffer;
+	int len;
+	int frame;
+	int isoc_index;
+};
+
+/* HCD data associated with a hardware ETD */
+struct etd_priv {
+	struct usb_host_endpoint *ep;
+	struct urb *urb;
+	struct td *td;
+	struct list_head queue;
+	dma_addr_t dma_handle;
+	void *cpu_buffer;
+	void *bounce_buffer;
+	int alloc;
+	int len;
+	int dmem_size;
+	int dmem_offset;
+	int active_count;
+#ifdef DEBUG
+	int activated_frame;
+	int disactivated_frame;
+	int last_int_frame;
+	int last_req_frame;
+	u32 submitted_dwords[4];
+#endif
+};
+
+/* Hardware data memory info */
+struct imx21_dmem_area {
+	struct usb_host_endpoint *ep;
+	unsigned int offset;
+	unsigned int size;
+	struct list_head list;
+};
+
+#ifdef DEBUG
+struct debug_usage_stats {
+	unsigned int value;
+	unsigned int maximum;
+};
+
+struct debug_stats {
+	unsigned long submitted;
+	unsigned long completed_ok;
+	unsigned long completed_failed;
+	unsigned long unlinked;
+	unsigned long queue_etd;
+	unsigned long queue_dmem;
+};
+
+struct debug_isoc_trace {
+	int schedule_frame;
+	int submit_frame;
+	int request_len;
+	int done_frame;
+	int done_len;
+	int cc;
+	struct td *td;
+};
+#endif
+
+/* HCD data structure */
+struct imx21 {
+	spinlock_t lock;
+	struct device *dev;
+	struct usb_hcd *hcd;
+	struct mx21_usbh_platform_data *pdata;
+	struct list_head dmem_list;
+	struct list_head queue_for_etd; /* eps queued due to etd shortage */
+	struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
+	struct etd_priv etd[USB_NUM_ETD];
+	struct clk *clk;
+	void __iomem *regs;
+#ifdef DEBUG
+	struct dentry *debug_root;
+	struct debug_stats nonisoc_stats;
+	struct debug_stats isoc_stats;
+	struct debug_usage_stats etd_usage;
+	struct debug_usage_stats dmem_usage;
+	struct debug_isoc_trace isoc_trace[20];
+	struct debug_isoc_trace isoc_trace_failed[20];
+	unsigned long debug_unblocks;
+	int isoc_trace_index;
+	int isoc_trace_index_failed;
+#endif
+};
+
+#endif
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
new file mode 100644
index 0000000..74da136
--- /dev/null
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -0,0 +1,1701 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISP116x HCD (Host Controller Driver) for USB.
+ *
+ * Derived from the SL811 HCD, rewritten for ISP116x.
+ * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
+ *
+ * Portions:
+ * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Copyright (C) 2004 David Brownell
+ *
+ * Periodic scheduling is based on Roman's OHCI code
+ * Copyright (C) 1999 Roman Weissgaerber
+ *
+ */
+
+/*
+ * The driver basically works. A number of people have used it with a range
+ * of devices.
+ *
+ * The driver passes all usbtests 1-14.
+ *
+ * Suspending/resuming of root hub via sysfs works. Remote wakeup works too.
+ * And suspending/resuming of platform device works too. Suspend/resume
+ * via HCD operations vector is not implemented.
+ *
+ * Iso transfer support is not implemented. Adding this would include
+ * implementing recovery from failure to service the processed ITL
+ * fifo ram in time, which would involve a chip reset.
+ *
+ * TODO:
+ + More testing of suspend/resume.
+*/
+
+/*
+  ISP116x chips require certain delays between accesses to their
+  registers. The following timing options exist.
+
+  1. Configure your memory controller (the best)
+  2. Implement a platform-specific delay function, possibly
+  combined with configuring the memory controller; see
+  include/linux/usb/isp116x.h for more info. Some broken
+  memory controllers, like the LH7A400 SMC, need this. For
+  that to work, also uncomment the USE_PLATFORM_DELAY
+  macro below.
+  3. Use ndelay (easiest, poorest). For that, uncomment
+  the USE_NDELAY macro below.
+*/
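+/*
+  A minimal sketch of option 2 (hypothetical board support code; the
+  'delay' hook is the one in struct isp116x_platform_data):
+
+	static void board_isp116x_delay(struct device *dev, int delay)
+	{
+		ndelay(delay);
+	}
+
+	static struct isp116x_platform_data board_pdata = {
+		.delay	= board_isp116x_delay,
+	};
+*/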
+#define USE_PLATFORM_DELAY
+//#define USE_NDELAY
+
+//#define DEBUG
+//#define VERBOSE
+/* Transfer descriptors. See dump_ptd() for printout format  */
+//#define PTD_TRACE
+/* enqueuing/finishing log of urbs */
+//#define URB_TRACE
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/isp116x.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include "isp116x.h"
+
+#define DRIVER_VERSION	"03 Nov 2005"
+#define DRIVER_DESC	"ISP116x USB Host Controller Driver"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+static const char hcd_name[] = "isp116x-hcd";
+
+/*-----------------------------------------------------------------*/
+
+/*
+  Write len bytes to fifo, pad till 32-bit boundary
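+  (e.g. len = 5 yields three 16-bit data writes covering 6 bytes plus
+  one 16-bit pad write, 8 bytes in total)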
+ */
+static void write_ptddata_to_fifo(struct isp116x *isp116x, void *buf, int len)
+{
+	u8 *dp = (u8 *) buf;
+	u16 *dp2 = (u16 *) buf;
+	u16 w;
+	int quot = len % 4;
+
+	/* buffer is already in 'usb data order', which is LE. */
+	/* When reading buffer as u16, we have to take care byte order */
+	/* doesn't get mixed up */
+
+	if ((unsigned long)dp2 & 1) {
+		/* not aligned */
+		for (; len > 1; len -= 2) {
+			w = *dp++;
+			w |= *dp++ << 8;
+			isp116x_raw_write_data16(isp116x, w);
+		}
+		if (len)
+			isp116x_write_data16(isp116x, (u16)*dp);
+	} else {
+		/* aligned */
+		for (; len > 1; len -= 2) {
+			/* Keep byte order ! */
+			isp116x_raw_write_data16(isp116x, cpu_to_le16(*dp2++));
+		}
+
+		if (len)
+			isp116x_write_data16(isp116x, 0xff & *((u8 *) dp2));
+	}
+	if (quot == 1 || quot == 2)
+		isp116x_raw_write_data16(isp116x, 0);
+}
+
+/*
+  Read len bytes from fifo and then read till 32-bit boundary.
+ */
+static void read_ptddata_from_fifo(struct isp116x *isp116x, void *buf, int len)
+{
+	u8 *dp = (u8 *) buf;
+	u16 *dp2 = (u16 *) buf;
+	u16 w;
+	int quot = len % 4;
+
+	/* buffer is already in 'usb data order', which is LE. */
+	/* When reading buffer as u16, we have to take care byte order */
+	/* doesn't get mixed up */
+
+	if ((unsigned long)dp2 & 1) {
+		/* not aligned */
+		for (; len > 1; len -= 2) {
+			w = isp116x_raw_read_data16(isp116x);
+			*dp++ = w & 0xff;
+			*dp++ = (w >> 8) & 0xff;
+		}
+
+		if (len)
+			*dp = 0xff & isp116x_read_data16(isp116x);
+	} else {
+		/* aligned */
+		for (; len > 1; len -= 2) {
+			/* Keep byte order! */
+			*dp2++ = le16_to_cpu(isp116x_raw_read_data16(isp116x));
+		}
+
+		if (len)
+			*(u8 *) dp2 = 0xff & isp116x_read_data16(isp116x);
+	}
+	if (quot == 1 || quot == 2)
+		isp116x_raw_read_data16(isp116x);
+}
+
+/*
+  Write ptd's and data for scheduled transfers into
+  the fifo ram. Fifo must be empty and ready.
+*/
+static void pack_fifo(struct isp116x *isp116x)
+{
+	struct isp116x_ep *ep;
+	struct ptd *ptd;
+	int buflen = isp116x->atl_last_dir == PTD_DIR_IN
+	    ? isp116x->atl_bufshrt : isp116x->atl_buflen;
+
+	isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT);
+	isp116x_write_reg16(isp116x, HCXFERCTR, buflen);
+	isp116x_write_addr(isp116x, HCATLPORT | ISP116x_WRITE_OFFSET);
+	for (ep = isp116x->atl_active; ep; ep = ep->active) {
+		ptd = &ep->ptd;
+		dump_ptd(ptd);
+		dump_ptd_out_data(ptd, ep->data);
+		isp116x_write_data16(isp116x, ptd->count);
+		isp116x_write_data16(isp116x, ptd->mps);
+		isp116x_write_data16(isp116x, ptd->len);
+		isp116x_write_data16(isp116x, ptd->faddr);
+		buflen -= sizeof(struct ptd);
+		/* Skip writing data for last IN PTD */
+		if (ep->active || (isp116x->atl_last_dir != PTD_DIR_IN)) {
+			write_ptddata_to_fifo(isp116x, ep->data, ep->length);
+			buflen -= ALIGN(ep->length, 4);
+		}
+	}
+	BUG_ON(buflen);
+}
+
+/*
+  Read the processed ptd's and data from fifo ram back to
+  URBs' buffers. Fifo must be full and done
+*/
+static void unpack_fifo(struct isp116x *isp116x)
+{
+	struct isp116x_ep *ep;
+	struct ptd *ptd;
+	int buflen = isp116x->atl_last_dir == PTD_DIR_IN
+	    ? isp116x->atl_buflen : isp116x->atl_bufshrt;
+
+	isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT);
+	isp116x_write_reg16(isp116x, HCXFERCTR, buflen);
+	isp116x_write_addr(isp116x, HCATLPORT);
+	for (ep = isp116x->atl_active; ep; ep = ep->active) {
+		ptd = &ep->ptd;
+		ptd->count = isp116x_read_data16(isp116x);
+		ptd->mps = isp116x_read_data16(isp116x);
+		ptd->len = isp116x_read_data16(isp116x);
+		ptd->faddr = isp116x_read_data16(isp116x);
+		buflen -= sizeof(struct ptd);
+		/* Skip reading data for last Setup or Out PTD */
+		if (ep->active || (isp116x->atl_last_dir == PTD_DIR_IN)) {
+			read_ptddata_from_fifo(isp116x, ep->data, ep->length);
+			buflen -= ALIGN(ep->length, 4);
+		}
+		dump_ptd(ptd);
+		dump_ptd_in_data(ptd, ep->data);
+	}
+	BUG_ON(buflen);
+}
+
+/*---------------------------------------------------------------*/
+
+/*
+  Set up PTD's.
+*/
+static void preproc_atl_queue(struct isp116x *isp116x)
+{
+	struct isp116x_ep *ep;
+	struct urb *urb;
+	struct ptd *ptd;
+	u16 len;
+
+	for (ep = isp116x->atl_active; ep; ep = ep->active) {
+		u16 toggle = 0, dir = PTD_DIR_SETUP;
+
+		BUG_ON(list_empty(&ep->hep->urb_list));
+		urb = container_of(ep->hep->urb_list.next,
+				   struct urb, urb_list);
+		ptd = &ep->ptd;
+		len = ep->length;
+		ep->data = (unsigned char *)urb->transfer_buffer
+		    + urb->actual_length;
+
+		switch (ep->nextpid) {
+		case USB_PID_IN:
+			toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
+			dir = PTD_DIR_IN;
+			break;
+		case USB_PID_OUT:
+			toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
+			dir = PTD_DIR_OUT;
+			break;
+		case USB_PID_SETUP:
+			len = sizeof(struct usb_ctrlrequest);
+			ep->data = urb->setup_packet;
+			break;
+		case USB_PID_ACK:
+			toggle = 1;
+			len = 0;
+			dir = (urb->transfer_buffer_length
+			       && usb_pipein(urb->pipe))
+			    ? PTD_DIR_OUT : PTD_DIR_IN;
+			break;
+		default:
+			ERR("%s %d: ep->nextpid %d\n", __func__, __LINE__,
+			    ep->nextpid);
+			BUG();
+		}
+
+		ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
+		ptd->mps = PTD_MPS(ep->maxpacket)
+		    | PTD_SPD(urb->dev->speed == USB_SPEED_LOW)
+		    | PTD_EP(ep->epnum);
+		ptd->len = PTD_LEN(len) | PTD_DIR(dir);
+		ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
+		if (!ep->active) {
+			ptd->mps |= PTD_LAST_MSK;
+			isp116x->atl_last_dir = dir;
+		}
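+		/* atl_bufshrt counts the fifo image up to the end of this
+		   PTD header; atl_buflen also includes its (aligned)
+		   payload. pack_fifo()/unpack_fifo() choose between the
+		   two because no data is written for a trailing IN PTD
+		   and none is read back for a trailing OUT/SETUP PTD. */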
+		isp116x->atl_bufshrt = sizeof(struct ptd) + isp116x->atl_buflen;
+		isp116x->atl_buflen = isp116x->atl_bufshrt + ALIGN(len, 4);
+	}
+}
+
+/*
+  Take done or failed requests out of schedule. Give back
+  processed urbs.
+*/
+static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
+			   struct urb *urb, int status)
+__releases(isp116x->lock) __acquires(isp116x->lock)
+{
+	unsigned i;
+
+	ep->error_count = 0;
+
+	if (usb_pipecontrol(urb->pipe))
+		ep->nextpid = USB_PID_SETUP;
+
+	urb_dbg(urb, "Finish");
+
+	usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb);
+	spin_unlock(&isp116x->lock);
+	usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb, status);
+	spin_lock(&isp116x->lock);
+
+	/* take idle endpoints out of the schedule */
+	if (!list_empty(&ep->hep->urb_list))
+		return;
+
+	/* async deschedule */
+	if (!list_empty(&ep->schedule)) {
+		list_del_init(&ep->schedule);
+		return;
+	}
+
+	/* periodic deschedule */
+	DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+	for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
+		struct isp116x_ep *temp;
+		struct isp116x_ep **prev = &isp116x->periodic[i];
+
+		while (*prev && ((temp = *prev) != ep))
+			prev = &temp->next;
+		if (*prev)
+			*prev = ep->next;
+		isp116x->load[i] -= ep->load;
+	}
+	ep->branch = PERIODIC_SIZE;
+	isp116x_to_hcd(isp116x)->self.bandwidth_allocated -=
+	    ep->load / ep->period;
+
+	/* switch irq type? */
+	if (!--isp116x->periodic_count) {
+		isp116x->irqenb &= ~HCuPINT_SOF;
+		isp116x->irqenb |= HCuPINT_ATL;
+	}
+}
+
+/*
+  Analyze transfer results, handle partial transfers and errors
+*/
+static void postproc_atl_queue(struct isp116x *isp116x)
+{
+	struct isp116x_ep *ep;
+	struct urb *urb;
+	struct usb_device *udev;
+	struct ptd *ptd;
+	int short_not_ok;
+	int status;
+	u8 cc;
+
+	for (ep = isp116x->atl_active; ep; ep = ep->active) {
+		BUG_ON(list_empty(&ep->hep->urb_list));
+		urb =
+		    container_of(ep->hep->urb_list.next, struct urb, urb_list);
+		udev = urb->dev;
+		ptd = &ep->ptd;
+		cc = PTD_GET_CC(ptd);
+		short_not_ok = 1;
+		status = -EINPROGRESS;
+
+		/* Data underrun is special. For an allowed underrun
+		   we clear the error and continue as normal. For a
+		   forbidden underrun we finish the DATA stage
+		   immediately, while for a control transfer we
+		   still do a STATUS stage. */
+		if (cc == TD_DATAUNDERRUN) {
+			if (!(urb->transfer_flags & URB_SHORT_NOT_OK) ||
+					usb_pipecontrol(urb->pipe)) {
+				DBG("Allowed or control data underrun\n");
+				cc = TD_CC_NOERROR;
+				short_not_ok = 0;
+			} else {
+				ep->error_count = 1;
+				usb_settoggle(udev, ep->epnum,
+					      ep->nextpid == USB_PID_OUT,
+					      PTD_GET_TOGGLE(ptd));
+				urb->actual_length += PTD_GET_COUNT(ptd);
+				status = cc_to_error[TD_DATAUNDERRUN];
+				goto done;
+			}
+		}
+
+		if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED
+		    && (++ep->error_count >= 3 || cc == TD_CC_STALL
+			|| cc == TD_DATAOVERRUN)) {
+			status = cc_to_error[cc];
+			if (ep->nextpid == USB_PID_ACK)
+				ep->nextpid = 0;
+			goto done;
+		}
+		/* According to the usb spec, a zero-length Int transfer
+		   signals completion of the urb. Hey, does this apply
+		   only to IN endpoints? */
+		if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) {
+			status = 0;
+			goto done;
+		}
+
+		/* Relax after previously failed, but later succeeded
+		   or correctly NAK'ed retransmission attempt */
+		if (ep->error_count
+		    && (cc == TD_CC_NOERROR || cc == TD_NOTACCESSED))
+			ep->error_count = 0;
+
+		/* Take into account idiosyncrasies of the isp116x chip
+		   regarding toggle bit for failed transfers */
+		if (ep->nextpid == USB_PID_OUT)
+			usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)
+				      ^ (ep->error_count > 0));
+		else if (ep->nextpid == USB_PID_IN)
+			usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)
+				      ^ (ep->error_count > 0));
+
+		switch (ep->nextpid) {
+		case USB_PID_IN:
+		case USB_PID_OUT:
+			urb->actual_length += PTD_GET_COUNT(ptd);
+			if (PTD_GET_ACTIVE(ptd)
+			    || (cc != TD_CC_NOERROR && cc < 0x0E))
+				break;
+			if (urb->transfer_buffer_length != urb->actual_length) {
+				if (short_not_ok)
+					break;
+			} else {
+				if (urb->transfer_flags & URB_ZERO_PACKET
+				    && ep->nextpid == USB_PID_OUT
+				    && !(PTD_GET_COUNT(ptd) % ep->maxpacket)) {
+					DBG("Zero packet requested\n");
+					break;
+				}
+			}
+			/* All data for this URB is transferred, let's finish */
+			if (usb_pipecontrol(urb->pipe))
+				ep->nextpid = USB_PID_ACK;
+			else
+				status = 0;
+			break;
+		case USB_PID_SETUP:
+			if (PTD_GET_ACTIVE(ptd)
+			    || (cc != TD_CC_NOERROR && cc < 0x0E))
+				break;
+			if (urb->transfer_buffer_length == urb->actual_length)
+				ep->nextpid = USB_PID_ACK;
+			else if (usb_pipeout(urb->pipe)) {
+				usb_settoggle(udev, 0, 1, 1);
+				ep->nextpid = USB_PID_OUT;
+			} else {
+				usb_settoggle(udev, 0, 0, 1);
+				ep->nextpid = USB_PID_IN;
+			}
+			break;
+		case USB_PID_ACK:
+			if (PTD_GET_ACTIVE(ptd)
+			    || (cc != TD_CC_NOERROR && cc < 0x0E))
+				break;
+			status = 0;
+			ep->nextpid = 0;
+			break;
+		default:
+			BUG();
+		}
+
+ done:
+		if (status != -EINPROGRESS || urb->unlinked)
+			finish_request(isp116x, ep, urb, status);
+	}
+}
+
+/*
+  Scan transfer lists, schedule transfers, send data off
+  to chip.
+ */
+static void start_atl_transfers(struct isp116x *isp116x)
+{
+	struct isp116x_ep *last_ep = NULL, *ep;
+	struct urb *urb;
+	u16 load = 0;
+	int len, index, speed, byte_time;
+
+	if (atomic_read(&isp116x->atl_finishing))
+		return;
+
+	if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state))
+		return;
+
+	/* FIFO not empty? */
+	if (isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_FULL)
+		return;
+
+	isp116x->atl_active = NULL;
+	isp116x->atl_buflen = isp116x->atl_bufshrt = 0;
+
+	/* Schedule int transfers */
+	if (isp116x->periodic_count) {
+		isp116x->fmindex = index =
+		    (isp116x->fmindex + 1) & (PERIODIC_SIZE - 1);
+		load = isp116x->load[index];
+		if (load) {
+			/* Bring all int transfers for this frame
+			   into the active queue */
+			isp116x->atl_active = last_ep =
+			    isp116x->periodic[index];
+			while (last_ep->next)
+				last_ep = (last_ep->active = last_ep->next);
+			last_ep->active = NULL;
+		}
+	}
+
+	/* Schedule control/bulk transfers */
+	list_for_each_entry(ep, &isp116x->async, schedule) {
+		urb = container_of(ep->hep->urb_list.next,
+				   struct urb, urb_list);
+		speed = urb->dev->speed;
+		byte_time = speed == USB_SPEED_LOW
+		    ? BYTE_TIME_LOWSPEED : BYTE_TIME_FULLSPEED;
+
+		if (ep->nextpid == USB_PID_SETUP) {
+			len = sizeof(struct usb_ctrlrequest);
+		} else if (ep->nextpid == USB_PID_ACK) {
+			len = 0;
+		} else {
+			/* Find current free length ... */
+			len = (MAX_LOAD_LIMIT - load) / byte_time;
+
+			/* ... then limit it to configured max size ... */
+			len = min(len, speed == USB_SPEED_LOW ?
+				  MAX_TRANSFER_SIZE_LOWSPEED :
+				  MAX_TRANSFER_SIZE_FULLSPEED);
+
+			/* ... and finally cut to the multiple of MaxPacketSize,
+			   or to the real length if there's enough room. */
+			if (len <
+			    (urb->transfer_buffer_length -
+			     urb->actual_length)) {
+				len -= len % ep->maxpacket;
+				if (!len)
+					continue;
+			} else
+				len = urb->transfer_buffer_length -
+				    urb->actual_length;
+			BUG_ON(len < 0);
+		}
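+		/* Illustrative example: with 1000 bytes left on a
+		   full-speed endpoint (maxpacket 64) but frame budget
+		   for only, say, 300 more bytes, len becomes 300 and is
+		   then cut down to 256, i.e. 4 whole packets. */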
+
+		load += len * byte_time;
+		if (load > MAX_LOAD_LIMIT)
+			break;
+
+		ep->active = NULL;
+		ep->length = len;
+		if (last_ep)
+			last_ep->active = ep;
+		else
+			isp116x->atl_active = ep;
+		last_ep = ep;
+	}
+
+	/* Avoid starving of endpoints */
+	if ((&isp116x->async)->next != (&isp116x->async)->prev)
+		list_move(&isp116x->async, (&isp116x->async)->next);
+
+	if (isp116x->atl_active) {
+		preproc_atl_queue(isp116x);
+		pack_fifo(isp116x);
+	}
+}
+
+/*
+  Finish the processed transfers
+*/
+static void finish_atl_transfers(struct isp116x *isp116x)
+{
+	if (!isp116x->atl_active)
+		return;
+	/* Fifo not ready? */
+	if (!(isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_DONE))
+		return;
+
+	atomic_inc(&isp116x->atl_finishing);
+	unpack_fifo(isp116x);
+	postproc_atl_queue(isp116x);
+	atomic_dec(&isp116x->atl_finishing);
+}
+
+static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	u16 irqstat;
+	irqreturn_t ret = IRQ_NONE;
+
+	spin_lock(&isp116x->lock);
+	isp116x_write_reg16(isp116x, HCuPINTENB, 0);
+	irqstat = isp116x_read_reg16(isp116x, HCuPINT);
+	isp116x_write_reg16(isp116x, HCuPINT, irqstat);
+
+	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
+		ret = IRQ_HANDLED;
+		finish_atl_transfers(isp116x);
+	}
+
+	if (irqstat & HCuPINT_OPR) {
+		u32 intstat = isp116x_read_reg32(isp116x, HCINTSTAT);
+		isp116x_write_reg32(isp116x, HCINTSTAT, intstat);
+		if (intstat & HCINT_UE) {
+			ERR("Unrecoverable error, HC is dead!\n");
+			/* IRQs are off, we do no DMA,
+			   perfectly ready to die ... */
+			hcd->state = HC_STATE_HALT;
+			usb_hc_died(hcd);
+			ret = IRQ_HANDLED;
+			goto done;
+		}
+		if (intstat & HCINT_RHSC)
+			/* When the root hub or any of its ports comes
+			   out of suspend, it may take more than 10 ms
+			   for the status bits to stabilize. */
+			mod_timer(&hcd->rh_timer, jiffies
+				  + msecs_to_jiffies(20) + 1);
+		if (intstat & HCINT_RD) {
+			DBG("---- remote wakeup\n");
+			usb_hcd_resume_root_hub(hcd);
+		}
+		irqstat &= ~HCuPINT_OPR;
+		ret = IRQ_HANDLED;
+	}
+
+	if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) {
+		start_atl_transfers(isp116x);
+	}
+
+	isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb);
+      done:
+	spin_unlock(&isp116x->lock);
+	return ret;
+}
+
+/*-----------------------------------------------------------------*/
+
+/* USB 1.1 says at most 90% of a frame is available for periodic
+ * transfers. This driver doesn't promise that much, since it has to
+ * handle an IRQ per packet; IRQ handling latencies also use up that
+ * time.
+ */
+
+/* out of 1000 us */
+#define	MAX_PERIODIC_LOAD	600
+static int balance(struct isp116x *isp116x, u16 period, u16 load)
+{
+	int i, branch = -ENOSPC;
+
+	/* search for the least loaded schedule branch of that period
+	   which has enough bandwidth left unreserved. */
+	for (i = 0; i < period; i++) {
+		if (branch < 0 || isp116x->load[branch] > isp116x->load[i]) {
+			int j;
+
+			for (j = i; j < PERIODIC_SIZE; j += period) {
+				if ((isp116x->load[j] + load)
+				    > MAX_PERIODIC_LOAD)
+					break;
+			}
+			if (j < PERIODIC_SIZE)
+				continue;
+			branch = i;
+		}
+	}
+	return branch;
+}
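+
+/*
+ * Example (sketch): a period-4 interrupt endpoint with a load of 100 us
+ * that balance() assigns to branch 1 reserves 100 us in slots 1, 5, 9,
+ * ..., 29 of the 32-entry load[] table, provided none of those slots
+ * would exceed MAX_PERIODIC_LOAD (600 us).
+ */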
+
+/* NB! ALL the code above this point runs with isp116x->lock
+   held, irqs off
+*/
+
+/*-----------------------------------------------------------------*/
+
+static int isp116x_urb_enqueue(struct usb_hcd *hcd,
+			       struct urb *urb,
+			       gfp_t mem_flags)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	struct usb_device *udev = urb->dev;
+	unsigned int pipe = urb->pipe;
+	int is_out = !usb_pipein(pipe);
+	int type = usb_pipetype(pipe);
+	int epnum = usb_pipeendpoint(pipe);
+	struct usb_host_endpoint *hep = urb->ep;
+	struct isp116x_ep *ep = NULL;
+	unsigned long flags;
+	int i;
+	int ret = 0;
+
+	urb_dbg(urb, "Enqueue");
+
+	if (type == PIPE_ISOCHRONOUS) {
+		ERR("Isochronous transfers not supported\n");
+		urb_dbg(urb, "Refused to enqueue");
+		return -ENXIO;
+	}
+	/* avoid all allocations within spinlocks: request or endpoint */
+	if (!hep->hcpriv) {
+		ep = kzalloc(sizeof *ep, mem_flags);
+		if (!ep)
+			return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	if (!HC_IS_RUNNING(hcd->state)) {
+		kfree(ep);
+		ret = -ENODEV;
+		goto fail_not_linked;
+	}
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret) {
+		kfree(ep);
+		goto fail_not_linked;
+	}
+
+	if (hep->hcpriv)
+		ep = hep->hcpriv;
+	else {
+		INIT_LIST_HEAD(&ep->schedule);
+		ep->udev = udev;
+		ep->epnum = epnum;
+		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+		usb_settoggle(udev, epnum, is_out, 0);
+
+		if (type == PIPE_CONTROL) {
+			ep->nextpid = USB_PID_SETUP;
+		} else if (is_out) {
+			ep->nextpid = USB_PID_OUT;
+		} else {
+			ep->nextpid = USB_PID_IN;
+		}
+
+		if (urb->interval) {
+			/*
+			   With INT URBs submitted, the driver works with
+			   the SOF interrupt enabled and the ATL interrupt
+			   disabled. After the PTDs are written to FIFO RAM,
+			   the chip starts FIFO processing and USB transfers
+			   after the next SOF and continues until the
+			   transfers are finished (succeeded or failed) or
+			   the frame ends. Therefore, transfers occur only
+			   in every other frame, while FIFO reading/writing
+			   and data processing take place in the frames in
+			   between. */
+			if (urb->interval < 2)
+				urb->interval = 2;
+			if (urb->interval > 2 * PERIODIC_SIZE)
+				urb->interval = 2 * PERIODIC_SIZE;
+			ep->period = urb->interval >> 1;
+			ep->branch = PERIODIC_SIZE;
+			ep->load = usb_calc_bus_time(udev->speed,
+						     !is_out,
+						     (type == PIPE_ISOCHRONOUS),
+						     usb_maxpacket(udev, pipe,
+								   is_out)) /
+			    1000;
+		}
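+		/*
+		 * Example of the clamping above (illustrative): a
+		 * requested interval of 10 yields period 5 (transfers
+		 * every 10th frame); intervals of 1 and 1000 are
+		 * clamped to 2 and 64 (2 * PERIODIC_SIZE), i.e.
+		 * periods 1 and 32.
+		 */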
+		hep->hcpriv = ep;
+		ep->hep = hep;
+	}
+
+	/* maybe put endpoint into schedule */
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		if (list_empty(&ep->schedule))
+			list_add_tail(&ep->schedule, &isp116x->async);
+		break;
+	case PIPE_INTERRUPT:
+		urb->interval = ep->period;
+		ep->length = min_t(u32, ep->maxpacket,
+				 urb->transfer_buffer_length);
+
+		/* URB submitted for an already existing endpoint */
+		if (ep->branch < PERIODIC_SIZE)
+			break;
+
+		ep->branch = ret = balance(isp116x, ep->period, ep->load);
+		if (ret < 0)
+			goto fail;
+		ret = 0;
+
+		urb->start_frame = (isp116x->fmindex & (PERIODIC_SIZE - 1))
+		    + ep->branch;
+
+		/* sort each schedule branch by period (slow before fast)
+		   to share the faster parts of the tree without needing
+		   dummy/placeholder nodes */
+		DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+		for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
+			struct isp116x_ep **prev = &isp116x->periodic[i];
+			struct isp116x_ep *here = *prev;
+
+			while (here && ep != here) {
+				if (ep->period > here->period)
+					break;
+				prev = &here->next;
+				here = *prev;
+			}
+			if (ep != here) {
+				ep->next = here;
+				*prev = ep;
+			}
+			isp116x->load[i] += ep->load;
+		}
+		hcd->self.bandwidth_allocated += ep->load / ep->period;
+
+		/* switch over to SOFint */
+		if (!isp116x->periodic_count++) {
+			isp116x->irqenb &= ~HCuPINT_ATL;
+			isp116x->irqenb |= HCuPINT_SOF;
+			isp116x_write_reg16(isp116x, HCuPINTENB,
+					    isp116x->irqenb);
+		}
+	}
+
+	urb->hcpriv = hep;
+	start_atl_transfers(isp116x);
+
+      fail:
+	if (ret)
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+      fail_not_linked:
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return ret;
+}
+
+/*
+   Dequeue URBs.
+*/
+static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+		int status)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	struct usb_host_endpoint *hep;
+	struct isp116x_ep *ep, *ep_act;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto done;
+
+	hep = urb->hcpriv;
+	ep = hep->hcpriv;
+	WARN_ON(hep != ep->hep);
+
+	/* In front of queue? */
+	if (ep->hep->urb_list.next == &urb->urb_list)
+		/* active? */
+		for (ep_act = isp116x->atl_active; ep_act;
+		     ep_act = ep_act->active)
+			if (ep_act == ep) {
+				VDBG("dequeue, urb %p active; wait for irq\n",
+				     urb);
+				urb = NULL;
+				break;
+			}
+
+	if (urb)
+		finish_request(isp116x, ep, urb, status);
+ done:
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return rc;
+}
+
+static void isp116x_endpoint_disable(struct usb_hcd *hcd,
+				     struct usb_host_endpoint *hep)
+{
+	int i;
+	struct isp116x_ep *ep = hep->hcpriv;
+
+	if (!ep)
+		return;
+
+	/* just wait for the IRQ handler to finish the pending URBs */
+	for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++)
+		msleep(3);
+	if (!list_empty(&hep->urb_list))
+		WARNING("ep %p not empty?\n", ep);
+
+	kfree(ep);
+	hep->hcpriv = NULL;
+}
+
+static int isp116x_get_frame(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	u32 fmnum;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	fmnum = isp116x_read_reg32(isp116x, HCFMNUM);
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return (int)fmnum;
+}
+
+/*
+  Adapted from ohci-hub.c. Currently we don't support autosuspend.
+*/
+static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	int ports, i, changed = 0;
+	unsigned long flags;
+
+	if (!HC_IS_RUNNING(hcd->state))
+		return -ESHUTDOWN;
+
+	/* Report no status change for now if we are scheduled
+	   to be called again later */
+	if (timer_pending(&hcd->rh_timer))
+		return 0;
+
+	ports = isp116x->rhdesca & RH_A_NDP;
+	spin_lock_irqsave(&isp116x->lock, flags);
+	isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS);
+	if (isp116x->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
+		buf[0] = changed = 1;
+	else
+		buf[0] = 0;
+
+	for (i = 0; i < ports; i++) {
+		u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
+
+		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
+			      | RH_PS_OCIC | RH_PS_PRSC)) {
+			changed = 1;
+			buf[0] |= 1 << (i + 1);
+		}
+	}
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return changed;
+}
+
+static void isp116x_hub_descriptor(struct isp116x *isp116x,
+				   struct usb_hub_descriptor *desc)
+{
+	u32 reg = isp116x->rhdesca;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bDescLength = 9;
+	desc->bHubContrCurrent = 0;
+	desc->bNbrPorts = (u8) (reg & 0x3);
+	/* Power switching, device type, overcurrent. */
+	desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) &
+						       (HUB_CHAR_LPSM |
+							HUB_CHAR_COMPOUND |
+							HUB_CHAR_OCPM)));
+	desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff);
+	/* ports removable, and legacy PortPwrCtrlMask */
+	desc->u.hs.DeviceRemovable[0] = 0;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+}
+
+/* Perform a reset of the given port.
+   It would be great to just start the reset and let the
+   USB core clear the reset in due time. However,
+   root hub ports should be reset for at least 50 ms, while
+   our chip stays in reset for only about 10 ms, so we must
+   reset the port repeatedly ourselves here.
+*/
+static inline void root_port_reset(struct isp116x *isp116x, unsigned port)
+{
+	u32 tmp;
+	unsigned long flags, t;
+
+	/* Root hub reset should be 50 ms, but some devices
+	   want it even longer. */
+	t = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, t)) {
+		spin_lock_irqsave(&isp116x->lock, flags);
+		/* spin until any current reset finishes */
+		for (;;) {
+			tmp = isp116x_read_reg32(isp116x, port ?
+						 HCRHPORT2 : HCRHPORT1);
+			if (!(tmp & RH_PS_PRS))
+				break;
+			udelay(500);
+		}
+		/* Don't reset a disconnected port */
+		if (!(tmp & RH_PS_CCS)) {
+			spin_unlock_irqrestore(&isp116x->lock, flags);
+			break;
+		}
+		/* Reset lasts 10 ms (so the datasheet claims) */
+		isp116x_write_reg32(isp116x, port ? HCRHPORT2 :
+				    HCRHPORT1, (RH_PS_PRS));
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		msleep(10);
+	}
+}
+
+/* Adapted from ohci-hub.c */
+static int isp116x_hub_control(struct usb_hcd *hcd,
+			       u16 typeReq,
+			       u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	int ret = 0;
+	unsigned long flags;
+	int ports = isp116x->rhdesca & RH_A_NDP;
+	u32 tmp = 0;
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		DBG("ClearHubFeature: ");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			DBG("C_HUB_OVER_CURRENT\n");
+			spin_lock_irqsave(&isp116x->lock, flags);
+			isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
+			spin_unlock_irqrestore(&isp116x->lock, flags);
+			/* fall through */
+		case C_HUB_LOCAL_POWER:
+			DBG("C_HUB_LOCAL_POWER\n");
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetHubFeature:
+		DBG("SetHubFeature: ");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			DBG("C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case GetHubDescriptor:
+		DBG("GetHubDescriptor\n");
+		isp116x_hub_descriptor(isp116x,
+				       (struct usb_hub_descriptor *)buf);
+		break;
+	case GetHubStatus:
+		DBG("GetHubStatus\n");
+		*(__le32 *) buf = 0;
+		break;
+	case GetPortStatus:
+		DBG("GetPortStatus\n");
+		if (!wIndex || wIndex > ports)
+			goto error;
+		spin_lock_irqsave(&isp116x->lock, flags);
+		tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1);
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		*(__le32 *) buf = cpu_to_le32(tmp);
+		DBG("GetPortStatus: port[%d]  %08x\n", wIndex + 1, tmp);
+		break;
+	case ClearPortFeature:
+		DBG("ClearPortFeature: ");
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			DBG("USB_PORT_FEAT_ENABLE\n");
+			tmp = RH_PS_CCS;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			DBG("USB_PORT_FEAT_C_ENABLE\n");
+			tmp = RH_PS_PESC;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			DBG("USB_PORT_FEAT_SUSPEND\n");
+			tmp = RH_PS_POCI;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			DBG("USB_PORT_FEAT_C_SUSPEND\n");
+			tmp = RH_PS_PSSC;
+			break;
+		case USB_PORT_FEAT_POWER:
+			DBG("USB_PORT_FEAT_POWER\n");
+			tmp = RH_PS_LSDA;
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			DBG("USB_PORT_FEAT_C_CONNECTION\n");
+			tmp = RH_PS_CSC;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			DBG("USB_PORT_FEAT_C_OVER_CURRENT\n");
+			tmp = RH_PS_OCIC;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			DBG("USB_PORT_FEAT_C_RESET\n");
+			tmp = RH_PS_PRSC;
+			break;
+		default:
+			goto error;
+		}
+		spin_lock_irqsave(&isp116x->lock, flags);
+		isp116x_write_reg32(isp116x, wIndex
+				    ? HCRHPORT2 : HCRHPORT1, tmp);
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		break;
+	case SetPortFeature:
+		DBG("SetPortFeature: ");
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			DBG("USB_PORT_FEAT_SUSPEND\n");
+			spin_lock_irqsave(&isp116x->lock, flags);
+			isp116x_write_reg32(isp116x, wIndex
+					    ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS);
+			spin_unlock_irqrestore(&isp116x->lock, flags);
+			break;
+		case USB_PORT_FEAT_POWER:
+			DBG("USB_PORT_FEAT_POWER\n");
+			spin_lock_irqsave(&isp116x->lock, flags);
+			isp116x_write_reg32(isp116x, wIndex
+					    ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS);
+			spin_unlock_irqrestore(&isp116x->lock, flags);
+			break;
+		case USB_PORT_FEAT_RESET:
+			DBG("USB_PORT_FEAT_RESET\n");
+			root_port_reset(isp116x, wIndex);
+			break;
+		default:
+			goto error;
+		}
+		break;
+
+	default:
+	      error:
+		/* "protocol stall" on error */
+		DBG("PROTOCOL STALL\n");
+		ret = -EPIPE;
+	}
+	return ret;
+}
+
+/*-----------------------------------------------------------------*/
+
+#ifdef CONFIG_DEBUG_FS
+
+static void dump_irq(struct seq_file *s, char *label, u16 mask)
+{
+	seq_printf(s, "%s %04x%s%s%s%s%s%s\n", label, mask,
+		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
+		   mask & HCuPINT_SUSP ? " susp" : "",
+		   mask & HCuPINT_OPR ? " opr" : "",
+		   mask & HCuPINT_AIIEOT ? " eot" : "",
+		   mask & HCuPINT_ATL ? " atl" : "",
+		   mask & HCuPINT_SOF ? " sof" : "");
+}
+
+static void dump_int(struct seq_file *s, char *label, u32 mask)
+{
+	seq_printf(s, "%s %08x%s%s%s%s%s%s%s\n", label, mask,
+		   mask & HCINT_MIE ? " MIE" : "",
+		   mask & HCINT_RHSC ? " rhsc" : "",
+		   mask & HCINT_FNO ? " fno" : "",
+		   mask & HCINT_UE ? " ue" : "",
+		   mask & HCINT_RD ? " rd" : "",
+		   mask & HCINT_SF ? " sof" : "", mask & HCINT_SO ? " so" : "");
+}
+
+static int isp116x_debug_show(struct seq_file *s, void *unused)
+{
+	struct isp116x *isp116x = s->private;
+
+	seq_printf(s, "%s\n%s version %s\n",
+		   isp116x_to_hcd(isp116x)->product_desc, hcd_name,
+		   DRIVER_VERSION);
+
+	if (HC_IS_SUSPENDED(isp116x_to_hcd(isp116x)->state)) {
+		seq_printf(s, "HCD is suspended\n");
+		return 0;
+	}
+	if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state)) {
+		seq_printf(s, "HCD not running\n");
+		return 0;
+	}
+
+	spin_lock_irq(&isp116x->lock);
+	dump_irq(s, "hc_irq_enable", isp116x_read_reg16(isp116x, HCuPINTENB));
+	dump_irq(s, "hc_irq_status", isp116x_read_reg16(isp116x, HCuPINT));
+	dump_int(s, "hc_int_enable", isp116x_read_reg32(isp116x, HCINTENB));
+	dump_int(s, "hc_int_status", isp116x_read_reg32(isp116x, HCINTSTAT));
+	isp116x_show_regs_seq(isp116x, s);
+	spin_unlock_irq(&isp116x->lock);
+	seq_printf(s, "\n");
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(isp116x_debug);
+
+static void create_debug_file(struct isp116x *isp116x)
+{
+	isp116x->dentry = debugfs_create_file(hcd_name,
+					      S_IRUGO, NULL, isp116x,
+					      &isp116x_debug_fops);
+}
+
+static void remove_debug_file(struct isp116x *isp116x)
+{
+	debugfs_remove(isp116x->dentry);
+}
+
+#else
+
+static inline void create_debug_file(struct isp116x *isp116x) { }
+static inline void remove_debug_file(struct isp116x *isp116x) { }
+
+#endif				/* CONFIG_DEBUG_FS */
+
+/*-----------------------------------------------------------------*/
+
+/*
+  Software reset - can be called from any context.
+*/
+static int isp116x_sw_reset(struct isp116x *isp116x)
+{
+	int retries = 15;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	isp116x_write_reg16(isp116x, HCSWRES, HCSWRES_MAGIC);
+	isp116x_write_reg32(isp116x, HCCMDSTAT, HCCMDSTAT_HCR);
+	while (--retries) {
+		/* It usually resets within 1 ms */
+		mdelay(1);
+		if (!(isp116x_read_reg32(isp116x, HCCMDSTAT) & HCCMDSTAT_HCR))
+			break;
+	}
+	if (!retries) {
+		ERR("Software reset timeout\n");
+		ret = -ETIME;
+	}
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return ret;
+}
+
+static int isp116x_reset(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	unsigned long t;
+	u16 clkrdy = 0;
+	int ret, timeout = 15 /* ms */ ;
+
+	ret = isp116x_sw_reset(isp116x);
+	if (ret)
+		return ret;
+
+	t = jiffies + msecs_to_jiffies(timeout);
+	while (time_before_eq(jiffies, t)) {
+		msleep(4);
+		spin_lock_irq(&isp116x->lock);
+		clkrdy = isp116x_read_reg16(isp116x, HCuPINT) & HCuPINT_CLKRDY;
+		spin_unlock_irq(&isp116x->lock);
+		if (clkrdy)
+			break;
+	}
+	if (!clkrdy) {
+		ERR("Clock not ready after %dms\n", timeout);
+		/* After a software reset the clock will not report
+		   ready if the H_WAKEUP pin is high. */
+		ERR("Please make sure that the H_WAKEUP pin is pulled low!\n");
+		ret = -ENODEV;
+	}
+	return ret;
+}
+
+static void isp116x_stop(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	isp116x_write_reg16(isp116x, HCuPINTENB, 0);
+
+	/* Switch off the ports' power; some devices don't come up
+	   after the next 'insmod' without this */
+	val = isp116x_read_reg32(isp116x, HCRHDESCA);
+	val &= ~(RH_A_NPS | RH_A_PSM);
+	isp116x_write_reg32(isp116x, HCRHDESCA, val);
+	isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_LPS);
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+
+	isp116x_sw_reset(isp116x);
+}
+
+/*
+  Configure the chip. The chip must be successfully reset by now.
+*/
+static int isp116x_start(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	struct isp116x_platform_data *board = isp116x->board;
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+
+	/* clear interrupt status and disable all interrupt sources */
+	isp116x_write_reg16(isp116x, HCuPINT, 0xff);
+	isp116x_write_reg16(isp116x, HCuPINTENB, 0);
+
+	val = isp116x_read_reg16(isp116x, HCCHIPID);
+	if ((val & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
+		ERR("Invalid chip ID %04x\n", val);
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		return -ENODEV;
+	}
+
+	/* To be removed in future */
+	hcd->uses_new_polling = 1;
+
+	isp116x_write_reg16(isp116x, HCITLBUFLEN, ISP116x_ITL_BUFSIZE);
+	isp116x_write_reg16(isp116x, HCATLBUFLEN, ISP116x_ATL_BUFSIZE);
+
+	/* ----- HW conf */
+	val = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
+	if (board->sel15Kres)
+		val |= HCHWCFG_15KRSEL;
+	/* Remote wakeup won't work without a running clock */
+	if (board->remote_wakeup_enable)
+		val |= HCHWCFG_CLKNOTSTOP;
+	if (board->oc_enable)
+		val |= HCHWCFG_ANALOG_OC;
+	if (board->int_act_high)
+		val |= HCHWCFG_INT_POL;
+	if (board->int_edge_triggered)
+		val |= HCHWCFG_INT_TRIGGER;
+	isp116x_write_reg16(isp116x, HCHWCFG, val);
+
+	/* ----- Root hub conf */
+	val = (25 << 24) & RH_A_POTPGT;
+	/* AN10003_1.pdf recommends that RH_A_NPS (no power switching)
+	   always be set. Instead, we request individual port
+	   power switching. */
+	val |= RH_A_PSM;
+	/* Report overcurrent per port */
+	val |= RH_A_OCPM;
+	isp116x_write_reg32(isp116x, HCRHDESCA, val);
+	isp116x->rhdesca = isp116x_read_reg32(isp116x, HCRHDESCA);
+
+	val = RH_B_PPCM;
+	isp116x_write_reg32(isp116x, HCRHDESCB, val);
+	isp116x->rhdescb = isp116x_read_reg32(isp116x, HCRHDESCB);
+
+	val = 0;
+	if (board->remote_wakeup_enable) {
+		if (!device_can_wakeup(hcd->self.controller))
+			device_init_wakeup(hcd->self.controller, 1);
+		val |= RH_HS_DRWE;
+	}
+	isp116x_write_reg32(isp116x, HCRHSTATUS, val);
+	isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS);
+
+	/* HcFmInterval: FI = 0x2edf (11999 bit times), FSMPS in the
+	   upper 16 bits */
+	isp116x_write_reg32(isp116x, HCFMINTVL, 0x27782edf);
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Set up interrupts */
+	isp116x->intenb = HCINT_MIE | HCINT_RHSC | HCINT_UE;
+	if (board->remote_wakeup_enable)
+		isp116x->intenb |= HCINT_RD;
+	isp116x->irqenb = HCuPINT_ATL | HCuPINT_OPR;	/* | HCuPINT_SUSP; */
+	isp116x_write_reg32(isp116x, HCINTENB, isp116x->intenb);
+	isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb);
+
+	/* Go operational */
+	val = HCCONTROL_USB_OPER;
+	if (board->remote_wakeup_enable)
+		val |= HCCONTROL_RWE;
+	isp116x_write_reg32(isp116x, HCCONTROL, val);
+
+	/* Disable ports to avoid a race in device enumeration */
+	isp116x_write_reg32(isp116x, HCRHPORT1, RH_PS_CCS);
+	isp116x_write_reg32(isp116x, HCRHPORT2, RH_PS_CCS);
+
+	isp116x_show_regs_log(isp116x);
+	spin_unlock_irqrestore(&isp116x->lock, flags);
+	return 0;
+}
+
+#ifdef	CONFIG_PM
+
+static int isp116x_bus_suspend(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	unsigned long flags;
+	u32 val;
+	int ret = 0;
+
+	spin_lock_irqsave(&isp116x->lock, flags);
+	val = isp116x_read_reg32(isp116x, HCCONTROL);
+
+	switch (val & HCCONTROL_HCFS) {
+	case HCCONTROL_USB_OPER:
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		val &= (~HCCONTROL_HCFS & ~HCCONTROL_RWE);
+		val |= HCCONTROL_USB_SUSPEND;
+		if (hcd->self.root_hub->do_remote_wakeup)
+			val |= HCCONTROL_RWE;
+		/* Wait for usb transfers to finish */
+		msleep(2);
+		spin_lock_irqsave(&isp116x->lock, flags);
+		isp116x_write_reg32(isp116x, HCCONTROL, val);
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		/* Wait for devices to suspend */
+		msleep(5);
+		break;
+	case HCCONTROL_USB_RESUME:
+		isp116x_write_reg32(isp116x, HCCONTROL,
+				    (val & ~HCCONTROL_HCFS) |
+				    HCCONTROL_USB_RESET);
+		/* fall through */
+	case HCCONTROL_USB_RESET:
+		ret = -EBUSY;
+		/* fall through */
+	default:		/* HCCONTROL_USB_SUSPEND */
+		spin_unlock_irqrestore(&isp116x->lock, flags);
+		break;
+	}
+
+	return ret;
+}
+
+static int isp116x_bus_resume(struct usb_hcd *hcd)
+{
+	struct isp116x *isp116x = hcd_to_isp116x(hcd);
+	u32 val;
+
+	msleep(5);
+	spin_lock_irq(&isp116x->lock);
+
+	val = isp116x_read_reg32(isp116x, HCCONTROL);
+	switch (val & HCCONTROL_HCFS) {
+	case HCCONTROL_USB_SUSPEND:
+		val &= ~HCCONTROL_HCFS;
+		val |= HCCONTROL_USB_RESUME;
+		isp116x_write_reg32(isp116x, HCCONTROL, val);
+		/* fall through */
+	case HCCONTROL_USB_RESUME:
+		break;
+	case HCCONTROL_USB_OPER:
+		spin_unlock_irq(&isp116x->lock);
+		return 0;
+	default:
+		/* HCCONTROL_USB_RESET: this may happen when the HC
+		   lost power during suspension. Reinitialize completely */
+		spin_unlock_irq(&isp116x->lock);
+		DBG("Chip has been reset while suspended. Reinit from scratch.\n");
+		isp116x_reset(hcd);
+		isp116x_start(hcd);
+		isp116x_hub_control(hcd, SetPortFeature,
+				    USB_PORT_FEAT_POWER, 1, NULL, 0);
+		if ((isp116x->rhdesca & RH_A_NDP) == 2)
+			isp116x_hub_control(hcd, SetPortFeature,
+					    USB_PORT_FEAT_POWER, 2, NULL, 0);
+		return 0;
+	}
+
+	val = isp116x->rhdesca & RH_A_NDP;
+	while (val--) {
+		u32 stat =
+		    isp116x_read_reg32(isp116x, val ? HCRHPORT2 : HCRHPORT1);
+		/* force global, not selective, resume */
+		if (!(stat & RH_PS_PSS))
+			continue;
+		DBG("%s: Resuming port %d\n", __func__, val);
+		isp116x_write_reg32(isp116x, val
+				    ? HCRHPORT2 : HCRHPORT1, RH_PS_POCI);
+	}
+	spin_unlock_irq(&isp116x->lock);
+
+	hcd->state = HC_STATE_RESUMING;
+	msleep(USB_RESUME_TIMEOUT);
+
+	/* Go operational */
+	spin_lock_irq(&isp116x->lock);
+	val = isp116x_read_reg32(isp116x, HCCONTROL);
+	isp116x_write_reg32(isp116x, HCCONTROL,
+			    (val & ~HCCONTROL_HCFS) | HCCONTROL_USB_OPER);
+	spin_unlock_irq(&isp116x->lock);
+	hcd->state = HC_STATE_RUNNING;
+
+	return 0;
+}
+
+#else
+
+#define	isp116x_bus_suspend	NULL
+#define	isp116x_bus_resume	NULL
+
+#endif
+
+static const struct hc_driver isp116x_hc_driver = {
+	.description = hcd_name,
+	.product_desc = "ISP116x Host Controller",
+	.hcd_priv_size = sizeof(struct isp116x),
+
+	.irq = isp116x_irq,
+	.flags = HCD_USB11,
+
+	.reset = isp116x_reset,
+	.start = isp116x_start,
+	.stop = isp116x_stop,
+
+	.urb_enqueue = isp116x_urb_enqueue,
+	.urb_dequeue = isp116x_urb_dequeue,
+	.endpoint_disable = isp116x_endpoint_disable,
+
+	.get_frame_number = isp116x_get_frame,
+
+	.hub_status_data = isp116x_hub_status_data,
+	.hub_control = isp116x_hub_control,
+	.bus_suspend = isp116x_bus_suspend,
+	.bus_resume = isp116x_bus_resume,
+};
+
+/*----------------------------------------------------------------*/
+
+static int isp116x_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct isp116x *isp116x;
+	struct resource *res;
+
+	if (!hcd)
+		return 0;
+	isp116x = hcd_to_isp116x(hcd);
+	remove_debug_file(isp116x);
+	usb_remove_hcd(hcd);
+
+	iounmap(isp116x->data_reg);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	release_mem_region(res->start, 2);
+	iounmap(isp116x->addr_reg);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, 2);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+static int isp116x_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct isp116x *isp116x;
+	struct resource *addr, *data, *ires;
+	void __iomem *addr_reg;
+	void __iomem *data_reg;
+	int irq;
+	int ret = 0;
+	unsigned long irqflags;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (pdev->num_resources < 3) {
+		ret = -ENODEV;
+		goto err1;
+	}
+
+	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	if (!addr || !data || !ires) {
+		ret = -ENODEV;
+		goto err1;
+	}
+
+	irq = ires->start;
+	irqflags = ires->flags & IRQF_TRIGGER_MASK;
+
+	if (pdev->dev.dma_mask) {
+		DBG("DMA not supported\n");
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	if (!request_mem_region(addr->start, 2, hcd_name)) {
+		ret = -EBUSY;
+		goto err1;
+	}
+	addr_reg = ioremap(addr->start, resource_size(addr));
+	if (addr_reg == NULL) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+	if (!request_mem_region(data->start, 2, hcd_name)) {
+		ret = -EBUSY;
+		goto err3;
+	}
+	data_reg = ioremap(data->start, resource_size(data));
+	if (data_reg == NULL) {
+		ret = -ENOMEM;
+		goto err4;
+	}
+
+	/* allocate and initialize hcd */
+	hcd = usb_create_hcd(&isp116x_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto err5;
+	}
+	/* this rsrc_start is bogus */
+	hcd->rsrc_start = addr->start;
+	isp116x = hcd_to_isp116x(hcd);
+	isp116x->data_reg = data_reg;
+	isp116x->addr_reg = addr_reg;
+	spin_lock_init(&isp116x->lock);
+	INIT_LIST_HEAD(&isp116x->async);
+	isp116x->board = dev_get_platdata(&pdev->dev);
+
+	if (!isp116x->board) {
+		ERR("Platform data structure not initialized\n");
+		ret = -ENODEV;
+		goto err6;
+	}
+	if (isp116x_check_platform_delay(isp116x)) {
+		ERR("USE_PLATFORM_DELAY defined, but delay function not "
+		    "implemented.\n");
+		ERR("See comments in drivers/usb/host/isp116x-hcd.c\n");
+		ret = -ENODEV;
+		goto err6;
+	}
+
+	ret = usb_add_hcd(hcd, irq, irqflags);
+	if (ret)
+		goto err6;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	create_debug_file(isp116x);
+
+	return 0;
+
+      err6:
+	usb_put_hcd(hcd);
+      err5:
+	iounmap(data_reg);
+      err4:
+	release_mem_region(data->start, 2);
+      err3:
+	iounmap(addr_reg);
+      err2:
+	release_mem_region(addr->start, 2);
+      err1:
+	ERR("init error, %d\n", ret);
+	return ret;
+}
+
+#ifdef	CONFIG_PM
+/*
+  Suspend of platform device
+*/
+static int isp116x_suspend(struct platform_device *dev, pm_message_t state)
+{
+	VDBG("%s: state %x\n", __func__, state.event);
+	return 0;
+}
+
+/*
+  Resume platform device
+*/
+static int isp116x_resume(struct platform_device *dev)
+{
+	VDBG("%s\n", __func__);
+	return 0;
+}
+
+#else
+
+#define	isp116x_suspend    NULL
+#define	isp116x_resume     NULL
+
+#endif
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:isp116x-hcd");
+
+static struct platform_driver isp116x_driver = {
+	.probe = isp116x_probe,
+	.remove = isp116x_remove,
+	.suspend = isp116x_suspend,
+	.resume = isp116x_resume,
+	.driver = {
+		.name = hcd_name,
+	},
+};
+
+module_platform_driver(isp116x_driver);
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
new file mode 100644
index 0000000..a5e929c
--- /dev/null
+++ b/drivers/usb/host/isp116x.h
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ISP116x register declarations and HCD data structures
+ *
+ * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
+ * Portions:
+ * Copyright (C) 2004 Lothar Wassmann
+ * Copyright (C) 2004 Psion Teklogix
+ * Copyright (C) 2004 David Brownell
+ */
+
+/* us of a 1 ms frame available for transfers */
+#define  MAX_LOAD_LIMIT		850
+
+/* Full speed: max # of bytes to transfer for a single urb
+   at a time must be < 1024 && must be multiple of 64.
+   832 allows transferring 4kiB within 5 frames. */
+#define MAX_TRANSFER_SIZE_FULLSPEED	832
+
+/* Low speed: there is no reason to schedule in very big
+   chunks; often the requested long transfers are for
+   string descriptors containing short strings. */
+#define MAX_TRANSFER_SIZE_LOWSPEED	64
+
+/* Byte time (us): a rough indication of how long it takes
+   to transfer a byte of useful data over USB */
+#define BYTE_TIME_FULLSPEED	1
+#define BYTE_TIME_LOWSPEED	20
+
+/* Buffer sizes */
+#define ISP116x_BUF_SIZE	4096
+#define ISP116x_ITL_BUFSIZE	0
+#define ISP116x_ATL_BUFSIZE	((ISP116x_BUF_SIZE) - 2*(ISP116x_ITL_BUFSIZE))
+
+#define ISP116x_WRITE_OFFSET	0x80
+
+/*------------ ISP116x registers/bits ------------*/
+#define	HCREVISION	0x00
+#define	HCCONTROL	0x01
+#define		HCCONTROL_HCFS	(3 << 6)	/* host controller
+						   functional state */
+#define		HCCONTROL_USB_RESET	(0 << 6)
+#define		HCCONTROL_USB_RESUME	(1 << 6)
+#define		HCCONTROL_USB_OPER	(2 << 6)
+#define		HCCONTROL_USB_SUSPEND	(3 << 6)
+#define		HCCONTROL_RWC	(1 << 9)	/* remote wakeup connected */
+#define		HCCONTROL_RWE	(1 << 10)	/* remote wakeup enable */
+#define	HCCMDSTAT	0x02
+#define		HCCMDSTAT_HCR	(1 << 0)	/* host controller reset */
+#define		HCCMDSTAT_SOC	(3 << 16)	/* scheduling overrun count */
+#define	HCINTSTAT	0x03
+#define		HCINT_SO	(1 << 0)	/* scheduling overrun */
+#define		HCINT_WDH	(1 << 1)	/* writeback of done_head */
+#define		HCINT_SF	(1 << 2)	/* start frame */
+#define		HCINT_RD	(1 << 3)	/* resume detect */
+#define		HCINT_UE	(1 << 4)	/* unrecoverable error */
+#define		HCINT_FNO	(1 << 5)	/* frame number overflow */
+#define		HCINT_RHSC	(1 << 6)	/* root hub status change */
+#define		HCINT_OC	(1 << 30)	/* ownership change */
+#define		HCINT_MIE	(1 << 31)	/* master interrupt enable */
+#define	HCINTENB	0x04
+#define	HCINTDIS	0x05
+#define	HCFMINTVL	0x0d
+#define	HCFMREM		0x0e
+#define	HCFMNUM		0x0f
+#define	HCLSTHRESH	0x11
+#define	HCRHDESCA	0x12
+#define		RH_A_NDP	(0x3 << 0)	/* # downstream ports */
+#define		RH_A_PSM	(1 << 8)	/* power switching mode */
+#define		RH_A_NPS	(1 << 9)	/* no power switching */
+#define		RH_A_DT		(1 << 10)	/* device type (mbz) */
+#define		RH_A_OCPM	(1 << 11)	/* overcurrent protection
+						   mode */
+#define		RH_A_NOCP	(1 << 12)	/* no overcurrent protection */
+#define		RH_A_POTPGT	(0xff << 24)	/* power on -> power good
+						   time */
+#define	HCRHDESCB	0x13
+#define		RH_B_DR		(0xffff << 0)	/* device removable flags */
+#define		RH_B_PPCM	(0xffff << 16)	/* port power control mask */
+#define	HCRHSTATUS	0x14
+#define		RH_HS_LPS	(1 << 0)	/* local power status */
+#define		RH_HS_OCI	(1 << 1)	/* over current indicator */
+#define		RH_HS_DRWE	(1 << 15)	/* device remote wakeup
+						   enable */
+#define		RH_HS_LPSC	(1 << 16)	/* local power status change */
+#define		RH_HS_OCIC	(1 << 17)	/* over current indicator
+						   change */
+#define		RH_HS_CRWE	(1 << 31)	/* clear remote wakeup
+						   enable */
+#define	HCRHPORT1	0x15
+#define		RH_PS_CCS	(1 << 0)	/* current connect status */
+#define		RH_PS_PES	(1 << 1)	/* port enable status */
+#define		RH_PS_PSS	(1 << 2)	/* port suspend status */
+#define		RH_PS_POCI	(1 << 3)	/* port over current
+						   indicator */
+#define		RH_PS_PRS	(1 << 4)	/* port reset status */
+#define		RH_PS_PPS	(1 << 8)	/* port power status */
+#define		RH_PS_LSDA	(1 << 9)	/* low speed device attached */
+#define		RH_PS_CSC	(1 << 16)	/* connect status change */
+#define		RH_PS_PESC	(1 << 17)	/* port enable status change */
+#define		RH_PS_PSSC	(1 << 18)	/* port suspend status
+						   change */
+#define		RH_PS_OCIC	(1 << 19)	/* over current indicator
+						   change */
+#define		RH_PS_PRSC	(1 << 20)	/* port reset status change */
+#define		HCRHPORT_CLRMASK	(0x1f << 16)
+#define	HCRHPORT2	0x16
+#define	HCHWCFG		0x20
+#define		HCHWCFG_15KRSEL		(1 << 12)
+#define		HCHWCFG_CLKNOTSTOP	(1 << 11)
+#define		HCHWCFG_ANALOG_OC	(1 << 10)
+#define		HCHWCFG_DACK_MODE	(1 << 8)
+#define		HCHWCFG_EOT_POL		(1 << 7)
+#define		HCHWCFG_DACK_POL	(1 << 6)
+#define		HCHWCFG_DREQ_POL	(1 << 5)
+#define		HCHWCFG_DBWIDTH_MASK	(0x03 << 3)
+#define		HCHWCFG_DBWIDTH(n)	(((n) << 3) & HCHWCFG_DBWIDTH_MASK)
+#define		HCHWCFG_INT_POL		(1 << 2)
+#define		HCHWCFG_INT_TRIGGER	(1 << 1)
+#define		HCHWCFG_INT_ENABLE	(1 << 0)
+#define	HCDMACFG	0x21
+#define		HCDMACFG_BURST_LEN_MASK	(0x03 << 5)
+#define		HCDMACFG_BURST_LEN(n)	(((n) << 5) & HCDMACFG_BURST_LEN_MASK)
+#define		HCDMACFG_BURST_LEN_1	HCDMACFG_BURST_LEN(0)
+#define		HCDMACFG_BURST_LEN_4	HCDMACFG_BURST_LEN(1)
+#define		HCDMACFG_BURST_LEN_8	HCDMACFG_BURST_LEN(2)
+#define		HCDMACFG_DMA_ENABLE	(1 << 4)
+#define		HCDMACFG_BUF_TYPE_MASK	(0x07 << 1)
+#define		HCDMACFG_CTR_SEL	(1 << 2)
+#define		HCDMACFG_ITLATL_SEL	(1 << 1)
+#define		HCDMACFG_DMA_RW_SELECT	(1 << 0)
+#define	HCXFERCTR	0x22
+#define	HCuPINT		0x24
+#define		HCuPINT_SOF		(1 << 0)
+#define		HCuPINT_ATL		(1 << 1)
+#define		HCuPINT_AIIEOT		(1 << 2)
+#define		HCuPINT_OPR		(1 << 4)
+#define		HCuPINT_SUSP		(1 << 5)
+#define		HCuPINT_CLKRDY		(1 << 6)
+#define	HCuPINTENB	0x25
+#define	HCCHIPID	0x27
+#define		HCCHIPID_MASK		0xff00
+#define		HCCHIPID_MAGIC		0x6100
+#define	HCSCRATCH	0x28
+#define	HCSWRES		0x29
+#define		HCSWRES_MAGIC		0x00f6
+#define	HCITLBUFLEN	0x2a
+#define	HCATLBUFLEN	0x2b
+#define	HCBUFSTAT	0x2c
+#define		HCBUFSTAT_ITL0_FULL	(1 << 0)
+#define		HCBUFSTAT_ITL1_FULL	(1 << 1)
+#define		HCBUFSTAT_ATL_FULL	(1 << 2)
+#define		HCBUFSTAT_ITL0_DONE	(1 << 3)
+#define		HCBUFSTAT_ITL1_DONE	(1 << 4)
+#define		HCBUFSTAT_ATL_DONE	(1 << 5)
+#define	HCRDITL0LEN	0x2d
+#define	HCRDITL1LEN	0x2e
+#define	HCITLPORT	0x40
+#define	HCATLPORT	0x41
+
+/* Philips transfer descriptor */
+struct ptd {
+	u16 count;
+#define	PTD_COUNT_MSK	(0x3ff << 0)
+#define	PTD_TOGGLE_MSK	(1 << 10)
+#define	PTD_ACTIVE_MSK	(1 << 11)
+#define	PTD_CC_MSK	(0xf << 12)
+	u16 mps;
+#define	PTD_MPS_MSK	(0x3ff << 0)
+#define	PTD_SPD_MSK	(1 << 10)
+#define	PTD_LAST_MSK	(1 << 11)
+#define	PTD_EP_MSK	(0xf << 12)
+	u16 len;
+#define	PTD_LEN_MSK	(0x3ff << 0)
+#define	PTD_DIR_MSK	(3 << 10)
+#define	PTD_DIR_SETUP	(0)
+#define	PTD_DIR_OUT	(1)
+#define	PTD_DIR_IN	(2)
+#define	PTD_B5_5_MSK	(1 << 13)
+	u16 faddr;
+#define	PTD_FA_MSK	(0x7f << 0)
+#define	PTD_FMT_MSK	(1 << 7)
+} __attribute__ ((packed, aligned(2)));
+
+/* PTD accessor macros. */
+#define PTD_GET_COUNT(p)	(((p)->count & PTD_COUNT_MSK) >> 0)
+#define PTD_COUNT(v)		(((v) << 0) & PTD_COUNT_MSK)
+#define PTD_GET_TOGGLE(p)	(((p)->count & PTD_TOGGLE_MSK) >> 10)
+#define PTD_TOGGLE(v)		(((v) << 10) & PTD_TOGGLE_MSK)
+#define PTD_GET_ACTIVE(p)	(((p)->count & PTD_ACTIVE_MSK) >> 11)
+#define PTD_ACTIVE(v)		(((v) << 11) & PTD_ACTIVE_MSK)
+#define PTD_GET_CC(p)		(((p)->count & PTD_CC_MSK) >> 12)
+#define PTD_CC(v)		(((v) << 12) & PTD_CC_MSK)
+#define PTD_GET_MPS(p)		(((p)->mps & PTD_MPS_MSK) >> 0)
+#define PTD_MPS(v)		(((v) << 0) & PTD_MPS_MSK)
+#define PTD_GET_SPD(p)		(((p)->mps & PTD_SPD_MSK) >> 10)
+#define PTD_SPD(v)		(((v) << 10) & PTD_SPD_MSK)
+#define PTD_GET_LAST(p)		(((p)->mps & PTD_LAST_MSK) >> 11)
+#define PTD_LAST(v)		(((v) << 11) & PTD_LAST_MSK)
+#define PTD_GET_EP(p)		(((p)->mps & PTD_EP_MSK) >> 12)
+#define PTD_EP(v)		(((v) << 12) & PTD_EP_MSK)
+#define PTD_GET_LEN(p)		(((p)->len & PTD_LEN_MSK) >> 0)
+#define PTD_LEN(v)		(((v) << 0) & PTD_LEN_MSK)
+#define PTD_GET_DIR(p)		(((p)->len & PTD_DIR_MSK) >> 10)
+#define PTD_DIR(v)		(((v) << 10) & PTD_DIR_MSK)
+#define PTD_GET_B5_5(p)		(((p)->len & PTD_B5_5_MSK) >> 13)
+#define PTD_B5_5(v)		(((v) << 13) & PTD_B5_5_MSK)
+#define PTD_GET_FA(p)		(((p)->faddr & PTD_FA_MSK) >> 0)
+#define PTD_FA(v)		(((v) << 0) & PTD_FA_MSK)
+#define PTD_GET_FMT(p)		(((p)->faddr & PTD_FMT_MSK) >> 7)
+#define PTD_FMT(v)		(((v) << 7) & PTD_FMT_MSK)
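+
+/*
+ * Example (illustrative only): a PTD for an 8-byte SETUP transaction to
+ * full-speed device 3, endpoint 0, could be assembled from the macros
+ * above roughly as
+ *
+ *	struct ptd p = {
+ *		.count = PTD_ACTIVE(1) | PTD_TOGGLE(0),
+ *		.mps   = PTD_MPS(8) | PTD_SPD(0) | PTD_EP(0),
+ *		.len   = PTD_LEN(8) | PTD_DIR(PTD_DIR_SETUP),
+ *		.faddr = PTD_FA(3),
+ *	};
+ */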
+
+/*  Hardware transfer status codes -- CC from ptd->count */
+#define TD_CC_NOERROR      0x00
+#define TD_CC_CRC          0x01
+#define TD_CC_BITSTUFFING  0x02
+#define TD_CC_DATATOGGLEM  0x03
+#define TD_CC_STALL        0x04
+#define TD_DEVNOTRESP      0x05
+#define TD_PIDCHECKFAIL    0x06
+#define TD_UNEXPECTEDPID   0x07
+#define TD_DATAOVERRUN     0x08
+#define TD_DATAUNDERRUN    0x09
+    /* 0x0A, 0x0B reserved for hardware */
+#define TD_BUFFEROVERRUN   0x0C
+#define TD_BUFFERUNDERRUN  0x0D
+    /* 0x0E, 0x0F reserved for HCD */
+#define TD_NOTACCESSED     0x0F
+
+/* map PTD status codes (CC) to errno values */
+static const int cc_to_error[16] = {
+	/* No  Error  */ 0,
+	/* CRC Error  */ -EILSEQ,
+	/* Bit Stuff  */ -EPROTO,
+	/* Data Togg  */ -EILSEQ,
+	/* Stall      */ -EPIPE,
+	/* DevNotResp */ -ETIME,
+	/* PIDCheck   */ -EPROTO,
+	/* UnExpPID   */ -EPROTO,
+	/* DataOver   */ -EOVERFLOW,
+	/* DataUnder  */ -EREMOTEIO,
+	/* (for hw)   */ -EIO,
+	/* (for hw)   */ -EIO,
+	/* BufferOver */ -ECOMM,
+	/* BuffUnder  */ -ENOSR,
+	/* (for HCD)  */ -EALREADY,
+	/* (for HCD)  */ -EALREADY
+};
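+
+/*
+ * Usage sketch: a completed PTD's condition code indexes this table to
+ * yield the URB status, e.g. cc_to_error[TD_CC_STALL] == -EPIPE and
+ * cc_to_error[TD_CC_NOERROR] == 0.
+ */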
+
+/*--------------------------------------------------------------*/
+
+#define	LOG2_PERIODIC_SIZE	5	/* arbitrary; this matches OHCI */
+#define	PERIODIC_SIZE		(1 << LOG2_PERIODIC_SIZE)
+
+struct isp116x {
+	spinlock_t lock;
+
+	void __iomem *addr_reg;
+	void __iomem *data_reg;
+
+	struct isp116x_platform_data *board;
+
+	struct dentry *dentry;
+	unsigned long stat1, stat2, stat4, stat8, stat16;
+
+	/* HC registers */
+	u32 intenb;		/* "OHCI" interrupts */
+	u16 irqenb;		/* uP interrupts */
+
+	/* Root hub registers */
+	u32 rhdesca;
+	u32 rhdescb;
+	u32 rhstatus;
+
+	/* async schedule: control, bulk */
+	struct list_head async;
+
+	/* periodic schedule: int */
+	u16 load[PERIODIC_SIZE];
+	struct isp116x_ep *periodic[PERIODIC_SIZE];
+	unsigned periodic_count;
+	u16 fmindex;
+
+	/* Schedule for the current frame */
+	struct isp116x_ep *atl_active;
+	int atl_buflen;
+	int atl_bufshrt;
+	int atl_last_dir;
+	atomic_t atl_finishing;
+};
+
+static inline struct isp116x *hcd_to_isp116x(struct usb_hcd *hcd)
+{
+	return (struct isp116x *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *isp116x_to_hcd(struct isp116x *isp116x)
+{
+	return container_of((void *)isp116x, struct usb_hcd, hcd_priv);
+}
+
+struct isp116x_ep {
+	struct usb_host_endpoint *hep;
+	struct usb_device *udev;
+	struct ptd ptd;
+
+	u8 maxpacket;
+	u8 epnum;
+	u8 nextpid;
+	u16 error_count;
+	u16 length;		/* of current packet */
+	unsigned char *data;	/* to databuf */
+	/* queue of active EPs (the ones scheduled for the
+	   current frame) */
+	struct isp116x_ep *active;
+
+	/* periodic schedule */
+	u16 period;
+	u16 branch;
+	u16 load;
+	struct isp116x_ep *next;
+
+	/* async schedule */
+	struct list_head schedule;
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define DBG(stuff...)		pr_debug("116x: " stuff)
+
+#ifdef VERBOSE
+#    define VDBG		DBG
+#else
+#    define VDBG(stuff...)	do{}while(0)
+#endif
+
+#define ERR(stuff...)		printk(KERN_ERR "116x: " stuff)
+#define WARNING(stuff...)	printk(KERN_WARNING "116x: " stuff)
+#define INFO(stuff...)		printk(KERN_INFO "116x: " stuff)
+
+/* ------------------------------------------------- */
+
+#if defined(USE_PLATFORM_DELAY)
+#if defined(USE_NDELAY)
+#error USE_PLATFORM_DELAY and USE_NDELAY simultaneously defined.
+#endif
+#define	isp116x_delay(h,d)	(h)->board->delay(	\
+				isp116x_to_hcd(h)->self.controller,d)
+#define isp116x_check_platform_delay(h)	((h)->board->delay == NULL)
+#elif defined(USE_NDELAY)
+#define	isp116x_delay(h,d)	ndelay(d)
+#define isp116x_check_platform_delay(h)	0
+#else
+#define	isp116x_delay(h,d)	do{}while(0)
+#define isp116x_check_platform_delay(h)	0
+#endif
+
+static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg)
+{
+	writew(reg & 0xff, isp116x->addr_reg);
+	isp116x_delay(isp116x, 300);
+}
+
+static inline void isp116x_write_data16(struct isp116x *isp116x, u16 val)
+{
+	writew(val, isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+}
+
+static inline void isp116x_raw_write_data16(struct isp116x *isp116x, u16 val)
+{
+	__raw_writew(val, isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+}
+
+static inline u16 isp116x_read_data16(struct isp116x *isp116x)
+{
+	u16 val;
+
+	val = readw(isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+	return val;
+}
+
+static inline u16 isp116x_raw_read_data16(struct isp116x *isp116x)
+{
+	u16 val;
+
+	val = __raw_readw(isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+	return val;
+}
+
+static inline void isp116x_write_data32(struct isp116x *isp116x, u32 val)
+{
+	writew(val & 0xffff, isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+	writew(val >> 16, isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+}
+
+static inline u32 isp116x_read_data32(struct isp116x *isp116x)
+{
+	u32 val;
+
+	val = (u32) readw(isp116x->data_reg);
+	isp116x_delay(isp116x, 150);
+	val |= ((u32) readw(isp116x->data_reg)) << 16;
+	isp116x_delay(isp116x, 150);
+	return val;
+}
+
+/* Let's keep register access functions out of line. Hint:
+   we wait at least 150 ns at every access.
+*/
+static u16 isp116x_read_reg16(struct isp116x *isp116x, unsigned reg)
+{
+	isp116x_write_addr(isp116x, reg);
+	return isp116x_read_data16(isp116x);
+}
+
+static u32 isp116x_read_reg32(struct isp116x *isp116x, unsigned reg)
+{
+	isp116x_write_addr(isp116x, reg);
+	return isp116x_read_data32(isp116x);
+}
+
+static void isp116x_write_reg16(struct isp116x *isp116x, unsigned reg,
+				unsigned val)
+{
+	isp116x_write_addr(isp116x, reg | ISP116x_WRITE_OFFSET);
+	isp116x_write_data16(isp116x, (u16) (val & 0xffff));
+}
+
+static void isp116x_write_reg32(struct isp116x *isp116x, unsigned reg,
+				unsigned val)
+{
+	isp116x_write_addr(isp116x, reg | ISP116x_WRITE_OFFSET);
+	isp116x_write_data32(isp116x, (u32) val);
+}
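+
+/*
+ * Access protocol sketch (illustrative): every register access is a
+ * two-phase cycle on the shared address/data port. Reading HCCHIPID,
+ * for instance, amounts to
+ *
+ *	isp116x_write_addr(isp116x, HCCHIPID);	// select register 0x27
+ *	val = isp116x_read_data16(isp116x);	// fetch the 16-bit value
+ *
+ * which is what isp116x_read_reg16() wraps up; writes follow the same
+ * scheme with ISP116x_WRITE_OFFSET or'ed into the register address.
+ */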
+
+#define isp116x_show_reg_log(d,r,s) {				\
+	if ((r) < 0x20) {			                \
+		DBG("%-12s[%02x]: %08x\n", #r,			\
+			r, isp116x_read_reg32(d, r));		\
+	} else {						\
+		DBG("%-12s[%02x]:     %04x\n", #r,		\
+			r, isp116x_read_reg16(d, r));	    	\
+	}							\
+}
+#define isp116x_show_reg_seq(d,r,s) {				\
+	if ((r) < 0x20) {					\
+		seq_printf(s, "%-12s[%02x]: %08x\n", #r,	\
+			r, isp116x_read_reg32(d, r));		\
+	} else {						\
+		seq_printf(s, "%-12s[%02x]:     %04x\n", #r,	\
+			r, isp116x_read_reg16(d, r));		\
+	}							\
+}
+
+#define isp116x_show_regs(d,type,s) {			\
+	isp116x_show_reg_##type(d, HCREVISION, s);	\
+	isp116x_show_reg_##type(d, HCCONTROL, s);	\
+	isp116x_show_reg_##type(d, HCCMDSTAT, s);	\
+	isp116x_show_reg_##type(d, HCINTSTAT, s);	\
+	isp116x_show_reg_##type(d, HCINTENB, s);	\
+	isp116x_show_reg_##type(d, HCFMINTVL, s);	\
+	isp116x_show_reg_##type(d, HCFMREM, s);		\
+	isp116x_show_reg_##type(d, HCFMNUM, s);		\
+	isp116x_show_reg_##type(d, HCLSTHRESH, s);	\
+	isp116x_show_reg_##type(d, HCRHDESCA, s);	\
+	isp116x_show_reg_##type(d, HCRHDESCB, s);	\
+	isp116x_show_reg_##type(d, HCRHSTATUS, s);	\
+	isp116x_show_reg_##type(d, HCRHPORT1, s);	\
+	isp116x_show_reg_##type(d, HCRHPORT2, s);	\
+	isp116x_show_reg_##type(d, HCHWCFG, s);		\
+	isp116x_show_reg_##type(d, HCDMACFG, s);	\
+	isp116x_show_reg_##type(d, HCXFERCTR, s);	\
+	isp116x_show_reg_##type(d, HCuPINT, s);		\
+	isp116x_show_reg_##type(d, HCuPINTENB, s);	\
+	isp116x_show_reg_##type(d, HCCHIPID, s);	\
+	isp116x_show_reg_##type(d, HCSCRATCH, s);	\
+	isp116x_show_reg_##type(d, HCITLBUFLEN, s);	\
+	isp116x_show_reg_##type(d, HCATLBUFLEN, s);	\
+	isp116x_show_reg_##type(d, HCBUFSTAT, s);	\
+	isp116x_show_reg_##type(d, HCRDITL0LEN, s);	\
+	isp116x_show_reg_##type(d, HCRDITL1LEN, s);	\
+}
+
+/*
+   Dump registers for debugfs.
+*/
+static inline void isp116x_show_regs_seq(struct isp116x *isp116x,
+					  struct seq_file *s)
+{
+	isp116x_show_regs(isp116x, seq, s);
+}
+
+/*
+   Dump registers to syslog.
+*/
+static inline void isp116x_show_regs_log(struct isp116x *isp116x)
+{
+	isp116x_show_regs(isp116x, log, NULL);
+}
+
+#if defined(URB_TRACE)
+
+#define PIPETYPE(pipe)  ({ char *__s;			\
+	if (usb_pipecontrol(pipe))	__s = "ctrl";	\
+	else if (usb_pipeint(pipe))	__s = "int";	\
+	else if (usb_pipebulk(pipe))	__s = "bulk";	\
+	else				__s = "iso";	\
+	__s;})
+#define PIPEDIR(pipe)   ({ usb_pipein(pipe) ? "in" : "out"; })
+#define URB_NOTSHORT(urb) ({ (urb)->transfer_flags & URB_SHORT_NOT_OK ? \
+	"short_not_ok" : ""; })
+
+/* print debug info about the URB */
+static void urb_dbg(struct urb *urb, char *msg)
+{
+	unsigned int pipe;
+
+	if (!urb) {
+		DBG("%s: zero urb\n", msg);
+		return;
+	}
+	pipe = urb->pipe;
+	DBG("%s: FA %d ep%d%s %s: len %d/%d %s\n", msg,
+	    usb_pipedevice(pipe), usb_pipeendpoint(pipe),
+	    PIPEDIR(pipe), PIPETYPE(pipe),
+	    urb->transfer_buffer_length, urb->actual_length, URB_NOTSHORT(urb));
+}
+
+#else
+
+#define  urb_dbg(urb,msg)   do{}while(0)
+
+#endif				/* ! defined(URB_TRACE) */
+
+#if defined(PTD_TRACE)
+
+#define PTD_DIR_STR(ptd)  ({char __c;		\
+	switch(PTD_GET_DIR(ptd)){		\
+	case 0:  __c = 's'; break;		\
+	case 1:  __c = 'o'; break;		\
+	default: __c = 'i'; break;		\
+	}; __c;})
+
+/*
+  Dump PTD info. The code documents the format
+  perfectly, right :)
+*/
+static inline void dump_ptd(struct ptd *ptd)
+{
+	printk(KERN_WARNING "td: %x %d%c%d %d,%d,%d  %x %x%x%x\n",
+	       PTD_GET_CC(ptd), PTD_GET_FA(ptd),
+	       PTD_DIR_STR(ptd), PTD_GET_EP(ptd),
+	       PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
+	       PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd),
+	       PTD_GET_SPD(ptd), PTD_GET_LAST(ptd));
+}
+
+static inline void dump_ptd_out_data(struct ptd *ptd, u8 * buf)
+{
+	int k;
+
+	if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
+		printk(KERN_WARNING "-> ");
+		for (k = 0; k < PTD_GET_LEN(ptd); ++k)
+			printk("%02x ", ((u8 *) buf)[k]);
+		printk("\n");
+	}
+}
+
+static inline void dump_ptd_in_data(struct ptd *ptd, u8 * buf)
+{
+	int k;
+
+	if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
+		printk(KERN_WARNING "<- ");
+		for (k = 0; k < PTD_GET_COUNT(ptd); ++k)
+			printk("%02x ", ((u8 *) buf)[k]);
+		printk("\n");
+	}
+	if (PTD_GET_LAST(ptd))
+		printk(KERN_WARNING "-\n");
+}
+
+#else
+
+#define dump_ptd(ptd)               do{}while(0)
+#define dump_ptd_in_data(ptd,buf)   do{}while(0)
+#define dump_ptd_out_data(ptd,buf)  do{}while(0)
+
+#endif				/* ! defined(PTD_TRACE) */
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
new file mode 100644
index 0000000..b21c386
--- /dev/null
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -0,0 +1,2790 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
+ *
+ * Derived from the SL811 HCD, rewritten for ISP116x.
+ * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
+ *
+ * Portions:
+ * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Copyright (C) 2004 David Brownell
+ */
+
+/*
+ * The ISP1362 chip requires a large delay (300ns and 462ns) between
+ * accesses to the address and data register.
+ * The following timing options exist:
+ *
+ * 1. Configure your memory controller to add such delays if it can (the best)
+ * 2. Implement a platform-specific delay function, possibly
+ *    combined with configuring the memory controller; see
+ *    include/linux/usb/isp1362.h for more info.
+ * 3. Use ndelay (easiest, poorest).
+ *
+ * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
+ * platform specific section of isp1362.h to select the appropriate variant.
+ *
+ * Also note that, according to the Philips "ISP1362 Errata" document
+ * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
+ * is reasserted (even with #CS deasserted) within 132 ns after a
+ * write cycle to any controller register. If the hardware doesn't
+ * implement the recommended fix (gating the #WR with #CS), software
+ * must ensure that no further write cycle (not necessarily to the chip!)
+ * is issued by the CPU within this interval.
+ *
+ * For PXA25x this can be ensured by using VLIO with the maximum
+ * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
+ */
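+
+/*
+ * A minimal sketch of option 2, assuming the board cannot insert the
+ * wait states itself (the exact callback prototype is declared in
+ * include/linux/usb/isp1362.h):
+ *
+ *	static void board_isp1362_delay(struct device *dev, int delay)
+ *	{
+ *		ndelay(delay);		// crude: equivalent to option 3
+ *	}
+ */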
+
+#undef ISP1362_DEBUG
+
+/*
+ * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
+ * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
+ * requests are carried out in separate frames. This will delay any SETUP
+ * packets until the start of the next frame so that this situation is
+ * unlikely to occur (and makes usbtest happy running with a PXA255 target
+ * device).
+ */
+#undef BUGGY_PXA2XX_UDC_USBTEST
+
+#undef PTD_TRACE
+#undef URB_TRACE
+#undef VERBOSE
+#undef REGISTERS
+
+/* This enables a memory test on the ISP1362 chip memory to make sure the
+ * chip access timing is correct.
+ */
+#undef CHIP_BUFFER_TEST
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/isp1362.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/bitmap.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+static int dbg_level;
+#ifdef ISP1362_DEBUG
+module_param(dbg_level, int, 0644);
+#else
+module_param(dbg_level, int, 0);
+#endif
+
+#include "../core/usb.h"
+#include "isp1362.h"
+
+
+#define DRIVER_VERSION	"2005-04-04"
+#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+static const char hcd_name[] = "isp1362-hcd";
+
+static void isp1362_hc_stop(struct usb_hcd *hcd);
+static int isp1362_hc_start(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * When called from the interrupt handler, only isp1362_hcd->irqenb is
+ * modified, since the interrupt handler will write isp1362_hcd->irqenb
+ * to HCuPINTENB upon completion.
+ * We don't need a 'disable' counterpart, since interrupts will be disabled
+ * only by the interrupt handler.
+ */
+static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
+{
+	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
+		return;
+	if (mask & ~isp1362_hcd->irqenb)
+		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
+	isp1362_hcd->irqenb |= mask;
+	if (isp1362_hcd->irq_active)
+		return;
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+}
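+
+/*
+ * Usage sketch: a path that wants ATL-done notifications would call
+ * isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); any such pending bit is
+ * acked first so a stale interrupt cannot fire as soon as it is enabled.
+ */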
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
+						     u16 offset)
+{
+	struct isp1362_ep_queue *epq = NULL;
+
+	if (offset < isp1362_hcd->istl_queue[1].buf_start)
+		epq = &isp1362_hcd->istl_queue[0];
+	else if (offset < isp1362_hcd->intl_queue.buf_start)
+		epq = &isp1362_hcd->istl_queue[1];
+	else if (offset < isp1362_hcd->atl_queue.buf_start)
+		epq = &isp1362_hcd->intl_queue;
+	else if (offset < isp1362_hcd->atl_queue.buf_start +
+		   isp1362_hcd->atl_queue.buf_size)
+		epq = &isp1362_hcd->atl_queue;
+
+	if (epq)
+		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
+	else
+		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
+
+	return epq;
+}
+
+static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
+{
+	int offset;
+
+	if (index * epq->blk_size > epq->buf_size) {
+		pr_warn("%s: Bad %s index %d(%d)\n",
+			__func__, epq->name, index,
+			epq->buf_size / epq->blk_size);
+		return -EINVAL;
+	}
+	offset = epq->buf_start + index * epq->blk_size;
+	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
+
+	return offset;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
+				    int mps)
+{
+	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
+
+	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
+	if (xfer_size < size && xfer_size % mps)
+		xfer_size -= xfer_size % mps;
+
+	return xfer_size;
+}
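+
+/*
+ * Example (numbers assumed for illustration): with 4 blocks of 64 bytes
+ * available, at most 4 * 64 - PTD_HEADER_SIZE bytes fit in the queue; a
+ * request that still doesn't fit completely is additionally trimmed to
+ * a multiple of the endpoint's max packet size, so no short packet is
+ * sent in the middle of an URB.
+ */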
+
+static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
+			     struct isp1362_ep *ep, u16 len)
+{
+	int ptd_offset = -EINVAL;
+	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
+	int found;
+
+	BUG_ON(len > epq->buf_size);
+
+	if (!epq->buf_avail)
+		return -ENOMEM;
+
+	if (ep->num_ptds)
+		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
+		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
+	BUG_ON(ep->num_ptds != 0);
+
+	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
+						num_ptds, 0);
+	if (found >= epq->buf_count)
+		return -EOVERFLOW;
+
+	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
+	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
+	ptd_offset = get_ptd_offset(epq, found);
+	WARN_ON(ptd_offset < 0);
+	ep->ptd_offset = ptd_offset;
+	ep->num_ptds += num_ptds;
+	epq->buf_avail -= num_ptds;
+	BUG_ON(epq->buf_avail > epq->buf_count);
+	ep->ptd_index = found;
+	bitmap_set(&epq->buf_map, found, num_ptds);
+	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
+	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
+	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
+
+	return found;
+}
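+
+/*
+ * Sizing sketch for the above: num_ptds covers the PTD header plus
+ * payload, e.g. assuming an 8-byte PTD_HEADER_SIZE and 64-byte blocks,
+ * a 100-byte transfer claims (100 + 8 - 1) / 64 + 1 = 2 contiguous
+ * blocks out of the queue's buf_map.
+ */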
+
+static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+	int last = ep->ptd_index + ep->num_ptds;
+
+	if (last > epq->buf_count)
+		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
+		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
+		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
+		    epq->buf_map, epq->skip_map);
+	BUG_ON(last > epq->buf_count);
+
+	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
+	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
+	epq->buf_avail += ep->num_ptds;
+	epq->ptd_count--;
+
+	BUG_ON(epq->buf_avail > epq->buf_count);
+	BUG_ON(epq->ptd_count > epq->buf_count);
+
+	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
+	    __func__, epq->name,
+	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
+	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
+	    epq->buf_map, epq->skip_map);
+
+	ep->num_ptds = 0;
+	ep->ptd_offset = -EINVAL;
+	ep->ptd_index = -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+  Set up PTDs.
+*/
+static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
+			u16 fno)
+{
+	struct ptd *ptd;
+	int toggle;
+	int dir;
+	u16 len;
+	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
+
+	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
+
+	ptd = &ep->ptd;
+
+	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
+
+	switch (ep->nextpid) {
+	case USB_PID_IN:
+		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
+		dir = PTD_DIR_IN;
+		if (usb_pipecontrol(urb->pipe)) {
+			len = min_t(size_t, ep->maxpacket, buf_len);
+		} else if (usb_pipeisoc(urb->pipe)) {
+			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
+			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
+		} else
+			len = max_transfer_size(epq, buf_len, ep->maxpacket);
+		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+		    (int)buf_len);
+		break;
+	case USB_PID_OUT:
+		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
+		dir = PTD_DIR_OUT;
+		if (usb_pipecontrol(urb->pipe))
+			len = min_t(size_t, ep->maxpacket, buf_len);
+		else if (usb_pipeisoc(urb->pipe))
+			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
+		else
+			len = max_transfer_size(epq, buf_len, ep->maxpacket);
+		if (len == 0)
+			pr_info("%s: Sending ZERO packet: %d\n", __func__,
+			     urb->transfer_flags & URB_ZERO_PACKET);
+		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+		    (int)buf_len);
+		break;
+	case USB_PID_SETUP:
+		toggle = 0;
+		dir = PTD_DIR_SETUP;
+		len = sizeof(struct usb_ctrlrequest);
+		DBG(1, "%s: SETUP len %d\n", __func__, len);
+		ep->data = urb->setup_packet;
+		break;
+	case USB_PID_ACK:
+		toggle = 1;
+		len = 0;
+		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
+			PTD_DIR_OUT : PTD_DIR_IN;
+		DBG(1, "%s: ACK   len %d\n", __func__, len);
+		break;
+	default:
+		toggle = dir = len = 0;
+		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
+		BUG_ON(1);
+	}
+
+	ep->length = len;
+	if (!len)
+		ep->data = NULL;
+
+	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
+	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
+		PTD_EP(ep->epnum);
+	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
+	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
+
+	if (usb_pipeint(urb->pipe)) {
+		ptd->faddr |= PTD_SF_INT(ep->branch);
+		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
+	}
+	if (usb_pipeisoc(urb->pipe))
+		ptd->faddr |= PTD_SF_ISO(fno);
+
+	DBG(1, "%s: Finished\n", __func__);
+}
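+
+/*
+ * For control transfers, ep->nextpid walks through the usual three
+ * stages: USB_PID_SETUP for the 8 byte setup packet, USB_PID_IN or
+ * USB_PID_OUT for the optional data stage, and finally USB_PID_ACK
+ * for the status stage, which uses the opposite direction and a
+ * DATA1 toggle.  postproc_ep() advances this state machine based on
+ * the completion code of each PTD.
+ */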
+
+static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+			      struct isp1362_ep_queue *epq)
+{
+	struct ptd *ptd = &ep->ptd;
+	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
+
+	prefetch(ptd);
+	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+	if (len)
+		isp1362_write_buffer(isp1362_hcd, ep->data,
+				     ep->ptd_offset + PTD_HEADER_SIZE, len);
+
+	dump_ptd(ptd);
+	dump_ptd_out_data(ptd, ep->data);
+}
+
+static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+			     struct isp1362_ep_queue *epq)
+{
+	struct ptd *ptd = &ep->ptd;
+	int act_len;
+
+	WARN_ON(list_empty(&ep->active));
+	BUG_ON(ep->ptd_offset < 0);
+
+	list_del_init(&ep->active);
+	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
+
+	prefetchw(ptd);
+	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+	dump_ptd(ptd);
+	act_len = PTD_GET_COUNT(ptd);
+	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
+		return;
+	if (act_len > ep->length)
+		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
+			 ep->ptd_offset, act_len, ep->length);
+	BUG_ON(act_len > ep->length);
+	/* Only transfer the amount of data that has actually been overwritten
+	 * in the chip buffer. We don't want any data that doesn't belong to
+	 * the transfer to leak out of the chip into the caller's transfer
+	 * buffer!
+	 */
+	prefetchw(ep->data);
+	isp1362_read_buffer(isp1362_hcd, ep->data,
+			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
+	dump_ptd_in_data(ptd, ep->data);
+}
+
+/*
+ * INT PTDs will stay in the chip until data is available.
+ * This function will remove a PTD from the chip when the URB is dequeued.
+ * Must be called with the spinlock held and IRQs disabled.
+ */
+static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+{
+	int index;
+	struct isp1362_ep_queue *epq;
+
+	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
+	BUG_ON(ep->ptd_offset < 0);
+
+	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+	BUG_ON(!epq);
+
+	/* put ep in remove_list for cleanup */
+	WARN_ON(!list_empty(&ep->remove_list));
+	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
+	/* let SOF interrupt handle the cleanup */
+	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+
+	index = ep->ptd_index;
+	if (index < 0)
+		/* ISO queues don't have SKIP registers */
+		return;
+
+	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
+	    index, ep->ptd_offset, epq->skip_map, 1 << index);
+
+	/* prevent further processing of PTD (will be effective after next SOF) */
+	epq->skip_map |= 1 << index;
+	if (epq == &isp1362_hcd->atl_queue) {
+		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
+		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
+		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
+		if (~epq->skip_map == 0)
+			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+	} else if (epq == &isp1362_hcd->intl_queue) {
+		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
+		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
+		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
+		if (~epq->skip_map == 0)
+			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+	}
+}
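+
+/*
+ * Note that setting the skip bit only stops the controller from
+ * processing the PTD after the next SOF; the buffer itself is
+ * reclaimed later by finish_unlinks(), which runs from the SOF
+ * interrupt enabled above.
+ */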
+
+/*
+  Take done or failed requests out of the schedule. Give back
+  processed URBs.
+*/
+static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+			   struct urb *urb, int status)
+     __releases(isp1362_hcd->lock)
+     __acquires(isp1362_hcd->lock)
+{
+	urb->hcpriv = NULL;
+	ep->error_count = 0;
+
+	if (usb_pipecontrol(urb->pipe))
+		ep->nextpid = USB_PID_SETUP;
+
+	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
+		ep->num_req, usb_pipedevice(urb->pipe),
+		usb_pipeendpoint(urb->pipe),
+		!usb_pipein(urb->pipe) ? "out" : "in",
+		usb_pipecontrol(urb->pipe) ? "ctrl" :
+			usb_pipeint(urb->pipe) ? "int" :
+			usb_pipebulk(urb->pipe) ? "bulk" :
+			"iso",
+		urb->actual_length, urb->transfer_buffer_length,
+		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+		"short_ok" : "", urb->status);
+
+	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
+	spin_unlock(&isp1362_hcd->lock);
+	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
+	spin_lock(&isp1362_hcd->lock);
+
+	/* take idle endpoints out of the schedule right away */
+	if (!list_empty(&ep->hep->urb_list))
+		return;
+
+	/* async deschedule */
+	if (!list_empty(&ep->schedule)) {
+		list_del_init(&ep->schedule);
+		return;
+	}
+
+	if (ep->interval) {
+		/* periodic deschedule */
+		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
+		    ep, ep->branch, ep->load,
+		    isp1362_hcd->load[ep->branch],
+		    isp1362_hcd->load[ep->branch] - ep->load);
+		isp1362_hcd->load[ep->branch] -= ep->load;
+		ep->branch = PERIODIC_SIZE;
+	}
+}
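+
+/*
+ * Note the unlock/lock pair around usb_hcd_giveback_urb() above: the
+ * completion handler may resubmit URBs and must not be entered with
+ * the HCD private lock held, hence the __releases/__acquires
+ * annotations on finish_request().
+ */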
+
+/*
+ * Analyze transfer results, handle partial transfers and errors.
+ */
+static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+{
+	struct urb *urb = get_urb(ep);
+	struct usb_device *udev;
+	struct ptd *ptd;
+	int short_ok;
+	u16 len;
+	int urbstat = -EINPROGRESS;
+	u8 cc;
+
+	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
+
+	udev = urb->dev;
+	ptd = &ep->ptd;
+	cc = PTD_GET_CC(ptd);
+	if (cc == PTD_NOTACCESSED) {
+		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
+		    ep->num_req, ptd);
+		cc = PTD_DEVNOTRESP;
+	}
+
+	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
+	len = urb->transfer_buffer_length - urb->actual_length;
+
+	/* Data underrun is special: for an allowed underrun we clear the
+	 * error and continue as normal; for a forbidden underrun we finish
+	 * the transfer immediately, except for control transfers, where we
+	 * still run the STATUS stage and report the underrun afterwards.
+	 */
+	if (cc == PTD_DATAUNDERRUN) {
+		if (short_ok) {
+			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
+			    __func__, ep->num_req, short_ok ? "" : "not_",
+			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
+			cc = PTD_CC_NOERROR;
+			urbstat = 0;
+		} else {
+			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
+			    __func__, ep->num_req,
+			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
+			    short_ok ? "" : "not_",
+			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
+			/* save the data underrun error code for later and
+			 * proceed with the status stage
+			 */
+			urb->actual_length += PTD_GET_COUNT(ptd);
+			if (usb_pipecontrol(urb->pipe)) {
+				ep->nextpid = USB_PID_ACK;
+				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+
+				if (urb->status == -EINPROGRESS)
+					urb->status = cc_to_error[PTD_DATAUNDERRUN];
+			} else {
+				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
+					      PTD_GET_TOGGLE(ptd));
+				urbstat = cc_to_error[PTD_DATAUNDERRUN];
+			}
+			goto out;
+		}
+	}
+
+	if (cc != PTD_CC_NOERROR) {
+		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
+			urbstat = cc_to_error[cc];
+			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
+			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
+			    ep->error_count);
+		}
+		goto out;
+	}
+
+	switch (ep->nextpid) {
+	case USB_PID_OUT:
+		if (PTD_GET_COUNT(ptd) != ep->length)
+			pr_err("%s: count=%d len=%d\n", __func__,
+			   PTD_GET_COUNT(ptd), ep->length);
+		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
+		urb->actual_length += ep->length;
+		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
+		if (urb->actual_length == urb->transfer_buffer_length) {
+			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+			    ep->num_req, len, ep->maxpacket, urbstat);
+			if (usb_pipecontrol(urb->pipe)) {
+				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+				    ep->num_req,
+				    usb_pipein(urb->pipe) ? "IN" : "OUT");
+				ep->nextpid = USB_PID_ACK;
+			} else {
+				if (len % ep->maxpacket ||
+				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
+					urbstat = 0;
+					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+					    urbstat, len, ep->maxpacket, urb->actual_length);
+				}
+			}
+		}
+		break;
+	case USB_PID_IN:
+		len = PTD_GET_COUNT(ptd);
+		BUG_ON(len > ep->length);
+		urb->actual_length += len;
+		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
+		/* if transfer completed or (allowed) data underrun */
+		if ((urb->transfer_buffer_length == urb->actual_length) ||
+		    len % ep->maxpacket) {
+			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+			    ep->num_req, len, ep->maxpacket, urbstat);
+			if (usb_pipecontrol(urb->pipe)) {
+				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+				    ep->num_req,
+				    usb_pipein(urb->pipe) ? "IN" : "OUT");
+				ep->nextpid = USB_PID_ACK;
+			} else {
+				urbstat = 0;
+				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+				    urbstat, len, ep->maxpacket, urb->actual_length);
+			}
+		}
+		break;
+	case USB_PID_SETUP:
+		if (urb->transfer_buffer_length == urb->actual_length) {
+			ep->nextpid = USB_PID_ACK;
+		} else if (usb_pipeout(urb->pipe)) {
+			usb_settoggle(udev, 0, 1, 1);
+			ep->nextpid = USB_PID_OUT;
+		} else {
+			usb_settoggle(udev, 0, 0, 1);
+			ep->nextpid = USB_PID_IN;
+		}
+		break;
+	case USB_PID_ACK:
+		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
+		    urbstat);
+		WARN_ON(urbstat != -EINPROGRESS);
+		urbstat = 0;
+		ep->nextpid = 0;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+ out:
+	if (urbstat != -EINPROGRESS) {
+		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
+		    ep, ep->num_req, urb, urbstat);
+		finish_request(isp1362_hcd, ep, urb, urbstat);
+	}
+}
+
+static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
+{
+	struct isp1362_ep *ep;
+	struct isp1362_ep *tmp;
+
+	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
+		struct isp1362_ep_queue *epq =
+			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+		int index = ep->ptd_index;
+
+		BUG_ON(epq == NULL);
+		if (index >= 0) {
+			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
+			BUG_ON(ep->num_ptds == 0);
+			release_ptd_buffers(epq, ep);
+		}
+		if (!list_empty(&ep->hep->urb_list)) {
+			struct urb *urb = get_urb(ep);
+
+			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
+			    ep->num_req, ep);
+			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
+		}
+		WARN_ON(list_empty(&ep->active));
+		if (!list_empty(&ep->active)) {
+			list_del_init(&ep->active);
+			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
+		}
+		list_del_init(&ep->remove_list);
+		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
+	}
+	DBG(1, "%s: Done\n", __func__);
+}
+
+static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
+{
+	if (count > 0) {
+		if (count < isp1362_hcd->atl_queue.ptd_count)
+			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
+		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
+		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+	} else
+		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+}
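+
+/*
+ * HCATLDTC presumably holds the ATL done transfer count threshold, so
+ * HCuPINT_ATL fires once 'count' PTDs have completed.  In the deferred
+ * case (count == 0) only the SOF interrupt is armed and the ATL
+ * schedule is retried from the SOF handler instead.
+ */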
+
+static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
+	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
+}
+
+static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
+{
+	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
+	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
+			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
+}
+
+static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
+{
+	int index;
+
+	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
+	index = claim_ptd_buffers(epq, ep, ep->length);
+	if (index == -ENOMEM) {
+		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
+		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
+		return index;
+	} else if (index == -EOVERFLOW) {
+		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
+		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
+		    epq->buf_map, epq->skip_map);
+		return index;
+	} else
+		BUG_ON(index < 0);
+	list_add_tail(&ep->active, &epq->active);
+	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
+	    ep, ep->num_req, ep->length, &epq->active);
+	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
+	    ep->ptd_offset, ep, ep->num_req);
+	isp1362_write_ptd(isp1362_hcd, ep, epq);
+	__clear_bit(ep->ptd_index, &epq->skip_map);
+
+	return 0;
+}
+
+static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+	int ptd_count = 0;
+	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
+	struct isp1362_ep *ep;
+	int defer = 0;
+
+	if (atomic_read(&epq->finishing)) {
+		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+		return;
+	}
+
+	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+		struct urb *urb = get_urb(ep);
+		int ret;
+
+		if (!list_empty(&ep->active)) {
+			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
+			continue;
+		}
+
+		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
+		    ep, ep->num_req);
+
+		ret = submit_req(isp1362_hcd, urb, ep, epq);
+		if (ret == -ENOMEM) {
+			defer = 1;
+			break;
+		} else if (ret == -EOVERFLOW) {
+			defer = 1;
+			continue;
+		}
+#ifdef BUGGY_PXA2XX_UDC_USBTEST
+		defer = ep->nextpid == USB_PID_SETUP;
+#endif
+		ptd_count++;
+	}
+
+	/* Avoid starvation of endpoints */
+	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
+		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
+		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
+	}
+	if (ptd_count || defer)
+		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
+
+	epq->ptd_count += ptd_count;
+	if (epq->ptd_count > epq->stat_maxptds) {
+		epq->stat_maxptds = epq->ptd_count;
+		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
+	}
+}
+
+static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+	int ptd_count = 0;
+	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
+	struct isp1362_ep *ep;
+
+	if (atomic_read(&epq->finishing)) {
+		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+		return;
+	}
+
+	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+		struct urb *urb = get_urb(ep);
+		int ret;
+
+		if (!list_empty(&ep->active)) {
+			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
+			    epq->name, ep);
+			continue;
+		}
+
+		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
+		    epq->name, ep, ep->num_req);
+		ret = submit_req(isp1362_hcd, urb, ep, epq);
+		if (ret == -ENOMEM)
+			break;
+		else if (ret == -EOVERFLOW)
+			continue;
+		ptd_count++;
+	}
+
+	if (ptd_count) {
+		static int last_count;
+
+		if (ptd_count != last_count) {
+			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
+			last_count = ptd_count;
+		}
+		enable_intl_transfers(isp1362_hcd);
+	}
+
+	epq->ptd_count += ptd_count;
+	if (epq->ptd_count > epq->stat_maxptds)
+		epq->stat_maxptds = epq->ptd_count;
+}
+
+static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+	u16 ptd_offset = ep->ptd_offset;
+	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
+
+	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
+	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
+
+	ptd_offset += num_ptds * epq->blk_size;
+	if (ptd_offset < epq->buf_start + epq->buf_size)
+		return ptd_offset;
+	else
+		return -ENOMEM;
+}
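+
+/*
+ * Example of the rounding above (values assumed for illustration):
+ * with blk_size = 64, PTD_HEADER_SIZE = 8 and ep->length = 120,
+ * num_ptds = (120 + 8 + 63) / 64 = 2, so the next PTD starts 128
+ * bytes further into the ISTL buffer.
+ */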
+
+static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+	int ptd_count = 0;
+	int flip = isp1362_hcd->istl_flip;
+	struct isp1362_ep_queue *epq;
+	int ptd_offset;
+	struct isp1362_ep *ep;
+	struct isp1362_ep *tmp;
+	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+
+ fill2:
+	epq = &isp1362_hcd->istl_queue[flip];
+	if (atomic_read(&epq->finishing)) {
+		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+		return;
+	}
+
+	if (!list_empty(&epq->active))
+		return;
+
+	ptd_offset = epq->buf_start;
+	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
+		struct urb *urb = get_urb(ep);
+		s16 diff = fno - (u16)urb->start_frame;
+
+		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
+
+		if (diff > urb->number_of_packets) {
+			/* time frame for this URB has elapsed */
+			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
+			continue;
+		} else if (diff < -1) {
+			/* URB is not due in this frame or the next one.
+			 * Comparing with '-1' instead of '0' accounts for the
+			 * double buffering in the ISP1362, which enables us to
+			 * queue the PTD one frame ahead of time.
+			 */
+		} else if (diff == -1) {
+			/* submit PTD's that are due in the next frame */
+			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
+			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
+			    epq->buf_start + epq->buf_size) {
+				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
+				    __func__, ep->length);
+				continue;
+			}
+			ep->ptd_offset = ptd_offset;
+			list_add_tail(&ep->active, &epq->active);
+
+			ptd_offset = next_ptd(epq, ep);
+			if (ptd_offset < 0) {
+				pr_warn("%s: req %d No more %s PTD buffers available\n",
+					__func__, ep->num_req, epq->name);
+				break;
+			}
+		}
+	}
+	list_for_each_entry(ep, &epq->active, active) {
+		if (epq->active.next == &ep->active)
+			ep->ptd.mps |= PTD_LAST_MSK;
+		isp1362_write_ptd(isp1362_hcd, ep, epq);
+		ptd_count++;
+	}
+
+	if (ptd_count)
+		enable_istl_transfers(isp1362_hcd, flip);
+
+	epq->ptd_count += ptd_count;
+	if (epq->ptd_count > epq->stat_maxptds)
+		epq->stat_maxptds = epq->ptd_count;
+
+	/* check whether the second ISTL buffer may also be filled */
+	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
+		fno++;
+		ptd_count = 0;
+		flip = 1 - flip;
+		goto fill2;
+	}
+}
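+
+/*
+ * Scheduling window used above, with diff = fno - urb->start_frame:
+ *   diff <  -1:                not yet due, leave the URB scheduled
+ *   diff == -1:                due in the next frame, queue it now
+ *                              (the ISTL buffers are double buffered)
+ *   diff > number_of_packets:  the URB missed its window, finish it
+ *                              with -EOVERFLOW
+ * Anything in between is already in progress and is left untouched.
+ */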
+
+static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
+			     struct isp1362_ep_queue *epq)
+{
+	struct isp1362_ep *ep;
+	struct isp1362_ep *tmp;
+
+	if (list_empty(&epq->active)) {
+		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+		return;
+	}
+
+	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
+
+	atomic_inc(&epq->finishing);
+	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+		int index = ep->ptd_index;
+
+		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
+		    index, ep->ptd_offset);
+
+		BUG_ON(index < 0);
+		if (__test_and_clear_bit(index, &done_map)) {
+			isp1362_read_ptd(isp1362_hcd, ep, epq);
+			epq->free_ptd = index;
+			BUG_ON(ep->num_ptds == 0);
+			release_ptd_buffers(epq, ep);
+
+			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
+			    ep, ep->num_req);
+			if (!list_empty(&ep->remove_list)) {
+				list_del_init(&ep->remove_list);
+				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
+			}
+			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
+			    ep, ep->num_req);
+			postproc_ep(isp1362_hcd, ep);
+		}
+		if (!done_map)
+			break;
+	}
+	if (done_map)
+		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
+			__func__, done_map, epq->skip_map);
+	atomic_dec(&epq->finishing);
+}
+
+static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
+{
+	struct isp1362_ep *ep;
+	struct isp1362_ep *tmp;
+
+	if (list_empty(&epq->active)) {
+		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+		return;
+	}
+
+	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
+
+	atomic_inc(&epq->finishing);
+	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
+
+		isp1362_read_ptd(isp1362_hcd, ep, epq);
+		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
+		postproc_ep(isp1362_hcd, ep);
+	}
+	WARN_ON(epq->blk_size != 0);
+	atomic_dec(&epq->finishing);
+}
+
+static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
+{
+	int handled = 0;
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	u16 irqstat;
+	u16 svc_mask;
+
+	spin_lock(&isp1362_hcd->lock);
+
+	BUG_ON(isp1362_hcd->irq_active++);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
+	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
+
+	/* only handle interrupts that are currently enabled */
+	irqstat &= isp1362_hcd->irqenb;
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
+	svc_mask = irqstat;
+
+	if (irqstat & HCuPINT_SOF) {
+		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
+		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
+		handled = 1;
+		svc_mask &= ~HCuPINT_SOF;
+		DBG(3, "%s: SOF\n", __func__);
+		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+		if (!list_empty(&isp1362_hcd->remove_list))
+			finish_unlinks(isp1362_hcd);
+		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
+			if (list_empty(&isp1362_hcd->atl_queue.active)) {
+				start_atl_transfers(isp1362_hcd);
+			} else {
+				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
+						    isp1362_hcd->atl_queue.skip_map);
+				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+			}
+		}
+	}
+
+	if (irqstat & HCuPINT_ISTL0) {
+		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
+		handled = 1;
+		svc_mask &= ~HCuPINT_ISTL0;
+		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
+		DBG(1, "%s: ISTL0\n", __func__);
+		WARN_ON((int)!!isp1362_hcd->istl_flip);
+		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+			HCBUFSTAT_ISTL0_ACTIVE);
+		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+			HCBUFSTAT_ISTL0_DONE));
+		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
+	}
+
+	if (irqstat & HCuPINT_ISTL1) {
+		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
+		handled = 1;
+		svc_mask &= ~HCuPINT_ISTL1;
+		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
+		DBG(1, "%s: ISTL1\n", __func__);
+		WARN_ON(!(int)isp1362_hcd->istl_flip);
+		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+			HCBUFSTAT_ISTL1_ACTIVE);
+		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+			HCBUFSTAT_ISTL1_DONE));
+		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
+	}
+
+	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
+		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
+			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
+		finish_iso_transfers(isp1362_hcd,
+				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
+		start_iso_transfers(isp1362_hcd);
+		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
+	}
+
+	if (irqstat & HCuPINT_INTL) {
+		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
+		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
+
+		DBG(2, "%s: INTL\n", __func__);
+
+		svc_mask &= ~HCuPINT_INTL;
+
+		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
+		if (~(done_map | skip_map) == 0)
+			/* All PTDs are finished, disable INTL processing entirely */
+			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+
+		handled = 1;
+		WARN_ON(!done_map);
+		if (done_map) {
+			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
+			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+			start_intl_transfers(isp1362_hcd);
+		}
+	}
+
+	if (irqstat & HCuPINT_ATL) {
+		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
+		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
+
+		DBG(2, "%s: ATL\n", __func__);
+
+		svc_mask &= ~HCuPINT_ATL;
+
+		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
+		if (~(done_map | skip_map) == 0)
+			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+		if (done_map) {
+			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
+			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+			start_atl_transfers(isp1362_hcd);
+		}
+		handled = 1;
+	}
+
+	if (irqstat & HCuPINT_OPR) {
+		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
+		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
+
+		svc_mask &= ~HCuPINT_OPR;
+		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
+		intstat &= isp1362_hcd->intenb;
+		if (intstat & OHCI_INTR_UE) {
+			pr_err("Unrecoverable error\n");
+			/* FIXME: do a reset or other cleanup here */
+		}
+		if (intstat & OHCI_INTR_RHSC) {
+			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
+			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
+			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
+		}
+		if (intstat & OHCI_INTR_RD) {
+			pr_info("%s: RESUME DETECTED\n", __func__);
+			isp1362_show_reg(isp1362_hcd, HCCONTROL);
+			usb_hcd_resume_root_hub(hcd);
+		}
+		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
+		irqstat &= ~HCuPINT_OPR;
+		handled = 1;
+	}
+
+	if (irqstat & HCuPINT_SUSP) {
+		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
+		handled = 1;
+		svc_mask &= ~HCuPINT_SUSP;
+
+		pr_info("%s: SUSPEND IRQ\n", __func__);
+	}
+
+	if (irqstat & HCuPINT_CLKRDY) {
+		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
+		handled = 1;
+		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
+		svc_mask &= ~HCuPINT_CLKRDY;
+		pr_info("%s: CLKRDY IRQ\n", __func__);
+	}
+
+	if (svc_mask)
+		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+	isp1362_hcd->irq_active--;
+	spin_unlock(&isp1362_hcd->lock);
+
+	return IRQ_RETVAL(handled);
+}
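+
+/*
+ * The handler above masks all microcontroller interrupts on entry and
+ * restores isp1362_hcd->irqenb on exit, so any bits it clears from
+ * irqenb while servicing (SOF, ISTL0/1, CLKRDY) stay disabled until
+ * they are explicitly re-armed.
+ */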
+
+/*-------------------------------------------------------------------------*/
+
+#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
+static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
+{
+	int i, branch = -ENOSPC;
+
+	/* search for the least loaded schedule branch of that interval
+	 * which has enough bandwidth left unreserved.
+	 */
+	for (i = 0; i < interval; i++) {
+		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
+			int j;
+
+			for (j = i; j < PERIODIC_SIZE; j += interval) {
+				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
+					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
+					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
+					break;
+				}
+			}
+			if (j < PERIODIC_SIZE)
+				continue;
+			branch = i;
+		}
+	}
+	return branch;
+}
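+
+/*
+ * Worked example (loads assumed for illustration): with interval = 4,
+ * branch i (0 <= i < 4) reserves every 4th slot of the PERIODIC_SIZE
+ * frame table, i.e. slots i, i + 4, i + 8, ...  A new endpoint is
+ * placed on whichever candidate branch currently carries the least
+ * load, provided none of its slots would exceed MAX_PERIODIC_LOAD
+ * (900 of the 1000 usec frame).
+ */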
+
+/* NB! ALL the code above this point runs with isp1362_hcd->lock
+   held and IRQs off.
+*/
+
+/*-------------------------------------------------------------------------*/
+
+static int isp1362_urb_enqueue(struct usb_hcd *hcd,
+			       struct urb *urb,
+			       gfp_t mem_flags)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	struct usb_device *udev = urb->dev;
+	unsigned int pipe = urb->pipe;
+	int is_out = !usb_pipein(pipe);
+	int type = usb_pipetype(pipe);
+	int epnum = usb_pipeendpoint(pipe);
+	struct usb_host_endpoint *hep = urb->ep;
+	struct isp1362_ep *ep = NULL;
+	unsigned long flags;
+	int retval = 0;
+
+	DBG(3, "%s: urb %p\n", __func__, urb);
+
+	if (type == PIPE_ISOCHRONOUS) {
+		pr_err("Isochronous transfers not supported\n");
+		return -ENOSPC;
+	}
+
+	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
+		usb_pipedevice(pipe), epnum,
+		is_out ? "out" : "in",
+		usb_pipecontrol(pipe) ? "ctrl" :
+			usb_pipeint(pipe) ? "int" :
+			usb_pipebulk(pipe) ? "bulk" :
+			"iso",
+		urb->transfer_buffer_length,
+		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
+		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+		"short_ok" : "");
+
+	/* avoid all allocations within spinlocks: request or endpoint */
+	if (!hep->hcpriv) {
+		ep = kzalloc(sizeof *ep, mem_flags);
+		if (!ep)
+			return -ENOMEM;
+	}
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+	/* don't submit to a dead or disabled port */
+	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
+	      USB_PORT_STAT_ENABLE) ||
+	    !HC_IS_RUNNING(hcd->state)) {
+		kfree(ep);
+		retval = -ENODEV;
+		goto fail_not_linked;
+	}
+
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (retval) {
+		kfree(ep);
+		goto fail_not_linked;
+	}
+
+	if (hep->hcpriv) {
+		ep = hep->hcpriv;
+	} else {
+		INIT_LIST_HEAD(&ep->schedule);
+		INIT_LIST_HEAD(&ep->active);
+		INIT_LIST_HEAD(&ep->remove_list);
+		ep->udev = usb_get_dev(udev);
+		ep->hep = hep;
+		ep->epnum = epnum;
+		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+		ep->ptd_offset = -EINVAL;
+		ep->ptd_index = -EINVAL;
+		usb_settoggle(udev, epnum, is_out, 0);
+
+		if (type == PIPE_CONTROL)
+			ep->nextpid = USB_PID_SETUP;
+		else if (is_out)
+			ep->nextpid = USB_PID_OUT;
+		else
+			ep->nextpid = USB_PID_IN;
+
+		switch (type) {
+		case PIPE_ISOCHRONOUS:
+		case PIPE_INTERRUPT:
+			if (urb->interval > PERIODIC_SIZE)
+				urb->interval = PERIODIC_SIZE;
+			ep->interval = urb->interval;
+			ep->branch = PERIODIC_SIZE;
+			ep->load = usb_calc_bus_time(udev->speed, !is_out,
+						     (type == PIPE_ISOCHRONOUS),
+						     usb_maxpacket(udev, pipe, is_out)) / 1000;
+			break;
+		}
+		hep->hcpriv = ep;
+	}
+	ep->num_req = isp1362_hcd->req_serial++;
+
+	/* maybe put endpoint into schedule */
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		if (list_empty(&ep->schedule)) {
+			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
+				__func__, ep, ep->num_req);
+			list_add_tail(&ep->schedule, &isp1362_hcd->async);
+		}
+		break;
+	case PIPE_ISOCHRONOUS:
+	case PIPE_INTERRUPT:
+		urb->interval = ep->interval;
+
+		/* urb submitted for already existing EP */
+		if (ep->branch < PERIODIC_SIZE)
+			break;
+
+		retval = balance(isp1362_hcd, ep->interval, ep->load);
+		if (retval < 0) {
+			pr_err("%s: balance returned %d\n", __func__, retval);
+			goto fail;
+		}
+		ep->branch = retval;
+		retval = 0;
+		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
+		    __func__, isp1362_hcd->fmindex, ep->branch,
+		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
+		     ~(PERIODIC_SIZE - 1)) + ep->branch,
+		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
+
+		if (list_empty(&ep->schedule)) {
+			if (type == PIPE_ISOCHRONOUS) {
+				u16 frame = isp1362_hcd->fmindex;
+
+				frame += max_t(u16, 8, ep->interval);
+				frame &= ~(ep->interval - 1);
+				frame |= ep->branch;
+				if (frame_before(frame, isp1362_hcd->fmindex))
+					frame += ep->interval;
+				urb->start_frame = frame;
+
+				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
+				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
+			} else {
+				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
+				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
+			}
+		} else
+			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
+
+		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
+		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
+		    isp1362_hcd->load[ep->branch] + ep->load);
+		isp1362_hcd->load[ep->branch] += ep->load;
+	}
+
+	urb->hcpriv = hep;
+	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
+
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		start_atl_transfers(isp1362_hcd);
+		break;
+	case PIPE_INTERRUPT:
+		start_intl_transfers(isp1362_hcd);
+		break;
+	case PIPE_ISOCHRONOUS:
+		start_iso_transfers(isp1362_hcd);
+		break;
+	default:
+		BUG();
+	}
+ fail:
+	if (retval)
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+ fail_not_linked:
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	if (retval)
+		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
+	return retval;
+}
+
+static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	struct usb_host_endpoint *hep;
+	unsigned long flags;
+	struct isp1362_ep *ep;
+	int retval = 0;
+
+	DBG(3, "%s: urb %p\n", __func__, urb);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (retval)
+		goto done;
+
+	hep = urb->hcpriv;
+
+	if (!hep) {
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+		return -EIDRM;
+	}
+
+	ep = hep->hcpriv;
+	if (ep) {
+		/* In front of queue? */
+		if (ep->hep->urb_list.next == &urb->urb_list) {
+			if (!list_empty(&ep->active)) {
+				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
+				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+				/* disable processing and queue PTD for removal */
+				remove_ptd(isp1362_hcd, ep);
+				urb = NULL;
+			}
+		}
+		if (urb) {
+			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
+			    ep->num_req);
+			finish_request(isp1362_hcd, ep, urb, status);
+		} else
+			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
+	} else {
+		pr_warn("%s: No EP in URB %p\n", __func__, urb);
+		retval = -EINVAL;
+	}
+done:
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	DBG(3, "%s: exit\n", __func__);
+
+	return retval;
+}
+
+static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+	struct isp1362_ep *ep = hep->hcpriv;
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+
+	DBG(1, "%s: ep %p\n", __func__, ep);
+	if (!ep)
+		return;
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	if (!list_empty(&hep->urb_list)) {
+		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
+			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
+			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+			remove_ptd(isp1362_hcd, ep);
+			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
+		}
+	}
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	/* Wait for interrupt to clear out active list */
+	while (!list_empty(&ep->active))
+		msleep(1);
+
+	DBG(1, "%s: Freeing EP %p\n", __func__, ep);
+
+	usb_put_dev(ep->udev);
+	kfree(ep);
+	hep->hcpriv = NULL;
+}
+
+static int isp1362_get_frame(struct usb_hcd *hcd)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	u32 fmnum;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	return (int)fmnum;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	int ports, i, changed = 0;
+	unsigned long flags;
+
+	if (!HC_IS_RUNNING(hcd->state))
+		return -ESHUTDOWN;
+
+	/* Report no status change now if we are scheduled to be
+	   called later */
+	if (timer_pending(&hcd->rh_timer))
+		return 0;
+
+	ports = isp1362_hcd->rhdesca & RH_A_NDP;
+	BUG_ON(ports > 2);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	/* init status */
+	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
+		buf[0] = changed = 1;
+	else
+		buf[0] = 0;
+
+	for (i = 0; i < ports; i++) {
+		u32 status = isp1362_hcd->rhport[i];
+
+		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
+			      RH_PS_OCIC | RH_PS_PRSC)) {
+			changed = 1;
+			buf[0] |= 1 << (i + 1);
+			continue;
+		}
+
+		if (!(status & RH_PS_CCS))
+			continue;
+	}
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	return changed;
+}
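+
+/*
+ * Layout of the status change bitmap filled in above: bit 0 reports a
+ * hub status change (LPSC/OCIC) and bit n (n >= 1) reports a change on
+ * port n, matching the format of the USB hub interrupt endpoint.
+ */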
+
+static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
+				   struct usb_hub_descriptor *desc)
+{
+	u32 reg = isp1362_hcd->rhdesca;
+
+	DBG(3, "%s: enter\n", __func__);
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bDescLength = 9;
+	desc->bHubContrCurrent = 0;
+	desc->bNbrPorts = reg & 0x3;
+	/* Power switching, device type, overcurrent. */
+	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
+						(HUB_CHAR_LPSM |
+						 HUB_CHAR_COMPOUND |
+						 HUB_CHAR_OCPM));
+	DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
+			desc->wHubCharacteristics);
+	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
+	/* ports removable, and legacy PortPwrCtrlMask */
+	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+
+	DBG(3, "%s: exit\n", __func__);
+}
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			       u16 wIndex, char *buf, u16 wLength)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	int retval = 0;
+	unsigned long flags;
+	unsigned long t1;
+	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
+	u32 tmp = 0;
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		DBG(0, "ClearHubFeature: ");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			DBG(0, "C_HUB_OVER_CURRENT\n");
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			break;
+		case C_HUB_LOCAL_POWER:
+			DBG(0, "C_HUB_LOCAL_POWER\n");
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetHubFeature:
+		DBG(0, "SetHubFeature: ");
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case GetHubDescriptor:
+		DBG(0, "GetHubDescriptor\n");
+		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
+		break;
+	case GetHubStatus:
+		DBG(0, "GetHubStatus\n");
+		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
+		break;
+	case GetPortStatus:
+#ifndef VERBOSE
+		DBG(0, "GetPortStatus\n");
+#endif
+		if (!wIndex || wIndex > ports)
+			goto error;
+		tmp = isp1362_hcd->rhport[--wIndex];
+		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
+		break;
+	case ClearPortFeature:
+		DBG(0, "ClearPortFeature: ");
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			DBG(0, "USB_PORT_FEAT_ENABLE\n");
+			tmp = RH_PS_CCS;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
+			tmp = RH_PS_PESC;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+			tmp = RH_PS_POCI;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
+			tmp = RH_PS_PSSC;
+			break;
+		case USB_PORT_FEAT_POWER:
+			DBG(0, "USB_PORT_FEAT_POWER\n");
+			tmp = RH_PS_LSDA;
+
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
+			tmp = RH_PS_CSC;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+			tmp = RH_PS_OCIC;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			DBG(0, "USB_PORT_FEAT_C_RESET\n");
+			tmp = RH_PS_PRSC;
+			break;
+		default:
+			goto error;
+		}
+
+		spin_lock_irqsave(&isp1362_hcd->lock, flags);
+		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
+		isp1362_hcd->rhport[wIndex] =
+			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+		break;
+	case SetPortFeature:
+		DBG(0, "SetPortFeature: ");
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
+			isp1362_hcd->rhport[wIndex] =
+				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			break;
+		case USB_PORT_FEAT_POWER:
+			DBG(0, "USB_PORT_FEAT_POWER\n");
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
+			isp1362_hcd->rhport[wIndex] =
+				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			break;
+		case USB_PORT_FEAT_RESET:
+			DBG(0, "USB_PORT_FEAT_RESET\n");
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
+			while (time_before(jiffies, t1)) {
+				/* spin until any current reset finishes */
+				for (;;) {
+					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+					if (!(tmp & RH_PS_PRS))
+						break;
+					udelay(500);
+				}
+				if (!(tmp & RH_PS_CCS))
+					break;
+				/* Reset lasts 10 ms (according to the datasheet) */
+				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
+
+				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+				msleep(10);
+				spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			}
+
+			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
+									 HCRHPORT1 + wIndex);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			break;
+		default:
+			goto error;
+		}
+		break;
+
+	default:
+ error:
+		/* "protocol stall" on error */
+		DBG(0, "PROTOCOL STALL\n");
+		retval = -EPIPE;
+	}
+
+	return retval;
+}
+
+#ifdef	CONFIG_PM
+static int isp1362_bus_suspend(struct usb_hcd *hcd)
+{
+	int status = 0;
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+
+	if (time_before(jiffies, isp1362_hcd->next_statechange))
+		msleep(5);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_RESUME:
+		DBG(0, "%s: resume/suspend?\n", __func__);
+		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+		isp1362_hcd->hc_control |= OHCI_USB_RESET;
+		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+		/* FALL THROUGH */
+	case OHCI_USB_RESET:
+		status = -EBUSY;
+		pr_warn("%s: needs reinit!\n", __func__);
+		goto done;
+	case OHCI_USB_SUSPEND:
+		pr_warn("%s: already suspended?\n", __func__);
+		goto done;
+	}
+	DBG(0, "%s: suspend root hub\n", __func__);
+
+	/* First stop any processing */
+	hcd->state = HC_STATE_QUIESCING;
+	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
+	    !list_empty(&isp1362_hcd->intl_queue.active) ||
+	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
+	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
+		int limit;
+
+		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
+		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
+
+		DBG(0, "%s: stopping schedules ...\n", __func__);
+		limit = 2000;
+		while (limit > 0) {
+			udelay(250);
+			limit -= 250;
+			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
+				break;
+		}
+		mdelay(7);
+		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
+			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+		}
+		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
+			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+		}
+		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
+			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
+		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
+			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
+	}
+	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
+		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
+			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+
+	/* Suspend hub */
+	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
+	isp1362_show_reg(isp1362_hcd, HCCONTROL);
+	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+	isp1362_show_reg(isp1362_hcd, HCCONTROL);
+
+#if 1
+	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
+		pr_err("%s: controller won't suspend %08x\n", __func__,
+		    isp1362_hcd->hc_control);
+		status = -EBUSY;
+	} else
+#endif
+	{
+		/* no resumes until devices finish suspending */
+		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
+	}
+done:
+	if (status == 0) {
+		hcd->state = HC_STATE_SUSPENDED;
+		DBG(0, "%s: HCD suspended: %08x\n", __func__,
+		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+	}
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	return status;
+}
+
+static int isp1362_bus_resume(struct usb_hcd *hcd)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	u32 port;
+	unsigned long flags;
+	int status = -EINPROGRESS;
+
+	if (time_before(jiffies, isp1362_hcd->next_statechange))
+		msleep(5);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
+	if (hcd->state == HC_STATE_RESUMING) {
+		pr_warn("%s: duplicate resume\n", __func__);
+		status = 0;
+	} else
+		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+		case OHCI_USB_SUSPEND:
+			DBG(0, "%s: resume root hub\n", __func__);
+			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
+			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+			break;
+		case OHCI_USB_RESUME:
+			/* HCFS changes sometime after INTR_RD */
+			DBG(0, "%s: remote wakeup\n", __func__);
+			break;
+		case OHCI_USB_OPER:
+			DBG(0, "%s: odd resume\n", __func__);
+			status = 0;
+			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+			break;
+		default:		/* RESET, we lost power */
+			DBG(0, "%s: root hub hardware reset\n", __func__);
+			status = -EBUSY;
+		}
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	if (status == -EBUSY) {
+		DBG(0, "%s: Restarting HC\n", __func__);
+		isp1362_hc_stop(hcd);
+		return isp1362_hc_start(hcd);
+	}
+	if (status != -EINPROGRESS)
+		return status;
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
+	while (port--) {
+		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
+
+		/* force global, not selective, resume */
+		if (!(stat & RH_PS_PSS)) {
+			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
+			continue;
+		}
+		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
+		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
+	}
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	/* Some controllers (Lucent) need extra-long delays */
+	hcd->state = HC_STATE_RESUMING;
+	mdelay(20 /* usb 11.5.1.10 */ + 15);
+
+	isp1362_hcd->hc_control = OHCI_USB_OPER;
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	isp1362_show_reg(isp1362_hcd, HCCONTROL);
+	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	/* TRSMRCY */
+	msleep(10);
+
+	/* keep it alive for ~5x suspend + resume costs */
+	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
+
+	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+	hcd->state = HC_STATE_RUNNING;
+	return 0;
+}
+#else
+#define	isp1362_bus_suspend	NULL
+#define	isp1362_bus_resume	NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void dump_irq(struct seq_file *s, char *label, u16 mask)
+{
+	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
+		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
+		   mask & HCuPINT_SUSP ? " susp" : "",
+		   mask & HCuPINT_OPR ? " opr" : "",
+		   mask & HCuPINT_EOT ? " eot" : "",
+		   mask & HCuPINT_ATL ? " atl" : "",
+		   mask & HCuPINT_SOF ? " sof" : "");
+}
+
+static void dump_int(struct seq_file *s, char *label, u32 mask)
+{
+	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
+		   mask & OHCI_INTR_MIE ? " MIE" : "",
+		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
+		   mask & OHCI_INTR_FNO ? " fno" : "",
+		   mask & OHCI_INTR_UE ? " ue" : "",
+		   mask & OHCI_INTR_RD ? " rd" : "",
+		   mask & OHCI_INTR_SF ? " sof" : "",
+		   mask & OHCI_INTR_SO ? " so" : "");
+}
+
+static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
+{
+	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
+		   mask & OHCI_CTRL_RWC ? " rwc" : "",
+		   mask & OHCI_CTRL_RWE ? " rwe" : "",
+		   ({
+			   char *hcfs;
+			   switch (mask & OHCI_CTRL_HCFS) {
+			   case OHCI_USB_OPER:
+				   hcfs = " oper";
+				   break;
+			   case OHCI_USB_RESET:
+				   hcfs = " reset";
+				   break;
+			   case OHCI_USB_RESUME:
+				   hcfs = " resume";
+				   break;
+			   case OHCI_USB_SUSPEND:
+				   hcfs = " suspend";
+				   break;
+			   default:
+				   hcfs = " ?";
+			   }
+			   hcfs;
+		   }));
+}
+
+static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
+{
+	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
+		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
+	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
+		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
+		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
+	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
+		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
+		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
+	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
+		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
+	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
+		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
+	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
+		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
+	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
+		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
+	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
+		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
+	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
+		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
+	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
+		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
+	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
+		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
+	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
+		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
+	seq_printf(s, "\n");
+	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
+		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
+	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
+		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
+	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
+		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
+	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
+		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
+	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
+		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
+		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
+	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
+		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
+	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
+		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
+	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
+		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
+#if 0
+	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
+		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
+#endif
+	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
+		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
+	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
+		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
+	seq_printf(s, "\n");
+	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
+		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
+	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
+		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
+	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
+		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
+	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
+		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
+	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
+		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
+	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
+		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
+	seq_printf(s, "\n");
+	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
+		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
+	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
+		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
+#if 0
+	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
+		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
+#endif
+	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
+		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
+	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
+		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
+	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
+		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
+	seq_printf(s, "\n");
+	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
+		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
+	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
+		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
+}
+
+static int isp1362_show(struct seq_file *s, void *unused)
+{
+	struct isp1362_hcd *isp1362_hcd = s->private;
+	struct isp1362_ep *ep;
+	int i;
+
+	seq_printf(s, "%s\n%s version %s\n",
+		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
+
+	/* collect statistics to help estimate potential win for
+	 * DMA engines that care about alignment (PXA)
+	 */
+	seq_printf(s, "alignment:  16b/%lu 8b/%lu 4b/%lu 2b/%lu 1b/%lu\n",
+		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
+		   isp1362_hcd->stat2, isp1362_hcd->stat1);
+	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
+	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
+	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
+		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
+		       isp1362_hcd->istl_queue[1].stat_maxptds));
+
+	/* FIXME: don't show the following in suspended state */
+	spin_lock_irq(&isp1362_hcd->lock);
+
+	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
+	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
+	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+
+	for (i = 0; i < NUM_ISP1362_IRQS; i++)
+		if (isp1362_hcd->irq_stat[i])
+			seq_printf(s, "%-15s: %d\n",
+				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
+
+	dump_regs(s, isp1362_hcd);
+	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+		struct urb *urb;
+
+		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
+			   ({
+				   const char *pid;
+				   switch (ep->nextpid) {
+				   case USB_PID_IN:
+					   pid = "in";
+					   break;
+				   case USB_PID_OUT:
+					   pid = "out";
+					   break;
+				   case USB_PID_SETUP:
+					   pid = "setup";
+					   break;
+				   case USB_PID_ACK:
+					   pid = "status";
+					   break;
+				   default:
+					   pid = "?";
+					   break;
+				   }
+				   pid;
+			   }), ep->maxpacket);
+		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
+			seq_printf(s, "  urb%p, %d/%d\n", urb,
+				   urb->actual_length,
+				   urb->transfer_buffer_length);
+		}
+	}
+	if (!list_empty(&isp1362_hcd->async))
+		seq_printf(s, "\n");
+	dump_ptd_queue(&isp1362_hcd->atl_queue);
+
+	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
+
+	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
+			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
+
+		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
+			   ep->interval, ep,
+			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+			   ep->udev->devnum, ep->epnum,
+			   (ep->epnum == 0) ? "" :
+			   ((ep->nextpid == USB_PID_IN) ?
+			    "in" : "out"), ep->maxpacket);
+	}
+	dump_ptd_queue(&isp1362_hcd->intl_queue);
+
+	seq_printf(s, "ISO:\n");
+
+	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
+		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
+			   ep->interval, ep,
+			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+			   ep->udev->devnum, ep->epnum,
+			   (ep->epnum == 0) ? "" :
+			   ((ep->nextpid == USB_PID_IN) ?
+			    "in" : "out"), ep->maxpacket);
+	}
+
+	spin_unlock_irq(&isp1362_hcd->lock);
+	seq_printf(s, "\n");
+
+	return 0;
+}
+
+static int isp1362_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, isp1362_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = isp1362_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/* expect just one isp1362_hcd per system */
+static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+	isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
+						      usb_debug_root,
+						      isp1362_hcd, &debug_ops);
+}
+
+static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+	debugfs_remove(isp1362_hcd->debug_file);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
+{
+	int tmp = 20;
+
+	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
+	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
+	while (--tmp) {
+		mdelay(1);
+		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
+			break;
+	}
+	if (!tmp)
+		pr_err("Software reset timeout\n");
+}
+
+static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	__isp1362_sw_reset(isp1362_hcd);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
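+/*
+ * Partition the chip's ISP1362_BUF_SIZE (4 KiB) of internal buffer
+ * memory among the three PTD queues.  With the default sizes from
+ * isp1362.h, the layout computed below works out to:
+ *
+ *	ISTL: 2 *  256      =  512 bytes @ 0x0000	(double-buffered iso)
+ *	INTL: 16 * (64 + 8) = 1152 bytes @ 0x0200	(interrupt)
+ *	ATL : 32 * (64 + 8) = 2304 bytes @ 0x0680	(control/bulk)
+ *
+ * where "+ 8" is PTD_HEADER_SIZE and the ATL block count is whatever
+ * space remains divided by the block size, capped at 32 blocks.
+ */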
+static int isp1362_mem_config(struct usb_hcd *hcd)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+	u32 total;
+	u16 istl_size = ISP1362_ISTL_BUFSIZE;
+	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
+	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
+	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
+	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
+	u16 atl_size;
+	int i;
+
+	WARN_ON(istl_size & 3);
+	WARN_ON(atl_blksize & 3);
+	WARN_ON(intl_blksize & 3);
+	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
+	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
+
+	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
+	if (atl_buffers > 32)
+		atl_buffers = 32;
+	atl_size = atl_buffers * atl_blksize;
+	total = atl_size + intl_size + istl_size;
+	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
+	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
+		 istl_size / 2, istl_size, 0, istl_size / 2);
+	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
+		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
+		 intl_size, istl_size);
+	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
+		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
+		 atl_size, istl_size + intl_size);
+	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
+		 ISP1362_BUF_SIZE - total);
+
+	if (total > ISP1362_BUF_SIZE) {
+		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
+			__func__, total, ISP1362_BUF_SIZE);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+	for (i = 0; i < 2; i++) {
+		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
+		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
+		isp1362_hcd->istl_queue[i].blk_size = 4;
+		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
+		snprintf(isp1362_hcd->istl_queue[i].name,
+			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
+		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
+		     isp1362_hcd->istl_queue[i].name,
+		     isp1362_hcd->istl_queue[i].buf_start,
+		     isp1362_hcd->istl_queue[i].buf_size);
+	}
+	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
+
+	isp1362_hcd->intl_queue.buf_start = istl_size;
+	isp1362_hcd->intl_queue.buf_size = intl_size;
+	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
+	isp1362_hcd->intl_queue.blk_size = intl_blksize;
+	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
+	isp1362_hcd->intl_queue.skip_map = ~0;
+	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
+
+	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
+			    isp1362_hcd->intl_queue.buf_size);
+	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
+			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
+	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
+			    1 << (ISP1362_INTL_BUFFERS - 1));
+
+	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
+	isp1362_hcd->atl_queue.buf_size = atl_size;
+	isp1362_hcd->atl_queue.buf_count = atl_buffers;
+	isp1362_hcd->atl_queue.blk_size = atl_blksize;
+	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
+	isp1362_hcd->atl_queue.skip_map = ~0;
+	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
+
+	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
+			    isp1362_hcd->atl_queue.buf_size);
+	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
+			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
+	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
+			    1 << (atl_buffers - 1));
+
+	snprintf(isp1362_hcd->atl_queue.name,
+		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
+	snprintf(isp1362_hcd->intl_queue.name,
+		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
+	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+	     isp1362_hcd->intl_queue.name,
+	     isp1362_hcd->intl_queue.buf_start,
+	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
+	     isp1362_hcd->intl_queue.buf_size);
+	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+	     isp1362_hcd->atl_queue.name,
+	     isp1362_hcd->atl_queue.buf_start,
+	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
+	     isp1362_hcd->atl_queue.buf_size);
+
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	return 0;
+}
+
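+/*
+ * Bring the controller out of reset: use the board's reset/clock hooks
+ * when available, fall back to a register-level software reset
+ * otherwise, then poll HCuPINT for CLKRDY (for up to ~100 ms) before
+ * declaring the chip usable.
+ */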
+static int isp1362_hc_reset(struct usb_hcd *hcd)
+{
+	int ret = 0;
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long t;
+	unsigned long timeout = 100;
+	unsigned long flags;
+	int clkrdy = 0;
+
+	pr_debug("%s:\n", __func__);
+
+	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
+		isp1362_hcd->board->reset(hcd->self.controller, 1);
+		msleep(20);
+		if (isp1362_hcd->board->clock)
+			isp1362_hcd->board->clock(hcd->self.controller, 1);
+		isp1362_hcd->board->reset(hcd->self.controller, 0);
+	} else {
+		isp1362_sw_reset(isp1362_hcd);
+	}
+
+	/* chip has been reset. First we need to see a clock */
+	t = jiffies + msecs_to_jiffies(timeout);
+	while (!clkrdy && time_before_eq(jiffies, t)) {
+		spin_lock_irqsave(&isp1362_hcd->lock, flags);
+		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+		if (!clkrdy)
+			msleep(4);
+	}
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	if (!clkrdy) {
+		pr_err("Clock not ready after %lums\n", timeout);
+		ret = -ENODEV;
+	}
+	return ret;
+}
+
+static void isp1362_hc_stop(struct usb_hcd *hcd)
+{
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+	u32 tmp;
+
+	pr_debug("%s:\n", __func__);
+
+	del_timer_sync(&hcd->rh_timer);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+	/* Switch off power for all ports */
+	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+	tmp &= ~(RH_A_NPS | RH_A_PSM);
+	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
+	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+
+	/* Reset the chip */
+	if (isp1362_hcd->board && isp1362_hcd->board->reset)
+		isp1362_hcd->board->reset(hcd->self.controller, 1);
+	else
+		__isp1362_sw_reset(isp1362_hcd);
+
+	if (isp1362_hcd->board && isp1362_hcd->board->clock)
+		isp1362_hcd->board->clock(hcd->self.controller, 0);
+
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
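+/*
+ * Optional RAM self test (enabled at compile time via CHIP_BUFFER_TEST):
+ * exercises the chip's buffer memory with byte-granular accesses at
+ * various offsets and lengths, a full-buffer write/read/compare, and
+ * PTD-header-sized writes at 256 even offsets.  A persistent mismatch
+ * fails the probe with -ENODEV.
+ */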
+#ifdef CHIP_BUFFER_TEST
+static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
+{
+	int ret = 0;
+	u16 *ref;
+	unsigned long flags;
+
+	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
+	if (ref) {
+		int offset;
+		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
+
+		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
+			ref[offset] = ~offset;
+			tst[offset] = offset;
+		}
+
+		for (offset = 0; offset < 4; offset++) {
+			int j;
+
+			for (j = 0; j < 8; j++) {
+				spin_lock_irqsave(&isp1362_hcd->lock, flags);
+				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
+				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
+				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+				if (memcmp(ref, tst, j)) {
+					ret = -ENODEV;
+					pr_err("%s: memory check with %d byte offset %d failed\n",
+					    __func__, j, offset);
+					dump_data((u8 *)ref + offset, j);
+					dump_data((u8 *)tst + offset, j);
+				}
+			}
+		}
+
+		spin_lock_irqsave(&isp1362_hcd->lock, flags);
+		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
+		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
+			ret = -ENODEV;
+			pr_err("%s: memory check failed\n", __func__);
+			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
+		}
+
+		for (offset = 0; offset < 256; offset++) {
+			int test_size = 0;
+
+			yield();
+
+			memset(tst, 0, ISP1362_BUF_SIZE);
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
+				   ISP1362_BUF_SIZE / 2)) {
+				pr_err("%s: Failed to clear buffer\n", __func__);
+				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
+				break;
+			}
+			spin_lock_irqsave(&isp1362_hcd->lock, flags);
+			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
+			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
+					     offset * 2 + PTD_HEADER_SIZE, test_size);
+			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+					    PTD_HEADER_SIZE + test_size);
+			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
+				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
+				spin_lock_irqsave(&isp1362_hcd->lock, flags);
+				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+						    PTD_HEADER_SIZE + test_size);
+				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+					ret = -ENODEV;
+					pr_err("%s: memory check with offset %02x failed\n",
+					    __func__, offset);
+					break;
+				}
+				pr_warn("%s: memory check with offset %02x ok after second read\n",
+					__func__, offset);
+			}
+		}
+		kfree(ref);
+	}
+	return ret;
+}
+#endif
+
+static int isp1362_hc_start(struct usb_hcd *hcd)
+{
+	int ret;
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	struct isp1362_platform_data *board = isp1362_hcd->board;
+	u16 hwcfg;
+	u16 chipid;
+	unsigned long flags;
+
+	pr_debug("%s:\n", __func__);
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
+		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
+		return -ENODEV;
+	}
+
+#ifdef CHIP_BUFFER_TEST
+	ret = isp1362_chip_test(isp1362_hcd);
+	if (ret)
+		return -ENODEV;
+#endif
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	/* clear interrupt status and disable all interrupt sources */
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+	/* HW conf */
+	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
+	if (board->sel15Kres)
+		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
+			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
+	if (board->clknotstop)
+		hwcfg |= HCHWCFG_CLKNOTSTOP;
+	if (board->oc_enable)
+		hwcfg |= HCHWCFG_ANALOG_OC;
+	if (board->int_act_high)
+		hwcfg |= HCHWCFG_INT_POL;
+	if (board->int_edge_triggered)
+		hwcfg |= HCHWCFG_INT_TRIGGER;
+	if (board->dreq_act_high)
+		hwcfg |= HCHWCFG_DREQ_POL;
+	if (board->dack_act_high)
+		hwcfg |= HCHWCFG_DACK_POL;
+	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
+	isp1362_show_reg(isp1362_hcd, HCHWCFG);
+	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	ret = isp1362_mem_config(hcd);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+	/* Root hub conf */
+	isp1362_hcd->rhdesca = 0;
+	if (board->no_power_switching)
+		isp1362_hcd->rhdesca |= RH_A_NPS;
+	if (board->power_switching_mode)
+		isp1362_hcd->rhdesca |= RH_A_PSM;
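+	/* POTPGT is in units of 2 ms per the OHCI spec; the default 25 gives 50 ms */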
+	if (board->potpg)
+		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
+	else
+		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
+
+	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
+	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
+	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+
+	isp1362_hcd->rhdescb = RH_B_PPCM;
+	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
+	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
+
+	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
+	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
+	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
+
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	isp1362_hcd->hc_control = OHCI_USB_OPER;
+	hcd->state = HC_STATE_RUNNING;
+
+	spin_lock_irqsave(&isp1362_hcd->lock, flags);
+	/* Set up interrupts */
+	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
+	isp1362_hcd->intenb |= OHCI_INTR_RD;
+	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
+	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
+	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+
+	/* Go operational */
+	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+	/* enable global power */
+	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
+
+	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver isp1362_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"ISP1362 Host Controller",
+	.hcd_priv_size =	sizeof(struct isp1362_hcd),
+
+	.irq =			isp1362_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY,
+
+	.reset =		isp1362_hc_reset,
+	.start =		isp1362_hc_start,
+	.stop =			isp1362_hc_stop,
+
+	.urb_enqueue =		isp1362_urb_enqueue,
+	.urb_dequeue =		isp1362_urb_dequeue,
+	.endpoint_disable =	isp1362_endpoint_disable,
+
+	.get_frame_number =	isp1362_get_frame,
+
+	.hub_status_data =	isp1362_hub_status_data,
+	.hub_control =		isp1362_hub_control,
+	.bus_suspend =		isp1362_bus_suspend,
+	.bus_resume =		isp1362_bus_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int isp1362_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+
+	remove_debug_file(isp1362_hcd);
+	DBG(0, "%s: Removing HCD\n", __func__);
+	usb_remove_hcd(hcd);
+	DBG(0, "%s: put_hcd\n", __func__);
+	usb_put_hcd(hcd);
+	DBG(0, "%s: Done\n", __func__);
+
+	return 0;
+}
+
+static int isp1362_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct isp1362_hcd *isp1362_hcd;
+	struct resource *addr, *data, *irq_res;
+	void __iomem *addr_reg;
+	void __iomem *data_reg;
+	int irq;
+	int retval = 0;
+	unsigned int irq_flags = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/* basic sanity checks first.  board-specific init logic should
+	 * have initialized these three resources and probably board
+	 * specific platform_data.  we don't probe for IRQs, and do only
+	 * minimal sanity checking.
+	 */
+	if (pdev->num_resources < 3)
+		return -ENODEV;
+
+	if (pdev->dev.dma_mask) {
+		DBG(1, "won't do DMA");
+		return -ENODEV;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res)
+		return -ENODEV;
+
+	irq = irq_res->start;
+
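+	/*
+	 * The chip is attached via a multiplexed address/data bus:
+	 * memory resource 0 maps the data port, resource 1 the address
+	 * (command) port.
+	 */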
+	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	addr_reg = devm_ioremap_resource(&pdev->dev, addr);
+	if (IS_ERR(addr_reg))
+		return PTR_ERR(addr_reg);
+
+	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data_reg = devm_ioremap_resource(&pdev->dev, data);
+	if (IS_ERR(data_reg))
+		return PTR_ERR(data_reg);
+
+	/* allocate and initialize hcd */
+	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = data->start;
+	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	isp1362_hcd->data_reg = data_reg;
+	isp1362_hcd->addr_reg = addr_reg;
+
+	isp1362_hcd->next_statechange = jiffies;
+	spin_lock_init(&isp1362_hcd->lock);
+	INIT_LIST_HEAD(&isp1362_hcd->async);
+	INIT_LIST_HEAD(&isp1362_hcd->periodic);
+	INIT_LIST_HEAD(&isp1362_hcd->isoc);
+	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
+	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
+#if USE_PLATFORM_DELAY
+	if (!isp1362_hcd->board->delay) {
+		dev_err(hcd->self.controller, "No platform delay function given\n");
+		retval = -ENODEV;
+		goto err;
+	}
+#endif
+
+	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+		irq_flags |= IRQF_TRIGGER_RISING;
+	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+		irq_flags |= IRQF_TRIGGER_FALLING;
+	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+		irq_flags |= IRQF_TRIGGER_HIGH;
+	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+		irq_flags |= IRQF_TRIGGER_LOW;
+
+	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
+	if (retval != 0)
+		goto err;
+	device_wakeup_enable(hcd->self.controller);
+
+	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
+
+	create_debug_file(isp1362_hcd);
+
+	return 0;
+
+ err:
+	usb_put_hcd(hcd);
+
+	return retval;
+}
+
+#ifdef	CONFIG_PM
+static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+	int retval = 0;
+
+	DBG(0, "%s: Suspending device\n", __func__);
+
+	if (state.event == PM_EVENT_FREEZE) {
+		DBG(0, "%s: Suspending root hub\n", __func__);
+		retval = isp1362_bus_suspend(hcd);
+	} else {
+		DBG(0, "%s: Suspending RH ports\n", __func__);
+		spin_lock_irqsave(&isp1362_hcd->lock, flags);
+		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+	}
+	if (retval == 0)
+		pdev->dev.power.power_state = state;
+	return retval;
+}
+
+static int isp1362_resume(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+	unsigned long flags;
+
+	DBG(0, "%s: Resuming\n", __func__);
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		DBG(0, "%s: Resume RH ports\n", __func__);
+		spin_lock_irqsave(&isp1362_hcd->lock, flags);
+		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
+		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+		return 0;
+	}
+
+	pdev->dev.power.power_state = PMSG_ON;
+
+	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
+}
+#else
+#define	isp1362_suspend	NULL
+#define	isp1362_resume	NULL
+#endif
+
+static struct platform_driver isp1362_driver = {
+	.probe = isp1362_probe,
+	.remove = isp1362_remove,
+
+	.suspend = isp1362_suspend,
+	.resume = isp1362_resume,
+	.driver = {
+		.name = hcd_name,
+	},
+};
+
+module_platform_driver(isp1362_driver);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
new file mode 100644
index 0000000..6502408
--- /dev/null
+++ b/drivers/usb/host/isp1362.h
@@ -0,0 +1,969 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * COPYRIGHT (C) by L. Wassmann <LW@KARO-electronics.de>
+ */
+
+/* ------------------------------------------------------------------------- */
+
+#define MAX_ROOT_PORTS		2
+
+#define USE_32BIT		0
+
+/* These options are mutually exclusive */
+#define USE_PLATFORM_DELAY	0
+#define USE_NDELAY		0
+
+#define DUMMY_DELAY_ACCESS do {} while (0)
+
+/* ------------------------------------------------------------------------- */
+
+#define USB_RESET_WIDTH			50
+#define MAX_XFER_SIZE			1023
+
+/* Buffer sizes */
+#define ISP1362_BUF_SIZE		4096
+#define ISP1362_ISTL_BUFSIZE		512
+#define ISP1362_INTL_BLKSIZE		64
+#define ISP1362_INTL_BUFFERS		16
+#define ISP1362_ATL_BLKSIZE		64
+
+#define ISP1362_REG_WRITE_OFFSET	0x80
+
+#define REG_WIDTH_16			0x000
+#define REG_WIDTH_32			0x100
+#define REG_WIDTH_MASK			0x100
+#define REG_NO_MASK			0x0ff
+
+#ifdef ISP1362_DEBUG
+typedef const unsigned int isp1362_reg_t;
+
+#define REG_ACCESS_R			0x200
+#define REG_ACCESS_W			0x400
+#define REG_ACCESS_RW			0x600
+#define REG_ACCESS_MASK			0x600
+
+#define ISP1362_REG_NO(r)		((r) & REG_NO_MASK)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
+
+#define REG_ACCESS_TEST(r)   BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W))
+#define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w))
+#else
+typedef const unsigned char isp1362_reg_t;
+#define ISP1362_REG_NO(r)		(r)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = addr
+
+#define REG_ACCESS_TEST(r)		do {} while (0)
+#define REG_WIDTH_TEST(r, w)		do {} while (0)
+#endif
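+
+/*
+ * Illustration: in debug builds, ISP1362_REG(HCCONTROL, 0x01,
+ * REG_WIDTH_32, REG_ACCESS_RW) defines ISP1362_REG_HCCONTROL as
+ * 0x01 | 0x100 | 0x600 = 0x701, letting the access macros verify both
+ * the register width and the access direction at runtime; non-debug
+ * builds keep only the plain register number.
+ */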
+
+/* OHCI compatible registers */
+/*
+ * Note: Some of the ISP1362 'OHCI' registers implement only
+ * a subset of the bits defined in the OHCI spec.
+ *
+ * Bitmasks for the individual bits of these registers are defined in "ohci.h"
+ */
+ISP1362_REG(HCREVISION,	0x00,	REG_WIDTH_32,	REG_ACCESS_R);
+ISP1362_REG(HCCONTROL,	0x01,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCCMDSTAT,	0x02,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCINTSTAT,	0x03,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCINTENB,	0x04,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCINTDIS,	0x05,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCFMINTVL,	0x0d,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCFMREM,	0x0e,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCFMNUM,	0x0f,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCLSTHRESH,	0x11,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCA,	0x12,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCB,	0x13,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCRHSTATUS,	0x14,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT1,	0x15,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT2,	0x16,	REG_WIDTH_32,	REG_ACCESS_RW);
+
+/* Philips ISP1362 specific registers */
+ISP1362_REG(HCHWCFG,	0x20,	REG_WIDTH_16,	REG_ACCESS_RW);
+#define HCHWCFG_DISABLE_SUSPEND	(1 << 15)
+#define HCHWCFG_GLOBAL_PWRDOWN	(1 << 14)
+#define HCHWCFG_PULLDOWN_DS2	(1 << 13)
+#define HCHWCFG_PULLDOWN_DS1	(1 << 12)
+#define HCHWCFG_CLKNOTSTOP	(1 << 11)
+#define HCHWCFG_ANALOG_OC	(1 << 10)
+#define HCHWCFG_ONEINT		(1 << 9)
+#define HCHWCFG_DACK_MODE	(1 << 8)
+#define HCHWCFG_ONEDMA		(1 << 7)
+#define HCHWCFG_DACK_POL	(1 << 6)
+#define HCHWCFG_DREQ_POL	(1 << 5)
+#define HCHWCFG_DBWIDTH_MASK	(0x03 << 3)
+#define HCHWCFG_DBWIDTH(n)	(((n) << 3) & HCHWCFG_DBWIDTH_MASK)
+#define HCHWCFG_INT_POL		(1 << 2)
+#define HCHWCFG_INT_TRIGGER	(1 << 1)
+#define HCHWCFG_INT_ENABLE	(1 << 0)
+
+ISP1362_REG(HCDMACFG,	0x21,	REG_WIDTH_16,	REG_ACCESS_RW);
+#define HCDMACFG_CTR_ENABLE	(1 << 7)
+#define HCDMACFG_BURST_LEN_MASK	(0x03 << 5)
+#define HCDMACFG_BURST_LEN(n)	(((n) << 5) & HCDMACFG_BURST_LEN_MASK)
+#define HCDMACFG_BURST_LEN_1	HCDMACFG_BURST_LEN(0)
+#define HCDMACFG_BURST_LEN_4	HCDMACFG_BURST_LEN(1)
+#define HCDMACFG_BURST_LEN_8	HCDMACFG_BURST_LEN(2)
+#define HCDMACFG_DMA_ENABLE	(1 << 4)
+#define HCDMACFG_BUF_TYPE_MASK	(0x07 << 1)
+#define HCDMACFG_BUF_TYPE(n)	(((n) << 1) & HCDMACFG_BUF_TYPE_MASK)
+#define HCDMACFG_BUF_ISTL0	HCDMACFG_BUF_TYPE(0)
+#define HCDMACFG_BUF_ISTL1	HCDMACFG_BUF_TYPE(1)
+#define HCDMACFG_BUF_INTL	HCDMACFG_BUF_TYPE(2)
+#define HCDMACFG_BUF_ATL	HCDMACFG_BUF_TYPE(3)
+#define HCDMACFG_BUF_DIRECT	HCDMACFG_BUF_TYPE(4)
+#define HCDMACFG_DMA_RW_SELECT	(1 << 0)
+
+ISP1362_REG(HCXFERCTR,	0x22,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+ISP1362_REG(HCuPINT,	0x24,	REG_WIDTH_16,	REG_ACCESS_RW);
+#define HCuPINT_SOF		(1 << 0)
+#define HCuPINT_ISTL0		(1 << 1)
+#define HCuPINT_ISTL1		(1 << 2)
+#define HCuPINT_EOT		(1 << 3)
+#define HCuPINT_OPR		(1 << 4)
+#define HCuPINT_SUSP		(1 << 5)
+#define HCuPINT_CLKRDY		(1 << 6)
+#define HCuPINT_INTL		(1 << 7)
+#define HCuPINT_ATL		(1 << 8)
+#define HCuPINT_OTG		(1 << 9)
+
+ISP1362_REG(HCuPINTENB,	0x25,	REG_WIDTH_16,	REG_ACCESS_RW);
+/* same bit definitions apply as for HCuPINT */
+
+ISP1362_REG(HCCHIPID,	0x27,	REG_WIDTH_16,	REG_ACCESS_R);
+#define HCCHIPID_MASK		0xff00
+#define HCCHIPID_MAGIC		0x3600
+
+ISP1362_REG(HCSCRATCH,	0x28,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+ISP1362_REG(HCSWRES,	0x29,	REG_WIDTH_16,	REG_ACCESS_W);
+#define HCSWRES_MAGIC		0x00f6
+
+ISP1362_REG(HCBUFSTAT,	0x2c,	REG_WIDTH_16,	REG_ACCESS_RW);
+#define HCBUFSTAT_ISTL0_FULL	(1 << 0)
+#define HCBUFSTAT_ISTL1_FULL	(1 << 1)
+#define HCBUFSTAT_INTL_ACTIVE	(1 << 2)
+#define HCBUFSTAT_ATL_ACTIVE	(1 << 3)
+#define HCBUFSTAT_RESET_HWPP	(1 << 4)
+#define HCBUFSTAT_ISTL0_ACTIVE	(1 << 5)
+#define HCBUFSTAT_ISTL1_ACTIVE	(1 << 6)
+#define HCBUFSTAT_ISTL0_DONE	(1 << 8)
+#define HCBUFSTAT_ISTL1_DONE	(1 << 9)
+#define HCBUFSTAT_PAIRED_PTDPP	(1 << 10)
+
+ISP1362_REG(HCDIRADDR,	0x32,	REG_WIDTH_32,	REG_ACCESS_RW);
+#define HCDIRADDR_ADDR_MASK	0x0000ffff
+#define HCDIRADDR_ADDR(n)	(((n) << 0) & HCDIRADDR_ADDR_MASK)
+#define HCDIRADDR_COUNT_MASK	0xffff0000
+#define HCDIRADDR_COUNT(n)	(((n) << 16) & HCDIRADDR_COUNT_MASK)
+ISP1362_REG(HCDIRDATA,	0x45,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+ISP1362_REG(HCISTLBUFSZ, 0x30,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCISTL0PORT, 0x40,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCISTL1PORT, 0x42,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCISTLRATE,	0x47,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+ISP1362_REG(HCINTLBUFSZ, 0x33,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCINTLPORT,	0x43,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCINTLBLKSZ, 0x53,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCINTLDONE,	0x17,	REG_WIDTH_32,	REG_ACCESS_R);
+ISP1362_REG(HCINTLSKIP,	0x18,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCINTLLAST,	0x19,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCINTLCURR,	0x1a,	REG_WIDTH_16,	REG_ACCESS_R);
+
+ISP1362_REG(HCATLBUFSZ, 0x34,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCATLPORT,	0x44,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCATLBLKSZ, 0x54,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCATLDONE,	0x1b,	REG_WIDTH_32,	REG_ACCESS_R);
+ISP1362_REG(HCATLSKIP,	0x1c,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCATLLAST,	0x1d,	REG_WIDTH_32,	REG_ACCESS_RW);
+ISP1362_REG(HCATLCURR,	0x1e,	REG_WIDTH_16,	REG_ACCESS_R);
+
+ISP1362_REG(HCATLDTC,	0x51,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(HCATLDTCTO,	0x52,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+
+ISP1362_REG(OTGCONTROL,	0x62,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(OTGSTATUS,	0x67,	REG_WIDTH_16,	REG_ACCESS_R);
+ISP1362_REG(OTGINT,	0x68,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(OTGINTENB,	0x69,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(OTGTIMER,	0x6A,	REG_WIDTH_16,	REG_ACCESS_RW);
+ISP1362_REG(OTGALTTMR,	0x6C,	REG_WIDTH_16,	REG_ACCESS_RW);
+
+/* Philips transfer descriptor, cpu-endian */
+struct ptd {
+	u16 count;
+#define	PTD_COUNT_MSK	(0x3ff << 0)
+#define	PTD_TOGGLE_MSK	(1 << 10)
+#define	PTD_ACTIVE_MSK	(1 << 11)
+#define	PTD_CC_MSK	(0xf << 12)
+	u16 mps;
+#define	PTD_MPS_MSK	(0x3ff << 0)
+#define	PTD_SPD_MSK	(1 << 10)
+#define	PTD_LAST_MSK	(1 << 11)
+#define	PTD_EP_MSK	(0xf << 12)
+	u16 len;
+#define	PTD_LEN_MSK	(0x3ff << 0)
+#define	PTD_DIR_MSK	(3 << 10)
+#define	PTD_DIR_SETUP	(0)
+#define	PTD_DIR_OUT	(1)
+#define	PTD_DIR_IN	(2)
+	u16 faddr;
+#define	PTD_FA_MSK	(0x7f << 0)
+/* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */
+#define PTD_SF_ISO_MSK	(0xff << 8)
+#define PTD_SF_INT_MSK	(0x1f << 8)
+#define PTD_PR_MSK	(0x07 << 13)
+} __attribute__ ((packed, aligned(2)));
+#define PTD_HEADER_SIZE sizeof(struct ptd)
+
+/* ------------------------------------------------------------------------- */
+/* Copied from ohci.h: */
+/*
+ * Hardware transfer status codes -- CC from PTD
+ */
+#define PTD_CC_NOERROR      0x00
+#define PTD_CC_CRC          0x01
+#define PTD_CC_BITSTUFFING  0x02
+#define PTD_CC_DATATOGGLEM  0x03
+#define PTD_CC_STALL        0x04
+#define PTD_DEVNOTRESP      0x05
+#define PTD_PIDCHECKFAIL    0x06
+#define PTD_UNEXPECTEDPID   0x07
+#define PTD_DATAOVERRUN     0x08
+#define PTD_DATAUNDERRUN    0x09
+    /* 0x0A, 0x0B reserved for hardware */
+#define PTD_BUFFEROVERRUN   0x0C
+#define PTD_BUFFERUNDERRUN  0x0D
+    /* 0x0E, 0x0F reserved for HCD */
+#define PTD_NOTACCESSED     0x0F
+
+
+/* map OHCI TD status codes (CC) to errno values */
+static const int cc_to_error[16] = {
+	/* No  Error  */               0,
+	/* CRC Error  */               -EILSEQ,
+	/* Bit Stuff  */               -EPROTO,
+	/* Data Togg  */               -EILSEQ,
+	/* Stall      */               -EPIPE,
+	/* DevNotResp */               -ETIMEDOUT,
+	/* PIDCheck   */               -EPROTO,
+	/* UnExpPID   */               -EPROTO,
+	/* DataOver   */               -EOVERFLOW,
+	/* DataUnder  */               -EREMOTEIO,
+	/* (for hw)   */               -EIO,
+	/* (for hw)   */               -EIO,
+	/* BufferOver */               -ECOMM,
+	/* BuffUnder  */               -ENOSR,
+	/* (for HCD)  */               -EALREADY,
+	/* (for HCD)  */               -EALREADY
+};
+
+
+/*
+ * HcControl (control) register masks
+ */
+#define OHCI_CTRL_HCFS	(3 << 6)	/* host controller functional state */
+#define OHCI_CTRL_RWC	(1 << 9)	/* remote wakeup connected */
+#define OHCI_CTRL_RWE	(1 << 10)	/* remote wakeup enable */
+
+/* pre-shifted values for HCFS */
+#	define OHCI_USB_RESET	(0 << 6)
+#	define OHCI_USB_RESUME	(1 << 6)
+#	define OHCI_USB_OPER	(2 << 6)
+#	define OHCI_USB_SUSPEND	(3 << 6)
+
+/*
+ * HcCommandStatus (cmdstatus) register masks
+ */
+#define OHCI_HCR	(1 << 0)	/* host controller reset */
+#define OHCI_SOC  	(3 << 16)	/* scheduling overrun count */
+
+/*
+ * masks used with interrupt registers:
+ * HcInterruptStatus (intrstatus)
+ * HcInterruptEnable (intrenable)
+ * HcInterruptDisable (intrdisable)
+ */
+#define OHCI_INTR_SO	(1 << 0)	/* scheduling overrun */
+#define OHCI_INTR_WDH	(1 << 1)	/* writeback of done_head */
+#define OHCI_INTR_SF	(1 << 2)	/* start frame */
+#define OHCI_INTR_RD	(1 << 3)	/* resume detect */
+#define OHCI_INTR_UE	(1 << 4)	/* unrecoverable error */
+#define OHCI_INTR_FNO	(1 << 5)	/* frame number overflow */
+#define OHCI_INTR_RHSC	(1 << 6)	/* root hub status change */
+#define OHCI_INTR_OC	(1 << 30)	/* ownership change */
+#define OHCI_INTR_MIE	(1 << 31)	/* master interrupt enable */
+
+/* roothub.portstatus [i] bits */
+#define RH_PS_CCS            0x00000001   	/* current connect status */
+#define RH_PS_PES            0x00000002   	/* port enable status*/
+#define RH_PS_PSS            0x00000004   	/* port suspend status */
+#define RH_PS_POCI           0x00000008   	/* port over current indicator */
+#define RH_PS_PRS            0x00000010  	/* port reset status */
+#define RH_PS_PPS            0x00000100   	/* port power status */
+#define RH_PS_LSDA           0x00000200    	/* low speed device attached */
+#define RH_PS_CSC            0x00010000 	/* connect status change */
+#define RH_PS_PESC           0x00020000   	/* port enable status change */
+#define RH_PS_PSSC           0x00040000    	/* port suspend status change */
+#define RH_PS_OCIC           0x00080000    	/* over current indicator change */
+#define RH_PS_PRSC           0x00100000   	/* port reset status change */
+
+/* roothub.status bits */
+#define RH_HS_LPS	     0x00000001		/* local power status */
+#define RH_HS_OCI	     0x00000002		/* over current indicator */
+#define RH_HS_DRWE	     0x00008000		/* device remote wakeup enable */
+#define RH_HS_LPSC	     0x00010000		/* local power status change */
+#define RH_HS_OCIC	     0x00020000		/* over current indicator change */
+#define RH_HS_CRWE	     0x80000000		/* clear remote wakeup enable */
+
+/* roothub.b masks */
+#define RH_B_DR		0x0000ffff		/* device removable flags */
+#define RH_B_PPCM	0xffff0000		/* port power control mask */
+
+/* roothub.a masks */
+#define	RH_A_NDP	(0xff << 0)		/* number of downstream ports */
+#define	RH_A_PSM	(1 << 8)		/* power switching mode */
+#define	RH_A_NPS	(1 << 9)		/* no power switching */
+#define	RH_A_DT		(1 << 10)		/* device type (mbz) */
+#define	RH_A_OCPM	(1 << 11)		/* over current protection mode */
+#define	RH_A_NOCP	(1 << 12)		/* no over current protection */
+#define	RH_A_POTPGT	(0xff << 24)		/* power on to power good time */
+
+#define	FI			0x2edf		/* 12000 bits per frame (-1) */
+#define	FSMP(fi) 		(0x7fff & ((6 * ((fi) - 210)) / 7))
+#define LSTHRESH		0x628		/* lowspeed bit threshold */
+
+/* ------------------------------------------------------------------------- */
+
+/* PTD accessor macros. */
+#define PTD_GET_COUNT(p)	(((p)->count & PTD_COUNT_MSK) >> 0)
+#define PTD_COUNT(v)		(((v) << 0) & PTD_COUNT_MSK)
+#define PTD_GET_TOGGLE(p)	(((p)->count & PTD_TOGGLE_MSK) >> 10)
+#define PTD_TOGGLE(v)		(((v) << 10) & PTD_TOGGLE_MSK)
+#define PTD_GET_ACTIVE(p)	(((p)->count & PTD_ACTIVE_MSK) >> 11)
+#define PTD_ACTIVE(v)		(((v) << 11) & PTD_ACTIVE_MSK)
+#define PTD_GET_CC(p)		(((p)->count & PTD_CC_MSK) >> 12)
+#define PTD_CC(v)		(((v) << 12) & PTD_CC_MSK)
+#define PTD_GET_MPS(p)		(((p)->mps & PTD_MPS_MSK) >> 0)
+#define PTD_MPS(v)		(((v) << 0) & PTD_MPS_MSK)
+#define PTD_GET_SPD(p)		(((p)->mps & PTD_SPD_MSK) >> 10)
+#define PTD_SPD(v)		(((v) << 10) & PTD_SPD_MSK)
+#define PTD_GET_LAST(p)		(((p)->mps & PTD_LAST_MSK) >> 11)
+#define PTD_LAST(v)		(((v) << 11) & PTD_LAST_MSK)
+#define PTD_GET_EP(p)		(((p)->mps & PTD_EP_MSK) >> 12)
+#define PTD_EP(v)		(((v) << 12) & PTD_EP_MSK)
+#define PTD_GET_LEN(p)		(((p)->len & PTD_LEN_MSK) >> 0)
+#define PTD_LEN(v)		(((v) << 0) & PTD_LEN_MSK)
+#define PTD_GET_DIR(p)		(((p)->len & PTD_DIR_MSK) >> 10)
+#define PTD_DIR(v)		(((v) << 10) & PTD_DIR_MSK)
+#define PTD_GET_FA(p)		(((p)->faddr & PTD_FA_MSK) >> 0)
+#define PTD_FA(v)		(((v) << 0) & PTD_FA_MSK)
+#define PTD_GET_SF_INT(p)	(((p)->faddr & PTD_SF_INT_MSK) >> 8)
+#define PTD_SF_INT(v)		(((v) << 8) & PTD_SF_INT_MSK)
+#define PTD_GET_SF_ISO(p)	(((p)->faddr & PTD_SF_ISO_MSK) >> 8)
+#define PTD_SF_ISO(v)		(((v) << 8) & PTD_SF_ISO_MSK)
+#define PTD_GET_PR(p)		(((p)->faddr & PTD_PR_MSK) >> 13)
+#define PTD_PR(v)		(((v) << 13) & PTD_PR_MSK)
+
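+/*
+ * Hypothetical example of the accessors above: a PTD for a 64-byte
+ * bulk IN transfer to device 3, endpoint 1 (data toggle t) could be
+ * filled in roughly as
+ *
+ *	ptd->count = PTD_CC(PTD_NOTACCESSED) | PTD_TOGGLE(t) | PTD_ACTIVE(1);
+ *	ptd->mps   = PTD_MPS(64) | PTD_LAST(1) | PTD_EP(1);
+ *	ptd->len   = PTD_LEN(64) | PTD_DIR(PTD_DIR_IN);
+ *	ptd->faddr = PTD_FA(3);
+ *
+ * The actual setup is done by the HCD's PTD preparation code.
+ */
+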
+#define	LOG2_PERIODIC_SIZE	5	/* arbitrary; this matches OHCI */
+#define	PERIODIC_SIZE		(1 << LOG2_PERIODIC_SIZE)
+
+struct isp1362_ep {
+	struct usb_host_endpoint *hep;
+	struct usb_device	*udev;
+
+	/* philips transfer descriptor */
+	struct ptd		ptd;
+
+	u8			maxpacket;
+	u8			epnum;
+	u8			nextpid;
+	u16			error_count;
+	u16			length;		/* of current packet */
+	s16			ptd_offset;	/* buffer offset in ISP1362 where
+						   PTD has been stored
+						   (for access thru HCDIRDATA) */
+	int			ptd_index;
+	int num_ptds;
+	void			*data;		/* pointer to the data buffer */
+	/* queue of active EPs (the ones transmitted to the chip) */
+	struct list_head	active;
+
+	/* periodic schedule */
+	u8			branch;
+	u16			interval;
+	u16			load;
+	u16			last_iso;
+
+	/* async schedule */
+	struct list_head	schedule;	/* list of all EPs that need processing */
+	struct list_head	remove_list;
+	int			num_req;
+};
+
+struct isp1362_ep_queue {
+	struct list_head	active;		/* list of PTDs currently processed by HC */
+	atomic_t		finishing;
+	unsigned long		buf_map;
+	unsigned long		skip_map;
+	int			free_ptd;
+	u16			buf_start;
+	u16			buf_size;
+	u16			blk_size;	/* PTD buffer block size for ATL and INTL */
+	u8			buf_count;
+	u8			buf_avail;
+	char			name[16];
+
+	/* for statistical tracking */
+	u8			stat_maxptds;	/* Max # of ptds seen simultaneously in fifo */
+	u8			ptd_count;	/* number of ptds submitted to this queue */
+};
+
+struct isp1362_hcd {
+	spinlock_t		lock;
+	void __iomem		*addr_reg;
+	void __iomem		*data_reg;
+
+	struct isp1362_platform_data *board;
+
+	struct dentry		*debug_file;
+	unsigned long		stat1, stat2, stat4, stat8, stat16;
+
+	/* HC registers */
+	u32			intenb;		/* "OHCI" interrupts */
+	u16			irqenb;		/* uP interrupts */
+
+	/* Root hub registers */
+	u32			rhdesca;
+	u32			rhdescb;
+	u32			rhstatus;
+	u32			rhport[MAX_ROOT_PORTS];
+	unsigned long		next_statechange;
+
+	/* HC control reg shadow copy */
+	u32			hc_control;
+
+	/* async schedule: control, bulk */
+	struct list_head	async;
+
+	/* periodic schedule: int */
+	u16			load[PERIODIC_SIZE];
+	struct list_head	periodic;
+	u16			fmindex;
+
+	/* periodic schedule: isochronous */
+	struct list_head	isoc;
+	unsigned int		istl_flip:1;
+	unsigned int		irq_active:1;
+
+	/* Schedules for the current frame */
+	struct isp1362_ep_queue atl_queue;
+	struct isp1362_ep_queue intl_queue;
+	struct isp1362_ep_queue istl_queue[2];
+
+	/* list of PTDs retrieved from HC */
+	struct list_head	remove_list;
+	enum {
+		ISP1362_INT_SOF,
+		ISP1362_INT_ISTL0,
+		ISP1362_INT_ISTL1,
+		ISP1362_INT_EOT,
+		ISP1362_INT_OPR,
+		ISP1362_INT_SUSP,
+		ISP1362_INT_CLKRDY,
+		ISP1362_INT_INTL,
+		ISP1362_INT_ATL,
+		ISP1362_INT_OTG,
+		NUM_ISP1362_IRQS
+	} IRQ_NAMES;
+	unsigned int		irq_stat[NUM_ISP1362_IRQS];
+	int			req_serial;
+};
+
+static inline const char *ISP1362_INT_NAME(int n)
+{
+	switch (n) {
+	case ISP1362_INT_SOF:    return "SOF";
+	case ISP1362_INT_ISTL0:  return "ISTL0";
+	case ISP1362_INT_ISTL1:  return "ISTL1";
+	case ISP1362_INT_EOT:    return "EOT";
+	case ISP1362_INT_OPR:    return "OPR";
+	case ISP1362_INT_SUSP:   return "SUSP";
+	case ISP1362_INT_CLKRDY: return "CLKRDY";
+	case ISP1362_INT_INTL:   return "INTL";
+	case ISP1362_INT_ATL:    return "ATL";
+	case ISP1362_INT_OTG:    return "OTG";
+	default:                 return "unknown";
+	}
+}
+
+static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr)
+{
+	unsigned long p = (unsigned long)ptr;
+	if (!(p & 0xf))
+		isp1362_hcd->stat16++;
+	else if (!(p & 0x7))
+		isp1362_hcd->stat8++;
+	else if (!(p & 0x3))
+		isp1362_hcd->stat4++;
+	else if (!(p & 0x1))
+		isp1362_hcd->stat2++;
+	else
+		isp1362_hcd->stat1++;
+}
+
+static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd)
+{
+	return (struct isp1362_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd)
+{
+	return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv);
+}
+
+#define frame_before(f1, f2)	((s16)((u16)(f1) - (u16)(f2)) < 0)
+
+/*
+ * ISP1362 HW Interface
+ */
+
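+/* Note: dbg_level is expected to be defined by the C file including this header */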
+#define DBG(level, fmt...) \
+	do { \
+		if (dbg_level > level) \
+			pr_debug(fmt); \
+	} while (0)
+
+#ifdef VERBOSE
+#    define VDBG(fmt...)	DBG(3, fmt)
+#else
+#    define VDBG(fmt...)	do {} while (0)
+#endif
+
+#ifdef REGISTERS
+#    define RDBG(fmt...)	DBG(1, fmt)
+#else
+#    define RDBG(fmt...)	do {} while (0)
+#endif
+
+#ifdef URB_TRACE
+#define URB_DBG(fmt...)		DBG(0, fmt)
+#else
+#define URB_DBG(fmt...)		do {} while (0)
+#endif
+
+
+#if USE_PLATFORM_DELAY
+#if USE_NDELAY
+#error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously.
+#endif
+#define	isp1362_delay(h, d)	(h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d)
+#elif USE_NDELAY
+#define	isp1362_delay(h, d)	ndelay(d)
+#else
+#define	isp1362_delay(h, d)	do {} while (0)
+#endif
+
+#define get_urb(ep) ({							\
+	BUG_ON(list_empty(&ep->hep->urb_list));				\
+	container_of(ep->hep->urb_list.next, struct urb, urb_list);	\
+})
+
+/* basic access functions for ISP1362 chip registers */
+/* NOTE: The contents of the address pointer register cannot be read back! The driver must
+ * ensure that all register accesses are performed with interrupts disabled, since the interrupt
+ * handler has no way of restoring the previous state.
+ */
+static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
+{
+	REG_ACCESS_TEST(reg);
+	DUMMY_DELAY_ACCESS;
+	writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
+	DUMMY_DELAY_ACCESS;
+	isp1362_delay(isp1362_hcd, 1);
+}
+
+static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
+{
+	DUMMY_DELAY_ACCESS;
+	writew(val, isp1362_hcd->data_reg);
+}
+
+static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
+{
+	u16 val;
+
+	DUMMY_DELAY_ACCESS;
+	val = readw(isp1362_hcd->data_reg);
+
+	return val;
+}
+
+static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
+{
+#if USE_32BIT
+	DUMMY_DELAY_ACCESS;
+	writel(val, isp1362_hcd->data_reg);
+#else
+	DUMMY_DELAY_ACCESS;
+	writew((u16)val, isp1362_hcd->data_reg);
+	DUMMY_DELAY_ACCESS;
+	writew(val >> 16, isp1362_hcd->data_reg);
+#endif
+}
+
+static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
+{
+	u32 val;
+
+#if USE_32BIT
+	DUMMY_DELAY_ACCESS;
+	val = readl(isp1362_hcd->data_reg);
+#else
+	DUMMY_DELAY_ACCESS;
+	val = (u32)readw(isp1362_hcd->data_reg);
+	DUMMY_DELAY_ACCESS;
+	val |= (u32)readw(isp1362_hcd->data_reg) << 16;
+#endif
+	return val;
+}
+
+/* use readsw/writesw to access the fifo whenever possible */
+/* assume HCDIRDATA or XFERCTR & addr_reg have been set up */
+static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+	u8 *dp = buf;
+	u16 data;
+
+	if (!len)
+		return;
+
+	RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf);
+#if USE_32BIT
+	if (len >= 4) {
+		RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2);
+		readsl(isp1362_hcd->data_reg, dp, len >> 2);
+		dp += len & ~3;
+		len &= 3;
+	}
+#endif
+	if (len >= 2) {
+		RDBG("%s: Using readsw for %d words\n", __func__, len >> 1);
+		insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+		dp += len & ~1;
+		len &= 1;
+	}
+
+	BUG_ON(len & ~1);
+	if (len > 0) {
+		data = isp1362_read_data16(isp1362_hcd);
+		RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__,
+		     (u8)data, (u32)dp);
+		*dp = (u8)data;
+	}
+}
+
+static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+	u8 *dp = buf;
+	u16 data;
+
+	if (!len)
+		return;
+
+	if ((unsigned long)dp & 0x1) {
+		/* not aligned */
+		for (; len > 1; len -= 2) {
+			data = *dp++;
+			data |= *dp++ << 8;
+			isp1362_write_data16(isp1362_hcd, data);
+		}
+		if (len)
+			isp1362_write_data16(isp1362_hcd, *dp);
+		return;
+	}
+
+	RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf);
+#if USE_32BIT
+	if (len >= 4) {
+		RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2);
+		writesl(isp1362_hcd->data_reg, dp, len >> 2);
+		dp += len & ~3;
+		len &= 3;
+	}
+#endif
+	if (len >= 2) {
+		RDBG("%s: Using writesw for %d words\n", __func__, len >> 1);
+		outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+		dp += len & ~1;
+		len &= 1;
+	}
+
+	BUG_ON(len & ~1);
+	if (len > 0) {
+		/* finally write any trailing byte; we don't need to care
+		 * about the high byte of the last word written
+		 */
+		data = (u16)*dp;
+		RDBG("%s: Sending trailing byte %02x from mem @ %08x\n", __func__,
+			data, (u32)dp);
+		isp1362_write_data16(isp1362_hcd, data);
+	}
+}
+
+#define isp1362_read_reg16(d, r)		({			\
+	u16 __v;							\
+	REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16);			\
+	isp1362_write_addr(d, ISP1362_REG_##r);				\
+	__v = isp1362_read_data16(d);					\
+	RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r,	\
+	     ISP1362_REG_NO(ISP1362_REG_##r));				\
+	__v;								\
+})
+
+#define isp1362_read_reg32(d, r)		({			\
+	u32 __v;							\
+	REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32);			\
+	isp1362_write_addr(d, ISP1362_REG_##r);				\
+	__v = isp1362_read_data32(d);					\
+	RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r,	\
+	     ISP1362_REG_NO(ISP1362_REG_##r));				\
+	__v;								\
+})
+
+#define isp1362_write_reg16(d, r, v)	{					\
+	REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16);				\
+	isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET);	\
+	isp1362_write_data16(d, (u16)(v));					\
+	RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r,	\
+	     ISP1362_REG_NO(ISP1362_REG_##r));					\
+}
+
+#define isp1362_write_reg32(d, r, v)	{					\
+	REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32);				\
+	isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET);	\
+	isp1362_write_data32(d, (u32)(v));					\
+	RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r,	\
+	     ISP1362_REG_NO(ISP1362_REG_##r));					\
+}
+
+#define isp1362_set_mask16(d, r, m) {			\
+	u16 __v;					\
+	__v = isp1362_read_reg16(d, r);			\
+	if ((__v | m) != __v)				\
+		isp1362_write_reg16(d, r, __v | m);	\
+}
+
+#define isp1362_clr_mask16(d, r, m) {			\
+	u16 __v;					\
+	__v = isp1362_read_reg16(d, r);			\
+	if ((__v & ~m) != __v)			\
+		isp1362_write_reg16(d, r, __v & ~m);	\
+}
+
+#define isp1362_set_mask32(d, r, m) {			\
+	u32 __v;					\
+	__v = isp1362_read_reg32(d, r);			\
+	if ((__v | m) != __v)				\
+		isp1362_write_reg32(d, r, __v | m);	\
+}
+
+#define isp1362_clr_mask32(d, r, m) {			\
+	u32 __v;					\
+	__v = isp1362_read_reg32(d, r);			\
+	if ((__v & ~m) != __v)			\
+		isp1362_write_reg32(d, r, __v & ~m);	\
+}
+
+#define isp1362_show_reg(d, r) {								\
+	if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32)			\
+		DBG(0, "%-12s[%02x]: %08x\n", #r,					\
+			ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r));	\
+	else									\
+		DBG(0, "%-12s[%02x]:     %04x\n", #r,					\
+			ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r));	\
+}
+
+static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
+{
+	isp1362_show_reg(isp1362_hcd, HCREVISION);
+	isp1362_show_reg(isp1362_hcd, HCCONTROL);
+	isp1362_show_reg(isp1362_hcd, HCCMDSTAT);
+	isp1362_show_reg(isp1362_hcd, HCINTSTAT);
+	isp1362_show_reg(isp1362_hcd, HCINTENB);
+	isp1362_show_reg(isp1362_hcd, HCFMINTVL);
+	isp1362_show_reg(isp1362_hcd, HCFMREM);
+	isp1362_show_reg(isp1362_hcd, HCFMNUM);
+	isp1362_show_reg(isp1362_hcd, HCLSTHRESH);
+	isp1362_show_reg(isp1362_hcd, HCRHDESCA);
+	isp1362_show_reg(isp1362_hcd, HCRHDESCB);
+	isp1362_show_reg(isp1362_hcd, HCRHSTATUS);
+	isp1362_show_reg(isp1362_hcd, HCRHPORT1);
+	isp1362_show_reg(isp1362_hcd, HCRHPORT2);
+
+	isp1362_show_reg(isp1362_hcd, HCHWCFG);
+	isp1362_show_reg(isp1362_hcd, HCDMACFG);
+	isp1362_show_reg(isp1362_hcd, HCXFERCTR);
+	isp1362_show_reg(isp1362_hcd, HCuPINT);
+
+	if (in_interrupt())
+		DBG(0, "%-12s[%02x]:     %04x\n", "HCuPINTENB",
+			 ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb);
+	else
+		isp1362_show_reg(isp1362_hcd, HCuPINTENB);
+	isp1362_show_reg(isp1362_hcd, HCCHIPID);
+	isp1362_show_reg(isp1362_hcd, HCSCRATCH);
+	isp1362_show_reg(isp1362_hcd, HCBUFSTAT);
+	isp1362_show_reg(isp1362_hcd, HCDIRADDR);
+	/* Access would advance fifo
+	 * isp1362_show_reg(isp1362_hcd, HCDIRDATA);
+	 */
+	isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ);
+	isp1362_show_reg(isp1362_hcd, HCISTLRATE);
+	isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ);
+	isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ);
+	isp1362_show_reg(isp1362_hcd, HCINTLDONE);
+	isp1362_show_reg(isp1362_hcd, HCINTLSKIP);
+	isp1362_show_reg(isp1362_hcd, HCINTLLAST);
+	isp1362_show_reg(isp1362_hcd, HCINTLCURR);
+	isp1362_show_reg(isp1362_hcd, HCATLBUFSZ);
+	isp1362_show_reg(isp1362_hcd, HCATLBLKSZ);
+	/* only valid after ATL_DONE interrupt
+	 * isp1362_show_reg(isp1362_hcd, HCATLDONE);
+	 */
+	isp1362_show_reg(isp1362_hcd, HCATLSKIP);
+	isp1362_show_reg(isp1362_hcd, HCATLLAST);
+	isp1362_show_reg(isp1362_hcd, HCATLCURR);
+	isp1362_show_reg(isp1362_hcd, HCATLDTC);
+	isp1362_show_reg(isp1362_hcd, HCATLDTCTO);
+}
+
+static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
+{
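+	/* data port accesses are 16 bits wide: round the byte count up to even */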
+	len = (len + 1) & ~1;
+
+	isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
+	isp1362_write_reg32(isp1362_hcd, HCDIRADDR,
+			    HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len));
+}
+
+static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+	isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+	DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n",
+	    __func__, len, offset, buf);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+
+	isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
+
+	isp1362_read_fifo(isp1362_hcd, buf, len);
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+}
+
+static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+	isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+	DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n",
+	    __func__, len, offset, buf);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+
+	isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
+	isp1362_write_fifo(isp1362_hcd, buf, len);
+
+	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+}
+
+static void __attribute__((unused)) dump_data(char *buf, int len)
+{
+	if (dbg_level > 0) {
+		int k;
+		int lf = 0;
+
+		for (k = 0; k < len; ++k) {
+			if (!lf)
+				DBG(0, "%04x:", k);
+			printk(" %02x", ((u8 *) buf)[k]);
+			lf = 1;
+			if (!k)
+				continue;
+			if (k % 16 == 15) {
+				printk("\n");
+				lf = 0;
+				continue;
+			}
+			if (k % 8 == 7)
+				printk(" ");
+			if (k % 4 == 3)
+				printk(" ");
+		}
+		if (lf)
+			printk("\n");
+	}
+}
+
+#if defined(PTD_TRACE)
+
+static void dump_ptd(struct ptd *ptd)
+{
+	DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n",
+	    container_of(ptd, struct isp1362_ep, ptd),
+	    PTD_GET_CC(ptd), PTD_GET_EP(ptd), PTD_GET_DIR(ptd),
+	    PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
+	    PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd),
+	    PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd));
+	DBG(0, "  %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr);
+}
+
+static void dump_ptd_out_data(struct ptd *ptd, u8 *buf)
+{
+	if (dbg_level > 0) {
+		if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
+			DBG(0, "--out->\n");
+			dump_data(buf, PTD_GET_LEN(ptd));
+		}
+	}
+}
+
+static void dump_ptd_in_data(struct ptd *ptd, u8 *buf)
+{
+	if (dbg_level > 0) {
+		if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
+			DBG(0, "<--in--\n");
+			dump_data(buf, PTD_GET_COUNT(ptd));
+		}
+		DBG(0, "-----\n");
+	}
+}
+
+static void dump_ptd_queue(struct isp1362_ep_queue *epq)
+{
+	struct isp1362_ep *ep;
+	int dbg = dbg_level;
+
+	dbg_level = 1;
+	list_for_each_entry(ep, &epq->active, active) {
+		dump_ptd(&ep->ptd);
+		dump_data(ep->data, ep->length);
+	}
+	dbg_level = dbg;
+}
+#else
+#define dump_ptd(ptd)			do {} while (0)
+#define dump_ptd_in_data(ptd, buf)	do {} while (0)
+#define dump_ptd_out_data(ptd, buf)	do {} while (0)
+#define dump_ptd_data(ptd, buf)		do {} while (0)
+#define dump_ptd_queue(epq)		do {} while (0)
+#endif
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
new file mode 100644
index 0000000..afa321a
--- /dev/null
+++ b/drivers/usb/host/max3421-hcd.c
@@ -0,0 +1,2019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX3421 Host Controller driver for USB.
+ *
+ * Author: David Mosberger-Tang <davidm@egauge.net>
+ *
+ * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
+ *
+ * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
+ * controller on a SPI bus.
+ *
+ * Based on:
+ *	o MAX3421E datasheet
+ *		http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
+ *	o MAX3421E Programming Guide
+ *		http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
+ *	o gadget/dummy_hcd.c
+ *		For USB HCD implementation.
+ *	o Arduino MAX3421 driver
+ *	     https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
+ *
+ * This file is licensed under the GPL v2.
+ *
+ * Important note on worst-case (full-speed) packet size constraints
+ * (See USB 2.0 Section 5.6.3 and following):
+ *
+ *	- control:	  64 bytes
+ *	- isochronous:	1023 bytes
+ *	- interrupt:	  64 bytes
+ *	- bulk:		  64 bytes
+ *
+ * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
+ * multi-FIFO writes/reads for a single USB packet *except* for isochronous
+ * transfers.  We don't support isochronous transfers at this time, so we
+ * just assume that a USB packet always fits into a single FIFO buffer.
+ *
+ * NOTE: The June 2006 version of "MAX3421E Programming Guide"
+ * (AN3785) has conflicting info for the RCVDAVIRQ bit:
+ *
+ *	The description of RCVDAVIRQ says "The CPU *must* clear
+ *	this IRQ bit (by writing a 1 to it) before reading the
+ *	RCVFIFO data."
+ *
+ * However, the earlier section on "Programming BULK-IN
+ * Transfers" says that:
+ *
+ *	After the CPU retrieves the data, it clears the
+ *	RCVDAVIRQ bit.
+ *
+ * The December 2006 version has been corrected and it consistently
+ * states the second behavior is the correct one.
+ *
+ * Synchronous SPI transactions sleep, so we must not issue them while
+ * holding a spin-lock (or while interrupts are masked).  To guarantee
+ * this, all SPI transactions are issued from a single thread
+ * (max3421_spi_thread).
+ */
+
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/max3421-hcd.h>
+
+#define DRIVER_DESC	"MAX3421 USB Host-Controller Driver"
+#define DRIVER_VERSION	"1.0"
+
+/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
+#define USB_MAX_FRAME_NUMBER	0x7ff
+#define USB_MAX_RETRIES		3 /* # of retries before error is reported */
+
+/*
+ * Max. # of times we're willing to retransmit a request immediately in
+ * response to a NAK.  Afterwards, we fall back on trying once per frame.
+ */
+#define NAK_MAX_FAST_RETRANSMITS	2
+
+#define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */
+
+/* Port-change mask: */
+#define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |	\
+			  USB_PORT_STAT_C_ENABLE |	\
+			  USB_PORT_STAT_C_SUSPEND |	\
+			  USB_PORT_STAT_C_OVERCURRENT | \
+			  USB_PORT_STAT_C_RESET) << 16)
+
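+/*
+ * port_status keeps wPortStatus in its low 16 bits and wPortChange in
+ * its upper 16 bits; PORT_C_MASK selects the change bits that
+ * max3421_hub_status_data() reports to the hub core.
+ */
+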
+#define MAX3421_GPOUT_COUNT	8
+
+enum max3421_rh_state {
+	MAX3421_RH_RESET,
+	MAX3421_RH_SUSPENDED,
+	MAX3421_RH_RUNNING
+};
+
+enum pkt_state {
+	PKT_STATE_SETUP,	/* waiting to send setup packet to ctrl pipe */
+	PKT_STATE_TRANSFER,	/* waiting to xfer transfer_buffer */
+	PKT_STATE_TERMINATE	/* waiting to terminate control transfer */
+};
+
+enum scheduling_pass {
+	SCHED_PASS_PERIODIC,
+	SCHED_PASS_NON_PERIODIC,
+	SCHED_PASS_DONE
+};
+
+/* Bit numbers for max3421_hcd->todo: */
+enum {
+	ENABLE_IRQ = 0,
+	RESET_HCD,
+	RESET_PORT,
+	CHECK_UNLINK,
+	IOPIN_UPDATE
+};
+
+struct max3421_dma_buf {
+	u8 data[2];
+};
+
+struct max3421_hcd {
+	spinlock_t lock;
+
+	struct task_struct *spi_thread;
+
+	struct max3421_hcd *next;
+
+	enum max3421_rh_state rh_state;
+	/* lower 16 bits contain port status, upper 16 bits the change mask: */
+	u32 port_status;
+
+	unsigned active:1;
+
+	struct list_head ep_list;	/* list of EP's with work */
+
+	/*
+	 * The following are owned by spi_thread (they may be accessed
+	 * by the SPI thread without acquiring the HCD lock):
+	 */
+	u8 rev;				/* chip revision */
+	u16 frame_number;
+	/*
+	 * kmalloc'd buffers guaranteed to be in separate (DMA)
+	 * cache-lines:
+	 */
+	struct max3421_dma_buf *tx;
+	struct max3421_dma_buf *rx;
+	/*
+	 * URB we're currently processing.  Must not be reset to NULL
+	 * unless MAX3421E chip is idle:
+	 */
+	struct urb *curr_urb;
+	enum scheduling_pass sched_pass;
+	struct usb_device *loaded_dev;	/* dev that's loaded into the chip */
+	int loaded_epnum;		/* epnum whose toggles are loaded */
+	int urb_done;			/* > 0 -> no errors, < 0: errno */
+	size_t curr_len;
+	u8 hien;
+	u8 mode;
+	u8 iopins[2];
+	unsigned long todo;
+#ifdef DEBUG
+	unsigned long err_stat[16];
+#endif
+};
+
+struct max3421_ep {
+	struct usb_host_endpoint *ep;
+	struct list_head ep_list;
+	u32 naks;
+	u16 last_active;		/* frame # this ep was last active */
+	enum pkt_state pkt_state;
+	u8 retries;
+	u8 retransmit;			/* packet needs retransmission */
+};
+
+static struct max3421_hcd *max3421_hcd_list;
+
+#define MAX3421_FIFO_SIZE	64
+
+#define MAX3421_SPI_DIR_RD	0	/* read register from MAX3421 */
+#define MAX3421_SPI_DIR_WR	1	/* write register to MAX3421 */
+
+/* SPI commands: */
+#define MAX3421_SPI_DIR_SHIFT	1
+#define MAX3421_SPI_REG_SHIFT	3
+
+#define MAX3421_REG_RCVFIFO	1
+#define MAX3421_REG_SNDFIFO	2
+#define MAX3421_REG_SUDFIFO	4
+#define MAX3421_REG_RCVBC	6
+#define MAX3421_REG_SNDBC	7
+#define MAX3421_REG_USBIRQ	13
+#define MAX3421_REG_USBIEN	14
+#define MAX3421_REG_USBCTL	15
+#define MAX3421_REG_CPUCTL	16
+#define MAX3421_REG_PINCTL	17
+#define MAX3421_REG_REVISION	18
+#define MAX3421_REG_IOPINS1	20
+#define MAX3421_REG_IOPINS2	21
+#define MAX3421_REG_GPINIRQ	22
+#define MAX3421_REG_GPINIEN	23
+#define MAX3421_REG_GPINPOL	24
+#define MAX3421_REG_HIRQ	25
+#define MAX3421_REG_HIEN	26
+#define MAX3421_REG_MODE	27
+#define MAX3421_REG_PERADDR	28
+#define MAX3421_REG_HCTL	29
+#define MAX3421_REG_HXFR	30
+#define MAX3421_REG_HRSL	31
+
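+/*
+ * A command byte encodes the register number in bits 7:3 and the
+ * transfer direction in bit 1.  For example, reading HRSL (register
+ * 31) sends the command byte 0xf8 (31 << 3), while writing HCTL
+ * (register 29) sends 0xea ((29 << 3) | (1 << 1)).
+ */
+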
+enum {
+	MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
+	MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
+	MAX3421_USBIRQ_VBUSIRQ_BIT
+};
+
+enum {
+	MAX3421_CPUCTL_IE_BIT = 0,
+	MAX3421_CPUCTL_PULSEWID0_BIT = 6,
+	MAX3421_CPUCTL_PULSEWID1_BIT
+};
+
+enum {
+	MAX3421_USBCTL_PWRDOWN_BIT = 4,
+	MAX3421_USBCTL_CHIPRES_BIT
+};
+
+enum {
+	MAX3421_PINCTL_GPXA_BIT	= 0,
+	MAX3421_PINCTL_GPXB_BIT,
+	MAX3421_PINCTL_POSINT_BIT,
+	MAX3421_PINCTL_INTLEVEL_BIT,
+	MAX3421_PINCTL_FDUPSPI_BIT,
+	MAX3421_PINCTL_EP0INAK_BIT,
+	MAX3421_PINCTL_EP2INAK_BIT,
+	MAX3421_PINCTL_EP3INAK_BIT,
+};
+
+enum {
+	MAX3421_HI_BUSEVENT_BIT = 0,	/* bus-reset/-resume */
+	MAX3421_HI_RWU_BIT,		/* remote wakeup */
+	MAX3421_HI_RCVDAV_BIT,		/* receive FIFO data available */
+	MAX3421_HI_SNDBAV_BIT,		/* send buffer available */
+	MAX3421_HI_SUSDN_BIT,		/* suspend operation done */
+	MAX3421_HI_CONDET_BIT,		/* peripheral connect/disconnect */
+	MAX3421_HI_FRAME_BIT,		/* frame generator */
+	MAX3421_HI_HXFRDN_BIT,		/* host transfer done */
+};
+
+enum {
+	MAX3421_HCTL_BUSRST_BIT = 0,
+	MAX3421_HCTL_FRMRST_BIT,
+	MAX3421_HCTL_SAMPLEBUS_BIT,
+	MAX3421_HCTL_SIGRSM_BIT,
+	MAX3421_HCTL_RCVTOG0_BIT,
+	MAX3421_HCTL_RCVTOG1_BIT,
+	MAX3421_HCTL_SNDTOG0_BIT,
+	MAX3421_HCTL_SNDTOG1_BIT
+};
+
+enum {
+	MAX3421_MODE_HOST_BIT = 0,
+	MAX3421_MODE_LOWSPEED_BIT,
+	MAX3421_MODE_HUBPRE_BIT,
+	MAX3421_MODE_SOFKAENAB_BIT,
+	MAX3421_MODE_SEPIRQ_BIT,
+	MAX3421_MODE_DELAYISO_BIT,
+	MAX3421_MODE_DMPULLDN_BIT,
+	MAX3421_MODE_DPPULLDN_BIT
+};
+
+enum {
+	MAX3421_HRSL_OK = 0,
+	MAX3421_HRSL_BUSY,
+	MAX3421_HRSL_BADREQ,
+	MAX3421_HRSL_UNDEF,
+	MAX3421_HRSL_NAK,
+	MAX3421_HRSL_STALL,
+	MAX3421_HRSL_TOGERR,
+	MAX3421_HRSL_WRONGPID,
+	MAX3421_HRSL_BADBC,
+	MAX3421_HRSL_PIDERR,
+	MAX3421_HRSL_PKTERR,
+	MAX3421_HRSL_CRCERR,
+	MAX3421_HRSL_KERR,
+	MAX3421_HRSL_JERR,
+	MAX3421_HRSL_TIMEOUT,
+	MAX3421_HRSL_BABBLE,
+	MAX3421_HRSL_RESULT_MASK = 0xf,
+	MAX3421_HRSL_RCVTOGRD_BIT = 4,
+	MAX3421_HRSL_SNDTOGRD_BIT,
+	MAX3421_HRSL_KSTATUS_BIT,
+	MAX3421_HRSL_JSTATUS_BIT
+};
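+
+/*
+ * HRSL thus packs the 4-bit transaction result code into its low
+ * nibble; the high nibble carries the sampled data-toggle values
+ * (RCVTOGRD/SNDTOGRD) and the current bus state (KSTATUS/JSTATUS).
+ */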
+
+/* Return same error-codes as ohci.h:cc_to_error: */
+static const int hrsl_to_error[] = {
+	[MAX3421_HRSL_OK] =		0,
+	[MAX3421_HRSL_BUSY] =		-EINVAL,
+	[MAX3421_HRSL_BADREQ] =		-EINVAL,
+	[MAX3421_HRSL_UNDEF] =		-EINVAL,
+	[MAX3421_HRSL_NAK] =		-EAGAIN,
+	[MAX3421_HRSL_STALL] =		-EPIPE,
+	[MAX3421_HRSL_TOGERR] =		-EILSEQ,
+	[MAX3421_HRSL_WRONGPID] =	-EPROTO,
+	[MAX3421_HRSL_BADBC] =		-EREMOTEIO,
+	[MAX3421_HRSL_PIDERR] =		-EPROTO,
+	[MAX3421_HRSL_PKTERR] =		-EPROTO,
+	[MAX3421_HRSL_CRCERR] =		-EILSEQ,
+	[MAX3421_HRSL_KERR] =		-EIO,
+	[MAX3421_HRSL_JERR] =		-EIO,
+	[MAX3421_HRSL_TIMEOUT] =	-ETIME,
+	[MAX3421_HRSL_BABBLE] =		-EOVERFLOW
+};
+
+/*
+ * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
+ * reasonable overview of how control transfers use the IN/OUT
+ * tokens.
+ */
+#define MAX3421_HXFR_BULK_IN(ep)	(0x00 | (ep))	/* bulk or interrupt */
+#define MAX3421_HXFR_SETUP		 0x10
+#define MAX3421_HXFR_BULK_OUT(ep)	(0x20 | (ep))	/* bulk or interrupt */
+#define MAX3421_HXFR_ISO_IN(ep)		(0x40 | (ep))
+#define MAX3421_HXFR_ISO_OUT(ep)	(0x60 | (ep))
+#define MAX3421_HXFR_HS_IN		 0x80		/* handshake in */
+#define MAX3421_HXFR_HS_OUT		 0xa0		/* handshake out */
+
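+/*
+ * Writing one of these values to the HXFR register launches the
+ * corresponding transaction; completion is signalled by the HXFRDN
+ * interrupt and the outcome is read back from HRSL.  For example,
+ * writing MAX3421_HXFR_BULK_IN(1) (i.e., 0x01) starts an IN
+ * transaction to endpoint 1.
+ */
+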
+#define field(val, bit)	((val) << (bit))
+
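+/*
+ * Distance from RIGHT to LEFT modulo the 11-bit frame counter.  The
+ * unsigned arithmetic makes the result wrap-around safe; e.g.,
+ * frame_diff(3, 0x7ff) == 4.
+ */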
+static inline s16
+frame_diff(u16 left, u16 right)
+{
+	return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
+}
+
+static inline struct max3421_hcd *
+hcd_to_max3421(struct usb_hcd *hcd)
+{
+	return (struct max3421_hcd *) hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *
+max3421_to_hcd(struct max3421_hcd *max3421_hcd)
+{
+	return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
+}
+
+static u8
+spi_rd8(struct usb_hcd *hcd, unsigned int reg)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct spi_transfer transfer;
+	struct spi_message msg;
+
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	max3421_hcd->tx->data[0] =
+		(field(reg, MAX3421_SPI_REG_SHIFT) |
+		 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+
+	transfer.tx_buf = max3421_hcd->tx->data;
+	transfer.rx_buf = max3421_hcd->rx->data;
+	transfer.len = 2;
+
+	spi_message_add_tail(&transfer, &msg);
+	spi_sync(spi, &msg);
+
+	return max3421_hcd->rx->data[1];
+}
+
+static void
+spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct spi_transfer transfer;
+	struct spi_message msg;
+
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	max3421_hcd->tx->data[0] =
+		(field(reg, MAX3421_SPI_REG_SHIFT) |
+		 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+	max3421_hcd->tx->data[1] = val;
+
+	transfer.tx_buf = max3421_hcd->tx->data;
+	transfer.len = 2;
+
+	spi_message_add_tail(&transfer, &msg);
+	spi_sync(spi, &msg);
+}
+
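+/*
+ * Read LEN bytes from register REG using a single SPI message: a
+ * one-byte command transfer followed by an LEN-byte data transfer.
+ * This relies on the MAX3421E's burst register access: while
+ * chip-select stays asserted, successive bytes keep accessing the
+ * addressed FIFO.
+ */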
+static void
+spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
+
+	memset(transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	max3421_hcd->tx->data[0] =
+		(field(reg, MAX3421_SPI_REG_SHIFT) |
+		 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
+	transfer[0].tx_buf = max3421_hcd->tx->data;
+	transfer[0].len = 1;
+
+	transfer[1].rx_buf = buf;
+	transfer[1].len = len;
+
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	spi_sync(spi, &msg);
+}
+
+static void
+spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
+
+	memset(transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	max3421_hcd->tx->data[0] =
+		(field(reg, MAX3421_SPI_REG_SHIFT) |
+		 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
+
+	transfer[0].tx_buf = max3421_hcd->tx->data;
+	transfer[0].len = 1;
+
+	transfer[1].tx_buf = buf;
+	transfer[1].len = len;
+
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	spi_sync(spi, &msg);
+}
+
+/*
+ * Figure out the correct setting for the LOWSPEED and HUBPRE mode
+ * bits.  The HUBPRE bit needs to be set when MAX3421E operates at
+ * full speed, but it's talking to a low-speed device (i.e., through a
+ * hub).  Setting that bit ensures that every low-speed packet is
+ * preceded by a full-speed PRE PID.  Possible configurations:
+ *
+ * Hub speed:	Device speed:	=>	LOWSPEED bit:	HUBPRE bit:
+ *	FULL	FULL		=>	0		0
+ *	FULL	LOW		=>	1		1
+ *	LOW	LOW		=>	1		0
+ *	LOW	FULL		=>	1		0
+ */
+static void
+max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
+
+	mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
+	mode_hubpre   = BIT(MAX3421_MODE_HUBPRE_BIT);
+	if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
+		mode |=  mode_lowspeed;
+		mode &= ~mode_hubpre;
+	} else if (dev->speed == USB_SPEED_LOW) {
+		mode |= mode_lowspeed | mode_hubpre;
+	} else {
+		mode &= ~(mode_lowspeed | mode_hubpre);
+	}
+	if (mode != max3421_hcd->mode) {
+		max3421_hcd->mode = mode;
+		spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+	}
+}
+
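+/*
+ * The MAX3421E tracks data-toggle state in hardware for only one
+ * device/endpoint at a time, so when switching endpoints we save the
+ * old endpoint's toggles (read back from HRSL) and load the new
+ * endpoint's toggles (written through HCTL).
+ */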
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
+		    int force_toggles)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	int old_epnum, same_ep, rcvtog, sndtog;
+	struct usb_device *old_dev;
+	u8 hctl;
+
+	old_dev = max3421_hcd->loaded_dev;
+	old_epnum = max3421_hcd->loaded_epnum;
+
+	same_ep = (dev == old_dev && epnum == old_epnum);
+	if (same_ep && !force_toggles)
+		return;
+
+	if (old_dev && !same_ep) {
+		/* save the old end-points toggles: */
+		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+		rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+		sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+		/* no locking: HCD (i.e., we) own toggles, don't we? */
+		usb_settoggle(old_dev, old_epnum, 0, rcvtog);
+		usb_settoggle(old_dev, old_epnum, 1, sndtog);
+	}
+	/* setup new endpoint's toggle bits: */
+	rcvtog = usb_gettoggle(dev, epnum, 0);
+	sndtog = usb_gettoggle(dev, epnum, 1);
+	hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
+		BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+
+	max3421_hcd->loaded_epnum = epnum;
+	spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
+
+	/*
+	 * Note: devnum for one and the same device can change during
+	 * address-assignment so it's best to just always load the
+	 * address whenever the end-point changed/was forced.
+	 */
+	max3421_hcd->loaded_dev = dev;
+	spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
+}
+
+static int
+max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
+{
+	spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
+	return MAX3421_HXFR_SETUP;
+}
+
+static int
+max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	int epnum = usb_pipeendpoint(urb->pipe);
+
+	max3421_hcd->curr_len = 0;
+	max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
+	return MAX3421_HXFR_BULK_IN(epnum);
+}
+
+static int
+max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	int epnum = usb_pipeendpoint(urb->pipe);
+	u32 max_packet;
+	void *src;
+
+	src = urb->transfer_buffer + urb->actual_length;
+
+	if (fast_retransmit) {
+		if (max3421_hcd->rev == 0x12) {
+			/* work around rev 0x12 bug: */
+			spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+			spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
+			spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+		}
+		return MAX3421_HXFR_BULK_OUT(epnum);
+	}
+
+	max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+	if (max_packet > MAX3421_FIFO_SIZE) {
+		/*
+		 * We do not support isochronous transfers at this
+		 * time.
+		 */
+		dev_err(&spi->dev,
+			"%s: packet-size of %u too big (limit is %u bytes)",
+			__func__, max_packet, MAX3421_FIFO_SIZE);
+		max3421_hcd->urb_done = -EMSGSIZE;
+		return -EMSGSIZE;
+	}
+	max3421_hcd->curr_len = min((urb->transfer_buffer_length -
+				     urb->actual_length), max_packet);
+
+	spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
+	spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
+	return MAX3421_HXFR_BULK_OUT(epnum);
+}
+
+/*
+ * Issue the next host-transfer command.
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct urb *urb = max3421_hcd->curr_urb;
+	struct max3421_ep *max3421_ep;
+	int cmd = -EINVAL;
+
+	if (!urb)
+		return;	/* nothing to do */
+
+	max3421_ep = urb->ep->hcpriv;
+
+	switch (max3421_ep->pkt_state) {
+	case PKT_STATE_SETUP:
+		cmd = max3421_ctrl_setup(hcd, urb);
+		break;
+
+	case PKT_STATE_TRANSFER:
+		if (usb_urb_dir_in(urb))
+			cmd = max3421_transfer_in(hcd, urb);
+		else
+			cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
+		break;
+
+	case PKT_STATE_TERMINATE:
+		/*
+		 * IN transfers are terminated with HS_OUT token,
+		 * OUT transfers with HS_IN:
+		 */
+		if (usb_urb_dir_in(urb))
+			cmd = MAX3421_HXFR_HS_OUT;
+		else
+			cmd = MAX3421_HXFR_HS_IN;
+		break;
+	}
+
+	if (cmd < 0)
+		return;
+
+	/* issue the command and wait for host-xfer-done interrupt: */
+
+	spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
+	max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
+}
+
+/*
+ * Find the next URB to process and start its execution.
+ *
+ * At this time, we do not anticipate ever connecting a USB hub to the
+ * MAX3421 chip, so at most one USB device can be connected and we can use
+ * a simplistic scheduler: at the start of a frame, schedule all
+ * periodic transfers.  Once that is done, use the remainder of the
+ * frame to process non-periodic (bulk & control) transfers.
+ *
+ * Preconditions:
+ * o Caller must NOT hold HCD spinlock.
+ * o max3421_hcd->curr_urb MUST BE NULL.
+ * o MAX3421E chip must be idle.
+ */
+static int
+max3421_select_and_start_urb(struct usb_hcd *hcd)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct urb *urb, *curr_urb = NULL;
+	struct max3421_ep *max3421_ep;
+	int epnum, force_toggles = 0;
+	struct usb_host_endpoint *ep;
+	struct list_head *pos;
+	unsigned long flags;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	for (;
+	     max3421_hcd->sched_pass < SCHED_PASS_DONE;
+	     ++max3421_hcd->sched_pass)
+		list_for_each(pos, &max3421_hcd->ep_list) {
+			urb = NULL;
+			max3421_ep = container_of(pos, struct max3421_ep,
+						  ep_list);
+			ep = max3421_ep->ep;
+
+			switch (usb_endpoint_type(&ep->desc)) {
+			case USB_ENDPOINT_XFER_ISOC:
+			case USB_ENDPOINT_XFER_INT:
+				if (max3421_hcd->sched_pass !=
+				    SCHED_PASS_PERIODIC)
+					continue;
+				break;
+
+			case USB_ENDPOINT_XFER_CONTROL:
+			case USB_ENDPOINT_XFER_BULK:
+				if (max3421_hcd->sched_pass !=
+				    SCHED_PASS_NON_PERIODIC)
+					continue;
+				break;
+			}
+
+			if (list_empty(&ep->urb_list))
+				continue;	/* nothing to do */
+			urb = list_first_entry(&ep->urb_list, struct urb,
+					       urb_list);
+			if (urb->unlinked) {
+				dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+					__func__, urb, urb->unlinked);
+				max3421_hcd->curr_urb = urb;
+				max3421_hcd->urb_done = 1;
+				spin_unlock_irqrestore(&max3421_hcd->lock,
+						       flags);
+				return 1;
+			}
+
+			switch (usb_endpoint_type(&ep->desc)) {
+			case USB_ENDPOINT_XFER_CONTROL:
+				/*
+				 * Allow one control transaction per
+				 * frame per endpoint:
+				 */
+				if (frame_diff(max3421_ep->last_active,
+					       max3421_hcd->frame_number) == 0)
+					continue;
+				break;
+
+			case USB_ENDPOINT_XFER_BULK:
+				if (max3421_ep->retransmit
+				    && (frame_diff(max3421_ep->last_active,
+						   max3421_hcd->frame_number)
+					== 0))
+					/*
+					 * We already tried this EP
+					 * during this frame and got a
+					 * NAK or error; wait for next frame
+					 */
+					continue;
+				break;
+
+			case USB_ENDPOINT_XFER_ISOC:
+			case USB_ENDPOINT_XFER_INT:
+				if (frame_diff(max3421_hcd->frame_number,
+					       max3421_ep->last_active)
+				    < urb->interval)
+					/*
+					 * This end-point was serviced
+					 * less than urb->interval
+					 * frames ago; it isn't due
+					 * yet
+					 */
+					continue;
+				break;
+			}
+
+			/* move current ep to tail: */
+			list_move_tail(pos, &max3421_hcd->ep_list);
+			curr_urb = urb;
+			goto done;
+		}
+done:
+	if (!curr_urb) {
+		spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+		return 0;
+	}
+
+	urb = max3421_hcd->curr_urb = curr_urb;
+	epnum = usb_endpoint_num(&urb->ep->desc);
+	if (max3421_ep->retransmit)
+		/* restart (part of) a USB transaction: */
+		max3421_ep->retransmit = 0;
+	else {
+		/* start USB transaction: */
+		if (usb_endpoint_xfer_control(&ep->desc)) {
+			/*
+			 * See USB 2.0 spec section 8.6.1
+			 * Initialization via SETUP Token:
+			 */
+			usb_settoggle(urb->dev, epnum, 0, 1);
+			usb_settoggle(urb->dev, epnum, 1, 1);
+			max3421_ep->pkt_state = PKT_STATE_SETUP;
+			force_toggles = 1;
+		} else
+			max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+	}
+
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+	max3421_ep->last_active = max3421_hcd->frame_number;
+	max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+	max3421_set_speed(hcd, urb->dev);
+	max3421_next_transfer(hcd, 0);
+	return 1;
+}
+
+/*
+ * Check all endpoints for URBs that got unlinked.
+ *
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_check_unlink(struct usb_hcd *hcd)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct max3421_ep *max3421_ep;
+	struct usb_host_endpoint *ep;
+	struct urb *urb, *next;
+	unsigned long flags;
+	int retval = 0;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+	list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
+		ep = max3421_ep->ep;
+		list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
+			if (urb->unlinked) {
+				retval = 1;
+				dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
+					__func__, urb, urb->unlinked);
+				usb_hcd_unlink_urb_from_ep(hcd, urb);
+				spin_unlock_irqrestore(&max3421_hcd->lock,
+						       flags);
+				usb_hcd_giveback_urb(hcd, urb, 0);
+				spin_lock_irqsave(&max3421_hcd->lock, flags);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+	return retval;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_slow_retransmit(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct urb *urb = max3421_hcd->curr_urb;
+	struct max3421_ep *max3421_ep;
+
+	max3421_ep = urb->ep->hcpriv;
+	max3421_ep->retransmit = 1;
+	max3421_hcd->curr_urb = NULL;
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_recv_data_available(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct urb *urb = max3421_hcd->curr_urb;
+	size_t remaining, transfer_size;
+	u8 rcvbc;
+
+	rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
+
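+	/* defensively clamp the chip-reported byte count to the FIFO size: */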
+	if (rcvbc > MAX3421_FIFO_SIZE)
+		rcvbc = MAX3421_FIFO_SIZE;
+	if (urb->actual_length >= urb->transfer_buffer_length)
+		remaining = 0;
+	else
+		remaining = urb->transfer_buffer_length - urb->actual_length;
+	transfer_size = rcvbc;
+	if (transfer_size > remaining)
+		transfer_size = remaining;
+	if (transfer_size > 0) {
+		void *dst = urb->transfer_buffer + urb->actual_length;
+
+		spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
+		urb->actual_length += transfer_size;
+		max3421_hcd->curr_len = transfer_size;
+	}
+
+	/* ack the RCVDAV irq now that the FIFO has been read: */
+	spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
+}
+
+static void
+max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+	struct urb *urb = max3421_hcd->curr_urb;
+	struct max3421_ep *max3421_ep = urb->ep->hcpriv;
+	int switch_sndfifo;
+
+	/*
+	 * If an OUT command results in any response other than OK
+	 * (i.e., error or NAK), we have to perform a dummy-write to
+	 * SNDBC so the FIFO gets switched back to us.  Otherwise, we
+	 * get out of sync with the SNDFIFO double buffer.
+	 */
+	switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
+			  usb_urb_dir_out(urb));
+
+	switch (result_code) {
+	case MAX3421_HRSL_OK:
+		return;			/* this shouldn't happen */
+
+	case MAX3421_HRSL_WRONGPID:	/* received wrong PID */
+	case MAX3421_HRSL_BUSY:		/* SIE busy */
+	case MAX3421_HRSL_BADREQ:	/* bad val in HXFR */
+	case MAX3421_HRSL_UNDEF:	/* reserved */
+	case MAX3421_HRSL_KERR:		/* K-state instead of response */
+	case MAX3421_HRSL_JERR:		/* J-state instead of response */
+		/*
+		 * packet experienced an error that we cannot recover
+		 * from; report error
+		 */
+		max3421_hcd->urb_done = hrsl_to_error[result_code];
+		dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+			__func__, hrsl);
+		break;
+
+	case MAX3421_HRSL_TOGERR:
+		if (usb_urb_dir_in(urb))
+			; /* don't do anything (device will switch toggle) */
+		else {
+			/* flip the send toggle bit: */
+			int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+
+			sndtog ^= 1;
+			spi_wr8(hcd, MAX3421_REG_HCTL,
+				BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
+		}
+		/* FALL THROUGH */
+	case MAX3421_HRSL_BADBC:	/* bad byte count */
+	case MAX3421_HRSL_PIDERR:	/* received PID is corrupted */
+	case MAX3421_HRSL_PKTERR:	/* packet error (stuff, EOP) */
+	case MAX3421_HRSL_CRCERR:	/* CRC error */
+	case MAX3421_HRSL_BABBLE:	/* device talked too long */
+	case MAX3421_HRSL_TIMEOUT:
+		if (max3421_ep->retries++ < USB_MAX_RETRIES)
+			/* retry the packet again in the next frame */
+			max3421_slow_retransmit(hcd);
+		else {
+			/* Based on ohci.h cc_to_err[]: */
+			max3421_hcd->urb_done = hrsl_to_error[result_code];
+			dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+				__func__, hrsl);
+		}
+		break;
+
+	case MAX3421_HRSL_STALL:
+		dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
+			__func__, hrsl);
+		max3421_hcd->urb_done = hrsl_to_error[result_code];
+		break;
+
+	case MAX3421_HRSL_NAK:
+		/*
+		 * Device wasn't ready for data or has no data
+		 * available: retry the packet.  The first
+		 * NAK_MAX_FAST_RETRANSMITS retries are sent right
+		 * away; after that, we retry once per frame.
+		 */
+		if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
+			max3421_next_transfer(hcd, 1);
+			switch_sndfifo = 0;
+		} else
+			max3421_slow_retransmit(hcd);
+		break;
+	}
+	if (switch_sndfifo)
+		spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	u32 max_packet;
+
+	if (urb->actual_length >= urb->transfer_buffer_length)
+		return 1;	/* read is complete, so we're done */
+
+	/*
+	 * USB 2.0 Section 5.3.2 Pipes: packets must be full size
+	 * except for last one.
+	 */
+	max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
+	if (max_packet > MAX3421_FIFO_SIZE) {
+		/*
+		 * We do not support isochronous transfers at this
+		 * time...
+		 */
+		dev_err(&spi->dev,
+			"%s: packet-size of %u too big (limit is %u bytes)",
+			__func__, max_packet, MAX3421_FIFO_SIZE);
+		return -EINVAL;
+	}
+
+	if (max3421_hcd->curr_len < max_packet) {
+		if (urb->transfer_flags & URB_SHORT_NOT_OK) {
+			/*
+			 * remaining > 0 and received an
+			 * unexpected partial packet ->
+			 * error
+			 */
+			return -EREMOTEIO;
+		} else
+			/* short read, but it's OK */
+			return 1;
+	}
+	return 0;	/* not done */
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static int
+max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+	urb->actual_length += max3421_hcd->curr_len;
+	if (urb->actual_length < urb->transfer_buffer_length)
+		return 0;
+	if (urb->transfer_flags & URB_ZERO_PACKET) {
+		/*
+		 * Some hardware needs a zero-size packet at the end
+		 * of a bulk-out transfer if the last transfer was a
+		 * full-sized packet (i.e., such hardware uses a packet
+		 * shorter than max_packet as the indicator that the
+		 * end of the transfer has been reached).
+		 */
+		u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
+
+		if (max3421_hcd->curr_len == max_packet)
+			return 0;
+	}
+	return 1;
+}
+
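+/*
+ * Control URBs walk through the pkt_state machine: PKT_STATE_SETUP
+ * (send the 8-byte setup packet), PKT_STATE_TRANSFER (the optional
+ * data stage), then PKT_STATE_TERMINATE (a zero-length handshake in
+ * the direction opposite to the data stage).  Bulk and interrupt
+ * URBs start directly in PKT_STATE_TRANSFER.
+ */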
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_host_transfer_done(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct urb *urb = max3421_hcd->curr_urb;
+	struct max3421_ep *max3421_ep;
+	u8 result_code, hrsl;
+	int urb_done = 0;
+
+	max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+			       BIT(MAX3421_HI_RCVDAV_BIT));
+
+	hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+	result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
+
+#ifdef DEBUG
+	++max3421_hcd->err_stat[result_code];
+#endif
+
+	max3421_ep = urb->ep->hcpriv;
+
+	if (unlikely(result_code != MAX3421_HRSL_OK)) {
+		max3421_handle_error(hcd, hrsl);
+		return;
+	}
+
+	max3421_ep->naks = 0;
+	max3421_ep->retries = 0;
+	switch (max3421_ep->pkt_state) {
+
+	case PKT_STATE_SETUP:
+		if (urb->transfer_buffer_length > 0)
+			max3421_ep->pkt_state = PKT_STATE_TRANSFER;
+		else
+			max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+		break;
+
+	case PKT_STATE_TRANSFER:
+		if (usb_urb_dir_in(urb))
+			urb_done = max3421_transfer_in_done(hcd, urb);
+		else
+			urb_done = max3421_transfer_out_done(hcd, urb);
+		if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
+			/*
+			 * We aren't really done - we still need to
+			 * terminate the control transfer:
+			 */
+			max3421_hcd->urb_done = urb_done = 0;
+			max3421_ep->pkt_state = PKT_STATE_TERMINATE;
+		}
+		break;
+
+	case PKT_STATE_TERMINATE:
+		urb_done = 1;
+		break;
+	}
+
+	if (urb_done)
+		max3421_hcd->urb_done = urb_done;
+	else
+		max3421_next_transfer(hcd, 0);
+}
+
+/*
+ * Caller must NOT hold HCD spinlock.
+ */
+static void
+max3421_detect_conn(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	unsigned int jk, have_conn = 0;
+	u32 old_port_status, chg;
+	unsigned long flags;
+	u8 hrsl, mode;
+
+	hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+
+	jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
+	      (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
+
+	mode = max3421_hcd->mode;
+
+	switch (jk) {
+	case 0x0: /* SE0: disconnect */
+		/*
+		 * Turn off SOFKAENAB bit to avoid getting interrupt
+		 * every milli-second:
+		 */
+		mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
+		break;
+
+	case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
+	case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
+		if (jk == 0x2)
+			/* need to switch to the other speed: */
+			mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
+		/* turn on SOFKAENAB bit: */
+		mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
+		have_conn = 1;
+		break;
+
+	case 0x3: /* illegal */
+		break;
+	}
+
+	max3421_hcd->mode = mode;
+	spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+	old_port_status = max3421_hcd->port_status;
+	if (have_conn)
+		max3421_hcd->port_status |=  USB_PORT_STAT_CONNECTION;
+	else
+		max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
+	if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
+		max3421_hcd->port_status |=  USB_PORT_STAT_LOW_SPEED;
+	else
+		max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
+	chg = (old_port_status ^ max3421_hcd->port_status);
+	max3421_hcd->port_status |= chg << 16;
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static irqreturn_t
+max3421_irq_handler(int irq, void *dev_id)
+{
+	struct usb_hcd *hcd = dev_id;
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+	if (max3421_hcd->spi_thread &&
+	    max3421_hcd->spi_thread->state != TASK_RUNNING)
+		wake_up_process(max3421_hcd->spi_thread);
+	if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
+		disable_irq_nosync(spi->irq);
+	return IRQ_HANDLED;
+}
+
+#ifdef DEBUG
+
+static void
+dump_eps(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct max3421_ep *max3421_ep;
+	struct usb_host_endpoint *ep;
+	char ubuf[512], *dp, *end;
+	unsigned long flags;
+	struct urb *urb;
+	int epnum, ret;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+	list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
+		ep = max3421_ep->ep;
+
+		dp = ubuf;
+		end = dp + sizeof(ubuf);
+		*dp = '\0';
+		list_for_each_entry(urb, &ep->urb_list, urb_list) {
+			ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+				       usb_pipetype(urb->pipe),
+				       usb_urb_dir_in(urb) ? "IN" : "OUT",
+				       urb->actual_length,
+				       urb->transfer_buffer_length);
+			if (ret < 0 || ret >= end - dp)
+				break;	/* error or buffer full */
+			dp += ret;
+		}
+
+		epnum = usb_endpoint_num(&ep->desc);
+		pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
+			epnum, max3421_ep->pkt_state, max3421_ep->last_active,
+			max3421_ep->retries, max3421_ep->naks,
+			max3421_ep->retransmit, ubuf);
+	}
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+#endif /* DEBUG */
+
+/* Return zero if no work was performed, 1 otherwise.  */
+static int
+max3421_handle_irqs(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	u32 chg, old_port_status;
+	unsigned long flags;
+	u8 hirq;
+
+	/*
+	 * Read and ack pending interrupts (CPU must never
+	 * clear SNDBAV directly and RCVDAV must be cleared by
+	 * max3421_recv_data_available()!):
+	 */
+	hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
+	hirq &= max3421_hcd->hien;
+	if (!hirq)
+		return 0;
+
+	spi_wr8(hcd, MAX3421_REG_HIRQ,
+		hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
+			 BIT(MAX3421_HI_RCVDAV_BIT)));
+
+	if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
+		max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
+					     & USB_MAX_FRAME_NUMBER);
+		max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+	}
+
+	if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
+		max3421_recv_data_available(hcd);
+
+	if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
+		max3421_host_transfer_done(hcd);
+
+	if (hirq & BIT(MAX3421_HI_CONDET_BIT))
+		max3421_detect_conn(hcd);
+
+	/*
+	 * Now process interrupts that may affect HCD state
+	 * other than the end-points:
+	 */
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	old_port_status = max3421_hcd->port_status;
+	if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
+		if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
+			/* BUSEVENT due to completion of Bus Reset */
+			max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
+			max3421_hcd->port_status |=  USB_PORT_STAT_ENABLE;
+		} else {
+			/* BUSEVENT due to completion of Bus Resume */
+			pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
+		}
+	}
+	if (hirq & BIT(MAX3421_HI_RWU_BIT))
+		pr_info("%s: RWU\n", __func__);
+	if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
+		pr_info("%s: SUSDN\n", __func__);
+
+	chg = (old_port_status ^ max3421_hcd->port_status);
+	max3421_hcd->port_status |= chg << 16;
+
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+#ifdef DEBUG
+	{
+		static unsigned long last_time;
+		char sbuf[16 * 16], *dp, *end;
+		int i;
+
+		if (time_after(jiffies, last_time + 5*HZ)) {
+			dp = sbuf;
+			end = sbuf + sizeof(sbuf);
+			*dp = '\0';
+			for (i = 0; i < 16; ++i) {
+				int ret = snprintf(dp, end - dp, " %lu",
+						   max3421_hcd->err_stat[i]);
+				if (ret < 0 || ret >= end - dp)
+					break;	/* error or buffer full */
+				dp += ret;
+			}
+			pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
+			memset(max3421_hcd->err_stat, 0,
+			       sizeof(max3421_hcd->err_stat));
+			last_time = jiffies;
+
+			dump_eps(hcd);
+		}
+	}
+#endif
+	return 1;
+}
+
+static int
+max3421_reset_hcd(struct usb_hcd *hcd)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	int timeout;
+
+	/* perform a chip reset and wait for the OSCOKIRQ signal to appear: */
+	spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
+	/* clear reset: */
+	spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
+	timeout = 1000;
+	while (1) {
+		if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
+		    & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
+			break;
+		if (--timeout < 0) {
+			dev_err(&spi->dev,
+				"timed out waiting for oscillator OK signal");
+			return 1;
+		}
+		cond_resched();
+	}
+
+	/*
+	 * Turn on host mode, automatic generation of SOF packets, and
+	 * enable pull-down resistors on DM/DP:
+	 */
+	max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
+			     BIT(MAX3421_MODE_SOFKAENAB_BIT) |
+			     BIT(MAX3421_MODE_DMPULLDN_BIT) |
+			     BIT(MAX3421_MODE_DPPULLDN_BIT));
+	spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
+
+	/* reset frame-number: */
+	max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
+	spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
+
+	/* sample the state of the D+ and D- lines */
+	spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
+	max3421_detect_conn(hcd);
+
+	/* enable frame, connection-detected, and bus-event interrupts: */
+	max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
+			     BIT(MAX3421_HI_CONDET_BIT) |
+			     BIT(MAX3421_HI_BUSEVENT_BIT));
+	spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+	/* enable interrupts: */
+	spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
+	return 1;
+}
+
+static int
+max3421_urb_done(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	unsigned long flags;
+	struct urb *urb;
+	int status;
+
+	status = max3421_hcd->urb_done;
+	max3421_hcd->urb_done = 0;
+	if (status > 0)
+		status = 0;
+	urb = max3421_hcd->curr_urb;
+	if (urb) {
+		max3421_hcd->curr_urb = NULL;
+		spin_lock_irqsave(&max3421_hcd->lock, flags);
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+		/* must be called without the HCD spinlock: */
+		usb_hcd_giveback_urb(hcd, urb, status);
+	}
+	return 1;
+}
+
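+/*
+ * Main worker: everything that touches the SPI bus runs in this
+ * thread.  Each iteration gives priority to finishing the current
+ * URB, then to servicing chip interrupts, and only then to starting
+ * a new URB; the thread sleeps whenever an iteration performed no
+ * work.
+ */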
+static int
+max3421_spi_thread(void *dev_id)
+{
+	struct usb_hcd *hcd = dev_id;
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	int i, i_worked = 1;
+
+	/* set full-duplex SPI mode, low-active interrupt pin: */
+	spi_wr8(hcd, MAX3421_REG_PINCTL,
+		(BIT(MAX3421_PINCTL_FDUPSPI_BIT) |	/* full-duplex */
+		 BIT(MAX3421_PINCTL_INTLEVEL_BIT)));	/* low-active irq */
+
+	while (!kthread_should_stop()) {
+		max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
+		if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
+			break;
+		dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
+		msleep(10000);
+	}
+	dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
+		 max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
+		 spi->irq);
+
+	while (!kthread_should_stop()) {
+		if (!i_worked) {
+			/*
+			 * We'll be waiting for wakeups from the hard
+			 * interrupt handler, so now is a good time to
+			 * sync our hien with the chip:
+			 */
+			spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
+
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo))
+				enable_irq(spi->irq);
+			schedule();
+			__set_current_state(TASK_RUNNING);
+		}
+
+		i_worked = 0;
+
+		if (max3421_hcd->urb_done)
+			i_worked |= max3421_urb_done(hcd);
+		else if (max3421_handle_irqs(hcd))
+			i_worked = 1;
+		else if (!max3421_hcd->curr_urb)
+			i_worked |= max3421_select_and_start_urb(hcd);
+
+		if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo))
+			/* reset the HCD: */
+			i_worked |= max3421_reset_hcd(hcd);
+		if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) {
+			/* perform a USB bus reset: */
+			spi_wr8(hcd, MAX3421_REG_HCTL,
+				BIT(MAX3421_HCTL_BUSRST_BIT));
+			i_worked = 1;
+		}
+		if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo))
+			i_worked |= max3421_check_unlink(hcd);
+		if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) {
+			/*
+			 * IOPINS1/IOPINS2 do not auto-increment, so we can't
+			 * use spi_wr_buf().
+			 */
+			for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
+
+				val = ((val & 0xf0) |
+				       (max3421_hcd->iopins[i] & 0x0f));
+				spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
+				max3421_hcd->iopins[i] = val;
+			}
+			i_worked = 1;
+		}
+	}
+	set_current_state(TASK_RUNNING);
+	dev_info(&spi->dev, "SPI thread exiting");
+	return 0;
+}
+
+static int
+max3421_reset_port(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+	max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
+				      USB_PORT_STAT_LOW_SPEED);
+	max3421_hcd->port_status |= USB_PORT_STAT_RESET;
+	set_bit(RESET_PORT, &max3421_hcd->todo);
+	wake_up_process(max3421_hcd->spi_thread);
+	return 0;
+}
+
+static int
+max3421_reset(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+	hcd->self.sg_tablesize = 0;
+	hcd->speed = HCD_USB2;
+	hcd->self.root_hub->speed = USB_SPEED_FULL;
+	set_bit(RESET_HCD, &max3421_hcd->todo);
+	wake_up_process(max3421_hcd->spi_thread);
+	return 0;
+}
+
+static int
+max3421_start(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+
+	spin_lock_init(&max3421_hcd->lock);
+	max3421_hcd->rh_state = MAX3421_RH_RUNNING;
+
+	INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+	hcd->power_budget = POWER_BUDGET;
+	hcd->state = HC_STATE_RUNNING;
+	hcd->uses_new_polling = 1;
+	return 0;
+}
+
+static void
+max3421_stop(struct usb_hcd *hcd)
+{
+}
+
+static int
+max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct max3421_ep *max3421_ep;
+	unsigned long flags;
+	int retval;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_INTERRUPT:
+	case PIPE_ISOCHRONOUS:
+		if (urb->interval < 0) {
+			dev_err(&spi->dev,
+			  "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
+				__func__, urb->interval);
+			return -EINVAL;
+		}
+	default:
+		break;
+	}
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	max3421_ep = urb->ep->hcpriv;
+	if (!max3421_ep) {
+		/* gets freed in max3421_endpoint_disable: */
+		max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC);
+		if (!max3421_ep) {
+			retval = -ENOMEM;
+			goto out;
+		}
+		max3421_ep->ep = urb->ep;
+		max3421_ep->last_active = max3421_hcd->frame_number;
+		urb->ep->hcpriv = max3421_ep;
+
+		list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
+	}
+
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (retval == 0) {
+		/* Since we added to the queue, restart scheduling: */
+		max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
+		wake_up_process(max3421_hcd->spi_thread);
+	}
+
+out:
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+	return retval;
+}
+
+static int
+max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	/*
+	 * This will set urb->unlinked which in turn causes the entry
+	 * to be dropped at the next opportunity.
+	 */
+	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (retval == 0) {
+		set_bit(CHECK_UNLINK, &max3421_hcd->todo);
+		wake_up_process(max3421_hcd->spi_thread);
+	}
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+	return retval;
+}
+
+static void
+max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	if (ep->hcpriv) {
+		struct max3421_ep *max3421_ep = ep->hcpriv;
+
+		/* remove myself from the ep_list: */
+		if (!list_empty(&max3421_ep->ep_list))
+			list_del(&max3421_ep->ep_list);
+		kfree(max3421_ep);
+		ep->hcpriv = NULL;
+	}
+
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+}
+
+static int
+max3421_get_frame_number(struct usb_hcd *hcd)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	return max3421_hcd->frame_number;
+}
+
+/*
+ * Should return a non-zero value when any port is undergoing a resume
+ * transition while the root hub is suspended.
+ */
+static int
+max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	unsigned long flags;
+	int retval = 0;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		goto done;
+
+	*buf = 0;
+	if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
+		*buf = (1 << 1); /* status of port 1 has changed */
+		dev_dbg(hcd->self.controller,
+			"port status 0x%08x has changes\n",
+			max3421_hcd->port_status);
+		retval = 1;
+		if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
+			usb_hcd_resume_root_hub(hcd);
+	}
+done:
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+	return retval;
+}
+
+static inline void
+hub_descriptor(struct usb_hub_descriptor *desc)
+{
+	memset(desc, 0, sizeof(*desc));
+	/*
+	 * See Table 11-13: Hub Descriptor in USB 2.0 spec.
+	 */
+	desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */
+	desc->bDescLength = 9;
+	desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
+						HUB_CHAR_COMMON_OCPM);
+	desc->bNbrPorts = 1;
+}
+
+/*
+ * Set the MAX3421E general-purpose output with number PIN_NUMBER to
+ * VALUE (0 or 1).  PIN_NUMBER must be in the range 1..8; for any
+ * other value, this function acts as a no-op.
+ */
+static void
+max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
+{
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	u8 mask, idx;
+
+	--pin_number;
+	if (pin_number >= MAX3421_GPOUT_COUNT)
+		return;
+
+	mask = 1u << (pin_number % 4);
+	idx = pin_number / 4;
+
+	if (value)
+		max3421_hcd->iopins[idx] |=  mask;
+	else
+		max3421_hcd->iopins[idx] &= ~mask;
+	set_bit(IOPIN_UPDATE, &max3421_hcd->todo);
+	wake_up_process(max3421_hcd->spi_thread);
+}
+
+static int
+max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+		    char *buf, u16 length)
+{
+	struct spi_device *spi = to_spi_device(hcd->self.controller);
+	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
+	struct max3421_hcd_platform_data *pdata;
+	unsigned long flags;
+	int retval = 0;
+
+	pdata = spi->dev.platform_data;
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	switch (type_req) {
+	case ClearHubFeature:
+		break;
+	case ClearPortFeature:
+		switch (value) {
+		case USB_PORT_FEAT_SUSPEND:
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->self.controller, "power-off\n");
+			max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+						!pdata->vbus_active_level);
+			/* FALLS THROUGH */
+		default:
+			max3421_hcd->port_status &= ~(1 << value);
+		}
+		break;
+	case GetHubDescriptor:
+		hub_descriptor((struct usb_hub_descriptor *) buf);
+		break;
+
+	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+	case GetPortErrorCount:
+	case SetHubDepth:
+		/* USB3 only */
+		goto error;
+
+	case GetHubStatus:
+		*(__le32 *) buf = cpu_to_le32(0);
+		break;
+
+	case GetPortStatus:
+		if (index != 1) {
+			retval = -EPIPE;
+			goto error;
+		}
+		((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
+		((__le16 *) buf)[1] =
+			cpu_to_le16(max3421_hcd->port_status >> 16);
+		break;
+
+	case SetHubFeature:
+		retval = -EPIPE;
+		break;
+
+	case SetPortFeature:
+		switch (value) {
+		case USB_PORT_FEAT_LINK_STATE:
+		case USB_PORT_FEAT_U1_TIMEOUT:
+		case USB_PORT_FEAT_U2_TIMEOUT:
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			goto error;
+		case USB_PORT_FEAT_SUSPEND:
+			if (max3421_hcd->active)
+				max3421_hcd->port_status |=
+					USB_PORT_STAT_SUSPEND;
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->self.controller, "power-on\n");
+			max3421_hcd->port_status |= USB_PORT_STAT_POWER;
+			max3421_gpout_set_value(hcd, pdata->vbus_gpout,
+						pdata->vbus_active_level);
+			break;
+		case USB_PORT_FEAT_RESET:
+			max3421_reset_port(hcd);
+			/* FALLS THROUGH */
+		default:
+			if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
+			    != 0)
+				max3421_hcd->port_status |= (1 << value);
+		}
+		break;
+
+	default:
+		dev_dbg(hcd->self.controller,
+			"hub control req%04x v%04x i%04x l%d\n",
+			type_req, value, index, length);
+error:		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+	return retval;
+}
+
+static int
+max3421_bus_suspend(struct usb_hcd *hcd)
+{
+	return -1;
+}
+
+static int
+max3421_bus_resume(struct usb_hcd *hcd)
+{
+	return -1;
+}
+
+/*
+ * The SPI driver already takes care of DMA-mapping/unmapping, so no
+ * reason to do it twice.
+ */
+static int
+max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	return 0;
+}
+
+static void
+max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+}
+
+static const struct hc_driver max3421_hcd_desc = {
+	.description =		"max3421",
+	.product_desc =		DRIVER_DESC,
+	.hcd_priv_size =	sizeof(struct max3421_hcd),
+	.flags =		HCD_USB11,
+	.reset =		max3421_reset,
+	.start =		max3421_start,
+	.stop =			max3421_stop,
+	.get_frame_number =	max3421_get_frame_number,
+	.urb_enqueue =		max3421_urb_enqueue,
+	.urb_dequeue =		max3421_urb_dequeue,
+	.map_urb_for_dma =	max3421_map_urb_for_dma,
+	.unmap_urb_for_dma =	max3421_unmap_urb_for_dma,
+	.endpoint_disable =	max3421_endpoint_disable,
+	.hub_status_data =	max3421_hub_status_data,
+	.hub_control =		max3421_hub_control,
+	.bus_suspend =		max3421_bus_suspend,
+	.bus_resume =		max3421_bus_resume,
+};
+
+static int
+max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
+{
+	int retval;
+	uint32_t value[2];
+
+	if (!pdata)
+		return -EINVAL;
+
+	retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
+	if (retval) {
+		dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
+		return retval;
+	}
+	dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);
+
+	pdata->vbus_gpout = value[0];
+	pdata->vbus_active_level = value[1];
+
+	return 0;
+}
+
+static int
+max3421_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct max3421_hcd *max3421_hcd;
+	struct usb_hcd *hcd = NULL;
+	struct max3421_hcd_platform_data *pdata = NULL;
+	int retval = -ENOMEM;
+
+	if (spi_setup(spi) < 0) {
+		dev_err(&spi->dev, "Unable to setup SPI bus");
+		return -EFAULT;
+	}
+
+	if (!spi->irq) {
+		dev_err(dev, "Failed to get SPI IRQ");
+		return -EFAULT;
+	}
+
+	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+		pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			retval = -ENOMEM;
+			goto error;
+		}
+		retval = max3421_of_vbus_en_pin(dev, pdata);
+		if (retval)
+			goto error;
+
+		spi->dev.platform_data = pdata;
+	}
+
+	pdata = spi->dev.platform_data;
+	if (!pdata) {
+		dev_err(&spi->dev, "driver configuration data is not provided\n");
+		retval = -EFAULT;
+		goto error;
+	}
+	if (pdata->vbus_active_level > 1) {
+		dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
+		retval = -EINVAL;
+		goto error;
+	}
+	if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
+		dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
+		retval = -EINVAL;
+		goto error;
+	}
+
+	hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
+			     dev_name(&spi->dev));
+	if (!hcd) {
+		dev_err(&spi->dev, "failed to create HCD structure\n");
+		goto error;
+	}
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	max3421_hcd = hcd_to_max3421(hcd);
+	max3421_hcd->next = max3421_hcd_list;
+	max3421_hcd_list = max3421_hcd;
+	INIT_LIST_HEAD(&max3421_hcd->ep_list);
+
+	max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
+	if (!max3421_hcd->tx)
+		goto error;
+	max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
+	if (!max3421_hcd->rx)
+		goto error;
+
+	max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
+					      "max3421_spi_thread");
+	if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) {
+		dev_err(&spi->dev,
+			"failed to create SPI thread (out of memory)\n");
+		goto error;
+	}
+
+	retval = usb_add_hcd(hcd, 0, 0);
+	if (retval) {
+		dev_err(&spi->dev, "failed to add HCD\n");
+		goto error;
+	}
+
+	retval = request_irq(spi->irq, max3421_irq_handler,
+			     IRQF_TRIGGER_LOW, "max3421", hcd);
+	if (retval < 0) {
+		dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
+		goto error;
+	}
+	return 0;
+
+error:
+	if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
+		devm_kfree(&spi->dev, pdata);
+		spi->dev.platform_data = NULL;
+	}
+
+	if (hcd) {
+		kfree(max3421_hcd->tx);
+		kfree(max3421_hcd->rx);
+		if (max3421_hcd->spi_thread)
+			kthread_stop(max3421_hcd->spi_thread);
+		usb_put_hcd(hcd);
+	}
+	return retval;
+}
+
+static int
+max3421_remove(struct spi_device *spi)
+{
+	struct max3421_hcd *max3421_hcd = NULL, **prev;
+	struct usb_hcd *hcd = NULL;
+	unsigned long flags;
+
+	for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
+		max3421_hcd = *prev;
+		hcd = max3421_to_hcd(max3421_hcd);
+		if (hcd->self.controller == &spi->dev)
+			break;
+	}
+	if (!max3421_hcd) {
+		dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
+			spi);
+		return -ENODEV;
+	}
+
+	usb_remove_hcd(hcd);
+
+	spin_lock_irqsave(&max3421_hcd->lock, flags);
+
+	kthread_stop(max3421_hcd->spi_thread);
+	*prev = max3421_hcd->next;
+
+	spin_unlock_irqrestore(&max3421_hcd->lock, flags);
+
+	free_irq(spi->irq, hcd);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+static const struct of_device_id max3421_of_match_table[] = {
+	{ .compatible = "maxim,max3421", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, max3421_of_match_table);
+
+static struct spi_driver max3421_driver = {
+	.probe		= max3421_probe,
+	.remove		= max3421_remove,
+	.driver		= {
+		.name	= "max3421-hcd",
+		.of_match_table = of_match_ptr(max3421_of_match_table),
+	},
+};
+
+module_spi_driver(max3421_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
new file mode 100644
index 0000000..ec6739e
--- /dev/null
+++ b/drivers/usb/host/ohci-at91.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ *  Copyright (C) 2004 SAN People (Pty) Ltd.
+ *  Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * AT91 Bus Glue
+ *
+ * Based on fragments of 2.4 driver by Rick Bronson.
+ * Based on ohci-omap.c
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <soc/at91/atmel-sfr.h>
+
+#include "ohci.h"
+
+#define valid_port(index)	((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
+#define at91_for_each_port(index)	\
+		for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
+
+/* interface, function and usb clocks; sometimes also an AHB clock */
+#define hcd_to_ohci_at91_priv(h) \
+	((struct ohci_at91_priv *)hcd_to_ohci(h)->priv)
+
+#define AT91_MAX_USBH_PORTS	3
+struct at91_usbh_data {
+	struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS];
+	struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
+	u8 ports;				/* number of ports on root hub */
+	u8 overcurrent_supported;
+	u8 overcurrent_status[AT91_MAX_USBH_PORTS];
+	u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
+};
+
+struct ohci_at91_priv {
+	struct clk *iclk;
+	struct clk *fclk;
+	struct clk *hclk;
+	bool clocked;
+	bool wakeup;		/* Saved wake-up state for resume */
+	struct regmap *sfr_regmap;
+};
+
+#define DRIVER_DESC "OHCI Atmel driver"
+
+static const char hcd_name[] = "ohci-atmel";
+
+static struct hc_driver __read_mostly ohci_at91_hc_driver;
+
+static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = {
+	.extra_priv_size = sizeof(struct ohci_at91_priv),
+};
+
+/*-------------------------------------------------------------------------*/
+
+static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
+{
+	if (ohci_at91->clocked)
+		return;
+
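+	/* the USB function clock (uhpck) must run at 48 MHz: */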
+	clk_set_rate(ohci_at91->fclk, 48000000);
+	clk_prepare_enable(ohci_at91->hclk);
+	clk_prepare_enable(ohci_at91->iclk);
+	clk_prepare_enable(ohci_at91->fclk);
+	ohci_at91->clocked = true;
+}
+
+static void at91_stop_clock(struct ohci_at91_priv *ohci_at91)
+{
+	if (!ohci_at91->clocked)
+		return;
+
+	clk_disable_unprepare(ohci_at91->fclk);
+	clk_disable_unprepare(ohci_at91->iclk);
+	clk_disable_unprepare(ohci_at91->hclk);
+	ohci_at91->clocked = false;
+}
+
+static void at91_start_hc(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct ohci_regs __iomem *regs = hcd->regs;
+	struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+
+	dev_dbg(&pdev->dev, "start\n");
+
+	/*
+	 * Start the USB clocks.
+	 */
+	at91_start_clock(ohci_at91);
+
+	/*
+	 * The USB host controller must remain in reset.
+	 */
+	writel(0, &regs->control);
+}
+
+static void at91_stop_hc(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct ohci_regs __iomem *regs = hcd->regs;
+	struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+
+	dev_dbg(&pdev->dev, "stop\n");
+
+	/*
+	 * Put the USB host controller into reset.
+	 */
+	writel(0, &regs->control);
+
+	/*
+	 * Stop the USB clocks.
+	 */
+	at91_stop_clock(ohci_at91);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
+
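+/* the SFR regmap is optional; it is only present on SoCs such as SAMA5D2 */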
+static struct regmap *at91_dt_syscon_sfr(void)
+{
+	struct regmap *regmap;
+
+	regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+	if (IS_ERR(regmap))
+		regmap = NULL;
+
+	return regmap;
+}
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+
+/**
+ * usb_hcd_at91_probe - initialize AT91-based HCDs
+ * @driver:	pointer to the hc_driver to use for this HCD
+ * @pdev:	platform device of the controller being probed
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_at91_probe(const struct hc_driver *driver,
+			struct platform_device *pdev)
+{
+	struct at91_usbh_data *board;
+	struct ohci_hcd *ohci;
+	int retval;
+	struct usb_hcd *hcd;
+	struct ohci_at91_priv *ohci_at91;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int irq;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_dbg(dev, "hcd probe: missing irq resource\n");
+		return irq;
+	}
+
+	hcd = usb_create_hcd(driver, dev, "at91");
+	if (!hcd)
+		return -ENOMEM;
+	ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	ohci_at91->iclk = devm_clk_get(dev, "ohci_clk");
+	if (IS_ERR(ohci_at91->iclk)) {
+		dev_err(dev, "failed to get ohci_clk\n");
+		retval = PTR_ERR(ohci_at91->iclk);
+		goto err;
+	}
+	ohci_at91->fclk = devm_clk_get(dev, "uhpck");
+	if (IS_ERR(ohci_at91->fclk)) {
+		dev_err(dev, "failed to get uhpck\n");
+		retval = PTR_ERR(ohci_at91->fclk);
+		goto err;
+	}
+	ohci_at91->hclk = devm_clk_get(dev, "hclk");
+	if (IS_ERR(ohci_at91->hclk)) {
+		dev_err(dev, "failed to get hclk\n");
+		retval = PTR_ERR(ohci_at91->hclk);
+		goto err;
+	}
+
+	ohci_at91->sfr_regmap = at91_dt_syscon_sfr();
+	if (!ohci_at91->sfr_regmap)
+		dev_dbg(dev, "failed to find sfr node\n");
+
+	board = hcd->self.controller->platform_data;
+	ohci = hcd_to_ohci(hcd);
+	ohci->num_ports = board->ports;
+	at91_start_hc(pdev);
+
+	/*
+	 * The RemoteWakeupConnected bit has to be set explicitly
+	 * before calling ohci_run. The reset value of this bit is 0.
+	 */
+	ohci->hc_control = OHCI_CTRL_RWC;
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return retval;
+	}
+
+	/* Error handling */
+	at91_stop_hc(pdev);
+
+ err:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+
+/* may be called with controller, bus, and devices active */
+
+/**
+ * usb_hcd_at91_remove - shutdown processing for AT91-based HCDs
+ * @hcd:	USB Host Controller being removed
+ * @pdev:	platform device providing the HCD's resources
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_at91_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, "rmmod" or something similar.
+ *
+ */
+static void usb_hcd_at91_remove(struct usb_hcd *hcd,
+				struct platform_device *pdev)
+{
+	usb_remove_hcd(hcd);
+	at91_stop_hc(pdev);
+	usb_put_hcd(hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port,
+				    int enable)
+{
+	if (!valid_port(port))
+		return;
+
+	gpiod_set_value(pdata->vbus_pin[port], enable);
+}
+
+static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
+{
+	if (!valid_port(port))
+		return -EINVAL;
+
+	return gpiod_get_value(pdata->vbus_pin[port]);
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+	int length = ohci_hub_status_data(hcd, buf);
+	int port;
+
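+	/* a change on (0-based) port N is reported in bit N + 1 of buf[0] */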
+	at91_for_each_port(port) {
+		if (pdata->overcurrent_changed[port]) {
+			if (!length)
+				length = 1;
+			buf[0] |= 1 << (port + 1);
+		}
+	}
+
+	return length;
+}
+
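+/*
+ * Set or clear the OHCI suspend bit in the SAMA5D2 Special Function
+ * Registers (SFR) block, if such a regmap was found at probe time.
+ */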
+static int ohci_at91_port_suspend(struct regmap *regmap, u8 set)
+{
+	u32 regval;
+	int ret;
+
+	if (!regmap)
+		return 0;
+
+	ret = regmap_read(regmap, AT91_SFR_OHCIICR, &regval);
+	if (ret)
+		return ret;
+
+	if (set)
+		regval |= AT91_OHCIICR_USB_SUSPEND;
+	else
+		regval &= ~AT91_OHCIICR_USB_SUSPEND;
+
+	regmap_write(regmap, AT91_SFR_OHCIICR, regval);
+
+	return 0;
+}
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+				 u16 wIndex, char *buf, u16 wLength)
+{
+	struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
+	struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+	struct usb_hub_descriptor *desc;
+	int ret = -EINVAL;
+	u32 *data = (u32 *)buf;
+
+	dev_dbg(hcd->self.controller,
+		"ohci_at91_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
+		hcd, typeReq, wValue, wIndex, buf, wLength);
+
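+	/* hub requests use 1-based port numbers; pdata arrays are 0-based */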
+	wIndex--;
+
+	switch (typeReq) {
+	case SetPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
+			if (valid_port(wIndex)) {
+				ohci_at91_usb_set_power(pdata, wIndex, 1);
+				ret = 0;
+			}
+
+			goto out;
+
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
+				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
+						       1);
+				return 0;
+			}
+			break;
+		}
+		break;
+
+	case ClearPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: C_OVER_CURRENT\n");
+
+			if (valid_port(wIndex)) {
+				pdata->overcurrent_changed[wIndex] = 0;
+				pdata->overcurrent_status[wIndex] = 0;
+			}
+
+			goto out;
+
+		case USB_PORT_FEAT_OVER_CURRENT:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: OVER_CURRENT\n");
+
+			if (valid_port(wIndex))
+				pdata->overcurrent_status[wIndex] = 0;
+
+			goto out;
+
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: POWER\n");
+
+			if (valid_port(wIndex)) {
+				ohci_at91_usb_set_power(pdata, wIndex, 0);
+				return 0;
+			}
+			break;
+
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
+				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
+						       0);
+				return 0;
+			}
+			break;
+		}
+		break;
+	}
+
+	ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
+	if (ret)
+		goto out;
+
+	switch (typeReq) {
+	case GetHubDescriptor:
+
+		/* update the hub's descriptor */
+
+		desc = (struct usb_hub_descriptor *)buf;
+
+		dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
+			desc->wHubCharacteristics);
+
+		/* remove the old configurations for power-switching, and
+		 * over-current protection, and insert our new configuration
+		 */
+
+		desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
+		desc->wHubCharacteristics |=
+			cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM);
+
+		if (pdata->overcurrent_supported) {
+			desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM);
+			desc->wHubCharacteristics |=
+				cpu_to_le16(HUB_CHAR_INDV_PORT_OCPM);
+		}
+
+		dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+			desc->wHubCharacteristics);
+
+		return ret;
+
+	case GetPortStatus:
+		/* check port status */
+
+		dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+
+		if (valid_port(wIndex)) {
+			if (!ohci_at91_usb_get_power(pdata, wIndex))
+				*data &= ~cpu_to_le32(RH_PS_PPS);
+
+			if (pdata->overcurrent_changed[wIndex])
+				*data |= cpu_to_le32(RH_PS_OCIC);
+
+			if (pdata->overcurrent_status[wIndex])
+				*data |= cpu_to_le32(RH_PS_POCI);
+		}
+	}
+
+ out:
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
+{
+	struct platform_device *pdev = data;
+	struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
+	int val, port;
+
+	/*
+	 * From the GPIO notifying the over-current situation, find
+	 * out the corresponding port.
+	 */
+	at91_for_each_port(port) {
+		if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq)
+			break;
+	}
+
+	if (port == AT91_MAX_USBH_PORTS) {
+		dev_err(&pdev->dev, "overcurrent interrupt from unknown GPIO\n");
+		return IRQ_HANDLED;
+	}
+
+	val = gpiod_get_value(pdata->overcurrent_pin[port]);
+
+	/*
+	 * When notified of an over-current situation, disable power on the
+	 * corresponding port, and mark this port in over-current.
+	 */
+	if (!val) {
+		ohci_at91_usb_set_power(pdata, port, 0);
+		pdata->overcurrent_status[port]  = 1;
+		pdata->overcurrent_changed[port] = 1;
+	}
+
+	dev_dbg(&pdev->dev, "overcurrent situation %s\n",
+		val ? "exited" : "notified");
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id at91_ohci_dt_ids[] = {
+	{ .compatible = "atmel,at91rm9200-ohci" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct at91_usbh_data	*pdata;
+	int			i;
+	int			ret;
+	int			err;
+	u32			ports;
+
+	/* Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdev->dev.platform_data = pdata;
+
+	if (!of_property_read_u32(np, "num-ports", &ports))
+		pdata->ports = ports;
+
+	at91_for_each_port(i) {
+		if (i >= pdata->ports)
+			break;
+
+		pdata->vbus_pin[i] =
+			devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
+						      i, GPIOD_OUT_HIGH);
+		if (IS_ERR(pdata->vbus_pin[i])) {
+			err = PTR_ERR(pdata->vbus_pin[i]);
+			dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
+			continue;
+		}
+	}
+
+	at91_for_each_port(i) {
+		if (i >= pdata->ports)
+			break;
+
+		pdata->overcurrent_pin[i] =
+			devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
+						      i, GPIOD_IN);
+		if (!pdata->overcurrent_pin[i])
+			continue;
+		if (IS_ERR(pdata->overcurrent_pin[i])) {
+			err = PTR_ERR(pdata->overcurrent_pin[i]);
+			dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
+			continue;
+		}
+
+		ret = devm_request_irq(&pdev->dev,
+				       gpiod_to_irq(pdata->overcurrent_pin[i]),
+				       ohci_hcd_at91_overcurrent_irq,
+				       IRQF_SHARED,
+				       "ohci_overcurrent", pdev);
+		if (ret)
+			dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n");
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+	return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
+}
+
+static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
+{
+	struct at91_usbh_data	*pdata = dev_get_platdata(&pdev->dev);
+	int			i;
+
+	if (pdata) {
+		at91_for_each_port(i)
+			ohci_at91_usb_set_power(pdata, i, 0);
+	}
+
+	device_init_wakeup(&pdev->dev, 0);
+	usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev);
+	return 0;
+}
+
+static int __maybe_unused
+ohci_hcd_at91_drv_suspend(struct device *dev)
+{
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+	int		ret;
+
+	/*
+	 * Disable wakeup if we are going to sleep with slow clock mode
+	 * enabled.
+	 */
+	ohci_at91->wakeup = device_may_wakeup(dev)
+			&& !at91_suspend_entering_slow_clock();
+
+	if (ohci_at91->wakeup)
+		enable_irq_wake(hcd->irq);
+
+	ohci_at91_port_suspend(ohci_at91->sfr_regmap, 1);
+
+	ret = ohci_suspend(hcd, ohci_at91->wakeup);
+	if (ret) {
+		if (ohci_at91->wakeup)
+			disable_irq_wake(hcd->irq);
+		return ret;
+	}
+	/*
+	 * The integrated transceivers seem unable to notice disconnect,
+	 * reconnect, or wakeup without the 48 MHz clock active.  So for
+	 * correctness, always discard connection state (using reset).
+	 *
+	 * REVISIT: some boards will be able to turn VBUS off...
+	 */
+	if (!ohci_at91->wakeup) {
+		ohci->rh_state = OHCI_RH_HALTED;
+
+		/* flush the writes */
+		(void) ohci_readl (ohci, &ohci->regs->control);
+		at91_stop_clock(ohci_at91);
+	}
+
+	return ret;
+}
+
+static int __maybe_unused
+ohci_hcd_at91_drv_resume(struct device *dev)
+{
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
+
+	if (ohci_at91->wakeup)
+		disable_irq_wake(hcd->irq);
+
+	at91_start_clock(ohci_at91);
+
+	ohci_resume(hcd, false);
+
+	ohci_at91_port_suspend(ohci_at91->sfr_regmap, 0);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
+					ohci_hcd_at91_drv_resume);
+
+static struct platform_driver ohci_hcd_at91_driver = {
+	.probe		= ohci_hcd_at91_drv_probe,
+	.remove		= ohci_hcd_at91_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "at91_ohci",
+		.pm	= &ohci_hcd_at91_pm_ops,
+		.of_match_table	= at91_ohci_dt_ids,
+	},
+};
+
+static int __init ohci_at91_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	ohci_init_driver(&ohci_at91_hc_driver, &ohci_at91_drv_overrides);
+
+	/*
+	 * The Atmel HW has some unusual quirks, which require Atmel-specific
+	 * workarounds. We override certain hc_driver functions here to
+	 * achieve that. We explicitly do not enhance ohci_driver_overrides to
+	 * allow this more easily, since this is an unusual case, and we don't
+	 * want to encourage others to override these functions by making it
+	 * too easy.
+	 */
+
+	ohci_at91_hc_driver.hub_status_data	= ohci_at91_hub_status_data;
+	ohci_at91_hc_driver.hub_control		= ohci_at91_hub_control;
+
+	return platform_driver_register(&ohci_hcd_at91_driver);
+}
+module_init(ohci_at91_init);
+
+static void __exit ohci_at91_cleanup(void)
+{
+	platform_driver_unregister(&ohci_hcd_at91_driver);
+}
+module_exit(ohci_at91_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_ohci");
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 0000000..a55cbba
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * TI DA8xx (OMAP-L1x) Bus Glue
+ *
+ * Derived from: ohci-omap.c and ohci-s3c2410.c
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_data/usb-davinci.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <asm/unaligned.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "DA8XX"
+#define DRV_NAME "ohci-da8xx"
+
+static struct hc_driver __read_mostly ohci_da8xx_hc_driver;
+
+static int (*orig_ohci_hub_control)(struct usb_hcd  *hcd, u16 typeReq,
+			u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
+
+struct da8xx_ohci_hcd {
+	struct usb_hcd *hcd;
+	struct clk *usb11_clk;
+	struct phy *usb11_phy;
+	struct regulator *vbus_reg;
+	struct notifier_block nb;
+	unsigned int reg_enabled;
+};
+
+#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
+
+/* Over-current indicator change bitmask */
+static volatile u16 ocic_mask;
+
+static int ohci_da8xx_enable(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	int ret;
+
+	ret = clk_prepare_enable(da8xx_ohci->usb11_clk);
+	if (ret)
+		return ret;
+
+	ret = phy_init(da8xx_ohci->usb11_phy);
+	if (ret)
+		goto err_phy_init;
+
+	ret = phy_power_on(da8xx_ohci->usb11_phy);
+	if (ret)
+		goto err_phy_power_on;
+
+	return 0;
+
+err_phy_power_on:
+	phy_exit(da8xx_ohci->usb11_phy);
+err_phy_init:
+	clk_disable_unprepare(da8xx_ohci->usb11_clk);
+
+	return ret;
+}
+
+static void ohci_da8xx_disable(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+
+	phy_power_off(da8xx_ohci->usb11_phy);
+	phy_exit(da8xx_ohci->usb11_phy);
+	clk_disable_unprepare(da8xx_ohci->usb11_clk);
+}
+
+static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	int ret;
+
+	if (hub && hub->set_power)
+		return hub->set_power(1, on);
+
+	if (!da8xx_ohci->vbus_reg)
+		return 0;
+
+	if (on && !da8xx_ohci->reg_enabled) {
+		ret = regulator_enable(da8xx_ohci->vbus_reg);
+		if (ret) {
+			dev_err(dev, "Failed to enable regulator: %d\n", ret);
+			return ret;
+		}
+		da8xx_ohci->reg_enabled = 1;
+
+	} else if (!on && da8xx_ohci->reg_enabled) {
+		ret = regulator_disable(da8xx_ohci->vbus_reg);
+		if (ret) {
+			dev_err(dev, "Failed  to disable regulator: %d\n", ret);
+			return ret;
+		}
+		da8xx_ohci->reg_enabled = 0;
+	}
+
+	return 0;
+}
+
+static int ohci_da8xx_get_power(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->get_power)
+		return hub->get_power(1);
+
+	if (da8xx_ohci->vbus_reg)
+		return regulator_is_enabled(da8xx_ohci->vbus_reg);
+
+	return 1;
+}
+
+static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	unsigned int flags;
+	int ret;
+
+	if (hub && hub->get_oci)
+		return hub->get_oci(1);
+
+	if (!da8xx_ohci->vbus_reg)
+		return 0;
+
+	ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags);
+	if (ret)
+		return ret;
+
+	if (flags & REGULATOR_ERROR_OVER_CURRENT)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->set_power)
+		return 1;
+
+	if (da8xx_ohci->vbus_reg)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->get_oci)
+		return 1;
+
+	if (da8xx_ohci->vbus_reg)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
+{
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->potpgt)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Handle the port over-current indicator change.
+ */
+static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
+				    unsigned port)
+{
+	ocic_mask |= 1 << port;
+
+	/* Once over-current is detected, the port needs to be powered down */
+	if (hub->get_oci(port) > 0)
+		hub->set_power(port, 0);
+}
+
+static int ohci_da8xx_regulator_event(struct notifier_block *nb,
+				unsigned long event, void *data)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci =
+				container_of(nb, struct da8xx_ohci_hcd, nb);
+
+	if (event & REGULATOR_EVENT_OVER_CURRENT) {
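+		/* the DA8xx root hub has a single port, reported as port 1 */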
+		ocic_mask |= 1 << 1;
+		ohci_da8xx_set_power(da8xx_ohci->hcd, 0);
+	}
+
+	return 0;
+}
+
+static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	int ret = 0;
+
+	if (hub && hub->ocic_notify) {
+		ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
+	} else if (da8xx_ohci->vbus_reg) {
+		da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
+		ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
+						&da8xx_ohci->nb);
+	}
+
+	if (ret)
+		dev_err(dev, "Failed to register notifier: %d\n", ret);
+
+	return ret;
+}
+
+static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
+{
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->ocic_notify)
+		hub->ocic_notify(NULL);
+}
+
+static int ohci_da8xx_reset(struct usb_hcd *hcd)
+{
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	struct ohci_hcd	*ohci		= hcd_to_ohci(hcd);
+	int result;
+	u32 rh_a;
+
+	dev_dbg(dev, "starting USB controller\n");
+
+	result = ohci_da8xx_enable(hcd);
+	if (result < 0)
+		return result;
+
+	/*
+	 * DA8xx has only 1 port connected to the pins, but the HC root hub
+	 * register A reports 2 ports, thus we'll have to override it...
+	 */
+	ohci->num_ports = 1;
+
+	result = ohci_setup(hcd);
+	if (result < 0) {
+		ohci_da8xx_disable(hcd);
+		return result;
+	}
+
+	/*
+	 * Since we're providing a board-specific root hub port power control
+	 * and over-current reporting, we have to override the HC root hub A
+	 * register's default value, so that ohci_hub_control() could return
+	 * the correct hub descriptor...
+	 */
+	rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
+	if (ohci_da8xx_has_set_power(hcd)) {
+		rh_a &= ~RH_A_NPS;
+		rh_a |=  RH_A_PSM;
+	}
+	if (ohci_da8xx_has_oci(hcd)) {
+		rh_a &= ~RH_A_NOCP;
+		rh_a |=  RH_A_OCPM;
+	}
+	if (ohci_da8xx_has_potpgt(hcd)) {
+		rh_a &= ~RH_A_POTPGT;
+		rh_a |= hub->potpgt << 24;
+	}
+	ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
+
+	return result;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	int length		= orig_ohci_hub_status_data(hcd, buf);
+
+	/* See if we have OCIC bit set on port 1 */
+	if (ocic_mask & (1 << 1)) {
+		dev_dbg(hcd->self.controller,
+			"over-current indicator change on port 1\n");
+
+		if (!length)
+			length = 1;
+
+		buf[0] |= 1 << 1;
+	}
+	return length;
+}
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+				  u16 wIndex, char *buf, u16 wLength)
+{
+	struct device *dev		= hcd->self.controller;
+	int temp;
+
+	switch (typeReq) {
+	case GetPortStatus:
+		/* Check the port number */
+		if (wIndex != 1)
+			break;
+
+		dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
+
+		temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
+
+		/* The port power status (PPS) bit defaults to 1 */
+		if (!ohci_da8xx_get_power(hcd))
+			temp &= ~RH_PS_PPS;
+
+		/* The port over-current indicator (POCI) bit is always 0 */
+		if (ohci_da8xx_get_oci(hcd) > 0)
+			temp |=  RH_PS_POCI;
+
+		/* The over-current indicator change (OCIC) bit is 0 too */
+		if (ocic_mask & (1 << wIndex))
+			temp |=  RH_PS_OCIC;
+
+		put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
+		return 0;
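+	/* for Set/ClearPortFeature, temp records set (1) vs. clear (0) */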
+	case SetPortFeature:
+		temp = 1;
+		goto check_port;
+	case ClearPortFeature:
+		temp = 0;
+
+check_port:
+		/* Check the port number */
+		if (wIndex != 1)
+			break;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(dev, "%sPortFeature(%u): %s\n",
+				temp ? "Set" : "Clear", wIndex, "POWER");
+
+			return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(dev, "%sPortFeature(%u): %s\n",
+				temp ? "Set" : "Clear", wIndex,
+				"C_OVER_CURRENT");
+
+			if (temp)
+				ocic_mask |= 1 << wIndex;
+			else
+				ocic_mask &= ~(1 << wIndex);
+			return 0;
+		}
+	}
+
+	return orig_ohci_hub_control(hcd, typeReq, wValue,
+			wIndex, buf, wLength);
+}
+
+/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_ohci_ids[] = {
+	{ .compatible = "ti,da830-ohci" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
+#endif
+
+static int ohci_da8xx_probe(struct platform_device *pdev)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci;
+	struct usb_hcd	*hcd;
+	struct resource *mem;
+	int error, irq;
+
+	hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
+				dev_name(&pdev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	da8xx_ohci = to_da8xx_ohci(hcd);
+	da8xx_ohci->hcd = hcd;
+
+	da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(da8xx_ohci->usb11_clk)) {
+		error = PTR_ERR(da8xx_ohci->usb11_clk);
+		if (error != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get clock.\n");
+		goto err;
+	}
+
+	da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+	if (IS_ERR(da8xx_ohci->usb11_phy)) {
+		error = PTR_ERR(da8xx_ohci->usb11_phy);
+		if (error != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get phy.\n");
+		goto err;
+	}
+
+	da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+	if (IS_ERR(da8xx_ohci->vbus_reg)) {
+		error = PTR_ERR(da8xx_ohci->vbus_reg);
+		if (error == -ENODEV) {
+			da8xx_ohci->vbus_reg = NULL;
+		} else if (error == -EPROBE_DEFER) {
+			goto err;
+		} else {
+			dev_err(&pdev->dev, "Failed to get regulator\n");
+			goto err;
+		}
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(hcd->regs)) {
+		error = PTR_ERR(hcd->regs);
+		goto err;
+	}
+	hcd->rsrc_start = mem->start;
+	hcd->rsrc_len = resource_size(mem);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		error = irq;
+		goto err;
+	}
+
+	error = usb_add_hcd(hcd, irq, 0);
+	if (error)
+		goto err;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	error = ohci_da8xx_register_notify(hcd);
+	if (error)
+		goto err_remove_hcd;
+
+	return 0;
+
+err_remove_hcd:
+	usb_remove_hcd(hcd);
+err:
+	usb_put_hcd(hcd);
+	return error;
+}
+
+static int ohci_da8xx_remove(struct platform_device *pdev)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(pdev);
+
+	ohci_da8xx_unregister_notify(hcd);
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_da8xx_suspend(struct platform_device *pdev,
+				pm_message_t message)
+{
+	struct usb_hcd	*hcd	= platform_get_drvdata(pdev);
+	struct ohci_hcd	*ohci	= hcd_to_ohci(hcd);
+	bool		do_wakeup	= device_may_wakeup(&pdev->dev);
+	int		ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	ohci_da8xx_disable(hcd);
+	hcd->state = HC_STATE_SUSPENDED;
+
+	return ret;
+}
+
+static int ohci_da8xx_resume(struct platform_device *dev)
+{
+	struct usb_hcd	*hcd	= platform_get_drvdata(dev);
+	struct ohci_hcd	*ohci	= hcd_to_ohci(hcd);
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_da8xx_enable(hcd);
+	if (ret)
+		return ret;
+
+	ohci_resume(hcd, false);
+
+	return 0;
+}
+#endif
+
+static const struct ohci_driver_overrides da8xx_overrides __initconst = {
+	.reset		 = ohci_da8xx_reset,
+	.extra_priv_size = sizeof(struct da8xx_ohci_hcd),
+};
+
+/*
+ * Driver definition to register with platform structure.
+ */
+static struct platform_driver ohci_hcd_da8xx_driver = {
+	.probe		= ohci_da8xx_probe,
+	.remove		= ohci_da8xx_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+#ifdef	CONFIG_PM
+	.suspend	= ohci_da8xx_suspend,
+	.resume		= ohci_da8xx_resume,
+#endif
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = of_match_ptr(da8xx_ohci_ids),
+	},
+};
+
+static int __init ohci_da8xx_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
+	ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
+
+	/*
+	 * The Davinci da8xx HW has some unusual quirks, which require
+	 * da8xx-specific workarounds. We override certain hc_driver
+	 * functions here to achieve that. We explicitly do not enhance
+	 * ohci_driver_overrides to allow this more easily, since this
+	 * is an unusual case, and we don't want to encourage others to
+	 * override these functions by making it too easy.
+	 */
+
+	orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control;
+	orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data;
+
+	ohci_da8xx_hc_driver.hub_status_data     = ohci_da8xx_hub_status_data;
+	ohci_da8xx_hc_driver.hub_control         = ohci_da8xx_hub_control;
+
+	return platform_driver_register(&ohci_hcd_da8xx_driver);
+}
+module_init(ohci_da8xx_init);
+
+static void __exit ohci_da8xx_exit(void)
+{
+	platform_driver_unregister(&ohci_hcd_da8xx_driver);
+}
+module_exit(ohci_da8xx_exit);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
new file mode 100644
index 0000000..4f267dc
--- /dev/null
+++ b/drivers/usb/host/ohci-dbg.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licenced under the GPL.
+ */
+
+/*-------------------------------------------------------------------------*/
+
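+/* map an endpoint descriptor (or pipe) type to a short type name */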
+#define edstring(ed_type) ({ char *temp; \
+	switch (ed_type) { \
+	case PIPE_CONTROL:	temp = "ctrl"; break; \
+	case PIPE_BULK:		temp = "bulk"; break; \
+	case PIPE_INTERRUPT:	temp = "intr"; break; \
+	default:		temp = "isoc"; break; \
+	} temp;})
+#define pipestring(pipe) edstring(usb_pipetype(pipe))
+
+
+#define ohci_dbg_sw(ohci, next, size, format, arg...) \
+	do { \
+	if (next != NULL) { \
+		unsigned s_len; \
+		s_len = scnprintf (*next, *size, format, ## arg ); \
+		*size -= s_len; *next += s_len; \
+	} else \
+		ohci_dbg(ohci,format, ## arg ); \
+	} while (0)
+
+/* Version for use where "next" is the address of a local variable */
+#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
+	do { \
+		unsigned s_len; \
+		s_len = scnprintf(*next, *size, format, ## arg); \
+		*size -= s_len; *next += s_len; \
+	} while (0)
+
+
+static void ohci_dump_intr_mask (
+	struct ohci_hcd *ohci,
+	char *label,
+	u32 mask,
+	char **next,
+	unsigned *size)
+{
+	ohci_dbg_sw (ohci, next, size, "%s 0x%08x%s%s%s%s%s%s%s%s%s\n",
+		label,
+		mask,
+		(mask & OHCI_INTR_MIE) ? " MIE" : "",
+		(mask & OHCI_INTR_OC) ? " OC" : "",
+		(mask & OHCI_INTR_RHSC) ? " RHSC" : "",
+		(mask & OHCI_INTR_FNO) ? " FNO" : "",
+		(mask & OHCI_INTR_UE) ? " UE" : "",
+		(mask & OHCI_INTR_RD) ? " RD" : "",
+		(mask & OHCI_INTR_SF) ? " SF" : "",
+		(mask & OHCI_INTR_WDH) ? " WDH" : "",
+		(mask & OHCI_INTR_SO) ? " SO" : ""
+		);
+}
+
+static void maybe_print_eds (
+	struct ohci_hcd *ohci,
+	char *label,
+	u32 value,
+	char **next,
+	unsigned *size)
+{
+	if (value)
+		ohci_dbg_sw (ohci, next, size, "%s %08x\n", label, value);
+}
+
+static char *hcfs2string (int state)
+{
+	switch (state) {
+	case OHCI_USB_RESET:	return "reset";
+	case OHCI_USB_RESUME:	return "resume";
+	case OHCI_USB_OPER:	return "operational";
+	case OHCI_USB_SUSPEND:	return "suspend";
+	}
+	return "?";
+}
+
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+	switch (ohci->rh_state) {
+	case OHCI_RH_HALTED:
+		return "halted";
+	case OHCI_RH_SUSPENDED:
+		return "suspended";
+	case OHCI_RH_RUNNING:
+		return "running";
+	}
+	return "?";
+}
+
+// dump control and status registers
+static void
+ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
+{
+	struct ohci_regs __iomem *regs = controller->regs;
+	u32			temp;
+
+	temp = ohci_readl (controller, &regs->revision) & 0xff;
+	ohci_dbg_sw (controller, next, size,
+		"OHCI %d.%d, %s legacy support registers, rh state %s\n",
+		0x03 & (temp >> 4), (temp & 0x0f),
+		(temp & 0x0100) ? "with" : "NO",
+		rh_state_string(controller));
+
+	temp = ohci_readl (controller, &regs->control);
+	ohci_dbg_sw (controller, next, size,
+		"control 0x%03x%s%s%s HCFS=%s%s%s%s%s CBSR=%d\n",
+		temp,
+		(temp & OHCI_CTRL_RWE) ? " RWE" : "",
+		(temp & OHCI_CTRL_RWC) ? " RWC" : "",
+		(temp & OHCI_CTRL_IR) ? " IR" : "",
+		hcfs2string (temp & OHCI_CTRL_HCFS),
+		(temp & OHCI_CTRL_BLE) ? " BLE" : "",
+		(temp & OHCI_CTRL_CLE) ? " CLE" : "",
+		(temp & OHCI_CTRL_IE) ? " IE" : "",
+		(temp & OHCI_CTRL_PLE) ? " PLE" : "",
+		temp & OHCI_CTRL_CBSR
+		);
+
+	temp = ohci_readl (controller, &regs->cmdstatus);
+	ohci_dbg_sw (controller, next, size,
+		"cmdstatus 0x%05x SOC=%d%s%s%s%s\n", temp,
+		(temp & OHCI_SOC) >> 16,
+		(temp & OHCI_OCR) ? " OCR" : "",
+		(temp & OHCI_BLF) ? " BLF" : "",
+		(temp & OHCI_CLF) ? " CLF" : "",
+		(temp & OHCI_HCR) ? " HCR" : ""
+		);
+
+	ohci_dump_intr_mask (controller, "intrstatus",
+			ohci_readl (controller, &regs->intrstatus),
+			next, size);
+	ohci_dump_intr_mask (controller, "intrenable",
+			ohci_readl (controller, &regs->intrenable),
+			next, size);
+	// intrdisable always same as intrenable
+
+	maybe_print_eds (controller, "ed_periodcurrent",
+			ohci_readl (controller, &regs->ed_periodcurrent),
+			next, size);
+
+	maybe_print_eds (controller, "ed_controlhead",
+			ohci_readl (controller, &regs->ed_controlhead),
+			next, size);
+	maybe_print_eds (controller, "ed_controlcurrent",
+			ohci_readl (controller, &regs->ed_controlcurrent),
+			next, size);
+
+	maybe_print_eds (controller, "ed_bulkhead",
+			ohci_readl (controller, &regs->ed_bulkhead),
+			next, size);
+	maybe_print_eds (controller, "ed_bulkcurrent",
+			ohci_readl (controller, &regs->ed_bulkcurrent),
+			next, size);
+
+	maybe_print_eds (controller, "donehead",
+			ohci_readl (controller, &regs->donehead), next, size);
+}
+
+#define dbg_port_sw(hc,num,value,next,size) \
+	ohci_dbg_sw (hc, next, size, \
+		"roothub.portstatus [%d] " \
+		"0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
+		num, value, \
+		(value & RH_PS_PRSC) ? " PRSC" : "", \
+		(value & RH_PS_OCIC) ? " OCIC" : "", \
+		(value & RH_PS_PSSC) ? " PSSC" : "", \
+		(value & RH_PS_PESC) ? " PESC" : "", \
+		(value & RH_PS_CSC) ? " CSC" : "", \
+		\
+		(value & RH_PS_LSDA) ? " LSDA" : "", \
+		(value & RH_PS_PPS) ? " PPS" : "", \
+		(value & RH_PS_PRS) ? " PRS" : "", \
+		(value & RH_PS_POCI) ? " POCI" : "", \
+		(value & RH_PS_PSS) ? " PSS" : "", \
+		\
+		(value & RH_PS_PES) ? " PES" : "", \
+		(value & RH_PS_CCS) ? " CCS" : "" \
+		)
+
+
+static void
+ohci_dump_roothub (
+	struct ohci_hcd *controller,
+	int verbose,
+	char **next,
+	unsigned *size)
+{
+	u32			temp, i;
+
+	temp = roothub_a (controller);
+	if (temp == ~(u32)0)
+		return;
+
+	if (verbose) {
+		ohci_dbg_sw (controller, next, size,
+			"roothub.a %08x POTPGT=%d%s%s%s%s%s NDP=%d(%d)\n", temp,
+			((temp & RH_A_POTPGT) >> 24) & 0xff,
+			(temp & RH_A_NOCP) ? " NOCP" : "",
+			(temp & RH_A_OCPM) ? " OCPM" : "",
+			(temp & RH_A_DT) ? " DT" : "",
+			(temp & RH_A_NPS) ? " NPS" : "",
+			(temp & RH_A_PSM) ? " PSM" : "",
+			(temp & RH_A_NDP), controller->num_ports
+			);
+		temp = roothub_b (controller);
+		ohci_dbg_sw (controller, next, size,
+			"roothub.b %08x PPCM=%04x DR=%04x\n",
+			temp,
+			(temp & RH_B_PPCM) >> 16,
+			(temp & RH_B_DR)
+			);
+		temp = roothub_status (controller);
+		ohci_dbg_sw (controller, next, size,
+			"roothub.status %08x%s%s%s%s%s%s\n",
+			temp,
+			(temp & RH_HS_CRWE) ? " CRWE" : "",
+			(temp & RH_HS_OCIC) ? " OCIC" : "",
+			(temp & RH_HS_LPSC) ? " LPSC" : "",
+			(temp & RH_HS_DRWE) ? " DRWE" : "",
+			(temp & RH_HS_OCI) ? " OCI" : "",
+			(temp & RH_HS_LPS) ? " LPS" : ""
+			);
+	}
+
+	for (i = 0; i < controller->num_ports; i++) {
+		temp = roothub_portstatus (controller, i);
+		dbg_port_sw (controller, i, temp, next, size);
+	}
+}
+
+static void ohci_dump(struct ohci_hcd *controller)
+{
+	ohci_dbg (controller, "OHCI controller state\n");
+
+	// dumps some of the state we know about
+	ohci_dump_status (controller, NULL, NULL);
+	if (controller->hcca)
+		ohci_dbg (controller,
+			"hcca frame #%04x\n", ohci_frame_no(controller));
+	ohci_dump_roothub (controller, 1, NULL, NULL);
+}
+
+static const char data0 [] = "DATA0";
+static const char data1 [] = "DATA1";
+
+static void ohci_dump_td (const struct ohci_hcd *ohci, const char *label,
+		const struct td *td)
+{
+	u32	tmp = hc32_to_cpup (ohci, &td->hwINFO);
+
+	ohci_dbg (ohci, "%s td %p%s; urb %p index %d; hw next td %08x\n",
+		label, td,
+		(tmp & TD_DONE) ? " (DONE)" : "",
+		td->urb, td->index,
+		hc32_to_cpup (ohci, &td->hwNextTD));
+	if ((tmp & TD_ISO) == 0) {
+		const char	*toggle, *pid;
+		u32	cbp, be;
+
+		switch (tmp & TD_T) {
+		case TD_T_DATA0: toggle = data0; break;
+		case TD_T_DATA1: toggle = data1; break;
+		case TD_T_TOGGLE: toggle = "(CARRY)"; break;
+		default: toggle = "(?)"; break;
+		}
+		switch (tmp & TD_DP) {
+		case TD_DP_SETUP: pid = "SETUP"; break;
+		case TD_DP_IN: pid = "IN"; break;
+		case TD_DP_OUT: pid = "OUT"; break;
+		default: pid = "(bad pid)"; break;
+		}
+		ohci_dbg (ohci, "     info %08x CC=%x %s DI=%d %s %s\n", tmp,
+			TD_CC_GET(tmp), /* EC, */ toggle,
+			(tmp & TD_DI) >> 21, pid,
+			(tmp & TD_R) ? "R" : "");
+		cbp = hc32_to_cpup (ohci, &td->hwCBP);
+		be = hc32_to_cpup (ohci, &td->hwBE);
+		ohci_dbg (ohci, "     cbp %08x be %08x (len %d)\n", cbp, be,
+			cbp ? (be + 1 - cbp) : 0);
+	} else {
+		unsigned	i;
+		ohci_dbg (ohci, "  info %08x CC=%x FC=%d DI=%d SF=%04x\n", tmp,
+			TD_CC_GET(tmp),
+			(tmp >> 24) & 0x07,
+			(tmp & TD_DI) >> 21,
+			tmp & 0x0000ffff);
+		ohci_dbg (ohci, "  bp0 %08x be %08x\n",
+			hc32_to_cpup (ohci, &td->hwCBP) & ~0x0fff,
+			hc32_to_cpup (ohci, &td->hwBE));
+		for (i = 0; i < MAXPSW; i++) {
+			u16	psw = ohci_hwPSW (ohci, td, i);
+			int	cc = (psw >> 12) & 0x0f;
+			ohci_dbg (ohci, "    psw [%d] = %2x, CC=%x %s=%d\n", i,
+				psw, cc,
+				(cc >= 0x0e) ? "OFFSET" : "SIZE",
+				psw & 0x0fff);
+		}
+	}
+}
+
+/* caller MUST own hcd spinlock if verbose is set! */
+static void __maybe_unused
+ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
+		const struct ed *ed, int verbose)
+{
+	u32	tmp = hc32_to_cpu (ohci, ed->hwINFO);
+	char	*type = "";
+
+	ohci_dbg (ohci, "%s, ed %p state 0x%x type %s; next ed %08x\n",
+		label,
+		ed, ed->state, edstring (ed->type),
+		hc32_to_cpup (ohci, &ed->hwNextED));
+	switch (tmp & (ED_IN|ED_OUT)) {
+	case ED_OUT: type = "-OUT"; break;
+	case ED_IN: type = "-IN"; break;
+	/* else from TDs ... control */
+	}
+	ohci_dbg (ohci,
+		"  info %08x MAX=%d%s%s%s%s EP=%d%s DEV=%d\n", tmp,
+		0x03ff & (tmp >> 16),
+		(tmp & ED_DEQUEUE) ? " DQ" : "",
+		(tmp & ED_ISO) ? " ISO" : "",
+		(tmp & ED_SKIP) ? " SKIP" : "",
+		(tmp & ED_LOWSPEED) ? " LOW" : "",
+		0x000f & (tmp >> 7),
+		type,
+		0x007f & tmp);
+	tmp = hc32_to_cpup (ohci, &ed->hwHeadP);
+	ohci_dbg (ohci, "  tds: head %08x %s%s tail %08x%s\n",
+		tmp,
+		(tmp & ED_C) ? data1 : data0,
+		(tmp & ED_H) ? " HALT" : "",
+		hc32_to_cpup (ohci, &ed->hwTailP),
+		verbose ? "" : " (not listing)");
+	if (verbose) {
+		struct list_head	*tmp;
+
+		/* use ed->td_list because HC concurrently modifies
+		 * hwNextTD as it accumulates ed_donelist.
+		 */
+		list_for_each (tmp, &ed->td_list) {
+			struct td		*td;
+			td = list_entry (tmp, struct td, td_list);
+			ohci_dump_td (ohci, "  ->", td);
+		}
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_async_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_periodic_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_registers_open,
+	.read		= debug_output,
+	.release	= debug_close,
+	.llseek		= default_llseek,
+};
+
+static struct dentry *ohci_debug_root;
+
+struct debug_buffer {
+	ssize_t (*fill_func)(struct debug_buffer *);	/* fill method */
+	struct ohci_hcd *ohci;
+	struct mutex mutex;	/* protect filling of buffer */
+	size_t count;		/* number of characters filled into buffer */
+	char *page;
+};
+
+static ssize_t
+show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
+{
+	unsigned		temp, size = count;
+
+	if (!ed)
+		return 0;
+
+	/* print first --> last */
+	while (ed->ed_prev)
+		ed = ed->ed_prev;
+
+	/* dump a snapshot of the bulk or control schedule */
+	while (ed) {
+		u32		info = hc32_to_cpu (ohci, ed->hwINFO);
+		u32		headp = hc32_to_cpu (ohci, ed->hwHeadP);
+		struct list_head *entry;
+		struct td	*td;
+
+		temp = scnprintf (buf, size,
+			"ed/%p %cs dev%d ep%d%s max %d %08x%s%s %s",
+			ed,
+			(info & ED_LOWSPEED) ? 'l' : 'f',
+			info & 0x7f,
+			(info >> 7) & 0xf,
+			(info & ED_IN) ? "in" : "out",
+			0x03ff & (info >> 16),
+			info,
+			(info & ED_SKIP) ? " s" : "",
+			(headp & ED_H) ? " H" : "",
+			(headp & ED_C) ? data1 : data0);
+		size -= temp;
+		buf += temp;
+
+		list_for_each (entry, &ed->td_list) {
+			u32		cbp, be;
+
+			td = list_entry (entry, struct td, td_list);
+			info = hc32_to_cpup (ohci, &td->hwINFO);
+			cbp = hc32_to_cpup (ohci, &td->hwCBP);
+			be = hc32_to_cpup (ohci, &td->hwBE);
+			temp = scnprintf (buf, size,
+					"\n\ttd %p %s %d cc=%x urb %p (%08x)",
+					td,
+					({ char *pid;
+					switch (info & TD_DP) {
+					case TD_DP_SETUP: pid = "setup"; break;
+					case TD_DP_IN: pid = "in"; break;
+					case TD_DP_OUT: pid = "out"; break;
+					default: pid = "(?)"; break;
+					 } pid;}),
+					cbp ? (be + 1 - cbp) : 0,
+					TD_CC_GET (info), td->urb, info);
+			size -= temp;
+			buf += temp;
+		}
+
+		temp = scnprintf (buf, size, "\n");
+		size -= temp;
+		buf += temp;
+
+		ed = ed->ed_next;
+	}
+	return count - size;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+	struct ohci_hcd		*ohci;
+	size_t			temp, size;
+	unsigned long		flags;
+
+	ohci = buf->ohci;
+	size = PAGE_SIZE;
+
+	/* display control and bulk lists together, for simplicity */
+	spin_lock_irqsave (&ohci->lock, flags);
+	temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
+	temp += show_list(ohci, buf->page + temp, size - temp,
+			  ohci->ed_bulktail);
+	spin_unlock_irqrestore (&ohci->lock, flags);
+
+	return temp;
+}
+
+#define DBG_SCHED_LIMIT 64
+
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+	struct ohci_hcd		*ohci;
+	struct ed		**seen, *ed;
+	unsigned long		flags;
+	unsigned		temp, size, seen_count;
+	char			*next;
+	unsigned		i;
+
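+	/*
+	 * An ED may appear in several periodic slots; remember which EDs
+	 * were already printed so each is described in full only once.
+	 */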
+	seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
+	if (!seen)
+		return 0;
+	seen_count = 0;
+
+	ohci = buf->ohci;
+	next = buf->page;
+	size = PAGE_SIZE;
+
+	temp = scnprintf (next, size, "size = %d\n", NUM_INTS);
+	size -= temp;
+	next += temp;
+
+	/* dump a snapshot of the periodic schedule (and load) */
+	spin_lock_irqsave (&ohci->lock, flags);
+	for (i = 0; i < NUM_INTS; i++) {
+		ed = ohci->periodic[i];
+		if (!ed)
+			continue;
+
+		temp = scnprintf (next, size, "%2d [%3d]:", i, ohci->load [i]);
+		size -= temp;
+		next += temp;
+
+		do {
+			temp = scnprintf (next, size, " ed%d/%p",
+				ed->interval, ed);
+			size -= temp;
+			next += temp;
+			for (temp = 0; temp < seen_count; temp++) {
+				if (seen [temp] == ed)
+					break;
+			}
+
+			/* show more info the first time around */
+			if (temp == seen_count) {
+				u32	info = hc32_to_cpu (ohci, ed->hwINFO);
+				struct list_head	*entry;
+				unsigned		qlen = 0;
+
+				/* qlen measured here in TDs, not urbs */
+				list_for_each (entry, &ed->td_list)
+					qlen++;
+
+				temp = scnprintf (next, size,
+					" (%cs dev%d ep%d%s-%s qlen %u"
+					" max %d %08x%s%s)",
+					(info & ED_LOWSPEED) ? 'l' : 'f',
+					info & 0x7f,
+					(info >> 7) & 0xf,
+					(info & ED_IN) ? "in" : "out",
+					(info & ED_ISO) ? "iso" : "int",
+					qlen,
+					0x03ff & (info >> 16),
+					info,
+					(info & ED_SKIP) ? " K" : "",
+					(ed->hwHeadP &
+						cpu_to_hc32(ohci, ED_H)) ?
+							" H" : "");
+				size -= temp;
+				next += temp;
+
+				if (seen_count < DBG_SCHED_LIMIT)
+					seen [seen_count++] = ed;
+
+				ed = ed->ed_next;
+
+			} else {
+				/* we've seen it and what's after */
+				temp = 0;
+				ed = NULL;
+			}
+
+		} while (ed);
+
+		temp = scnprintf (next, size, "\n");
+		size -= temp;
+		next += temp;
+	}
+	spin_unlock_irqrestore (&ohci->lock, flags);
+	kfree (seen);
+
+	return PAGE_SIZE - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+	struct usb_hcd		*hcd;
+	struct ohci_hcd		*ohci;
+	struct ohci_regs __iomem *regs;
+	unsigned long		flags;
+	unsigned		temp, size;
+	char			*next;
+	u32			rdata;
+
+	ohci = buf->ohci;
+	hcd = ohci_to_hcd(ohci);
+	regs = ohci->regs;
+	next = buf->page;
+	size = PAGE_SIZE;
+
+	spin_lock_irqsave (&ohci->lock, flags);
+
+	/* dump driver info, then registers in spec order */
+
+	ohci_dbg_nosw(ohci, &next, &size,
+		"bus %s, device %s\n"
+		"%s\n"
+		"%s\n",
+		hcd->self.controller->bus->name,
+		dev_name(hcd->self.controller),
+		hcd->product_desc,
+		hcd_name);
+
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		size -= scnprintf (next, size,
+			"SUSPENDED (no register access)\n");
+		goto done;
+	}
+
+	ohci_dump_status(ohci, &next, &size);
+
+	/* hcca */
+	if (ohci->hcca)
+		ohci_dbg_nosw(ohci, &next, &size,
+			"hcca frame 0x%04x\n", ohci_frame_no(ohci));
+
+	/* other registers mostly affect frame timings */
+	rdata = ohci_readl (ohci, &regs->fminterval);
+	temp = scnprintf (next, size,
+			"fmintvl 0x%08x %sFSMPS=0x%04x FI=0x%04x\n",
+			rdata, (rdata >> 31) ? "FIT " : "",
+			(rdata >> 16) & 0xefff, rdata & 0xffff);
+	size -= temp;
+	next += temp;
+
+	rdata = ohci_readl (ohci, &regs->fmremaining);
+	temp = scnprintf (next, size, "fmremaining 0x%08x %sFR=0x%04x\n",
+			rdata, (rdata >> 31) ? "FRT " : "",
+			rdata & 0x3fff);
+	size -= temp;
+	next += temp;
+
+	rdata = ohci_readl (ohci, &regs->periodicstart);
+	temp = scnprintf (next, size, "periodicstart 0x%04x\n",
+			rdata & 0x3fff);
+	size -= temp;
+	next += temp;
+
+	rdata = ohci_readl (ohci, &regs->lsthresh);
+	temp = scnprintf (next, size, "lsthresh 0x%04x\n",
+			rdata & 0x3fff);
+	size -= temp;
+	next += temp;
+
+	temp = scnprintf (next, size, "hub poll timer %s\n",
+			HCD_POLL_RH(ohci_to_hcd(ohci)) ? "ON" : "off");
+	size -= temp;
+	next += temp;
+
+	/* roothub */
+	ohci_dump_roothub (ohci, 1, &next, &size);
+
+done:
+	spin_unlock_irqrestore (&ohci->lock, flags);
+
+	return PAGE_SIZE - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
+				ssize_t (*fill_func)(struct debug_buffer *))
+{
+	struct debug_buffer *buf;
+
+	buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+	if (buf) {
+		buf->ohci = ohci;
+		buf->fill_func = fill_func;
+		mutex_init(&buf->mutex);
+	}
+
+	return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+	int ret = 0;
+
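+	/* allocate the snapshot page lazily, on first use */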
+	if (!buf->page)
+		buf->page = (char *)get_zeroed_page(GFP_KERNEL);
+
+	if (!buf->page) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = buf->fill_func(buf);
+
+	if (ret >= 0) {
+		buf->count = ret;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+			size_t len, loff_t *offset)
+{
+	struct debug_buffer *buf = file->private_data;
+	int ret = 0;
+
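+	/* fill the snapshot on the first read; later reads stream from it */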
+	mutex_lock(&buf->mutex);
+	if (buf->count == 0) {
+		ret = fill_buffer(buf);
+		if (ret != 0) {
+			mutex_unlock(&buf->mutex);
+			goto out;
+		}
+	}
+	mutex_unlock(&buf->mutex);
+
+	ret = simple_read_from_buffer(user_buf, len, offset,
+				      buf->page, buf->count);
+
+out:
+	return ret;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	struct debug_buffer *buf = file->private_data;
+
+	if (buf) {
+		if (buf->page)
+			free_page((unsigned long)buf->page);
+		kfree(buf);
+	}
+
+	return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private,
+					  fill_periodic_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+	file->private_data = alloc_buffer(inode->i_private,
+					  fill_registers_buffer);
+
+	return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files (struct ohci_hcd *ohci)
+{
+	struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
+	struct dentry *root;
+
+	root = debugfs_create_dir(bus->bus_name, ohci_debug_root);
+	ohci->debug_dir = root;
+
+	debugfs_create_file("async", S_IRUGO, root, ohci, &debug_async_fops);
+	debugfs_create_file("periodic", S_IRUGO, root, ohci,
+			    &debug_periodic_fops);
+	debugfs_create_file("registers", S_IRUGO, root, ohci,
+			    &debug_registers_fops);
+
+	ohci_dbg (ohci, "created debug files\n");
+}
+
+static inline void remove_debug_files (struct ohci_hcd *ohci)
+{
+	debugfs_remove_recursive(ohci->debug_dir);
+}
+
+/*-------------------------------------------------------------------------*/
+
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 0000000..c0c4dcc
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI EXYNOS driver"
+
+static const char hcd_name[] = "ohci-exynos";
+static struct hc_driver __read_mostly exynos_ohci_hc_driver;
+
+#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
+
+#define PHY_NUMBER 3
+
+struct exynos_ohci_hcd {
+	struct clk *clk;
+	struct phy *phy[PHY_NUMBER];
+};
+
+static int exynos_ohci_get_phy(struct device *dev,
+				struct exynos_ohci_hcd *exynos_ohci)
+{
+	struct device_node *child;
+	struct phy *phy;
+	u32 phy_number;
+	int ret;
+
+	/* Get PHYs for the controller */
+	for_each_available_child_of_node(dev->of_node, child) {
+		ret = of_property_read_u32(child, "reg", &phy_number);
+		if (ret) {
+			dev_err(dev, "Failed to parse device tree\n");
+			of_node_put(child);
+			return ret;
+		}
+
+		if (phy_number >= PHY_NUMBER) {
+			dev_err(dev, "Invalid number of PHYs\n");
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		phy = devm_of_phy_get(dev, child, NULL);
+		exynos_ohci->phy[phy_number] = phy;
+		if (IS_ERR(phy)) {
+			ret = PTR_ERR(phy);
+			if (ret == -EPROBE_DEFER) {
+				of_node_put(child);
+				return ret;
+			} else if (ret != -ENOSYS && ret != -ENODEV) {
+				dev_err(dev,
+					"Error retrieving usb2 phy: %d\n", ret);
+				of_node_put(child);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int exynos_ohci_phy_enable(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+	int i;
+	int ret = 0;
+
+	for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
+		if (!IS_ERR(exynos_ohci->phy[i]))
+			ret = phy_power_on(exynos_ohci->phy[i]);
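+	/* on failure, power off any PHYs that were already enabled */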
+	if (ret)
+		for (i--; i >= 0; i--)
+			if (!IS_ERR(exynos_ohci->phy[i]))
+				phy_power_off(exynos_ohci->phy[i]);
+
+	return ret;
+}
+
+static void exynos_ohci_phy_disable(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+	int i;
+
+	for (i = 0; i < PHY_NUMBER; i++)
+		if (!IS_ERR(exynos_ohci->phy[i]))
+			phy_power_off(exynos_ohci->phy[i]);
+}
+
+static int exynos_ohci_probe(struct platform_device *pdev)
+{
+	struct exynos_ohci_hcd *exynos_ohci;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int irq;
+	int err;
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we move to full device tree support this can go away.
+	 */
+	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	hcd = usb_create_hcd(&exynos_ohci_hc_driver,
+				&pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_err(&pdev->dev, "Unable to create HCD\n");
+		return -ENOMEM;
+	}
+
+	exynos_ohci = to_exynos_ohci(hcd);
+
+	err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
+	if (err)
+		goto fail_clk;
+
+	exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
+
+	if (IS_ERR(exynos_ohci->clk)) {
+		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+		err = PTR_ERR(exynos_ohci->clk);
+		goto fail_clk;
+	}
+
+	err = clk_prepare_enable(exynos_ohci->clk);
+	if (err)
+		goto fail_clk;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto fail_io;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ\n");
+		err = irq;
+		goto fail_io;
+	}
+
+	platform_set_drvdata(pdev, hcd);
+
+	err = exynos_ohci_phy_enable(&pdev->dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to enable USB phy\n");
+		goto fail_io;
+	}
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to add USB HCD\n");
+		goto fail_add_hcd;
+	}
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+fail_add_hcd:
+	exynos_ohci_phy_disable(&pdev->dev);
+fail_io:
+	clk_disable_unprepare(exynos_ohci->clk);
+fail_clk:
+	usb_put_hcd(hcd);
+	return err;
+}
+
+static int exynos_ohci_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+
+	usb_remove_hcd(hcd);
+
+	exynos_ohci_phy_disable(&pdev->dev);
+
+	clk_disable_unprepare(exynos_ohci->clk);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
+	bool do_wakeup = device_may_wakeup(dev);
+	int rc = ohci_suspend(hcd, do_wakeup);
+
+	if (rc)
+		return rc;
+
+	exynos_ohci_phy_disable(dev);
+
+	clk_disable_unprepare(exynos_ohci->clk);
+
+	return 0;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+	struct usb_hcd *hcd			= dev_get_drvdata(dev);
+	struct exynos_ohci_hcd *exynos_ohci	= to_exynos_ohci(hcd);
+	int ret;
+
+	clk_prepare_enable(exynos_ohci->clk);
+
+	ret = exynos_ohci_phy_enable(dev);
+	if (ret) {
+		dev_err(dev, "Failed to enable USB phy\n");
+		clk_disable_unprepare(exynos_ohci->clk);
+		return ret;
+	}
+
+	ohci_resume(hcd, false);
+
+	return 0;
+}
+#else
+#define exynos_ohci_suspend	NULL
+#define exynos_ohci_resume	NULL
+#endif
+
+static const struct ohci_driver_overrides exynos_overrides __initconst = {
+	.extra_priv_size =	sizeof(struct exynos_ohci_hcd),
+};
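+
+/*
+ * Note: extra_priv_size makes the OHCI core grow hcd_priv_size (see
+ * ohci_init_driver() in ohci-hcd.c), so the usb_create_hcd() allocation
+ * leaves room for struct exynos_ohci_hcd after struct ohci_hcd; that is
+ * the area to_exynos_ohci() recovers above.
+ */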
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+	.suspend	= exynos_ohci_suspend,
+	.resume		= exynos_ohci_resume,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_ohci_match[] = {
+	{ .compatible = "samsung,exynos4210-ohci" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_ohci_match);
+#endif
+
+static struct platform_driver exynos_ohci_driver = {
+	.probe		= exynos_ohci_probe,
+	.remove		= exynos_ohci_remove,
+	.shutdown	= exynos_ohci_shutdown,
+	.driver = {
+		.name	= "exynos-ohci",
+		.pm	= &exynos_ohci_pm_ops,
+		.of_match_table	= of_match_ptr(exynos_ohci_match),
+	}
+};
+
+static int __init ohci_exynos_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
+	return platform_driver_register(&exynos_ohci_driver);
+}
+module_init(ohci_exynos_init);
+
+static void __exit ohci_exynos_cleanup(void)
+{
+	platform_driver_unregister(&exynos_ohci_driver);
+}
+module_exit(ohci_exynos_cleanup);
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
new file mode 100644
index 0000000..210181f
--- /dev/null
+++ b/drivers/usb/host/ohci-hcd.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * [ Initialisation is based on Linus'  ]
+ * [ uhci code and gregs ohci fragments ]
+ * [ (C) Copyright 1999 Linus Torvalds  ]
+ * [ (C) Copyright 1999 Gregory P. Smith]
+ *
+ *
+ * OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
+ * interfaces (though some non-x86 Intel chips use it).  It supports
+ * smarter hardware than UHCI.  A download link for the spec is
+ * available through the http://www.usb.org website.
+ *
+ * This file is licensed under the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+
+#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
+#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
+
+/*-------------------------------------------------------------------------*/
+
+/* For initializing controller (mask in an HCFS mode too) */
+#define	OHCI_CONTROL_INIT	OHCI_CTRL_CBSR
+#define	OHCI_INTR_INIT \
+		(OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE \
+		| OHCI_INTR_RD | OHCI_INTR_WDH)
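+/* i.e. master interrupt enable plus root-hub status change, unrecoverable
+ * error, resume detect, and writeback-done-head; INTR_SF (start of frame)
+ * is enabled separately, on demand, while EDs are being unlinked.
+ */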
+
+#ifdef __hppa__
+/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
+#define	IR_DISABLE
+#endif
+
+#ifdef CONFIG_ARCH_OMAP
+/* OMAP doesn't support IR (no SMM; not needed) */
+#define	IR_DISABLE
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static const char	hcd_name [] = "ohci_hcd";
+
+#define	STATECHANGE_DELAY	msecs_to_jiffies(300)
+#define	IO_WATCHDOG_DELAY	msecs_to_jiffies(275)
+#define	IO_WATCHDOG_OFF		0xffffff00
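+/*
+ * IO_WATCHDOG_OFF is a sentinel kept in ohci->prev_frame_no while the
+ * I/O watchdog is idle; real frame numbers are only 16 bits wide, so
+ * they can never compare equal to it.
+ */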
+
+#include "ohci.h"
+#include "pci-quirks.h"
+
+static void ohci_dump(struct ohci_hcd *ohci);
+static void ohci_stop(struct usb_hcd *hcd);
+static void io_watchdog_func(struct timer_list *t);
+
+#include "ohci-hub.c"
+#include "ohci-dbg.c"
+#include "ohci-mem.c"
+#include "ohci-q.c"
+
+
+/*
+ * On architectures with edge-triggered interrupts we must never return
+ * IRQ_NONE.
+ */
+#if defined(CONFIG_SA1111)  /* ... or other edge-triggered systems */
+#define IRQ_NOTMINE	IRQ_HANDLED
+#else
+#define IRQ_NOTMINE	IRQ_NONE
+#endif
+
+
+/* Some boards misreport power switching/overcurrent */
+static bool distrust_firmware = true;
+module_param (distrust_firmware, bool, 0);
+MODULE_PARM_DESC (distrust_firmware,
+	"true to distrust firmware power/overcurrent setup");
+
+/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
+static bool no_handshake;
+module_param (no_handshake, bool, 0);
+MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
+
+/*-------------------------------------------------------------------------*/
+
+static int number_of_tds(struct urb *urb)
+{
+	int			len, i, num, this_sg_len;
+	struct scatterlist	*sg;
+
+	len = urb->transfer_buffer_length;
+	i = urb->num_mapped_sgs;
+
+	if (len > 0 && i > 0) {		/* Scatter-gather transfer */
+		num = 0;
+		sg = urb->sg;
+		for (;;) {
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+			num += DIV_ROUND_UP(this_sg_len, 4096);
+			len -= this_sg_len;
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+		}
+
+	} else {			/* Non-SG transfer */
+		/* one TD for every 4096 Bytes (could be up to 8K) */
+		num = DIV_ROUND_UP(len, 4096);
+	}
+	return num;
+}
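+
+/*
+ * Worked example (illustrative): a 10000-byte non-SG transfer needs
+ * DIV_ROUND_UP(10000, 4096) == 3 TDs; an SG transfer with two mapped
+ * segments of 5000 bytes each needs 2 + 2 == 4, since every segment
+ * is rounded up separately.
+ */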
+
+/*
+ * queue up an urb for anything except the root hub
+ */
+static int ohci_urb_enqueue (
+	struct usb_hcd	*hcd,
+	struct urb	*urb,
+	gfp_t		mem_flags
+) {
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	struct ed	*ed;
+	urb_priv_t	*urb_priv;
+	unsigned int	pipe = urb->pipe;
+	int		i, size = 0;
+	unsigned long	flags;
+	int		retval = 0;
+
+	/* every endpoint has a ed, locate and maybe (re)initialize it */
+	ed = ed_get(ohci, urb->ep, urb->dev, pipe, urb->interval);
+	if (! ed)
+		return -ENOMEM;
+
+	/* for the private part of the URB we need the number of TDs (size) */
+	switch (ed->type) {
+		case PIPE_CONTROL:
+			/* td_submit_urb() doesn't yet handle these */
+			if (urb->transfer_buffer_length > 4096)
+				return -EMSGSIZE;
+
+			/* 1 TD for setup, 1 for ACK, plus ... */
+			size = 2;
+			/* FALLTHROUGH */
+		// case PIPE_INTERRUPT:
+		// case PIPE_BULK:
+		default:
+			size += number_of_tds(urb);
+			/* maybe a zero-length packet to wrap it up */
+			if (size == 0)
+				size++;
+			else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
+				&& (urb->transfer_buffer_length
+					% usb_maxpacket (urb->dev, pipe,
+						usb_pipeout (pipe))) == 0)
+				size++;
+			break;
+		case PIPE_ISOCHRONOUS: /* number of packets from URB */
+			size = urb->number_of_packets;
+			break;
+	}
+
+	/* allocate the private part of the URB */
+	urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
+			mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+	INIT_LIST_HEAD (&urb_priv->pending);
+	urb_priv->length = size;
+	urb_priv->ed = ed;
+
+	/* allocate the TDs (deferring hash chain updates) */
+	for (i = 0; i < size; i++) {
+		urb_priv->td [i] = td_alloc (ohci, mem_flags);
+		if (!urb_priv->td [i]) {
+			urb_priv->length = i;
+			urb_free_priv (ohci, urb_priv);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_irqsave (&ohci->lock, flags);
+
+	/* don't submit to a dead HC */
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		retval = -ENODEV;
+		goto fail;
+	}
+	if (ohci->rh_state != OHCI_RH_RUNNING) {
+		retval = -ENODEV;
+		goto fail;
+	}
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (retval)
+		goto fail;
+
+	/* schedule the ed if needed */
+	if (ed->state == ED_IDLE) {
+		retval = ed_schedule (ohci, ed);
+		if (retval < 0) {
+			usb_hcd_unlink_urb_from_ep(hcd, urb);
+			goto fail;
+		}
+
+		/* Start up the I/O watchdog timer, if it's not running */
+		if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
+				list_empty(&ohci->eds_in_use) &&
+				!(ohci->flags & OHCI_QUIRK_QEMU)) {
+			ohci->prev_frame_no = ohci_frame_no(ohci);
+			mod_timer(&ohci->io_watchdog,
+					jiffies + IO_WATCHDOG_DELAY);
+		}
+		list_add(&ed->in_use_list, &ohci->eds_in_use);
+
+		if (ed->type == PIPE_ISOCHRONOUS) {
+			u16	frame = ohci_frame_no(ohci);
+
+			/* delay a few frames before the first TD */
+			frame += max_t (u16, 8, ed->interval);
+			frame &= ~(ed->interval - 1);
+			frame |= ed->branch;
+			urb->start_frame = frame;
+			ed->last_iso = frame + ed->interval * (size - 1);
+		}
+	} else if (ed->type == PIPE_ISOCHRONOUS) {
+		u16	next = ohci_frame_no(ohci) + 1;
+		u16	frame = ed->last_iso + ed->interval;
+		u16	length = ed->interval * (size - 1);
+
+		/* Behind the scheduling threshold? */
+		if (unlikely(tick_before(frame, next))) {
+
+			/* URB_ISO_ASAP: Round up to the first available slot */
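+			/*
+			 * e.g. interval 8, frame 5, next 22 (illustrative):
+			 * (22 - 5 + 7) & -8 == 24, so frame becomes 29, the
+			 * first slot at or after "next" that keeps the
+			 * stream's phase (29 % 8 == 5 % 8).
+			 */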
+			if (urb->transfer_flags & URB_ISO_ASAP) {
+				frame += (next - frame + ed->interval - 1) &
+						-ed->interval;
+
+			/*
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
+			 */
+			} else {
+				/*
+				 * Some OHCI hardware doesn't handle late TDs
+				 * correctly.  After retiring them it proceeds
+				 * to the next ED instead of the next TD.
+				 * Therefore we have to omit the late TDs
+				 * entirely.
+				 */
+				urb_priv->td_cnt = DIV_ROUND_UP(
+						(u16) (next - frame),
+						ed->interval);
+				if (urb_priv->td_cnt >= urb_priv->length) {
+					++urb_priv->td_cnt;	/* Mark it */
+					ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+							urb, frame, length,
+							next);
+				}
+			}
+		}
+		urb->start_frame = frame;
+		ed->last_iso = frame + length;
+	}
+
+	/* fill the TDs and link them to the ed; and
+	 * enable that part of the schedule, if needed
+	 * and update count of queued periodic urbs
+	 */
+	urb->hcpriv = urb_priv;
+	td_submit_urb (ohci, urb);
+
+fail:
+	if (retval)
+		urb_free_priv (ohci, urb_priv);
+	spin_unlock_irqrestore (&ohci->lock, flags);
+	return retval;
+}
+
+/*
+ * Decouple the URB from the HC queues (TDs, urb_priv).  Reporting is
+ * always done asynchronously, and we might be dealing with an urb
+ * that's partially transferred, or an ED with other urbs being
+ * unlinked.
+ */
+static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+	unsigned long		flags;
+	int			rc;
+	urb_priv_t		*urb_priv;
+
+	spin_lock_irqsave (&ohci->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc == 0) {
+
+		/* Unless an IRQ completed the unlink while it was being
+		 * handed to us, flag it for unlink and giveback, and force
+		 * some upcoming INTR_SF to call finish_unlinks()
+		 */
+		urb_priv = urb->hcpriv;
+		if (urb_priv->ed->state == ED_OPER)
+			start_ed_unlink(ohci, urb_priv->ed);
+
+		if (ohci->rh_state != OHCI_RH_RUNNING) {
+			/* With HC dead, we can clean up right away */
+			ohci_work(ohci);
+		}
+	}
+	spin_unlock_irqrestore (&ohci->lock, flags);
+	return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* frees config/altsetting state for endpoints,
+ * including ED memory, dummy TD, and bulk/intr data toggle
+ */
+
+static void
+ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+	unsigned long		flags;
+	struct ed		*ed = ep->hcpriv;
+	unsigned		limit = 1000;
+
+	/* ASSERT:  any requests/urbs are being unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+
+	if (!ed)
+		return;
+
+rescan:
+	spin_lock_irqsave (&ohci->lock, flags);
+
+	if (ohci->rh_state != OHCI_RH_RUNNING) {
+sanitize:
+		ed->state = ED_IDLE;
+		ohci_work(ohci);
+	}
+
+	switch (ed->state) {
+	case ED_UNLINK:		/* wait for hw to finish? */
+		/* major IRQ delivery trouble loses INTR_SF too... */
+		if (limit-- == 0) {
+			ohci_warn(ohci, "ED unlink timeout\n");
+			goto sanitize;
+		}
+		spin_unlock_irqrestore (&ohci->lock, flags);
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case ED_IDLE:		/* fully unlinked */
+		if (list_empty (&ed->td_list)) {
+			td_free (ohci, ed->dummy);
+			ed_free (ohci, ed);
+			break;
+		}
+		/* fall through */
+	default:
+		/* caller was supposed to have unlinked any requests;
+		 * that's not our job.  can't recover; must leak ed.
+		 */
+		ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
+			ed, ep->desc.bEndpointAddress, ed->state,
+			list_empty (&ed->td_list) ? "" : " (has tds)");
+		td_free (ohci, ed->dummy);
+		break;
+	}
+	ep->hcpriv = NULL;
+	spin_unlock_irqrestore (&ohci->lock, flags);
+}
+
+static int ohci_get_frame (struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+
+	return ohci_frame_no(ohci);
+}
+
+static void ohci_usb_reset (struct ohci_hcd *ohci)
+{
+	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
+	ohci->hc_control &= OHCI_CTRL_RWC;
+	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	ohci->rh_state = OHCI_RH_HALTED;
+}
+
+/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
+ * other cases where the next software may expect clean state from the
+ * "firmware".  this is bus-neutral, unlike shutdown() methods.
+ */
+static void
+ohci_shutdown (struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci;
+
+	ohci = hcd_to_ohci (hcd);
+	ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
+
+	/* Software reset, after which the controller goes into SUSPEND */
+	ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+	ohci_readl(ohci, &ohci->regs->cmdstatus);	/* flush the writes */
+	udelay(10);
+
+	ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
+	ohci->rh_state = OHCI_RH_HALTED;
+}
+
+/*-------------------------------------------------------------------------*
+ * HC functions
+ *-------------------------------------------------------------------------*/
+
+/* init memory, and kick BIOS/SMM off */
+
+static int ohci_init (struct ohci_hcd *ohci)
+{
+	int ret;
+	struct usb_hcd *hcd = ohci_to_hcd(ohci);
+
+	/* Accept arbitrarily long scatter-gather lists */
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
+
+	if (distrust_firmware)
+		ohci->flags |= OHCI_QUIRK_HUB_POWER;
+
+	ohci->rh_state = OHCI_RH_HALTED;
+	ohci->regs = hcd->regs;
+
+	/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
+	 * was never needed for most non-PCI systems ... remove the code?
+	 */
+
+#ifndef IR_DISABLE
+	/* SMM owns the HC?  not for long! */
+	if (!no_handshake && ohci_readl (ohci,
+					&ohci->regs->control) & OHCI_CTRL_IR) {
+		u32 temp;
+
+		ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");
+
+		/* this timeout is arbitrary.  we make it long, so systems
+		 * depending on usb keyboards may be usable even if the
+		 * BIOS/SMM code seems pretty broken.
+		 */
+		temp = 500;	/* arbitrary: five seconds */
+
+		ohci_writel (ohci, OHCI_INTR_OC, &ohci->regs->intrenable);
+		ohci_writel (ohci, OHCI_OCR, &ohci->regs->cmdstatus);
+		while (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_IR) {
+			msleep (10);
+			if (--temp == 0) {
+				ohci_err (ohci, "USB HC takeover failed!"
+					"  (BIOS/SMM bug)\n");
+				return -EBUSY;
+			}
+		}
+		ohci_usb_reset (ohci);
+	}
+#endif
+
+	/* Disable HC interrupts */
+	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+
+	/* flush the writes, and save key bits like RWC */
+	if (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_RWC)
+		ohci->hc_control |= OHCI_CTRL_RWC;
+
+	/* Read the number of ports unless overridden */
+	if (ohci->num_ports == 0)
+		ohci->num_ports = roothub_a(ohci) & RH_A_NDP;
+
+	if (ohci->hcca)
+		return 0;
+
+	timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
+	ohci->prev_frame_no = IO_WATCHDOG_OFF;
+
+	ohci->hcca = dma_alloc_coherent (hcd->self.controller,
+			sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
+	if (!ohci->hcca)
+		return -ENOMEM;
+
+	ret = ohci_mem_init (ohci);
+	if (ret < 0)
+		ohci_stop (hcd);
+	else
+		create_debug_files (ohci);
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Start an OHCI controller: set the bus operational, reset USB and
+ * the controller, and enable interrupts.
+ */
+static int ohci_run (struct ohci_hcd *ohci)
+{
+	u32			mask, val;
+	int			first = ohci->fminterval == 0;
+	struct usb_hcd		*hcd = ohci_to_hcd(ohci);
+
+	ohci->rh_state = OHCI_RH_HALTED;
+
+	/* boot firmware should have set this up (5.1.1.3.1) */
+	if (first) {
+
+		val = ohci_readl (ohci, &ohci->regs->fminterval);
+		ohci->fminterval = val & 0x3fff;
+		if (ohci->fminterval != FI)
+			ohci_dbg (ohci, "fminterval delta %d\n",
+				ohci->fminterval - FI);
+		ohci->fminterval |= FSMP (ohci->fminterval) << 16;
+		/* also: power/overcurrent flags in roothub.a */
+	}
+
+	/* Reset USB nearly "by the book".  RemoteWakeupConnected has
+	 * to be checked in case boot firmware (BIOS/SMM/...) has set up
+	 * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
+	 * If the bus glue detected wakeup capability then it should
+	 * already be enabled; if so we'll just enable it again.
+	 */
+	if ((ohci->hc_control & OHCI_CTRL_RWC) != 0)
+		device_set_wakeup_capable(hcd->self.controller, 1);
+
+	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_OPER:
+		val = 0;
+		break;
+	case OHCI_USB_SUSPEND:
+	case OHCI_USB_RESUME:
+		ohci->hc_control &= OHCI_CTRL_RWC;
+		ohci->hc_control |= OHCI_USB_RESUME;
+		val = 10 /* msec wait */;
+		break;
+	// case OHCI_USB_RESET:
+	default:
+		ohci->hc_control &= OHCI_CTRL_RWC;
+		ohci->hc_control |= OHCI_USB_RESET;
+		val = 50 /* msec wait */;
+		break;
+	}
+	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	// flush the writes
+	(void) ohci_readl (ohci, &ohci->regs->control);
+	msleep(val);
+
+	memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
+
+	/* 2msec timelimit here means no irqs/preempt */
+	spin_lock_irq (&ohci->lock);
+
+retry:
+	/* HC Reset requires max 10 us delay */
+	ohci_writel (ohci, OHCI_HCR,  &ohci->regs->cmdstatus);
+	val = 30;	/* ... allow extra time */
+	while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
+		if (--val == 0) {
+			spin_unlock_irq (&ohci->lock);
+			ohci_err (ohci, "USB HC reset timed out!\n");
+			return -1;
+		}
+		udelay (1);
+	}
+
+	/* now we're in the SUSPEND state ... must go OPERATIONAL
+	 * within 2msec else HC enters RESUME
+	 *
+	 * ... but some hardware won't init fmInterval "by the book"
+	 * (SiS, OPTi ...), so reset again instead.  SiS doesn't need
+	 * this if we write fmInterval after we're OPERATIONAL.
+	 * Unclear about ALi, ServerWorks, and others ... this could
+	 * easily be a longstanding bug in chip init on Linux.
+	 */
+	if (ohci->flags & OHCI_QUIRK_INITRESET) {
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+		// flush those writes
+		(void) ohci_readl (ohci, &ohci->regs->control);
+	}
+
+	/* Tell the controller where the control and bulk lists are;
+	 * the lists are empty now. */
+	ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
+	ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
+
+	/* a reset clears this */
+	ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
+
+	periodic_reinit (ohci);
+
+	/* some OHCI implementations are finicky about how they init.
+	 * bogus values here mean not even enumeration could work.
+	 */
+	if ((ohci_readl (ohci, &ohci->regs->fminterval) & 0x3fff0000) == 0
+			|| !ohci_readl (ohci, &ohci->regs->periodicstart)) {
+		if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
+			ohci->flags |= OHCI_QUIRK_INITRESET;
+			ohci_dbg (ohci, "enabling initreset quirk\n");
+			goto retry;
+		}
+		spin_unlock_irq (&ohci->lock);
+		ohci_err (ohci, "init err (%08x %04x)\n",
+			ohci_readl (ohci, &ohci->regs->fminterval),
+			ohci_readl (ohci, &ohci->regs->periodicstart));
+		return -EOVERFLOW;
+	}
+
+	/* use rhsc irqs after hub_wq is allocated */
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	hcd->uses_new_polling = 1;
+
+	/* start controller operations */
+	ohci->hc_control &= OHCI_CTRL_RWC;
+	ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
+	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	ohci->rh_state = OHCI_RH_RUNNING;
+
+	/* wake on ConnectStatusChange, matching external hubs */
+	ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
+
+	/* Choose the interrupts we care about now, others later on demand */
+	mask = OHCI_INTR_INIT;
+	ohci_writel (ohci, ~0, &ohci->regs->intrstatus);
+	ohci_writel (ohci, mask, &ohci->regs->intrenable);
+
+	/* handle root hub init quirks ... */
+	val = roothub_a (ohci);
+	val &= ~(RH_A_PSM | RH_A_OCPM);
+	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
+		/* NSC 87560 and maybe others */
+		val |= RH_A_NOCP;
+		val &= ~(RH_A_POTPGT | RH_A_NPS);
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
+	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
+		/* hub power always on; required for AMD-756 and some
+		 * Mac platforms.  ganged overcurrent reporting, if any.
+		 */
+		val |= RH_A_NPS;
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
+	}
+	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
+	ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
+						&ohci->regs->roothub.b);
+	// flush those writes
+	(void) ohci_readl (ohci, &ohci->regs->control);
+
+	ohci->next_statechange = jiffies + STATECHANGE_DELAY;
+	spin_unlock_irq (&ohci->lock);
+
+	// POTPGT delay is bits 24-31, in 2 ms units.
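+	// e.g. POTPGT == 25 yields (val >> 23) & 0x1fe == 50, i.e. 50 ms.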
+	mdelay ((val >> 23) & 0x1fe);
+
+	ohci_dump(ohci);
+
+	return 0;
+}
+
+/* ohci_setup routine for generic controller initialization */
+
+int ohci_setup(struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
+
+	ohci_hcd_init(ohci);
+
+	return ohci_init(ohci);
+}
+EXPORT_SYMBOL_GPL(ohci_setup);
+
+/* ohci_start routine: generic controller start, shared by all OHCI bus glue */
+static int ohci_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
+	int	ret;
+
+	ret = ohci_run(ohci);
+	if (ret < 0) {
+		ohci_err(ohci, "can't start\n");
+		ohci_stop(hcd);
+	}
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some OHCI controllers are known to lose track of completed TDs.  They
+ * don't add the TDs to the hardware done queue, which means we never see
+ * them as being completed.
+ *
+ * This watchdog routine checks for such problems.  Without some way to
+ * tell when those TDs have completed, we would never take their EDs off
+ * the unlink list.  As a result, URBs could never be dequeued and
+ * endpoints could never be released.
+ */
+static void io_watchdog_func(struct timer_list *t)
+{
+	struct ohci_hcd	*ohci = from_timer(ohci, t, io_watchdog);
+	bool		takeback_all_pending = false;
+	u32		status;
+	u32		head;
+	struct ed	*ed;
+	struct td	*td, *td_start, *td_next;
+	unsigned	frame_no, prev_frame_no = IO_WATCHDOG_OFF;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	/*
+	 * One way to lose track of completed TDs is if the controller
+	 * never writes back the done queue head.  If it hasn't been
+	 * written back since the last time this function ran and if it
+	 * was non-empty at that time, something is badly wrong with the
+	 * hardware.
+	 */
+	status = ohci_readl(ohci, &ohci->regs->intrstatus);
+	if (!(status & OHCI_INTR_WDH) && ohci->wdh_cnt == ohci->prev_wdh_cnt) {
+		if (ohci->prev_donehead) {
+			ohci_err(ohci, "HcDoneHead not written back; disabled\n");
+ died:
+			usb_hc_died(ohci_to_hcd(ohci));
+			ohci_dump(ohci);
+			ohci_shutdown(ohci_to_hcd(ohci));
+			goto done;
+		} else {
+			/* No write back because the done queue was empty */
+			takeback_all_pending = true;
+		}
+	}
+
+	/* Check every ED which might have pending TDs */
+	list_for_each_entry(ed, &ohci->eds_in_use, in_use_list) {
+		if (ed->pending_td) {
+			if (takeback_all_pending ||
+					OKAY_TO_TAKEBACK(ohci, ed)) {
+				unsigned tmp = hc32_to_cpu(ohci, ed->hwINFO);
+
+				ohci_dbg(ohci, "takeback pending TD for dev %d ep 0x%x\n",
+						0x007f & tmp,
+						(0x000f & (tmp >> 7)) +
+							((tmp & ED_IN) >> 5));
+				add_to_done_list(ohci, ed->pending_td);
+			}
+		}
+
+		/* Starting from the latest pending TD, */
+		td = ed->pending_td;
+
+		/* or the last TD on the done list, */
+		if (!td) {
+			list_for_each_entry(td_next, &ed->td_list, td_list) {
+				if (!td_next->next_dl_td)
+					break;
+				td = td_next;
+			}
+		}
+
+		/* find the last TD processed by the controller. */
+		head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
+		td_start = td;
+		td_next = list_prepare_entry(td, &ed->td_list, td_list);
+		list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
+			if (head == (u32) td_next->td_dma)
+				break;
+			td = td_next;	/* head pointer has passed this TD */
+		}
+		if (td != td_start) {
+			/*
+			 * In case a WDH cycle is in progress, we will wait
+			 * for the next two cycles to complete before assuming
+			 * this TD will never get on the done queue.
+			 */
+			ed->takeback_wdh_cnt = ohci->wdh_cnt + 2;
+			ed->pending_td = td;
+		}
+	}
+
+	ohci_work(ohci);
+
+	if (ohci->rh_state == OHCI_RH_RUNNING) {
+
+		/*
+		 * Sometimes a controller just stops working.  We can tell
+		 * by checking that the frame counter has advanced since
+		 * the last time we ran.
+		 *
+		 * But be careful: Some controllers violate the spec by
+		 * stopping their frame counter when no ports are active.
+		 */
+		frame_no = ohci_frame_no(ohci);
+		if (frame_no == ohci->prev_frame_no) {
+			int		active_cnt = 0;
+			int		i;
+			unsigned	tmp;
+
+			for (i = 0; i < ohci->num_ports; ++i) {
+				tmp = roothub_portstatus(ohci, i);
+				/* Enabled and not suspended? */
+				if ((tmp & RH_PS_PES) && !(tmp & RH_PS_PSS))
+					++active_cnt;
+			}
+
+			if (active_cnt > 0) {
+				ohci_err(ohci, "frame counter not updating; disabled\n");
+				goto died;
+			}
+		}
+		if (!list_empty(&ohci->eds_in_use)) {
+			prev_frame_no = frame_no;
+			ohci->prev_wdh_cnt = ohci->wdh_cnt;
+			ohci->prev_donehead = ohci_readl(ohci,
+					&ohci->regs->donehead);
+			mod_timer(&ohci->io_watchdog,
+					jiffies + IO_WATCHDOG_DELAY);
+		}
+	}
+
+ done:
+	ohci->prev_frame_no = prev_frame_no;
+	spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
+/* an interrupt happens */
+
+static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+	struct ohci_regs __iomem *regs = ohci->regs;
+	int			ints;
+
+	/* Read interrupt status (and flush pending writes).  We ignore the
+	 * optimization of checking the LSB of hcca->done_head; it doesn't
+	 * work on all systems (edge triggering for OHCI can be a factor).
+	 */
+	ints = ohci_readl(ohci, &regs->intrstatus);
+
+	/* Check for an all 1's result which is a typical consequence
+	 * of dead, unclocked, or unplugged (CardBus...) devices
+	 */
+	if (ints == ~(u32)0) {
+		ohci->rh_state = OHCI_RH_HALTED;
+		ohci_dbg (ohci, "device removed!\n");
+		usb_hc_died(hcd);
+		return IRQ_HANDLED;
+	}
+
+	/* We only care about interrupts that are enabled */
+	ints &= ohci_readl(ohci, &regs->intrenable);
+
+	/* interrupt for some other device? */
+	if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
+		return IRQ_NOTMINE;
+
+	if (ints & OHCI_INTR_UE) {
+		// e.g. due to PCI Master/Target Abort
+		if (quirk_nec(ohci)) {
+			/* Workaround for a silicon bug in some NEC chips used
+			 * in Apple's PowerBooks. Adapted from Darwin code.
+			 */
+			ohci_err (ohci, "OHCI Unrecoverable Error, scheduling NEC chip restart\n");
+
+			ohci_writel (ohci, OHCI_INTR_UE, &regs->intrdisable);
+
+			schedule_work (&ohci->nec_work);
+		} else {
+			ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+			ohci->rh_state = OHCI_RH_HALTED;
+			usb_hc_died(hcd);
+		}
+
+		ohci_dump(ohci);
+		ohci_usb_reset (ohci);
+	}
+
+	if (ints & OHCI_INTR_RHSC) {
+		ohci_dbg(ohci, "rhsc\n");
+		ohci->next_statechange = jiffies + STATECHANGE_DELAY;
+		ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
+				&regs->intrstatus);
+
+		/* NOTE: Vendors didn't always make the same implementation
+		 * choices for RHSC.  Many followed the spec; RHSC triggers
+		 * on an edge, like setting and maybe clearing a port status
+		 * change bit.  With others it's level-triggered, active
+		 * until hub_wq clears all the port status change bits.  We'll
+		 * always disable it here and rely on polling until hub_wq
+		 * re-enables it.
+		 */
+		ohci_writel(ohci, OHCI_INTR_RHSC, &regs->intrdisable);
+		usb_hcd_poll_rh_status(hcd);
+	}
+
+	/* For connect and disconnect events, we expect the controller
+	 * to turn on RHSC along with RD.  But for remote wakeup events
+	 * this might not happen.
+	 */
+	else if (ints & OHCI_INTR_RD) {
+		ohci_dbg(ohci, "resume detect\n");
+		ohci_writel(ohci, OHCI_INTR_RD, &regs->intrstatus);
+		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+		if (ohci->autostop) {
+			spin_lock (&ohci->lock);
+			ohci_rh_resume (ohci);
+			spin_unlock (&ohci->lock);
+		} else
+			usb_hcd_resume_root_hub(hcd);
+	}
+
+	spin_lock(&ohci->lock);
+	if (ints & OHCI_INTR_WDH)
+		update_done_list(ohci);
+
+	/* could track INTR_SO to reduce available PCI/... bandwidth */
+
+	/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
+	 * when there's still unlinking to be done (next frame).
+	 */
+	ohci_work(ohci);
+	if ((ints & OHCI_INTR_SF) != 0 && !ohci->ed_rm_list
+			&& ohci->rh_state == OHCI_RH_RUNNING)
+		ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
+
+	if (ohci->rh_state == OHCI_RH_RUNNING) {
+		ohci_writel (ohci, ints, &regs->intrstatus);
+		if (ints & OHCI_INTR_WDH)
+			++ohci->wdh_cnt;
+
+		ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
+		// flush those writes
+		(void) ohci_readl (ohci, &ohci->regs->control);
+	}
+	spin_unlock(&ohci->lock);
+
+	return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ohci_stop (struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+
+	ohci_dump(ohci);
+
+	if (quirk_nec(ohci))
+		flush_work(&ohci->nec_work);
+	del_timer_sync(&ohci->io_watchdog);
+	ohci->prev_frame_no = IO_WATCHDOG_OFF;
+
+	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+	ohci_usb_reset(ohci);
+	free_irq(hcd->irq, hcd);
+	hcd->irq = 0;
+
+	if (quirk_amdiso(ohci))
+		usb_amd_dev_put();
+
+	remove_debug_files (ohci);
+	ohci_mem_cleanup (ohci);
+	if (ohci->hcca) {
+		dma_free_coherent (hcd->self.controller,
+				sizeof *ohci->hcca,
+				ohci->hcca, ohci->hcca_dma);
+		ohci->hcca = NULL;
+		ohci->hcca_dma = 0;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_PM) || defined(CONFIG_USB_PCI)
+
+/* must not be called from interrupt context */
+int ohci_restart(struct ohci_hcd *ohci)
+{
+	int temp;
+	int i;
+	struct urb_priv *priv;
+
+	ohci_init(ohci);
+	spin_lock_irq(&ohci->lock);
+	ohci->rh_state = OHCI_RH_HALTED;
+
+	/* Recycle any "live" eds/tds (and urbs). */
+	if (!list_empty (&ohci->pending))
+		ohci_dbg(ohci, "abort schedule...\n");
+	list_for_each_entry (priv, &ohci->pending, pending) {
+		struct urb	*urb = priv->td[0]->urb;
+		struct ed	*ed = priv->ed;
+
+		switch (ed->state) {
+		case ED_OPER:
+			ed->state = ED_UNLINK;
+			ed->hwINFO |= cpu_to_hc32(ohci, ED_DEQUEUE);
+			ed_deschedule (ohci, ed);
+
+			ed->ed_next = ohci->ed_rm_list;
+			ed->ed_prev = NULL;
+			ohci->ed_rm_list = ed;
+			/* FALLTHROUGH */
+		case ED_UNLINK:
+			break;
+		default:
+			ohci_dbg(ohci, "bogus ed %p state %d\n",
+					ed, ed->state);
+		}
+
+		if (!urb->unlinked)
+			urb->unlinked = -ESHUTDOWN;
+	}
+	ohci_work(ohci);
+	spin_unlock_irq(&ohci->lock);
+
+	/* paranoia, in case that didn't work: */
+
+	/* empty the interrupt branches */
+	for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
+	for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;
+
+	/* no EDs to remove */
+	ohci->ed_rm_list = NULL;
+
+	/* empty control and bulk lists */
+	ohci->ed_controltail = NULL;
+	ohci->ed_bulktail    = NULL;
+
+	temp = ohci_run (ohci);
+	if (temp < 0) {
+		ohci_err (ohci, "can't restart, %d\n", temp);
+		return temp;
+	}
+	ohci_dbg(ohci, "restart complete\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ohci_restart);
+
+#endif
+
+#ifdef CONFIG_PM
+
+int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	unsigned long	flags;
+	int		rc = 0;
+
+	/* Disable irq emission and mark HW inaccessible.  Use
+	 * the spinlock to properly synchronize with possible pending
+	 * RH suspend or resume activity.
+	 */
+	spin_lock_irqsave (&ohci->lock, flags);
+	ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+	(void)ohci_readl(ohci, &ohci->regs->intrdisable);
+
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	spin_unlock_irqrestore (&ohci->lock, flags);
+
+	synchronize_irq(hcd->irq);
+
+	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+		ohci_resume(hcd, false);
+		rc = -EBUSY;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ohci_suspend);
+
+
+int ohci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
+	int			port;
+	bool			need_reinit = false;
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated)
+		ohci_usb_reset(ohci);
+
+	/* See if the controller is already running or has been reset */
+	ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+	if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+		need_reinit = true;
+	} else {
+		switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+		case OHCI_USB_OPER:
+		case OHCI_USB_RESET:
+			need_reinit = true;
+		}
+	}
+
+	/* If needed, reinitialize and suspend the root hub */
+	if (need_reinit) {
+		spin_lock_irq(&ohci->lock);
+		ohci_rh_resume(ohci);
+		ohci_rh_suspend(ohci, 0);
+		spin_unlock_irq(&ohci->lock);
+	}
+
+	/* Normally just turn on port power and enable interrupts */
+	else {
+		ohci_dbg(ohci, "powerup ports\n");
+		for (port = 0; port < ohci->num_ports; port++)
+			ohci_writel(ohci, RH_PS_PPS,
+					&ohci->regs->roothub.portstatus[port]);
+
+		ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrenable);
+		ohci_readl(ohci, &ohci->regs->intrenable);
+		msleep(20);
+	}
+
+	usb_hcd_resume_root_hub(hcd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ohci_resume);
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Generic structure: This gets copied for platform drivers so that
+ * individual entries can be overridden as needed.
+ */
+
+static const struct hc_driver ohci_hc_driver = {
+	.description =          hcd_name,
+	.product_desc =         "OHCI Host Controller",
+	.hcd_priv_size =        sizeof(struct ohci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =                  ohci_irq,
+	.flags =                HCD_MEMORY | HCD_USB11,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =                ohci_setup,
+	.start =                ohci_start,
+	.stop =                 ohci_stop,
+	.shutdown =             ohci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =          ohci_urb_enqueue,
+	.urb_dequeue =          ohci_urb_dequeue,
+	.endpoint_disable =     ohci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =     ohci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =      ohci_hub_status_data,
+	.hub_control =          ohci_hub_control,
+#ifdef CONFIG_PM
+	.bus_suspend =          ohci_bus_suspend,
+	.bus_resume =           ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+void ohci_init_driver(struct hc_driver *drv,
+		const struct ohci_driver_overrides *over)
+{
+	/* Copy the generic table to drv and then apply the overrides */
+	*drv = ohci_hc_driver;
+
+	if (over) {
+		drv->product_desc = over->product_desc;
+		drv->hcd_priv_size += over->extra_priv_size;
+		if (over->reset)
+			drv->reset = over->reset;
+	}
+}
+EXPORT_SYMBOL_GPL(ohci_init_driver);
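+
+/*
+ * Usage sketch (illustrative; the foo_* names are not from this file):
+ * a bus glue typically declares its own hc_driver plus overrides and
+ * calls this once at module init, as ohci-exynos.c does above:
+ *
+ *	static struct hc_driver __read_mostly foo_ohci_hc_driver;
+ *	static const struct ohci_driver_overrides foo_overrides __initconst = {
+ *		.extra_priv_size = sizeof(struct foo_ohci_priv),
+ *	};
+ *	...
+ *	ohci_init_driver(&foo_ohci_hc_driver, &foo_overrides);
+ */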
+
+/*-------------------------------------------------------------------------*/
+
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE ("GPL");
+
+#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
+#include "ohci-sa1111.c"
+#define SA1111_DRIVER		ohci_hcd_sa1111_driver
+#endif
+
+#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
+#include "ohci-ppc-of.c"
+#define OF_PLATFORM_DRIVER	ohci_hcd_ppc_of_driver
+#endif
+
+#ifdef CONFIG_PPC_PS3
+#include "ohci-ps3.c"
+#define PS3_SYSTEM_BUS_DRIVER	ps3_ohci_driver
+#endif
+
+#ifdef CONFIG_MFD_SM501
+#include "ohci-sm501.c"
+#define SM501_OHCI_DRIVER	ohci_hcd_sm501_driver
+#endif
+
+#ifdef CONFIG_MFD_TC6393XB
+#include "ohci-tmio.c"
+#define TMIO_OHCI_DRIVER	ohci_hcd_tmio_driver
+#endif
+
+static int __init ohci_hcd_mod_init(void)
+{
+	int retval = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
+	pr_debug ("%s: block sizes: ed %zd td %zd\n", hcd_name,
+		sizeof (struct ed), sizeof (struct td));
+	set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+
+	ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
+
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
+	if (retval < 0)
+		goto error_ps3;
+#endif
+
+#ifdef OF_PLATFORM_DRIVER
+	retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+	if (retval < 0)
+		goto error_of_platform;
+#endif
+
+#ifdef SA1111_DRIVER
+	retval = sa1111_driver_register(&SA1111_DRIVER);
+	if (retval < 0)
+		goto error_sa1111;
+#endif
+
+#ifdef SM501_OHCI_DRIVER
+	retval = platform_driver_register(&SM501_OHCI_DRIVER);
+	if (retval < 0)
+		goto error_sm501;
+#endif
+
+#ifdef TMIO_OHCI_DRIVER
+	retval = platform_driver_register(&TMIO_OHCI_DRIVER);
+	if (retval < 0)
+		goto error_tmio;
+#endif
+
+	return retval;
+
+	/* Error path */
+#ifdef TMIO_OHCI_DRIVER
+	platform_driver_unregister(&TMIO_OHCI_DRIVER);
+ error_tmio:
+#endif
+#ifdef SM501_OHCI_DRIVER
+	platform_driver_unregister(&SM501_OHCI_DRIVER);
+ error_sm501:
+#endif
+#ifdef SA1111_DRIVER
+	sa1111_driver_unregister(&SA1111_DRIVER);
+ error_sa1111:
+#endif
+#ifdef OF_PLATFORM_DRIVER
+	platform_driver_unregister(&OF_PLATFORM_DRIVER);
+ error_of_platform:
+#endif
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ error_ps3:
+#endif
+	debugfs_remove(ohci_debug_root);
+	ohci_debug_root = NULL;
+
+	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+	return retval;
+}
+module_init(ohci_hcd_mod_init);
+
+static void __exit ohci_hcd_mod_exit(void)
+{
+#ifdef TMIO_OHCI_DRIVER
+	platform_driver_unregister(&TMIO_OHCI_DRIVER);
+#endif
+#ifdef SM501_OHCI_DRIVER
+	platform_driver_unregister(&SM501_OHCI_DRIVER);
+#endif
+#ifdef SA1111_DRIVER
+	sa1111_driver_unregister(&SA1111_DRIVER);
+#endif
+#ifdef OF_PLATFORM_DRIVER
+	platform_driver_unregister(&OF_PLATFORM_DRIVER);
+#endif
+#ifdef PS3_SYSTEM_BUS_DRIVER
+	ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#endif
+	debugfs_remove(ohci_debug_root);
+	clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(ohci_hcd_mod_exit);
+
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
new file mode 100644
index 0000000..634f3c7
--- /dev/null
+++ b/drivers/usb/host/ohci-hub.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licensed under the GPL.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * OHCI Root Hub ... the nonsharable stuff
+ */
+
+#define dbg_port(hc,label,num,value) \
+	ohci_dbg (hc, \
+		"%s roothub.portstatus [%d] " \
+		"= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
+		label, num, value, \
+		(value & RH_PS_PRSC) ? " PRSC" : "", \
+		(value & RH_PS_OCIC) ? " OCIC" : "", \
+		(value & RH_PS_PSSC) ? " PSSC" : "", \
+		(value & RH_PS_PESC) ? " PESC" : "", \
+		(value & RH_PS_CSC) ? " CSC" : "", \
+		\
+		(value & RH_PS_LSDA) ? " LSDA" : "", \
+		(value & RH_PS_PPS) ? " PPS" : "", \
+		(value & RH_PS_PRS) ? " PRS" : "", \
+		(value & RH_PS_POCI) ? " POCI" : "", \
+		(value & RH_PS_PSS) ? " PSS" : "", \
+		\
+		(value & RH_PS_PES) ? " PES" : "", \
+		(value & RH_PS_CCS) ? " CCS" : "" \
+		);
+
+/*-------------------------------------------------------------------------*/
+
+#define OHCI_SCHED_ENABLES \
+	(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
+
+static void update_done_list(struct ohci_hcd *);
+static void ohci_work(struct ohci_hcd *);
+
+#ifdef	CONFIG_PM
+static int ohci_rh_suspend (struct ohci_hcd *ohci, int autostop)
+__releases(ohci->lock)
+__acquires(ohci->lock)
+{
+	int			status = 0;
+
+	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
+	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_RESUME:
+		ohci_dbg (ohci, "resume/suspend?\n");
+		ohci->hc_control &= ~OHCI_CTRL_HCFS;
+		ohci->hc_control |= OHCI_USB_RESET;
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+		(void) ohci_readl (ohci, &ohci->regs->control);
+		/* FALL THROUGH */
+	case OHCI_USB_RESET:
+		status = -EBUSY;
+		ohci_dbg (ohci, "needs reinit!\n");
+		goto done;
+	case OHCI_USB_SUSPEND:
+		if (!ohci->autostop) {
+			ohci_dbg (ohci, "already suspended\n");
+			goto done;
+		}
+	}
+	ohci_dbg (ohci, "%s root hub\n",
+			autostop ? "auto-stop" : "suspend");
+
+	/* First stop any processing */
+	if (!autostop && (ohci->hc_control & OHCI_SCHED_ENABLES)) {
+		ohci->hc_control &= ~OHCI_SCHED_ENABLES;
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+		ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
+		ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
+
+		/* sched disables take effect on the next frame,
+		 * then the last WDH could take 6+ msec
+		 */
+		ohci_dbg (ohci, "stopping schedules ...\n");
+		ohci->autostop = 0;
+		spin_unlock_irq (&ohci->lock);
+		msleep (8);
+		spin_lock_irq (&ohci->lock);
+	}
+	update_done_list(ohci);
+	ohci_work(ohci);
+
+	/*
+	 * Some controllers don't handle "global" suspend properly if
+	 * there are unsuspended ports.  For these controllers, put all
+	 * the enabled ports into suspend before suspending the root hub.
+	 */
+	if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+		__hc32 __iomem	*portstat = ohci->regs->roothub.portstatus;
+		int		i;
+		unsigned	temp;
+
+		for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+			temp = ohci_readl(ohci, portstat);
+			if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+					RH_PS_PES)
+				ohci_writel(ohci, RH_PS_PSS, portstat);
+		}
+	}
+
+	/* maybe resume can wake root hub */
+	if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
+		ohci->hc_control |= OHCI_CTRL_RWE;
+	} else {
+		ohci_writel(ohci, OHCI_INTR_RHSC | OHCI_INTR_RD,
+				&ohci->regs->intrdisable);
+		ohci->hc_control &= ~OHCI_CTRL_RWE;
+	}
+
+	/* Suspend hub ... this is the "global (to this bus) suspend" mode,
+	 * which doesn't imply ports will first be individually suspended.
+	 */
+	ohci->hc_control &= ~OHCI_CTRL_HCFS;
+	ohci->hc_control |= OHCI_USB_SUSPEND;
+	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	(void) ohci_readl (ohci, &ohci->regs->control);
+
+	/* no resumes until devices finish suspending */
+	if (!autostop) {
+		ohci->next_statechange = jiffies + msecs_to_jiffies (5);
+		ohci->autostop = 0;
+		ohci->rh_state = OHCI_RH_SUSPENDED;
+	}
+
+done:
+	return status;
+}
+
+static inline struct ed *find_head (struct ed *ed)
+{
+	/* for bulk and control lists */
+	while (ed->ed_prev)
+		ed = ed->ed_prev;
+	return ed;
+}
+
+/* caller has locked the root hub */
+static int ohci_rh_resume (struct ohci_hcd *ohci)
+__releases(ohci->lock)
+__acquires(ohci->lock)
+{
+	struct usb_hcd		*hcd = ohci_to_hcd (ohci);
+	u32			temp, enables;
+	int			status = -EINPROGRESS;
+	int			autostopped = ohci->autostop;
+
+	ohci->autostop = 0;
+	ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
+
+	if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+		/* this can happen after resuming a swsusp snapshot */
+		if (ohci->rh_state != OHCI_RH_RUNNING) {
+			ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
+					ohci->hc_control);
+			status = -EBUSY;
+		/* this happens when pmcore resumes HC then root */
+		} else {
+			ohci_dbg (ohci, "duplicate resume\n");
+			status = 0;
+		}
+	} else switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_SUSPEND:
+		ohci->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES);
+		ohci->hc_control |= OHCI_USB_RESUME;
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+		(void) ohci_readl (ohci, &ohci->regs->control);
+		ohci_dbg (ohci, "%s root hub\n",
+				autostopped ? "auto-start" : "resume");
+		break;
+	case OHCI_USB_RESUME:
+		/* HCFS changes sometime after INTR_RD */
+		ohci_dbg(ohci, "%swakeup root hub\n",
+				autostopped ? "auto-" : "");
+		break;
+	case OHCI_USB_OPER:
+		/* this can happen after resuming a swsusp snapshot */
+		ohci_dbg (ohci, "snapshot resume? reinit\n");
+		status = -EBUSY;
+		break;
+	default:		/* RESET, we lost power */
+		ohci_dbg (ohci, "lost power\n");
+		status = -EBUSY;
+	}
+	if (status == -EBUSY) {
+		if (!autostopped) {
+			spin_unlock_irq (&ohci->lock);
+			status = ohci_restart (ohci);
+
+			usb_root_hub_lost_power(hcd->self.root_hub);
+
+			spin_lock_irq (&ohci->lock);
+		}
+		return status;
+	}
+	if (status != -EINPROGRESS)
+		return status;
+	if (autostopped)
+		goto skip_resume;
+	spin_unlock_irq (&ohci->lock);
+
+	/* Some controllers (Lucent erratum) need extra-long delays */
+	msleep (20 /* usb 11.5.1.10 */ + 12 /* 32 msec counter */ + 1);
+
+	temp = ohci_readl (ohci, &ohci->regs->control);
+	temp &= OHCI_CTRL_HCFS;
+	if (temp != OHCI_USB_RESUME) {
+		ohci_err (ohci, "controller won't resume\n");
+		spin_lock_irq(&ohci->lock);
+		return -EBUSY;
+	}
+
+	/* disable old schedule state, reinit from scratch */
+	ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
+	ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
+	ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
+	ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
+	ohci_writel (ohci, 0, &ohci->regs->ed_periodcurrent);
+	ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
+
+	/* Sometimes PCI D3 suspend trashes frame timings ... */
+	periodic_reinit (ohci);
+
+	/*
+	 * The following code is executed with ohci->lock held and
+	 * irqs disabled if and only if autostopped is true.  This
+	 * will cause sparse to warn about a "context imbalance".
+	 */
+skip_resume:
+	/* interrupts might have been disabled */
+	ohci_writel (ohci, OHCI_INTR_INIT, &ohci->regs->intrenable);
+	if (ohci->ed_rm_list)
+		ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
+
+	/* Then re-enable operations */
+	ohci_writel (ohci, OHCI_USB_OPER, &ohci->regs->control);
+	(void) ohci_readl (ohci, &ohci->regs->control);
+	if (!autostopped)
+		msleep (3);
+
+	temp = ohci->hc_control;
+	temp &= OHCI_CTRL_RWC;
+	temp |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
+	ohci->hc_control = temp;
+	ohci_writel (ohci, temp, &ohci->regs->control);
+	(void) ohci_readl (ohci, &ohci->regs->control);
+
+	/* TRSMRCY */
+	if (!autostopped) {
+		msleep (10);
+		spin_lock_irq (&ohci->lock);
+	}
+	/* now ohci->lock is always held and irqs are always disabled */
+
+	/* keep it alive for more than ~5x suspend + resume costs */
+	ohci->next_statechange = jiffies + STATECHANGE_DELAY;
+
+	/* maybe turn schedules back on */
+	enables = 0;
+	temp = 0;
+	if (!ohci->ed_rm_list) {
+		if (ohci->ed_controltail) {
+			ohci_writel (ohci,
+					find_head (ohci->ed_controltail)->dma,
+					&ohci->regs->ed_controlhead);
+			enables |= OHCI_CTRL_CLE;
+			temp |= OHCI_CLF;
+		}
+		if (ohci->ed_bulktail) {
+			ohci_writel (ohci, find_head (ohci->ed_bulktail)->dma,
+				&ohci->regs->ed_bulkhead);
+			enables |= OHCI_CTRL_BLE;
+			temp |= OHCI_BLF;
+		}
+	}
+	if (hcd->self.bandwidth_isoc_reqs || hcd->self.bandwidth_int_reqs)
+		enables |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
+	if (enables) {
+		ohci_dbg (ohci, "restarting schedules ... %08x\n", enables);
+		ohci->hc_control |= enables;
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+		if (temp)
+			ohci_writel (ohci, temp, &ohci->regs->cmdstatus);
+		(void) ohci_readl (ohci, &ohci->regs->control);
+	}
+
+	ohci->rh_state = OHCI_RH_RUNNING;
+	return 0;
+}
+
+static int ohci_bus_suspend (struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+	int			rc;
+
+	spin_lock_irq (&ohci->lock);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+		rc = -ESHUTDOWN;
+	else
+		rc = ohci_rh_suspend (ohci, 0);
+	spin_unlock_irq (&ohci->lock);
+
+	if (rc == 0) {
+		del_timer_sync(&ohci->io_watchdog);
+		ohci->prev_frame_no = IO_WATCHDOG_OFF;
+	}
+	return rc;
+}
+
+static int ohci_bus_resume (struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
+	int			rc;
+
+	if (time_before (jiffies, ohci->next_statechange))
+		msleep(5);
+
+	spin_lock_irq (&ohci->lock);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+		rc = -ESHUTDOWN;
+	else
+		rc = ohci_rh_resume (ohci);
+	spin_unlock_irq (&ohci->lock);
+
+	/* poll until we know a device is connected or we autostop */
+	if (rc == 0)
+		usb_hcd_poll_rh_status(hcd);
+	return rc;
+}
+
+/* Carry out polling-, autostop-, and autoresume-related state changes */
+static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
+		int any_connected, int rhsc_status)
+{
+	int	poll_rh = 1;
+	int	rhsc_enable;
+
+	/* Some broken controllers never turn off RHSC in the interrupt
+	 * status register.  For their sake we won't re-enable RHSC
+	 * interrupts if the interrupt bit is already active.
+	 */
+	rhsc_enable = ohci_readl(ohci, &ohci->regs->intrenable) &
+			OHCI_INTR_RHSC;
+
+	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_OPER:
+		/* If no status changes are pending, enable RHSC interrupts. */
+		if (!rhsc_enable && !rhsc_status && !changed) {
+			rhsc_enable = OHCI_INTR_RHSC;
+			ohci_writel(ohci, rhsc_enable, &ohci->regs->intrenable);
+		}
+
+		/* Keep on polling until we know a device is connected
+		 * and RHSC is enabled, or until we autostop.
+		 */
+		if (!ohci->autostop) {
+			if (any_connected ||
+					!device_may_wakeup(&ohci_to_hcd(ohci)
+						->self.root_hub->dev)) {
+				if (rhsc_enable)
+					poll_rh = 0;
+			} else {
+				ohci->autostop = 1;
+				ohci->next_statechange = jiffies + HZ;
+			}
+
+		/* if no devices have been attached for one second, autostop */
+		} else {
+			if (changed || any_connected) {
+				ohci->autostop = 0;
+				ohci->next_statechange = jiffies +
+						STATECHANGE_DELAY;
+			} else if (time_after_eq(jiffies,
+						ohci->next_statechange)
+					&& !ohci->ed_rm_list
+					&& !(ohci->hc_control &
+						OHCI_SCHED_ENABLES)) {
+				ohci_rh_suspend(ohci, 1);
+				if (rhsc_enable)
+					poll_rh = 0;
+			}
+		}
+		break;
+
+	case OHCI_USB_SUSPEND:
+	case OHCI_USB_RESUME:
+		/* if there is a port change, autostart or ask to be resumed */
+		if (changed) {
+			if (ohci->autostop)
+				ohci_rh_resume(ohci);
+			else
+				usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
+
+		/* If remote wakeup is disabled, stop polling */
+		} else if (!ohci->autostop &&
+				!ohci_to_hcd(ohci)->self.root_hub->
+					do_remote_wakeup) {
+			poll_rh = 0;
+
+		} else {
+			/* If no status changes are pending,
+			 * enable RHSC interrupts
+			 */
+			if (!rhsc_enable && !rhsc_status) {
+				rhsc_enable = OHCI_INTR_RHSC;
+				ohci_writel(ohci, rhsc_enable,
+						&ohci->regs->intrenable);
+			}
+			/* Keep polling until RHSC is enabled */
+			if (rhsc_enable)
+				poll_rh = 0;
+		}
+		break;
+	}
+	return poll_rh;
+}
+
+#else	/* CONFIG_PM */
+
+static inline int ohci_rh_resume(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+
+/* Carry out polling-related state changes.
+ * autostop isn't used when CONFIG_PM is turned off.
+ */
+static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
+		int any_connected, int rhsc_status)
+{
+	/* If RHSC is enabled, don't poll */
+	if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
+		return 0;
+
+	/* If status changes are pending, continue polling.
+	 * Conversely, if no status changes are pending but the RHSC
+	 * status bit was set, then RHSC may be broken so continue polling.
+	 */
+	if (changed || rhsc_status)
+		return 1;
+
+	/* It's safe to re-enable RHSC interrupts */
+	ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+	return 0;
+}
+
+#endif	/* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+int ohci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	int		i, changed = 0, length = 1;
+	int		any_connected = 0;
+	int		rhsc_status;
+	unsigned long	flags;
+
+	spin_lock_irqsave (&ohci->lock, flags);
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		goto done;
+
+	/* undocumented erratum seen on at least rev D */
+	if ((ohci->flags & OHCI_QUIRK_AMD756)
+			&& (roothub_a (ohci) & RH_A_NDP) > MAX_ROOT_PORTS) {
+		ohci_warn (ohci, "bogus NDP, rereads as NDP=%d\n",
+			  ohci_readl (ohci, &ohci->regs->roothub.a) & RH_A_NDP);
+		/* retry later; "should not happen" */
+		goto done;
+	}
+
+	/* init status */
+	if (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC))
+		buf [0] = changed = 1;
+	else
+		buf [0] = 0;
+	if (ohci->num_ports > 7) {
+		buf [1] = 0;
+		length++;
+	}
+
+	/* Clear the RHSC status flag before reading the port statuses */
+	ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrstatus);
+	rhsc_status = ohci_readl(ohci, &ohci->regs->intrstatus) &
+			OHCI_INTR_RHSC;
+
+	/* look at each port */
+	for (i = 0; i < ohci->num_ports; i++) {
+		u32	status = roothub_portstatus (ohci, i);
+
+		/* can't autostop if ports are connected */
+		any_connected |= (status & RH_PS_CCS);
+
+		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
+				| RH_PS_OCIC | RH_PS_PRSC)) {
+			changed = 1;
+			if (i < 7)
+			    buf [0] |= 1 << (i + 1);
+			else
+			    buf [1] |= 1 << (i - 7);
+		}
+	}
+
+	if (ohci_root_hub_state_changes(ohci, changed,
+			any_connected, rhsc_status))
+		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	else
+		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+done:
+	spin_unlock_irqrestore (&ohci->lock, flags);
+
+	return changed ? length : 0;
+}
+EXPORT_SYMBOL_GPL(ohci_hub_status_data);
+
+/*-------------------------------------------------------------------------*/
+
+static void
+ohci_hub_descriptor (
+	struct ohci_hcd			*ohci,
+	struct usb_hub_descriptor	*desc
+) {
+	u32		rh = roothub_a (ohci);
+	u16		temp;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bPwrOn2PwrGood = (rh & RH_A_POTPGT) >> 24;
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ohci->num_ports;
+	temp = 1 + (ohci->num_ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
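+	/* e.g. 3 ports: temp == 1, bDescLength == 9 (one byte each for
+	 * DeviceRemovable and PortPwrCtrlMask); 15 ports would give 11.
+	 */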
+
+	temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
+	if (rh & RH_A_NPS)		/* no power switching? */
+		temp |= HUB_CHAR_NO_LPSM;
+	if (rh & RH_A_PSM)		/* per-port power switching? */
+		temp |= HUB_CHAR_INDV_PORT_LPSM;
+	if (rh & RH_A_NOCP)		/* no overcurrent reporting? */
+		temp |= HUB_CHAR_NO_OCPM;
+	else if (rh & RH_A_OCPM)	/* per-port overcurrent reporting? */
+		temp |= HUB_CHAR_INDV_PORT_OCPM;
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+
+	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	rh = roothub_b (ohci);
+	memset(desc->u.hs.DeviceRemovable, 0xff,
+			sizeof(desc->u.hs.DeviceRemovable));
+	desc->u.hs.DeviceRemovable[0] = rh & RH_B_DR;
+	if (ohci->num_ports > 7) {
+		desc->u.hs.DeviceRemovable[1] = (rh & RH_B_DR) >> 8;
+		desc->u.hs.DeviceRemovable[2] = 0xff;
+	} else
+		desc->u.hs.DeviceRemovable[1] = 0xff;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_USB_OTG
+
+static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	u32			status;
+
+	if (!port)
+		return -EINVAL;
+	port--;
+
+	/* start port reset before HNP protocol times out */
+	status = ohci_readl(ohci, &ohci->regs->roothub.portstatus [port]);
+	if (!(status & RH_PS_CCS))
+		return -ENODEV;
+
+	/* hub_wq will finish the reset later */
+	ohci_writel(ohci, RH_PS_PRS, &ohci->regs->roothub.portstatus [port]);
+	return 0;
+}
+
+#else
+
+#define	ohci_start_port_reset		NULL
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+
+/* See usb 7.1.7.5:  root hubs must issue at least 50 msec reset signaling,
+ * not necessarily continuous ... to guard against resume signaling.
+ */
+#define	PORT_RESET_MSEC		50
+
+/* this timer value might be vendor-specific ... */
+#define	PORT_RESET_HW_MSEC	10
+
+/* wrap-aware logic morphed from <linux/jiffies.h> */
+#define tick_before(t1,t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
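+/*
+ * e.g. (illustrative) tick_before(0xfffe, 0x0002) is true: the 16-bit
+ * difference 0xfffe - 0x0002 == 0xfffc is -4 as an s16, so a frame
+ * counter that has just wrapped still compares as "before".
+ */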
+
+/* called from some task, normally hub_wq */
+static inline int root_port_reset (struct ohci_hcd *ohci, unsigned port)
+{
+	__hc32 __iomem *portstat = &ohci->regs->roothub.portstatus [port];
+	u32	temp = 0;
+	u16	now = ohci_readl(ohci, &ohci->regs->fmnumber);
+	u16	reset_done = now + PORT_RESET_MSEC;
+	int	limit_1 = DIV_ROUND_UP(PORT_RESET_MSEC, PORT_RESET_HW_MSEC);
+
+	/* build a "continuous enough" reset signal, with up to
+	 * 3msec gap between pulses.  scheduler HZ==100 must work;
+	 * this might need to be deadline-scheduled.
+	 */
+	do {
+		int limit_2;
+
+		/* spin until any current reset finishes */
+		limit_2 = PORT_RESET_HW_MSEC * 2;
+		while (--limit_2 >= 0) {
+			temp = ohci_readl (ohci, portstat);
+			/* handle e.g. CardBus eject */
+			if (temp == ~(u32)0)
+				return -ESHUTDOWN;
+			if (!(temp & RH_PS_PRS))
+				break;
+			udelay (500);
+		}
+
+		/* timeout (a hardware error) has been observed when
+		 * EHCI sets CF while this driver is resetting a port;
+		 * presumably other disconnect paths might do it too.
+		 */
+		if (limit_2 < 0) {
+			ohci_dbg(ohci,
+				"port[%d] reset timeout, stat %08x\n",
+				port, temp);
+			break;
+		}
+
+		if (!(temp & RH_PS_CCS))
+			break;
+		if (temp & RH_PS_PRSC)
+			ohci_writel (ohci, RH_PS_PRSC, portstat);
+
+		/* start the next reset, sleep till it's probably done */
+		ohci_writel (ohci, RH_PS_PRS, portstat);
+		msleep(PORT_RESET_HW_MSEC);
+		now = ohci_readl(ohci, &ohci->regs->fmnumber);
+	} while (tick_before(now, reset_done) && --limit_1 >= 0);
+
+	/* caller synchronizes using PRSC ... and handles PRS
+	 * still being set when this returns.
+	 */
+
+	return 0;
+}
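+
+/* Usage in brief: ohci_hub_control() below invokes this for
+ * SetPortFeature(USB_PORT_FEAT_RESET); hub_wq then watches for
+ * RH_PS_PRSC and clears it via ClearPortFeature, which is the
+ * synchronization the comment above refers to.
+ */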
+
+int ohci_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+) {
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	int		ports = ohci->num_ports;
+	u32		temp;
+	int		retval = 0;
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+		return -ESHUTDOWN;
+
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			ohci_writel (ohci, RH_HS_OCIC,
+					&ohci->regs->roothub.status);
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			temp = RH_PS_CCS;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			temp = RH_PS_PESC;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			temp = RH_PS_POCI;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			temp = RH_PS_PSSC;
+			break;
+		case USB_PORT_FEAT_POWER:
+			temp = RH_PS_LSDA;
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			temp = RH_PS_CSC;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			temp = RH_PS_OCIC;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			temp = RH_PS_PRSC;
+			break;
+		default:
+			goto error;
+		}
+		ohci_writel (ohci, temp,
+				&ohci->regs->roothub.portstatus [wIndex]);
+		// ohci_readl (ohci, &ohci->regs->roothub.portstatus [wIndex]);
+		break;
+	case GetHubDescriptor:
+		ohci_hub_descriptor (ohci, (struct usb_hub_descriptor *) buf);
+		break;
+	case GetHubStatus:
+		temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
+		put_unaligned_le32(temp, buf);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = roothub_portstatus (ohci, wIndex);
+		put_unaligned_le32(temp, buf);
+
+		if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
+			dbg_port(ohci, "GetStatus", wIndex, temp);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+			// FIXME:  this can be cleared, yes?
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+#ifdef	CONFIG_USB_OTG
+			if (hcd->self.otg_port == (wIndex + 1)
+					&& hcd->self.b_hnp_enable)
+				ohci->start_hnp(ohci);
+			else
+#endif
+			ohci_writel (ohci, RH_PS_PSS,
+				&ohci->regs->roothub.portstatus [wIndex]);
+			break;
+		case USB_PORT_FEAT_POWER:
+			ohci_writel (ohci, RH_PS_PPS,
+				&ohci->regs->roothub.portstatus [wIndex]);
+			break;
+		case USB_PORT_FEAT_RESET:
+			retval = root_port_reset (ohci, wIndex);
+			break;
+		default:
+			goto error;
+		}
+		break;
+
+	default:
+error:
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+	return retval;
+}
+EXPORT_SYMBOL_GPL(ohci_hub_control);
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
new file mode 100644
index 0000000..b3da3f1
--- /dev/null
+++ b/drivers/usb/host/ohci-mem.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licenced under the GPL.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * OHCI deals with three types of memory:
+ *	- data used only by the HCD ... kmalloc is fine
+ *	- async and periodic schedules, shared by HC and HCD ... these
+ *	  need to use dma_pool or dma_alloc_coherent
+ *	- driver buffers, read/written by HC ... the hcd glue or the
+ *	  device driver provides us with dma addresses
+ *
+ * There's also "register" data, which is memory mapped.
+ * No memory seen by this driver (or any HCD) may be paged out.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+static void ohci_hcd_init (struct ohci_hcd *ohci)
+{
+	ohci->next_statechange = jiffies;
+	spin_lock_init (&ohci->lock);
+	INIT_LIST_HEAD (&ohci->pending);
+	INIT_LIST_HEAD(&ohci->eds_in_use);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_mem_init (struct ohci_hcd *ohci)
+{
+	ohci->td_cache = dma_pool_create ("ohci_td",
+		ohci_to_hcd(ohci)->self.controller,
+		sizeof (struct td),
+		32 /* byte alignment */,
+		0 /* no page-crossing issues */);
+	if (!ohci->td_cache)
+		return -ENOMEM;
+	ohci->ed_cache = dma_pool_create ("ohci_ed",
+		ohci_to_hcd(ohci)->self.controller,
+		sizeof (struct ed),
+		16 /* byte alignment */,
+		0 /* no page-crossing issues */);
+	if (!ohci->ed_cache) {
+		dma_pool_destroy (ohci->td_cache);
+		return -ENOMEM;
+	}
+	return 0;
+}
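+
+/* The 32- and 16-byte pool alignments above mirror the OHCI spec: the
+ * controller masks the low bits of TD and ED pointers when it follows
+ * them, so the pools must hand out suitably aligned blocks.
+ */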
+
+static void ohci_mem_cleanup (struct ohci_hcd *ohci)
+{
+	if (ohci->td_cache) {
+		dma_pool_destroy (ohci->td_cache);
+		ohci->td_cache = NULL;
+	}
+	if (ohci->ed_cache) {
+		dma_pool_destroy (ohci->ed_cache);
+		ohci->ed_cache = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ohci "done list" processing needs this mapping */
+static inline struct td *
+dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
+{
+	struct td *td;
+
+	td_dma &= TD_MASK;
+	td = hc->td_hash [TD_HASH_FUNC(td_dma)];
+	while (td && td->td_dma != td_dma)
+		td = td->td_hash;
+	return td;
+}
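+
+/* A sketch of the scheme: TD_HASH_FUNC folds a TD's DMA address into a
+ * small table index, with collisions chained through td->td_hash (the
+ * chain is entered in td_fill and unlinked in td_free below), so the
+ * done-list walk can turn the raw DMA address the controller writes
+ * back into a driver TD pointer.
+ */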
+
+/* TDs ... */
+static struct td *
+td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
+{
+	dma_addr_t	dma;
+	struct td	*td;
+
+	td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma);
+	if (td) {
+		/* in case hc fetches it, make it look dead */
+		td->hwNextTD = cpu_to_hc32 (hc, dma);
+		td->td_dma = dma;
+		/* hashed in td_fill */
+	}
+	return td;
+}
+
+static void
+td_free (struct ohci_hcd *hc, struct td *td)
+{
+	struct td	**prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];
+
+	while (*prev && *prev != td)
+		prev = &(*prev)->td_hash;
+	if (*prev)
+		*prev = td->td_hash;
+	else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
+		ohci_dbg (hc, "no hash for td %p\n", td);
+	dma_pool_free (hc->td_cache, td, td->td_dma);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EDs ... */
+static struct ed *
+ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
+{
+	dma_addr_t	dma;
+	struct ed	*ed;
+
+	ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma);
+	if (ed) {
+		INIT_LIST_HEAD (&ed->td_list);
+		ed->dma = dma;
+	}
+	return ed;
+}
+
+static void
+ed_free (struct ohci_hcd *hc, struct ed *ed)
+{
+	dma_pool_free (hc->ed_cache, ed, ed->dma);
+}
+
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
new file mode 100644
index 0000000..f5f5326
--- /dev/null
+++ b/drivers/usb/host/ohci-nxp.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver for NXP USB Host devices
+ *
+ * Currently supported OHCI host devices:
+ * - NXP LPC32xx
+ *
+ * Authors: Dmitry Chigirev <source@mvista.com>
+ *	    Vitaly Wool <vitalywool@gmail.com>
+ *
+ * register initialization is based on code examples provided by Philips
+ * Copyright (c) 2005 Koninklijke Philips Electronics N.V.
+ *
+ * NOTE: This driver does not have suspend/resume functionality.
+ * It is intended for engineering development purposes only.
+ *
+ * 2005-2006 (c) MontaVista Software, Inc.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/usb/isp1301.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#include <mach/hardware.h>
+
+#define USB_CONFIG_BASE		0x31020000
+#define USB_OTG_STAT_CONTROL	IO_ADDRESS(USB_CONFIG_BASE + 0x110)
+
+/* USB_OTG_STAT_CONTROL bit defines */
+#define TRANSPARENT_I2C_EN	(1 << 7)
+#define HOST_EN			(1 << 0)
+
+/* On LPC32xx, those are undefined */
+#ifndef start_int_set_falling_edge
+#define start_int_set_falling_edge(irq)
+#define start_int_set_rising_edge(irq)
+#define start_int_ack(irq)
+#define start_int_mask(irq)
+#define start_int_umask(irq)
+#endif
+
+#define DRIVER_DESC "OHCI NXP driver"
+
+static const char hcd_name[] = "ohci-nxp";
+static struct hc_driver __read_mostly ohci_nxp_hc_driver;
+
+static struct i2c_client *isp1301_i2c_client;
+
+static struct clk *usb_host_clk;
+
+static void isp1301_configure_lpc32xx(void)
+{
+	/* LPC32XX only supports DAT_SE0 USB mode */
+	/* This sequence is important */
+
+	/* Disable transparent UART mode first */
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+		MC1_UART_EN);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+		~MC1_SPEED_REG);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+				  ISP1301_I2C_MODE_CONTROL_1, MC1_SPEED_REG);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		  (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR),
+		  ~0);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_MODE_CONTROL_2,
+		(MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL));
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_MODE_CONTROL_1, MC1_DAT_SE0);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_OTG_CONTROL_1,
+		(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
+		(OTG1_DM_PULLUP | OTG1_DP_PULLUP));
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR,
+		~0);
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
+
+	printk(KERN_INFO "ISP1301 Vendor ID  : 0x%04x\n",
+	      i2c_smbus_read_word_data(isp1301_i2c_client, 0x00));
+	printk(KERN_INFO "ISP1301 Product ID : 0x%04x\n",
+	      i2c_smbus_read_word_data(isp1301_i2c_client, 0x02));
+	printk(KERN_INFO "ISP1301 Version ID : 0x%04x\n",
+	      i2c_smbus_read_word_data(isp1301_i2c_client, 0x14));
+}
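+
+/* Note on the writes above: each ISP1301 register is paired with a
+ * clear address (reg | ISP1301_I2C_REG_CLEAR_ADDR); writing there
+ * clears the given bits rather than setting them, which is why the
+ * sequence clears the old settings before programming DAT_SE0 mode and
+ * the pulldowns.
+ */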
+
+static void isp1301_configure(void)
+{
+	isp1301_configure_lpc32xx();
+}
+
+static inline void isp1301_vbus_on(void)
+{
+	i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1,
+				  OTG1_VBUS_DRV);
+}
+
+static inline void isp1301_vbus_off(void)
+{
+	i2c_smbus_write_byte_data(isp1301_i2c_client,
+		ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
+		OTG1_VBUS_DRV);
+}
+
+static void ohci_nxp_start_hc(void)
+{
+	unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+
+	__raw_writel(tmp, USB_OTG_STAT_CONTROL);
+	isp1301_vbus_on();
+}
+
+static void ohci_nxp_stop_hc(void)
+{
+	unsigned long tmp;
+
+	isp1301_vbus_off();
+	tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
+	__raw_writel(tmp, USB_OTG_STAT_CONTROL);
+}
+
+static int ohci_hcd_nxp_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = NULL;
+	const struct hc_driver *driver = &ohci_nxp_hc_driver;
+	struct resource *res;
+	int ret = 0, irq;
+	struct device_node *isp1301_node;
+
+	if (pdev->dev.of_node) {
+		isp1301_node = of_parse_phandle(pdev->dev.of_node,
+						"transceiver", 0);
+	} else {
+		isp1301_node = NULL;
+	}
+
+	isp1301_i2c_client = isp1301_get_client(isp1301_node);
+	if (!isp1301_i2c_client)
+		return -EPROBE_DEFER;
+
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto fail_disable;
+
+	dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
+	if (usb_disabled()) {
+		dev_err(&pdev->dev, "USB is disabled\n");
+		ret = -ENODEV;
+		goto fail_disable;
+	}
+
+	/* Enable USB host clock */
+	usb_host_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(usb_host_clk)) {
+		dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
+		ret = PTR_ERR(usb_host_clk);
+		goto fail_disable;
+	}
+
+	ret = clk_prepare_enable(usb_host_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
+		goto fail_disable;
+	}
+
+	isp1301_configure();
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
+		ret = -ENOMEM;
+		goto fail_hcd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto fail_resource;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = -ENXIO;
+		goto fail_resource;
+	}
+
+	ohci_nxp_start_hc();
+	platform_set_drvdata(pdev, hcd);
+
+	dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
+	ret = usb_add_hcd(hcd, irq, 0);
+	if (ret == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return ret;
+	}
+
+	ohci_nxp_stop_hc();
+fail_resource:
+	usb_put_hcd(hcd);
+fail_hcd:
+	clk_disable_unprepare(usb_host_clk);
+fail_disable:
+	isp1301_i2c_client = NULL;
+	return ret;
+}
+
+static int ohci_hcd_nxp_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_remove_hcd(hcd);
+	ohci_nxp_stop_hc();
+	usb_put_hcd(hcd);
+	clk_disable_unprepare(usb_host_clk);
+	isp1301_i2c_client = NULL;
+
+	return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:usb-ohci");
+
+#ifdef CONFIG_OF
+static const struct of_device_id ohci_hcd_nxp_match[] = {
+	{ .compatible = "nxp,ohci-nxp" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
+#endif
+
+static struct platform_driver ohci_hcd_nxp_driver = {
+	.driver = {
+		.name = "usb-ohci",
+		.of_match_table = of_match_ptr(ohci_hcd_nxp_match),
+	},
+	.probe = ohci_hcd_nxp_probe,
+	.remove = ohci_hcd_nxp_remove,
+};
+
+static int __init ohci_nxp_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_nxp_hc_driver, NULL);
+	return platform_driver_register(&ohci_hcd_nxp_driver);
+}
+module_init(ohci_nxp_init);
+
+static void __exit ohci_nxp_cleanup(void)
+{
+	platform_driver_unregister(&ohci_hcd_nxp_driver);
+}
+module_exit(ohci_nxp_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
new file mode 100644
index 0000000..d8d35d4
--- /dev/null
+++ b/drivers/usb/host/ohci-omap.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2005 David Brownell
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * OMAP Bus Glue
+ *
+ * Modified for OMAP by Tony Lindgren <tony@atomide.com>
+ * Based on the 2.4 OMAP OHCI driver originally done by MontaVista Software Inc.
+ * and on ohci-sa1111.c by Christopher Hoover <ch@hpl.hp.com>
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#include <asm/io.h>
+#include <asm/mach-types.h>
+
+#include <mach/mux.h>
+
+#include <mach/hardware.h>
+#include <mach/usb.h>
+
+/* OMAP-1510 OHCI has its own MMU for DMA */
+#define OMAP1510_LB_MEMSIZE	32	/* Should be same as SDRAM size */
+#define OMAP1510_LB_CLOCK_DIV	0xfffec10c
+#define OMAP1510_LB_MMU_CTL	0xfffec208
+#define OMAP1510_LB_MMU_LCK	0xfffec224
+#define OMAP1510_LB_MMU_LD_TLB	0xfffec228
+#define OMAP1510_LB_MMU_CAM_H	0xfffec22c
+#define OMAP1510_LB_MMU_CAM_L	0xfffec230
+#define OMAP1510_LB_MMU_RAM_H	0xfffec234
+#define OMAP1510_LB_MMU_RAM_L	0xfffec238
+
+#define DRIVER_DESC "OHCI OMAP driver"
+
+#ifdef CONFIG_TPS65010
+#include <linux/mfd/tps65010.h>
+#else
+
+#define LOW	0
+#define HIGH	1
+
+#define GPIO1	1
+
+static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
+{
+	return 0;
+}
+
+#endif
+
+static struct clk *usb_host_ck;
+static struct clk *usb_dc_ck;
+
+static const char hcd_name[] = "ohci-omap";
+static struct hc_driver __read_mostly ohci_omap_hc_driver;
+
+static void omap_ohci_clock_power(int on)
+{
+	if (on) {
+		clk_enable(usb_dc_ck);
+		clk_enable(usb_host_ck);
+		/* guesstimate for T5 == 1x 32K clock + APLL lock time */
+		udelay(100);
+	} else {
+		clk_disable(usb_host_ck);
+		clk_disable(usb_dc_ck);
+	}
+}
+
+/*
+ * Board specific gang-switched transceiver power on/off.
+ * NOTE:  OSK supplies power from DC, not battery.
+ */
+static int omap_ohci_transceiver_power(int on)
+{
+	if (on) {
+		if (machine_is_omap_innovator() && cpu_is_omap1510())
+			__raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
+				| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+		else if (machine_is_omap_osk())
+			tps65010_set_gpio_out_value(GPIO1, LOW);
+	} else {
+		if (machine_is_omap_innovator() && cpu_is_omap1510())
+			__raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
+				& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+		else if (machine_is_omap_osk())
+			tps65010_set_gpio_out_value(GPIO1, HIGH);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_ARCH_OMAP15XX
+/*
+ * OMAP-1510 specific Local Bus clock on/off
+ */
+static int omap_1510_local_bus_power(int on)
+{
+	if (on) {
+		omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL);
+		udelay(200);
+	} else {
+		omap_writel(0, OMAP1510_LB_MMU_CTL);
+	}
+
+	return 0;
+}
+
+/*
+ * OMAP-1510 specific Local Bus initialization
+ * NOTE: This assumes 32MB memory size in OMAP1510_LB_MEMSIZE.
+ *       See also arch/mach-omap/memory.h for __virt_to_dma() and
+ *       __dma_to_virt() which need to match with the physical
+ *       Local Bus address below.
+ */
+static int omap_1510_local_bus_init(void)
+{
+	unsigned int tlb;
+	unsigned long lbaddr, physaddr;
+
+	omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4,
+	       OMAP1510_LB_CLOCK_DIV);
+
+	/* Configure the Local Bus MMU table */
+	for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) {
+		lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET;
+		physaddr = tlb * 0x00100000 + PHYS_OFFSET;
+		omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H);
+		omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc,
+		       OMAP1510_LB_MMU_CAM_L);
+		omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H);
+		omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L);
+		omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK);
+		omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB);
+	}
+
+	/* Enable the walking table */
+	omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL);
+	udelay(200);
+
+	return 0;
+}
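+
+/* A sketch of what the loop builds: one locked TLB entry per 1 MB
+ * section, mapping local-bus addresses starting at OMAP1510_LB_OFFSET
+ * onto physical SDRAM at PHYS_OFFSET, so the controller's private MMU
+ * sees the same 32 MB window as the CPU.
+ */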
+#else
+#define omap_1510_local_bus_power(x)	{}
+#define omap_1510_local_bus_init()	{}
+#endif
+
+#ifdef	CONFIG_USB_OTG
+
+static void start_hnp(struct ohci_hcd *ohci)
+{
+	struct usb_hcd *hcd = ohci_to_hcd(ohci);
+	const unsigned	port = hcd->self.otg_port - 1;
+	unsigned long	flags;
+	u32 l;
+
+	otg_start_hnp(hcd->usb_phy->otg);
+
+	local_irq_save(flags);
+	hcd->usb_phy->otg->state = OTG_STATE_A_SUSPEND;
+	writel (RH_PS_PSS, &ohci->regs->roothub.portstatus [port]);
+	l = omap_readl(OTG_CTRL);
+	l &= ~OTG_A_BUSREQ;
+	omap_writel(l, OTG_CTRL);
+	local_irq_restore(flags);
+}
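+
+/* HNP hand-off in brief: suspend the OTG port (RH_PS_PSS) and drop the
+ * A-device bus request (OTG_A_BUSREQ) so the B-device can take over as
+ * host; the transceiver driver and usbcore run the rest of the state
+ * machine.
+ */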
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_omap_reset(struct usb_hcd *hcd)
+{
+	struct ohci_hcd		*ohci = hcd_to_ohci(hcd);
+	struct omap_usb_config	*config = dev_get_platdata(hcd->self.controller);
+	int			need_transceiver = (config->otg != 0);
+	int			ret;
+
+	dev_dbg(hcd->self.controller, "starting USB Controller\n");
+
+	if (config->otg) {
+		hcd->self.otg_port = config->otg;
+		/* default/minimum OTG power budget:  8 mA */
+		hcd->power_budget = 8;
+	}
+
+	/* boards can use OTG transceivers in non-OTG modes */
+	need_transceiver = need_transceiver
+			|| machine_is_omap_h2() || machine_is_omap_h3();
+
+	/* XXX OMAP16xx only */
+	if (config->ocpi_enable)
+		config->ocpi_enable();
+
+#ifdef	CONFIG_USB_OTG
+	if (need_transceiver) {
+		hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
+			int	status = otg_set_host(hcd->usb_phy->otg,
+						&ohci_to_hcd(ohci)->self);
+			dev_dbg(hcd->self.controller, "init %s phy, status %d\n",
+					hcd->usb_phy->label, status);
+			if (status) {
+				usb_put_phy(hcd->usb_phy);
+				return status;
+			}
+		} else {
+			return -EPROBE_DEFER;
+		}
+		hcd->skip_phy_initialization = 1;
+		ohci->start_hnp = start_hnp;
+	}
+#endif
+
+	omap_ohci_clock_power(1);
+
+	if (cpu_is_omap15xx()) {
+		omap_1510_local_bus_power(1);
+		omap_1510_local_bus_init();
+	}
+
+	ret = ohci_setup(hcd);
+	if (ret < 0)
+		return ret;
+
+	if (config->otg || config->rwc) {
+		ohci->hc_control = OHCI_CTRL_RWC;
+		writel(OHCI_CTRL_RWC, &ohci->regs->control);
+	}
+
+	/* board-specific power switching and overcurrent support */
+	if (machine_is_omap_osk() || machine_is_omap_innovator()) {
+		u32	rh = roothub_a (ohci);
+
+		/* power switching (ganged by default) */
+		rh &= ~RH_A_NPS;
+
+		/* TPS2045 switch for internal transceiver (port 1) */
+		if (machine_is_omap_osk()) {
+			ohci_to_hcd(ohci)->power_budget = 250;
+
+			rh &= ~RH_A_NOCP;
+
+			/* gpio9 for overcurrent detection */
+			omap_cfg_reg(W8_1610_GPIO9);
+			gpio_request(9, "OHCI overcurrent");
+			gpio_direction_input(9);
+
+			/* for paranoia's sake:  disable USB.PUEN */
+			omap_cfg_reg(W4_USB_HIGHZ);
+		}
+		ohci_writel(ohci, rh, &ohci->regs->roothub.a);
+		ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
+	} else if (machine_is_nokia770()) {
+		/* We require a self-powered hub, which should have
+		 * plenty of power. */
+		ohci_to_hcd(ohci)->power_budget = 0;
+	}
+
+	/* FIXME hub_wq hub requests should manage power switching */
+	omap_ohci_transceiver_power(1);
+
+	/* board init will have already handled HMC and mux setup.
+	 * any external transceiver should already be initialized
+	 * too, so all configured ports use the right signaling now.
+	 */
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * ohci_hcd_omap_probe - initialize OMAP-based HCDs
+ * @pdev: USB controller platform device
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ohci_hcd_omap_probe(struct platform_device *pdev)
+{
+	int retval, irq;
+	struct usb_hcd *hcd = NULL;
+
+	if (pdev->num_resources != 2) {
+		dev_err(&pdev->dev, "invalid num_resources: %i\n",
+		       pdev->num_resources);
+		return -ENODEV;
+	}
+
+	if (pdev->resource[0].flags != IORESOURCE_MEM
+			|| pdev->resource[1].flags != IORESOURCE_IRQ) {
+		dev_err(&pdev->dev, "invalid resource type\n");
+		return -ENODEV;
+	}
+
+	usb_host_ck = clk_get(&pdev->dev, "usb_hhc_ck");
+	if (IS_ERR(usb_host_ck))
+		return PTR_ERR(usb_host_ck);
+
+	if (!cpu_is_omap15xx())
+		usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
+	else
+		usb_dc_ck = clk_get(&pdev->dev, "lb_ck");
+
+	if (IS_ERR(usb_dc_ck)) {
+		clk_put(usb_host_ck);
+		return PTR_ERR(usb_dc_ck);
+	}
+
+	hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev,
+			dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto err0;
+	}
+	hcd->rsrc_start = pdev->resource[0].start;
+	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		dev_dbg(&pdev->dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto err1;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_err(&pdev->dev, "can't ioremap OHCI HCD\n");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = -ENXIO;
+		goto err3;
+	}
+	retval = usb_add_hcd(hcd, irq, 0);
+	if (retval)
+		goto err3;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+err3:
+	iounmap(hcd->regs);
+err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+	usb_put_hcd(hcd);
+err0:
+	clk_put(usb_dc_ck);
+	clk_put(usb_host_ck);
+	return retval;
+}
+
+/* may be called with controller, bus, and devices active */
+
+/**
+ * ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs
+ * @pdev: platform device of the USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of ohci_hcd_omap_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int ohci_hcd_omap_remove(struct platform_device *pdev)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(pdev);
+
+	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+	usb_remove_hcd(hcd);
+	omap_ohci_clock_power(0);
+	if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
+		(void) otg_set_host(hcd->usb_phy->otg, NULL);
+		usb_put_phy(hcd->usb_phy);
+	}
+	if (machine_is_omap_osk())
+		gpio_free(9);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+	clk_put(usb_dc_ck);
+	clk_put(usb_host_ck);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+
+static int ohci_omap_suspend(struct platform_device *pdev, pm_message_t message)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	bool do_wakeup = device_may_wakeup(&pdev->dev);
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	omap_ohci_clock_power(0);
+	return ret;
+}
+
+static int ohci_omap_resume(struct platform_device *dev)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(dev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	omap_ohci_clock_power(1);
+	ohci_resume(hcd, false);
+	return 0;
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Driver definition to register with the OMAP bus
+ */
+static struct platform_driver ohci_hcd_omap_driver = {
+	.probe		= ohci_hcd_omap_probe,
+	.remove		= ohci_hcd_omap_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+#ifdef	CONFIG_PM
+	.suspend	= ohci_omap_suspend,
+	.resume		= ohci_omap_resume,
+#endif
+	.driver		= {
+		.name	= "ohci",
+	},
+};
+
+static const struct ohci_driver_overrides omap_overrides __initconst = {
+	.product_desc	= "OMAP OHCI",
+	.reset		= ohci_omap_reset,
+};
+
+static int __init ohci_omap_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
+	return platform_driver_register(&ohci_hcd_omap_driver);
+}
+module_init(ohci_omap_init);
+
+static void __exit ohci_omap_cleanup(void)
+{
+	platform_driver_unregister(&ohci_hcd_omap_driver);
+}
+module_exit(ohci_omap_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:ohci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
new file mode 100644
index 0000000..fbcd349
--- /dev/null
+++ b/drivers/usb/host/ohci-pci.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * [ Initialisation is based on Linus'  ]
+ * [ uhci code and gregs ohci fragments ]
+ * [ (C) Copyright 1999 Linus Torvalds  ]
+ * [ (C) Copyright 1999 Gregory P. Smith]
+ *
+ * PCI Bus Glue
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "OHCI PCI platform driver"
+
+static const char hcd_name[] = "ohci-pci";
+
+/*-------------------------------------------------------------------------*/
+
+static int broken_suspend(struct usb_hcd *hcd)
+{
+	device_init_wakeup(&hcd->self.root_hub->dev, 0);
+	return 0;
+}
+
+/* AMD 756, for most chips (early revs), corrupts register
+ * values on read ... so enable the vendor workaround.
+ */
+static int ohci_quirk_amd756(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci->flags = OHCI_QUIRK_AMD756;
+	ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
+
+	/* also erratum 10 (suspend/resume issues) */
+	return broken_suspend(hcd);
+}
+
+/* Apple's OHCI driver has a lot of bizarre workarounds
+ * for this chip.  Evidently control and bulk lists
+ * can get confused.  (B&W G3 models, and ...)
+ */
+static int ohci_quirk_opti(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci_dbg (ohci, "WARNING: OPTi workarounds unavailable\n");
+
+	return 0;
+}
+
+/* Check for NSC87560. We have to look at the bridge (fn1) to
+ * identify the USB (fn2). This quirk might apply to more or
+ * even all NSC stuff.
+ */
+static int ohci_quirk_ns(struct usb_hcd *hcd)
+{
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+	struct pci_dev	*b;
+
+	b  = pci_get_slot (pdev->bus, PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
+	if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
+	    && b->vendor == PCI_VENDOR_ID_NS) {
+		struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+		ohci->flags |= OHCI_QUIRK_SUPERIO;
+		ohci_dbg (ohci, "Using NSC SuperIO setup\n");
+	}
+	pci_dev_put(b);
+
+	return 0;
+}
+
+/* Check for Compaq's ZFMicro chipset, which needs short
+ * delays before control or bulk queues get re-activated
+ * in finish_unlinks()
+ */
+static int ohci_quirk_zfmicro(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci->flags |= OHCI_QUIRK_ZFMICRO;
+	ohci_dbg(ohci, "enabled Compaq ZFMicro chipset quirks\n");
+
+	return 0;
+}
+
+/* Check for Toshiba SCC OHCI which has big endian registers
+ * and little endian in memory data structures
+ */
+static int ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	/* That chip is only present in the southbridge of some
+	 * cell based platforms which are supposed to select
+	 * CONFIG_USB_OHCI_BIG_ENDIAN_MMIO; we verify here that
+	 * this was indeed the case.
+	 */
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	ohci->flags |= OHCI_QUIRK_BE_MMIO;
+	ohci_dbg (ohci, "enabled big endian Toshiba quirk\n");
+	return 0;
+#else
+	ohci_err (ohci, "unsupported big endian Toshiba quirk\n");
+	return -ENXIO;
+#endif
+}
+
+/* Check for NEC chip and apply quirk for allegedly lost interrupts.
+ */
+
+static void ohci_quirk_nec_worker(struct work_struct *work)
+{
+	struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
+	int status;
+
+	status = ohci_restart(ohci);
+	if (status != 0)
+		ohci_err(ohci, "Restarting NEC controller failed in %s, %d\n",
+			 "ohci_restart", status);
+}
+
+static int ohci_quirk_nec(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci->flags |= OHCI_QUIRK_NEC;
+	INIT_WORK(&ohci->nec_work, ohci_quirk_nec_worker);
+	ohci_dbg (ohci, "enabled NEC chipset lost interrupt quirk\n");
+
+	return 0;
+}
+
+static int ohci_quirk_amd700(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	if (usb_amd_find_chipset_info())
+		ohci->flags |= OHCI_QUIRK_AMD_PLL;
+
+	/* SB800 needs pre-fetch fix */
+	if (usb_amd_prefetch_quirk()) {
+		ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
+		ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
+	}
+
+	ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
+	return 0;
+}
+
+static int ohci_quirk_qemu(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci->flags |= OHCI_QUIRK_QEMU;
+	ohci_dbg(ohci, "enabled qemu quirk\n");
+	return 0;
+}
+
+/* List of quirks for OHCI */
+static const struct pci_device_id ohci_pci_quirks[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x740c),
+		.driver_data = (unsigned long)ohci_quirk_amd756,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_OPTI, 0xc861),
+		.driver_data = (unsigned long)ohci_quirk_opti,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_ANY_ID),
+		.driver_data = (unsigned long)ohci_quirk_ns,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xa0f8),
+		.driver_data = (unsigned long)ohci_quirk_zfmicro,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6),
+		.driver_data = (unsigned long)ohci_quirk_toshiba_scc,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB),
+		.driver_data = (unsigned long)ohci_quirk_nec,
+	},
+	{
+		/* Toshiba portege 4000 */
+		.vendor		= PCI_VENDOR_ID_AL,
+		.device		= 0x5237,
+		.subvendor	= PCI_VENDOR_ID_TOSHIBA,
+		.subdevice	= 0x0004,
+		.driver_data	= (unsigned long) broken_suspend,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
+		.driver_data = (unsigned long) broken_suspend,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+		.driver_data = (unsigned long)ohci_quirk_amd700,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_APPLE,
+		.device		= 0x003f,
+		.subvendor	= PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+		.subdevice	= PCI_SUBDEVICE_ID_QEMU,
+		.driver_data	= (unsigned long)ohci_quirk_qemu,
+	},
+
+	/* FIXME for some of the early AMD 760 southbridges, OHCI
+	 * won't work at all.  blacklist them.
+	 */
+
+	{},
+};
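+
+/* Dispatch sketch: ohci_pci_reset() below runs pci_match_id() against
+ * this table and, on a hit, casts .driver_data back to a quirk
+ * function, so a new workaround only needs a new table entry.
+ */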
+
+static int ohci_pci_reset (struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+	int ret = 0;
+
+	if (hcd->self.controller) {
+		const struct pci_device_id *quirk_id;
+
+		quirk_id = pci_match_id(ohci_pci_quirks, pdev);
+		if (quirk_id != NULL) {
+			int (*quirk)(struct usb_hcd *ohci);
+			quirk = (void *)quirk_id->driver_data;
+			ret = quirk(hcd);
+		}
+	}
+
+	if (ret == 0)
+		ret = ohci_setup(hcd);
+	/*
+	 * After ohci_setup(), RWC may not be set for add-in PCI cards;
+	 * carry the PCI PM wakeup capability over to the controller.
+	 */
+	if (device_can_wakeup(&pdev->dev))
+		ohci->hc_control |= OHCI_CTRL_RWC;
+	return ret;
+}
+
+static struct hc_driver __read_mostly ohci_pci_hc_driver;
+
+static const struct ohci_driver_overrides pci_overrides __initconst = {
+	.product_desc =		"OHCI PCI host controller",
+	.reset =		ohci_pci_reset,
+};
+
+static const struct pci_device_id pci_ids [] = { {
+	/* handle any USB OHCI controller */
+	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
+	.driver_data =	(unsigned long) &ohci_pci_hc_driver,
+	}, {
+	/* The device in the ConneXT I/O hub has no class reg */
+	PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
+	.driver_data =	(unsigned long) &ohci_pci_hc_driver,
+	}, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE (pci, pci_ids);
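+
+/* PCI_DEVICE_CLASS with a ~0 mask matches the full 24-bit class code
+ * (base class, subclass, prog-if), so the first entry above binds to
+ * any OHCI-compliant controller regardless of vendor or device ID.
+ */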
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver ohci_pci_driver = {
+	.name =		(char *) hcd_name,
+	.id_table =	pci_ids,
+
+	.probe =	usb_hcd_pci_probe,
+	.remove =	usb_hcd_pci_remove,
+	.shutdown =	usb_hcd_pci_shutdown,
+
+#ifdef CONFIG_PM
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
+};
+
+static int __init ohci_pci_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+#ifdef	CONFIG_PM
+	/* Entries for the PCI suspend/resume callbacks are special */
+	ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+	ohci_pci_hc_driver.pci_resume = ohci_resume;
+#endif
+
+	return pci_register_driver(&ohci_pci_driver);
+}
+module_init(ohci_pci_init);
+
+static void __exit ohci_pci_cleanup(void)
+{
+	pci_unregister_driver(&ohci_pci_driver);
+}
+module_exit(ohci_pci_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: ehci_pci");
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
new file mode 100644
index 0000000..65a1c3f
--- /dev/null
+++ b/drivers/usb/host/ohci-platform.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic platform ohci driver
+ *
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Derived from the OHCI-SSB driver
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI generic platform driver"
+#define OHCI_MAX_CLKS 3
+#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
+
+struct ohci_platform_priv {
+	struct clk *clks[OHCI_MAX_CLKS];
+	struct reset_control *resets;
+};
+
+static const char hcd_name[] = "ohci-platform";
+
+static int ohci_platform_power_on(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+	int clk, ret;
+
+	for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++) {
+		ret = clk_prepare_enable(priv->clks[clk]);
+		if (ret)
+			goto err_disable_clks;
+	}
+
+	return 0;
+
+err_disable_clks:
+	while (--clk >= 0)
+		clk_disable_unprepare(priv->clks[clk]);
+
+	return ret;
+}
+
+static void ohci_platform_power_off(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+	int clk;
+
+	for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
+		if (priv->clks[clk])
+			clk_disable_unprepare(priv->clks[clk]);
+}
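+
+/* Clock ordering note: clocks are enabled from index 0 up and disabled
+ * in reverse, the usual nesting when earlier clocks may parent later
+ * ones in the tree.
+ */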
+
+static struct hc_driver __read_mostly ohci_platform_hc_driver;
+
+static const struct ohci_driver_overrides platform_overrides __initconst = {
+	.product_desc =		"Generic Platform OHCI controller",
+	.extra_priv_size =	sizeof(struct ohci_platform_priv),
+};
+
+static struct usb_ohci_pdata ohci_platform_defaults = {
+	.power_on =		ohci_platform_power_on,
+	.power_suspend =	ohci_platform_power_off,
+	.power_off =		ohci_platform_power_off,
+};
+
+static int ohci_platform_probe(struct platform_device *dev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res_mem;
+	struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct ohci_platform_priv *priv;
+	struct ohci_hcd *ohci;
+	int err, irq, clk = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * Use reasonable defaults so platforms don't have to provide these
+	 * with DT probing on ARM.
+	 */
+	if (!pdata)
+		pdata = &ohci_platform_defaults;
+
+	err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		dev_err(&dev->dev, "no irq provided\n");
+		return irq;
+	}
+
+	hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+			dev_name(&dev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	platform_set_drvdata(dev, hcd);
+	dev->dev.platform_data = pdata;
+	priv = hcd_to_ohci_priv(hcd);
+	ohci = hcd_to_ohci(hcd);
+
+	if (pdata == &ohci_platform_defaults && dev->dev.of_node) {
+		if (of_property_read_bool(dev->dev.of_node, "big-endian-regs"))
+			ohci->flags |= OHCI_QUIRK_BE_MMIO;
+
+		if (of_property_read_bool(dev->dev.of_node, "big-endian-desc"))
+			ohci->flags |= OHCI_QUIRK_BE_DESC;
+
+		if (of_property_read_bool(dev->dev.of_node, "big-endian"))
+			ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+
+		if (of_property_read_bool(dev->dev.of_node, "no-big-frame-no"))
+			ohci->flags |= OHCI_QUIRK_FRAME_NO;
+
+		if (of_property_read_bool(dev->dev.of_node,
+					  "remote-wakeup-connected"))
+			ohci->hc_control = OHCI_CTRL_RWC;
+
+		of_property_read_u32(dev->dev.of_node, "num-ports",
+				     &ohci->num_ports);
+
+		for (clk = 0; clk < OHCI_MAX_CLKS; clk++) {
+			priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+			if (IS_ERR(priv->clks[clk])) {
+				err = PTR_ERR(priv->clks[clk]);
+				if (err == -EPROBE_DEFER)
+					goto err_put_clks;
+				priv->clks[clk] = NULL;
+				break;
+			}
+		}
+
+		priv->resets = devm_reset_control_array_get_optional_shared(
+								&dev->dev);
+		if (IS_ERR(priv->resets)) {
+			err = PTR_ERR(priv->resets);
+			goto err_put_clks;
+		}
+
+		err = reset_control_deassert(priv->resets);
+		if (err)
+			goto err_put_clks;
+	}
+
+	if (pdata->big_endian_desc)
+		ohci->flags |= OHCI_QUIRK_BE_DESC;
+	if (pdata->big_endian_mmio)
+		ohci->flags |= OHCI_QUIRK_BE_MMIO;
+	if (pdata->no_big_frame_no)
+		ohci->flags |= OHCI_QUIRK_FRAME_NO;
+	if (pdata->num_ports)
+		ohci->num_ports = pdata->num_ports;
+
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	if (ohci->flags & OHCI_QUIRK_BE_MMIO) {
+		dev_err(&dev->dev,
+			"Error: CONFIG_USB_OHCI_BIG_ENDIAN_MMIO not set\n");
+		err = -EINVAL;
+		goto err_reset;
+	}
+#endif
+#ifndef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+	if (ohci->flags & OHCI_QUIRK_BE_DESC) {
+		dev_err(&dev->dev,
+			"Error: CONFIG_USB_OHCI_BIG_ENDIAN_DESC not set\n");
+		err = -EINVAL;
+		goto err_reset;
+	}
+#endif
+
+	pm_runtime_set_active(&dev->dev);
+	pm_runtime_enable(&dev->dev);
+	if (pdata->power_on) {
+		err = pdata->power_on(dev);
+		if (err < 0)
+			goto err_reset;
+	}
+
+	res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto err_power;
+	}
+	hcd->rsrc_start = res_mem->start;
+	hcd->rsrc_len = resource_size(res_mem);
+
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err)
+		goto err_power;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	platform_set_drvdata(dev, hcd);
+
+	return err;
+
+err_power:
+	if (pdata->power_off)
+		pdata->power_off(dev);
+err_reset:
+	pm_runtime_disable(&dev->dev);
+	reset_control_assert(priv->resets);
+err_put_clks:
+	while (--clk >= 0)
+		clk_put(priv->clks[clk]);
+
+	if (pdata == &ohci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	usb_put_hcd(hcd);
+
+	return err;
+}
+
+static int ohci_platform_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+	int clk;
+
+	pm_runtime_get_sync(&dev->dev);
+	usb_remove_hcd(hcd);
+
+	if (pdata->power_off)
+		pdata->power_off(dev);
+
+	reset_control_assert(priv->resets);
+
+	for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
+		clk_put(priv->clks[clk]);
+
+	usb_put_hcd(hcd);
+
+	pm_runtime_put_sync(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
+	if (pdata == &ohci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ohci_platform_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	if (pdata->power_suspend)
+		pdata->power_suspend(pdev);
+
+	return ret;
+}
+
+static int ohci_platform_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (pdata->power_on) {
+		int err = pdata->power_on(pdev);
+		if (err < 0)
+			return err;
+	}
+
+	ohci_resume(hcd, false);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id ohci_platform_ids[] = {
+	{ .compatible = "generic-ohci", },
+	{ .compatible = "cavium,octeon-6335-ohci", },
+	{ .compatible = "ti,ohci-omap3", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ohci_platform_ids);
+
+static const struct platform_device_id ohci_platform_table[] = {
+	{ "ohci-platform", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, ohci_platform_table);
+
+static SIMPLE_DEV_PM_OPS(ohci_platform_pm_ops, ohci_platform_suspend,
+	ohci_platform_resume);
+
+static struct platform_driver ohci_platform_driver = {
+	.id_table	= ohci_platform_table,
+	.probe		= ohci_platform_probe,
+	.remove		= ohci_platform_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "ohci-platform",
+		.pm	= &ohci_platform_pm_ops,
+		.of_match_table = ohci_platform_ids,
+	}
+};
+
+static int __init ohci_platform_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
+	return platform_driver_register(&ohci_platform_driver);
+}
+module_init(ohci_platform_init);
+
+static void __exit ohci_platform_cleanup(void)
+{
+	platform_driver_unregister(&ohci_platform_driver);
+}
+module_exit(ohci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_AUTHOR("Alan Stern");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
new file mode 100644
index 0000000..76a9b40
--- /dev/null
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ * (C) Copyright 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Bus glue for OHCI HC on the of_platform bus
+ *
+ * Modified for of_platform bus from ohci-sa1111.c
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/signal.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/prom.h>
+
+static int
+ohci_ppc_of_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	int		ret;
+
+	if ((ret = ohci_init(ohci)) < 0)
+		return ret;
+
+	if ((ret = ohci_run(ohci)) < 0) {
+		dev_err(hcd->self.controller, "can't start %s\n",
+			hcd->self.bus_name);
+		ohci_stop(hcd);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct hc_driver ohci_ppc_of_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"OF OHCI",
+	.hcd_priv_size =	sizeof(struct ohci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.start =		ohci_ppc_of_start,
+	.stop =			ohci_stop,
+	.shutdown =		ohci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	ohci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+static int ohci_hcd_ppc_of_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct usb_hcd *hcd;
+	struct ohci_hcd	*ohci;
+	struct resource res;
+	int irq;
+
+	int rv;
+	int is_bigendian;
+	struct device_node *np;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	is_bigendian =
+		of_device_is_compatible(dn, "ohci-bigendian") ||
+		of_device_is_compatible(dn, "ohci-be");
+
+	dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	hcd = usb_create_hcd(&ohci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = resource_size(&res);
+
+	hcd->regs = devm_ioremap_resource(&op->dev, &res);
+	if (IS_ERR(hcd->regs)) {
+		rv = PTR_ERR(hcd->regs);
+		goto err_rmr;
+	}
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&op->dev, "%s: irq_of_parse_and_map failed\n",
+			__FILE__);
+		rv = -EBUSY;
+		goto err_rmr;
+	}
+
+	ohci = hcd_to_ohci(hcd);
+	if (is_bigendian) {
+		ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
+		if (of_device_is_compatible(dn, "fsl,mpc5200-ohci"))
+			ohci->flags |= OHCI_QUIRK_FRAME_NO;
+		if (of_device_is_compatible(dn, "mpc5200-ohci"))
+			ohci->flags |= OHCI_QUIRK_FRAME_NO;
+	}
+
+	ohci_hcd_init(ohci);
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return 0;
+	}
+
+	/* by now, 440epx is known to show usb_23 erratum */
+	np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
+
+	/* Work around: at this point ohci_run() has executed, so the
+	 * controller is running and the root ports are set up.  If the
+	 * ehci driver is loaded, put the ohci core into the suspended
+	 * state; the ehci driver will bring it back out when/if a
+	 * non-high-speed USB device is attached to the host port.  If
+	 * the ehci driver is not loaded, do nothing.  request_mem_region()
+	 * is used to test whether the ehci driver is loaded.
+	 */
+	if (np !=  NULL) {
+		if (!of_address_to_resource(np, 0, &res)) {
+			if (!request_mem_region(res.start, 0x4, hcd_name)) {
+				writel_be((readl_be(&ohci->regs->control) |
+					OHCI_USB_SUSPEND), &ohci->regs->control);
+				(void) readl_be(&ohci->regs->control);
+			} else
+				release_mem_region(res.start, 0x4);
+		} else
+			pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
+	}
+
+	irq_dispose_mapping(irq);
+err_rmr:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+static int ohci_hcd_ppc_of_remove(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	irq_dispose_mapping(hcd->irq);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static const struct of_device_id ohci_hcd_ppc_of_match[] = {
+#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
+	{
+		.name = "usb",
+		.compatible = "ohci-bigendian",
+	},
+	{
+		.name = "usb",
+		.compatible = "ohci-be",
+	},
+#endif
+#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
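+	/* "ohci-littledian" below is a historical misspelling, kept so
+	 * existing device trees that use it continue to match.
+	 */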
+	{
+		.name = "usb",
+		.compatible = "ohci-littledian",
+	},
+	{
+		.name = "usb",
+		.compatible = "ohci-le",
+	},
+#endif
+	{},
+};
+MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
+
+#if	!defined(CONFIG_USB_OHCI_HCD_PPC_OF_BE) && \
+	!defined(CONFIG_USB_OHCI_HCD_PPC_OF_LE)
+#error "No endianness selected for ppc-of-ohci"
+#endif
+
+static struct platform_driver ohci_hcd_ppc_of_driver = {
+	.probe		= ohci_hcd_ppc_of_probe,
+	.remove		= ohci_hcd_ppc_of_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name = "ppc-of-ohci",
+		.of_match_table = ohci_hcd_ppc_of_match,
+	},
+};
+
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
new file mode 100644
index 0000000..395f9d3
--- /dev/null
+++ b/drivers/usb/host/ohci-ps3.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  PS3 OHCI Host Controller driver
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ */
+
+#include <asm/firmware.h>
+#include <asm/ps3.h>
+
+static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci->flags |= OHCI_QUIRK_BE_MMIO;
+	ohci_hcd_init(ohci);
+	return ohci_init(ohci);
+}
+
+static int ps3_ohci_hc_start(struct usb_hcd *hcd)
+{
+	int result;
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	/* Handle root hub init quirk in spider south bridge. */
+	/* Also set PwrOn2PwrGood to 0x7f (254ms). */
+
+	ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM,
+		&ohci->regs->roothub.a);
+	ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b);
+
+	result = ohci_run(ohci);
+
+	if (result < 0) {
+		dev_err(hcd->self.controller, "can't start %s\n",
+			hcd->self.bus_name);
+		ohci_stop(hcd);
+	}
+
+	return result;
+}
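+
+/* The 0x7f written to the top byte of roothub.a above is the POTPGT
+ * field (power-on to power-good time, in 2 ms units), i.e. roughly
+ * 254 ms, matching the comment in ps3_ohci_hc_start().
+ */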
+
+static const struct hc_driver ps3_ohci_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "PS3 OHCI Host Controller",
+	.hcd_priv_size		= sizeof(struct ohci_hcd),
+	.irq			= ohci_irq,
+	.flags			= HCD_MEMORY | HCD_USB11,
+	.reset			= ps3_ohci_hc_reset,
+	.start			= ps3_ohci_hc_start,
+	.stop			= ohci_stop,
+	.shutdown		= ohci_shutdown,
+	.urb_enqueue		= ohci_urb_enqueue,
+	.urb_dequeue		= ohci_urb_dequeue,
+	.endpoint_disable	= ohci_endpoint_disable,
+	.get_frame_number	= ohci_get_frame,
+	.hub_status_data	= ohci_hub_status_data,
+	.hub_control		= ohci_hub_control,
+	.start_port_reset	= ohci_start_port_reset,
+#if defined(CONFIG_PM)
+	.bus_suspend		= ohci_bus_suspend,
+	.bus_resume		= ohci_bus_resume,
+#endif
+};
+
+static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
+{
+	int result;
+	struct usb_hcd *hcd;
+	unsigned int virq;
+	static u64 dummy_mask;
+
+	if (usb_disabled()) {
+		result = -ENODEV;
+		goto fail_start;
+	}
+
+	result = ps3_open_hv_device(dev);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		result = -EPERM;
+		goto fail_open;
+	}
+
+	result = ps3_dma_region_create(dev->d_region);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
+			"(%d)\n", __func__, __LINE__, result);
+		BUG_ON("check region type");
+		goto fail_dma_region;
+	}
+
+	result = ps3_mmio_region_create(dev->m_region);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
+			__func__, __LINE__);
+		result = -EPERM;
+		goto fail_mmio_region;
+	}
+
+	dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
+		__LINE__, dev->m_region->lpar_addr);
+
+	result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
+			__func__, __LINE__, virq);
+		result = -EPERM;
+		goto fail_irq;
+	}
+
+	dummy_mask = DMA_BIT_MASK(32);
+	dev->core.dma_mask = &dummy_mask;
+	dma_set_coherent_mask(&dev->core, dummy_mask);
+
+	hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core));
+
+	if (!hcd) {
+		dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
+			__LINE__);
+		result = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	hcd->rsrc_start = dev->m_region->lpar_addr;
+	hcd->rsrc_len = dev->m_region->len;
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
+		dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
+			__func__, __LINE__);
+
+	hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
+
+	if (!hcd->regs) {
+		dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		result = -EPERM;
+		goto fail_ioremap;
+	}
+
+	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->rsrc_start);
+	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len   %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->rsrc_len);
+	dev_dbg(&dev->core, "%s:%d: hcd->regs       %lxh\n", __func__, __LINE__,
+		(unsigned long)hcd->regs);
+	dev_dbg(&dev->core, "%s:%d: virq            %lu\n", __func__, __LINE__,
+		(unsigned long)virq);
+
+	ps3_system_bus_set_drvdata(dev, hcd);
+
+	result = usb_add_hcd(hcd, virq, 0);
+
+	if (result) {
+		dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
+			__func__, __LINE__, result);
+		goto fail_add_hcd;
+	}
+
+	device_wakeup_enable(hcd->self.controller);
+	return result;
+
+fail_add_hcd:
+	iounmap(hcd->regs);
+fail_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	ps3_io_irq_destroy(virq);
+fail_irq:
+	ps3_free_mmio_region(dev->m_region);
+fail_mmio_region:
+	ps3_dma_region_free(dev->d_region);
+fail_dma_region:
+	ps3_close_hv_device(dev);
+fail_open:
+fail_start:
+	return result;
+}
+
+static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
+{
+	unsigned int tmp;
+	struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
+
+	BUG_ON(!hcd);
+
+	dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
+	dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
+
+	tmp = hcd->irq;
+
+	ohci_shutdown(hcd);
+	usb_remove_hcd(hcd);
+
+	ps3_system_bus_set_drvdata(dev, NULL);
+
+	BUG_ON(!hcd->regs);
+	iounmap(hcd->regs);
+
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+
+	ps3_io_irq_destroy(tmp);
+	ps3_free_mmio_region(dev->m_region);
+
+	ps3_dma_region_free(dev->d_region);
+	ps3_close_hv_device(dev);
+
+	return 0;
+}
+
+static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
+{
+	return firmware_has_feature(FW_FEATURE_PS3_LV1)
+		? ps3_system_bus_driver_register(drv)
+		: 0;
+}
+
+static void ps3_ohci_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+		ps3_system_bus_driver_unregister(drv);
+}
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_OHCI);
+
+static struct ps3_system_bus_driver ps3_ohci_driver = {
+	.core.name = "ps3-ohci-driver",
+	.core.owner = THIS_MODULE,
+	.match_id = PS3_MATCH_ID_OHCI,
+	.probe = ps3_ohci_probe,
+	.remove = ps3_ohci_remove,
+	.shutdown = ps3_ohci_remove,
+};
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
new file mode 100644
index 0000000..3e24749
--- /dev/null
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Bus Glue for pxa27x
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * Modified for LH7A404 from ohci-sa1111.c
+ *  by Durgesh Pattamatta <pattamattad@sharpsec.com>
+ *
+ * Modified for pxa27x from ohci-lh7a404.c
+ *  by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/usb-ohci-pxa27x.h>
+#include <linux/platform_data/usb-pxa3xx-ulpi.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+
+#include <mach/hardware.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI PXA27x/PXA3xx driver"
+
+/*
+ * UHC: USB Host Controller (OHCI-like) register definitions
+ */
+#define UHCREV		(0x0000) /* UHC HCI Spec Revision */
+#define UHCHCON		(0x0004) /* UHC Host Control Register */
+#define UHCCOMS		(0x0008) /* UHC Command Status Register */
+#define UHCINTS		(0x000C) /* UHC Interrupt Status Register */
+#define UHCINTE		(0x0010) /* UHC Interrupt Enable */
+#define UHCINTD		(0x0014) /* UHC Interrupt Disable */
+#define UHCHCCA		(0x0018) /* UHC Host Controller Comm. Area */
+#define UHCPCED		(0x001C) /* UHC Period Current Endpt Descr */
+#define UHCCHED		(0x0020) /* UHC Control Head Endpt Descr */
+#define UHCCCED		(0x0024) /* UHC Control Current Endpt Descr */
+#define UHCBHED		(0x0028) /* UHC Bulk Head Endpt Descr */
+#define UHCBCED		(0x002C) /* UHC Bulk Current Endpt Descr */
+#define UHCDHEAD	(0x0030) /* UHC Done Head */
+#define UHCFMI		(0x0034) /* UHC Frame Interval */
+#define UHCFMR		(0x0038) /* UHC Frame Remaining */
+#define UHCFMN		(0x003C) /* UHC Frame Number */
+#define UHCPERS		(0x0040) /* UHC Periodic Start */
+#define UHCLS		(0x0044) /* UHC Low Speed Threshold */
+
+#define UHCRHDA		(0x0048) /* UHC Root Hub Descriptor A */
+#define UHCRHDA_NOCP	(1 << 12)	/* No over current protection */
+#define UHCRHDA_OCPM	(1 << 11)	/* Over Current Protection Mode */
+#define UHCRHDA_POTPGT(x) \
+			(((x) & 0xff) << 24) /* Power On To Power Good Time */
+
+#define UHCRHDB		(0x004C) /* UHC Root Hub Descriptor B */
+#define UHCRHS		(0x0050) /* UHC Root Hub Status */
+#define UHCRHPS1	(0x0054) /* UHC Root Hub Port 1 Status */
+#define UHCRHPS2	(0x0058) /* UHC Root Hub Port 2 Status */
+#define UHCRHPS3	(0x005C) /* UHC Root Hub Port 3 Status */
+
+#define UHCSTAT		(0x0060) /* UHC Status Register */
+#define UHCSTAT_UPS3	(1 << 16)	/* USB Power Sense Port3 */
+#define UHCSTAT_SBMAI	(1 << 15)	/* System Bus Master Abort Interrupt*/
+#define UHCSTAT_SBTAI	(1 << 14)	/* System Bus Target Abort Interrupt*/
+#define UHCSTAT_UPRI	(1 << 13)	/* USB Port Resume Interrupt */
+#define UHCSTAT_UPS2	(1 << 12)	/* USB Power Sense Port 2 */
+#define UHCSTAT_UPS1	(1 << 11)	/* USB Power Sense Port 1 */
+#define UHCSTAT_HTA	(1 << 10)	/* HCI Target Abort */
+#define UHCSTAT_HBA	(1 << 8)	/* HCI Buffer Active */
+#define UHCSTAT_RWUE	(1 << 7)	/* HCI Remote Wake Up Event */
+
+#define UHCHR           (0x0064) /* UHC Reset Register */
+#define UHCHR_SSEP3	(1 << 11)	/* Sleep Standby Enable for Port3 */
+#define UHCHR_SSEP2	(1 << 10)	/* Sleep Standby Enable for Port2 */
+#define UHCHR_SSEP1	(1 << 9)	/* Sleep Standby Enable for Port1 */
+#define UHCHR_PCPL	(1 << 7)	/* Power control polarity low */
+#define UHCHR_PSPL	(1 << 6)	/* Power sense polarity low */
+#define UHCHR_SSE	(1 << 5)	/* Sleep Standby Enable */
+#define UHCHR_UIT	(1 << 4)	/* USB Interrupt Test */
+#define UHCHR_SSDC	(1 << 3)	/* Simulation Scale Down Clock */
+#define UHCHR_CGR	(1 << 2)	/* Clock Generation Reset */
+#define UHCHR_FHR	(1 << 1)	/* Force Host Controller Reset */
+#define UHCHR_FSBIR	(1 << 0)	/* Force System Bus Iface Reset */
+
+#define UHCHIE          (0x0068) /* UHC Interrupt Enable Register*/
+#define UHCHIE_UPS3IE	(1 << 14)	/* Power Sense Port3 IntEn */
+#define UHCHIE_UPRIE	(1 << 13)	/* Port Resume IntEn */
+#define UHCHIE_UPS2IE	(1 << 12)	/* Power Sense Port2 IntEn */
+#define UHCHIE_UPS1IE	(1 << 11)	/* Power Sense Port1 IntEn */
+#define UHCHIE_TAIE	(1 << 10)	/* HCI Interface Transfer Abort
+					   Interrupt Enable*/
+#define UHCHIE_HBAIE	(1 << 8)	/* HCI Buffer Active IntEn */
+#define UHCHIE_RWIE	(1 << 7)	/* Remote Wake-up IntEn */
+
+#define UHCHIT          (0x006C) /* UHC Interrupt Test register */
+
+#define PXA_UHC_MAX_PORTNUM    3
+
+static const char hcd_name[] = "ohci-pxa27x";
+
+static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
+
+struct pxa27x_ohci {
+	struct clk	*clk;
+	void __iomem	*mmio_base;
+	struct regulator *vbus[3];
+	bool		vbus_enabled[3];
+};
+
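+/* ->priv is the extra space reserved by ohci_init_driver() through
+ * pxa27x_overrides.extra_priv_size (see the bottom of this file). */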
+#define to_pxa27x_ohci(hcd)	((struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv))
+
+/*
+  PMM_NPS_MODE -- PMM Non-power switching mode
+      Ports are powered continuously.
+
+  PMM_GLOBAL_MODE -- PMM global switching mode
+      All ports are powered at the same time.
+
+  PMM_PERPORT_MODE -- PMM per port switching mode
+      Ports are powered individually.
+ */
+static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
+{
+	uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
+	uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);
+
+	switch (mode) {
+	case PMM_NPS_MODE:
+		uhcrhda |= RH_A_NPS;
+		break;
+	case PMM_GLOBAL_MODE:
+		/* clear both NPS and PSM: all ports are switched together */
+		uhcrhda &= ~(RH_A_NPS | RH_A_PSM);
+		break;
+	case PMM_PERPORT_MODE:
+		uhcrhda &= ~(RH_A_NPS);
+		uhcrhda |= RH_A_PSM;
+
+		/* Set port power control mask bits, only 3 ports. */
+		uhcrhdb |= (0x7<<17);
+		break;
+	default:
+		printk(KERN_ERR
+			"Invalid mode %d, set to non-power switching mode.\n",
+			mode);
+
+		uhcrhda |= RH_A_NPS;
+	}
+
+	__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
+	__raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
+	return 0;
+}
+
+static int pxa27x_ohci_set_vbus_power(struct pxa27x_ohci *pxa_ohci,
+				      unsigned int port, bool enable)
+{
+	struct regulator *vbus = pxa_ohci->vbus[port];
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(vbus))
+		return 0;
+
+	if (enable && !pxa_ohci->vbus_enabled[port])
+		ret = regulator_enable(vbus);
+	else if (!enable && pxa_ohci->vbus_enabled[port])
+		ret = regulator_disable(vbus);
+
+	if (ret < 0)
+		return ret;
+
+	pxa_ohci->vbus_enabled[port] = enable;
+
+	return 0;
+}
+
+static int pxa27x_ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+				   u16 wIndex, char *buf, u16 wLength)
+{
+	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+	int ret;
+
+	switch (typeReq) {
+	case SetPortFeature:
+	case ClearPortFeature:
+		if (!wIndex || wIndex > 3)
+			return -EPIPE;
+
+		if (wValue != USB_PORT_FEAT_POWER)
+			break;
+
+		ret = pxa27x_ohci_set_vbus_power(pxa_ohci, wIndex - 1,
+						 typeReq == SetPortFeature);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+/*-------------------------------------------------------------------------*/
+
+static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
+				   struct pxaohci_platform_data *inf)
+{
+	uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
+	uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
+
+	if (inf->flags & ENABLE_PORT1)
+		uhchr &= ~UHCHR_SSEP1;
+
+	if (inf->flags & ENABLE_PORT2)
+		uhchr &= ~UHCHR_SSEP2;
+
+	if (inf->flags & ENABLE_PORT3)
+		uhchr &= ~UHCHR_SSEP3;
+
+	if (inf->flags & POWER_CONTROL_LOW)
+		uhchr |= UHCHR_PCPL;
+
+	if (inf->flags & POWER_SENSE_LOW)
+		uhchr |= UHCHR_PSPL;
+
+	if (inf->flags & NO_OC_PROTECTION)
+		uhcrhda |= UHCRHDA_NOCP;
+	else
+		uhcrhda &= ~UHCRHDA_NOCP;
+
+	if (inf->flags & OC_MODE_PERPORT)
+		uhcrhda |= UHCRHDA_OCPM;
+	else
+		uhcrhda &= ~UHCRHDA_OCPM;
+
+	if (inf->power_on_delay) {
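+		/* POTPGT counts in units of 2 ms, hence the divide by 2 */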
+		uhcrhda &= ~UHCRHDA_POTPGT(0xff);
+		uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
+	}
+
+	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+	__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
+}
+
+static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
+{
+	uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
+
+	__raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
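+	/* hold FHR briefly; the ~11 us delay presumably satisfies the
+	 * controller's minimum reset pulse width */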
+	udelay(11);
+	__raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
+}
+
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph()	do {} while (0)
+#endif
+
+static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
+{
+	int retval;
+	struct pxaohci_platform_data *inf;
+	uint32_t uhchr;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	inf = dev_get_platdata(dev);
+
+	retval = clk_prepare_enable(pxa_ohci->clk);
+	if (retval)
+		return retval;
+
+	pxa27x_reset_hc(pxa_ohci);
+
+	uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
+	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+
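+	/* the controller clears FSBIR once the bus-interface reset is done */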
+	while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
+		cpu_relax();
+
+	pxa27x_setup_hc(pxa_ohci, inf);
+
+	if (inf->init)
+		retval = inf->init(dev);
+
+	if (retval < 0) {
+		clk_disable_unprepare(pxa_ohci->clk);
+		return retval;
+	}
+
+	if (cpu_is_pxa3xx())
+		pxa3xx_u2d_start_hc(&hcd->self);
+
+	uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
+	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
+	__raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
+
+	/* Clear any OTG Pin Hold */
+	pxa27x_clear_otgph();
+	return 0;
+}
+
+static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
+{
+	struct pxaohci_platform_data *inf;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	uint32_t uhccoms;
+
+	inf = dev_get_platdata(dev);
+
+	if (cpu_is_pxa3xx())
+		pxa3xx_u2d_stop_hc(&hcd->self);
+
+	if (inf->exit)
+		inf->exit(dev);
+
+	pxa27x_reset_hc(pxa_ohci);
+
+	/* Host Controller Reset */
+	uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
+	__raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
+	udelay(10);
+
+	clk_disable_unprepare(pxa_ohci->clk);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ohci_dt_ids[] = {
+	{ .compatible = "marvell,pxa-ohci" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
+
+static int ohci_pxa_of_init(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pxaohci_platform_data *pdata;
+	u32 tmp;
+	int ret;
+
+	if (!np)
+		return 0;
+
+	/* Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_property_read_bool(np, "marvell,enable-port1"))
+		pdata->flags |= ENABLE_PORT1;
+	if (of_property_read_bool(np, "marvell,enable-port2"))
+		pdata->flags |= ENABLE_PORT2;
+	if (of_property_read_bool(np, "marvell,enable-port3"))
+		pdata->flags |= ENABLE_PORT3;
+	if (of_property_read_bool(np, "marvell,port-sense-low"))
+		pdata->flags |= POWER_SENSE_LOW;
+	if (of_property_read_bool(np, "marvell,power-control-low"))
+		pdata->flags |= POWER_CONTROL_LOW;
+	if (of_property_read_bool(np, "marvell,no-oc-protection"))
+		pdata->flags |= NO_OC_PROTECTION;
+	if (of_property_read_bool(np, "marvell,oc-mode-perport"))
+		pdata->flags |= OC_MODE_PERPORT;
+	if (!of_property_read_u32(np, "marvell,power-on-delay", &tmp))
+		pdata->power_on_delay = tmp;
+	if (!of_property_read_u32(np, "marvell,port-mode", &tmp))
+		pdata->port_mode = tmp;
+	if (!of_property_read_u32(np, "marvell,power-budget", &tmp))
+		pdata->power_budget = tmp;
+
+	pdev->dev.platform_data = pdata;
+
+	return 0;
+}
+#else
+static int ohci_pxa_of_init(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+
+/**
+ * ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs
+ * @pdev: USB Host Controller being probed
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
+{
+	int retval, irq;
+	struct usb_hcd *hcd;
+	struct pxaohci_platform_data *inf;
+	struct pxa27x_ohci *pxa_ohci;
+	struct ohci_hcd *ohci;
+	struct resource *r;
+	struct clk *usb_clk;
+	unsigned int i;
+
+	retval = ohci_pxa_of_init(pdev);
+	if (retval)
+		return retval;
+
+	inf = dev_get_platdata(&pdev->dev);
+
+	if (!inf)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		pr_err("no IORESOURCE_IRQ resource\n");
+		return irq;
+	}
+
+	usb_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(usb_clk))
+		return PTR_ERR(usb_clk);
+
+	hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x");
+	if (!hcd)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err;
+	}
+	hcd->rsrc_start = r->start;
+	hcd->rsrc_len = resource_size(r);
+
+	/* initialize "struct pxa27x_ohci" */
+	pxa_ohci = to_pxa27x_ohci(hcd);
+	pxa_ohci->clk = usb_clk;
+	pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
+
+	for (i = 0; i < 3; ++i) {
+		char name[6];
+
+		if (!(inf->flags & (ENABLE_PORT1 << i)))
+			continue;
+
+		sprintf(name, "vbus%u", i + 1);
+		pxa_ohci->vbus[i] = devm_regulator_get(&pdev->dev, name);
+	}
+
+	retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
+	if (retval < 0) {
+		pr_debug("pxa27x_start_hc failed\n");
+		goto err;
+	}
+
+	/* Select Power Management Mode */
+	pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
+
+	if (inf->power_budget)
+		hcd->power_budget = inf->power_budget;
+
+	/* The value of NDP in roothub_a is incorrect on this hardware */
+	ohci = hcd_to_ohci(hcd);
+	ohci->num_ports = 3;
+
+	retval = usb_add_hcd(hcd, irq, 0);
+	if (retval == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return retval;
+	}
+
+	pxa27x_stop_hc(pxa_ohci, &pdev->dev);
+ err:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
+ * @pdev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ */
+static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+	unsigned int i;
+
+	usb_remove_hcd(hcd);
+	pxa27x_stop_hc(pxa_ohci, &pdev->dev);
+
+	for (i = 0; i < 3; ++i)
+		pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	pxa27x_stop_hc(pxa_ohci, dev);
+	return ret;
+}
+
+static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
+	struct pxaohci_platform_data *inf = dev_get_platdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	int status;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	status = pxa27x_start_hc(pxa_ohci, dev);
+	if (status < 0)
+		return status;
+
+	/* Select Power Management Mode */
+	pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
+
+	ohci_resume(hcd, false);
+	return 0;
+}
+
+static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
+	.suspend	= ohci_hcd_pxa27x_drv_suspend,
+	.resume		= ohci_hcd_pxa27x_drv_resume,
+};
+#endif
+
+static struct platform_driver ohci_hcd_pxa27x_driver = {
+	.probe		= ohci_hcd_pxa27x_probe,
+	.remove		= ohci_hcd_pxa27x_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "pxa27x-ohci",
+		.of_match_table = of_match_ptr(pxa_ohci_dt_ids),
+#ifdef CONFIG_PM
+		.pm	= &ohci_hcd_pxa27x_pm_ops,
+#endif
+	},
+};
+
+static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
+	.extra_priv_size =      sizeof(struct pxa27x_ohci),
+};
+
+static int __init ohci_pxa27x_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
+	ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
+
+	return platform_driver_register(&ohci_hcd_pxa27x_driver);
+}
+module_init(ohci_pxa27x_init);
+
+static void __exit ohci_pxa27x_cleanup(void)
+{
+	platform_driver_unregister(&ohci_hcd_pxa27x_driver);
+}
+module_exit(ohci_pxa27x_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa27x-ohci");
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
new file mode 100644
index 0000000..4ccb85a
--- /dev/null
+++ b/drivers/usb/host/ohci-q.c
@@ -0,0 +1,1235 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
+{
+	int		last = urb_priv->length - 1;
+
+	if (last >= 0) {
+		int		i;
+		struct td	*td;
+
+		for (i = 0; i <= last; i++) {
+			td = urb_priv->td [i];
+			if (td)
+				td_free (hc, td);
+		}
+	}
+
+	list_del (&urb_priv->pending);
+	kfree (urb_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * URB goes back to driver, and isn't reissued.
+ * It's completely gone from HC data structures.
+ * PRECONDITION:  ohci lock held, irqs blocked.
+ */
+static void
+finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
+__releases(ohci->lock)
+__acquires(ohci->lock)
+{
+	struct device *dev = ohci_to_hcd(ohci)->self.controller;
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+
+	// ASSERT (urb->hcpriv != 0);
+
+ restart:
+	urb_free_priv (ohci, urb->hcpriv);
+	urb->hcpriv = NULL;
+	if (likely(status == -EINPROGRESS))
+		status = 0;
+
+	switch (usb_pipetype (urb->pipe)) {
+	case PIPE_ISOCHRONOUS:
+		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+			if (quirk_amdiso(ohci))
+				usb_amd_quirk_pll_enable();
+			if (quirk_amdprefetch(ohci))
+				sb800_prefetch(dev, 0);
+		}
+		break;
+	case PIPE_INTERRUPT:
+		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
+		break;
+	}
+
+	/* urb->complete() can reenter this HCD */
+	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
+	spin_unlock (&ohci->lock);
+	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
+	spin_lock (&ohci->lock);
+
+	/* stop periodic dma if it's not needed */
+	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
+		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	}
+
+	/*
+	 * An isochronous URB that is submitted too late won't have any TDs
+	 * (marked by the fact that the td_cnt value is larger than the
+	 * actual number of TDs).  If the next URB on this endpoint is like
+	 * that, give it back now.
+	 */
+	if (!list_empty(&ep->urb_list)) {
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		urb_priv = urb->hcpriv;
+		if (urb_priv->td_cnt > urb_priv->length) {
+			status = 0;
+			goto restart;
+		}
+	}
+}
+
+
+/*-------------------------------------------------------------------------*
+ * ED handling functions
+ *-------------------------------------------------------------------------*/
+
+/* search for the right schedule branch to use for a periodic ed.
+ * does some load balancing; returns the branch, or negative errno.
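+ * For example, with NUM_INTS = 32 an interval-8 ED occupies branches
+ * i, i+8, i+16 and i+24; the loop below picks the least-loaded start
+ * branch whose busiest frame stays at or below 900 us.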
+ */
+static int balance (struct ohci_hcd *ohci, int interval, int load)
+{
+	int	i, branch = -ENOSPC;
+
+	/* iso periods can be huge; iso tds specify frame numbers */
+	if (interval > NUM_INTS)
+		interval = NUM_INTS;
+
+	/* search for the least loaded schedule branch of that period
+	 * that has enough bandwidth left unreserved.
+	 */
+	for (i = 0; i < interval ; i++) {
+		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
+			int	j;
+
+			/* usb 1.1 says 90% of one frame */
+			for (j = i; j < NUM_INTS; j += interval) {
+				if ((ohci->load [j] + load) > 900)
+					break;
+			}
+			if (j < NUM_INTS)
+				continue;
+			branch = i;
+		}
+	}
+	return branch;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* both iso and interrupt requests have periods; this routine puts them
+ * into the schedule tree in the appropriate place.  most iso devices use
+ * 1msec periods, but that's not required.
+ */
+static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
+{
+	unsigned	i;
+
+	ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
+		ed, ed->branch, ed->load, ed->interval);
+
+	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+		struct ed	**prev = &ohci->periodic [i];
+		__hc32		*prev_p = &ohci->hcca->int_table [i];
+		struct ed	*here = *prev;
+
+		/* sorting each branch by period (slow before fast)
+		 * lets us share the faster parts of the tree.
+		 * (plus maybe: put interrupt eds before iso)
+		 */
+		while (here && ed != here) {
+			if (ed->interval > here->interval)
+				break;
+			prev = &here->ed_next;
+			prev_p = &here->hwNextED;
+			here = *prev;
+		}
+		if (ed != here) {
+			ed->ed_next = here;
+			if (here)
+				ed->hwNextED = *prev_p;
+			wmb ();
+			*prev = ed;
+			*prev_p = cpu_to_hc32(ohci, ed->dma);
+			wmb();
+		}
+		ohci->load [i] += ed->load;
+	}
+	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
+}
+
+/* link an ed into one of the HC chains */
+
+static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
+{
+	int	branch;
+
+	ed->ed_prev = NULL;
+	ed->ed_next = NULL;
+	ed->hwNextED = 0;
+	wmb ();
+
+	/* we care about rm_list when setting CLE/BLE in case the HC was at
+	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
+	 * yet.  finish_unlinks() restarts as needed, at some upcoming INTR_SF.
+	 *
+	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
+	 * periodic ones are singly linked (ed_next). that's because the
+	 * periodic schedule encodes a tree like figure 3-5 in the ohci
+	 * spec:  each qh can have several "previous" nodes, and the tree
+	 * doesn't have unused/idle descriptors.
+	 */
+	switch (ed->type) {
+	case PIPE_CONTROL:
+		if (ohci->ed_controltail == NULL) {
+			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
+			ohci_writel (ohci, ed->dma,
+					&ohci->regs->ed_controlhead);
+		} else {
+			ohci->ed_controltail->ed_next = ed;
+			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
+								ed->dma);
+		}
+		ed->ed_prev = ohci->ed_controltail;
+		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
+			wmb();
+			ohci->hc_control |= OHCI_CTRL_CLE;
+			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
+			ohci_writel (ohci, ohci->hc_control,
+					&ohci->regs->control);
+		}
+		ohci->ed_controltail = ed;
+		break;
+
+	case PIPE_BULK:
+		if (ohci->ed_bulktail == NULL) {
+			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
+			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
+		} else {
+			ohci->ed_bulktail->ed_next = ed;
+			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
+								ed->dma);
+		}
+		ed->ed_prev = ohci->ed_bulktail;
+		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
+			wmb();
+			ohci->hc_control |= OHCI_CTRL_BLE;
+			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
+			ohci_writel (ohci, ohci->hc_control,
+					&ohci->regs->control);
+		}
+		ohci->ed_bulktail = ed;
+		break;
+
+	// case PIPE_INTERRUPT:
+	// case PIPE_ISOCHRONOUS:
+	default:
+		branch = balance (ohci, ed->interval, ed->load);
+		if (branch < 0) {
+			ohci_dbg (ohci,
+				"ERR %d, interval %d msecs, load %d\n",
+				branch, ed->interval, ed->load);
+			// FIXME if there are TDs queued, fail them!
+			return branch;
+		}
+		ed->branch = branch;
+		periodic_link (ohci, ed);
+	}
+
+	/* the HC may not see the schedule updates yet, but if it does
+	 * then they'll be properly ordered.
+	 */
+
+	ed->state = ED_OPER;
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* scan the periodic table to find and unlink this ED */
+static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
+{
+	int	i;
+
+	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+		struct ed	*temp;
+		struct ed	**prev = &ohci->periodic [i];
+		__hc32		*prev_p = &ohci->hcca->int_table [i];
+
+		while (*prev && (temp = *prev) != ed) {
+			prev_p = &temp->hwNextED;
+			prev = &temp->ed_next;
+		}
+		if (*prev) {
+			*prev_p = ed->hwNextED;
+			*prev = ed->ed_next;
+		}
+		ohci->load [i] -= ed->load;
+	}
+	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
+
+	ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
+		ed, ed->branch, ed->load, ed->interval);
+}
+
+/* unlink an ed from one of the HC chains.
+ * just the link to the ed is unlinked.
+ * the link from the ed still points to another operational ed or 0
+ * so the HC can eventually finish the processing of the unlinked ed
+ * (assuming it already started that, which needn't be true).
+ *
+ * ED_UNLINK is a transient state: the HC may still see this ED, but soon
+ * it won't.  ED_SKIP means the HC will finish its current transaction,
+ * but won't start anything new.  The TD queue may still grow; device
+ * drivers don't know about this HCD-internal state.
+ *
+ * When the HC can't see the ED, something changes ED_UNLINK to one of:
+ *
+ *  - ED_OPER: when there's any request queued, the ED gets rescheduled
+ *    immediately.  HC should be working on them.
+ *
+ *  - ED_IDLE: when there's no TD queue or the HC isn't running.
+ *
+ * When finish_unlinks() runs later, after SOF interrupt, it will often
+ * complete one or more URB unlinks before making that state change.
+ */
+static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
+{
+	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
+	wmb ();
+	ed->state = ED_UNLINK;
+
+	/* To deschedule something from the control or bulk list, just
+	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
+	 * head/current registers until later, and "later" isn't very
+	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
+	 * the HC is reading the ED queues (while we modify them).
+	 *
+	 * For now, ed_schedule() is "later".  It might be good paranoia
+	 * to scrub those registers in finish_unlinks(), in case of bugs
+	 * that make the HC try to use them.
+	 */
+	switch (ed->type) {
+	case PIPE_CONTROL:
+		/* remove ED from the HC's list: */
+		if (ed->ed_prev == NULL) {
+			if (!ed->hwNextED) {
+				ohci->hc_control &= ~OHCI_CTRL_CLE;
+				ohci_writel (ohci, ohci->hc_control,
+						&ohci->regs->control);
+				// a ohci_readl() later syncs CLE with the HC
+			} else
+				ohci_writel (ohci,
+					hc32_to_cpup (ohci, &ed->hwNextED),
+					&ohci->regs->ed_controlhead);
+		} else {
+			ed->ed_prev->ed_next = ed->ed_next;
+			ed->ed_prev->hwNextED = ed->hwNextED;
+		}
+		/* remove ED from the HCD's list: */
+		if (ohci->ed_controltail == ed) {
+			ohci->ed_controltail = ed->ed_prev;
+			if (ohci->ed_controltail)
+				ohci->ed_controltail->ed_next = NULL;
+		} else if (ed->ed_next) {
+			ed->ed_next->ed_prev = ed->ed_prev;
+		}
+		break;
+
+	case PIPE_BULK:
+		/* remove ED from the HC's list: */
+		if (ed->ed_prev == NULL) {
+			if (!ed->hwNextED) {
+				ohci->hc_control &= ~OHCI_CTRL_BLE;
+				ohci_writel (ohci, ohci->hc_control,
+						&ohci->regs->control);
+				// a ohci_readl() later syncs BLE with the HC
+			} else
+				ohci_writel (ohci,
+					hc32_to_cpup (ohci, &ed->hwNextED),
+					&ohci->regs->ed_bulkhead);
+		} else {
+			ed->ed_prev->ed_next = ed->ed_next;
+			ed->ed_prev->hwNextED = ed->hwNextED;
+		}
+		/* remove ED from the HCD's list: */
+		if (ohci->ed_bulktail == ed) {
+			ohci->ed_bulktail = ed->ed_prev;
+			if (ohci->ed_bulktail)
+				ohci->ed_bulktail->ed_next = NULL;
+		} else if (ed->ed_next) {
+			ed->ed_next->ed_prev = ed->ed_prev;
+		}
+		break;
+
+	// case PIPE_INTERRUPT:
+	// case PIPE_ISOCHRONOUS:
+	default:
+		periodic_unlink (ohci, ed);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* get and maybe (re)init an endpoint. init _should_ be done only as part
+ * of enumeration, usb_set_configuration() or usb_set_interface().
+ */
+static struct ed *ed_get (
+	struct ohci_hcd		*ohci,
+	struct usb_host_endpoint *ep,
+	struct usb_device	*udev,
+	unsigned int		pipe,
+	int			interval
+) {
+	struct ed		*ed;
+	unsigned long		flags;
+
+	spin_lock_irqsave (&ohci->lock, flags);
+
+	ed = ep->hcpriv;
+	if (!ed) {
+		struct td	*td;
+		int		is_out;
+		u32		info;
+
+		ed = ed_alloc (ohci, GFP_ATOMIC);
+		if (!ed) {
+			/* out of memory */
+			goto done;
+		}
+
+		/* dummy td; end of td list for ed */
+		td = td_alloc (ohci, GFP_ATOMIC);
+		if (!td) {
+			/* out of memory */
+			ed_free (ohci, ed);
+			ed = NULL;
+			goto done;
+		}
+		ed->dummy = td;
+		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
+		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
+		ed->state = ED_IDLE;
+
+		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
+
+		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
+		 * succeeds ... otherwise we wouldn't need "pipe".
+		 */
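+		/* ED dword 0 (OHCI 4.2): function address in bits 0-6,
+		 * endpoint number in bits 7-10, direction in 11-12, speed
+		 * in 13, format in 15, max packet size in 16-26. */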
+		info = usb_pipedevice (pipe);
+		ed->type = usb_pipetype(pipe);
+
+		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
+		info |= usb_endpoint_maxp(&ep->desc) << 16;
+		if (udev->speed == USB_SPEED_LOW)
+			info |= ED_LOWSPEED;
+		/* only control transfers store pids in tds */
+		if (ed->type != PIPE_CONTROL) {
+			info |= is_out ? ED_OUT : ED_IN;
+			if (ed->type != PIPE_BULK) {
+				/* periodic transfers... */
+				if (ed->type == PIPE_ISOCHRONOUS)
+					info |= ED_ISO;
+				else if (interval > 32)	/* iso can be bigger */
+					interval = 32;
+				ed->interval = interval;
+				ed->load = usb_calc_bus_time (
+					udev->speed, !is_out,
+					ed->type == PIPE_ISOCHRONOUS,
+					usb_endpoint_maxp(&ep->desc))
+						/ 1000;
+			}
+		}
+		ed->hwINFO = cpu_to_hc32(ohci, info);
+
+		ep->hcpriv = ed;
+	}
+
+done:
+	spin_unlock_irqrestore (&ohci->lock, flags);
+	return ed;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* request unlinking of an endpoint from an operational HC.
+ * put the ep on the rm_list
+ * real work is done at the next start frame (SF) hardware interrupt
+ * caller guarantees HCD is running, so hardware access is safe,
+ * and that ed->state is ED_OPER
+ */
+static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
+{
+	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
+	ed_deschedule (ohci, ed);
+
+	/* rm_list is just singly linked, for simplicity */
+	ed->ed_next = ohci->ed_rm_list;
+	ed->ed_prev = NULL;
+	ohci->ed_rm_list = ed;
+
+	/* enable SOF interrupt */
+	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
+	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
+	// flush those writes, and get latest HCCA contents
+	(void) ohci_readl (ohci, &ohci->regs->control);
+
+	/* SF interrupt might get delayed; record the frame counter value that
+	 * indicates when the HC isn't looking at it, so concurrent unlinks
+	 * behave.  frame_no wraps every 2^16 msec, and changes right before
+	 * SF is triggered.
+	 */
+	ed->tick = ohci_frame_no(ohci) + 1;
+}
+
+/*-------------------------------------------------------------------------*
+ * TD handling functions
+ *-------------------------------------------------------------------------*/
+
+/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
+
+static void
+td_fill (struct ohci_hcd *ohci, u32 info,
+	dma_addr_t data, int len,
+	struct urb *urb, int index)
+{
+	struct td		*td, *td_pt;
+	struct urb_priv		*urb_priv = urb->hcpriv;
+	int			is_iso = info & TD_ISO;
+	int			hash;
+
+	// ASSERT (index < urb_priv->length);
+
+	/* aim for only one interrupt per urb.  mostly applies to control
+	 * and iso; other urbs rarely need more than one TD per urb.
+	 * this way, only final tds (or ones with an error) cause IRQs.
+	 * at least immediately; use DI=6 in case any control request is
+	 * tempted to die part way through.  (and to force the hc to flush
+	 * its donelist soonish, even on unlink paths.)
+	 *
+	 * NOTE: could delay interrupts even for the last TD, and get fewer
+	 * interrupts ... increasing per-urb latency by sharing interrupts.
+	 * Drivers that queue bulk urbs may request that behavior.
+	 */
+	if (index != (urb_priv->length - 1)
+			|| (urb->transfer_flags & URB_NO_INTERRUPT))
+		info |= TD_DI_SET (6);
+
+	/* use this td as the next dummy */
+	td_pt = urb_priv->td [index];
+
+	/* fill the old dummy TD */
+	td = urb_priv->td [index] = urb_priv->ed->dummy;
+	urb_priv->ed->dummy = td_pt;
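+	/* this swap keeps the ED's hwTailP valid at all times: the old
+	 * dummy becomes the live TD and the new TD becomes the dummy */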
+
+	td->ed = urb_priv->ed;
+	td->next_dl_td = NULL;
+	td->index = index;
+	td->urb = urb;
+	td->data_dma = data;
+	if (!len)
+		data = 0;
+
+	td->hwINFO = cpu_to_hc32 (ohci, info);
+	if (is_iso) {
+		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
+		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
+						(data & 0x0FFF) | 0xE000);
+	} else {
+		td->hwCBP = cpu_to_hc32 (ohci, data);
+	}
+	if (data)
+		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
+	else
+		td->hwBE = 0;
+	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);
+
+	/* append to queue */
+	list_add_tail (&td->td_list, &td->ed->td_list);
+
+	/* hash it for later reverse mapping */
+	hash = TD_HASH_FUNC (td->td_dma);
+	td->td_hash = ohci->td_hash [hash];
+	ohci->td_hash [hash] = td;
+
+	/* HC might read the TD (or cachelines) right away ... */
+	wmb ();
+	td->ed->hwTailP = td->hwNextTD;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare all TDs of a transfer, and queue them onto the ED.
+ * Caller guarantees HC is active.
+ * Usually the ED is already on the schedule, so TDs might be
+ * processed as soon as they're queued.
+ */
+static void td_submit_urb (
+	struct ohci_hcd	*ohci,
+	struct urb	*urb
+) {
+	struct urb_priv	*urb_priv = urb->hcpriv;
+	struct device *dev = ohci_to_hcd(ohci)->self.controller;
+	dma_addr_t	data;
+	int		data_len = urb->transfer_buffer_length;
+	int		cnt = 0;
+	u32		info = 0;
+	int		is_out = usb_pipeout (urb->pipe);
+	int		periodic = 0;
+	int		i, this_sg_len, n;
+	struct scatterlist	*sg;
+
+	/* OHCI handles the bulk/interrupt data toggles itself.  We just
+	 * use the device toggle bits for resetting, and rely on the fact
+	 * that resetting toggle is meaningless if the endpoint is active.
+	 */
+	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
+		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
+			is_out, 1);
+		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
+	}
+
+	list_add (&urb_priv->pending, &ohci->pending);
+
+	i = urb->num_mapped_sgs;
+	if (data_len > 0 && i > 0) {
+		sg = urb->sg;
+		data = sg_dma_address(sg);
+
+		/*
+		 * urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), data_len);
+	} else {
+		sg = NULL;
+		if (data_len)
+			data = urb->transfer_dma;
+		else
+			data = 0;
+		this_sg_len = data_len;
+	}
+
+	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
+	 * using TD_CC_GET, as well as by seeing them on the done list.
+	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
+	 */
+	switch (urb_priv->ed->type) {
+
+	/* Bulk and interrupt are identical except for where in the schedule
+	 * their EDs live.
+	 */
+	case PIPE_INTERRUPT:
+		/* ... and periodic urbs have extra accounting */
+		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
+			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
+		/* FALLTHROUGH */
+	case PIPE_BULK:
+		info = is_out
+			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
+			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
+		/* TDs _could_ transfer up to 8K each */
+		for (;;) {
+			n = min(this_sg_len, 4096);
+
+			/* maybe avoid ED halt on final TD short read */
+			if (n >= data_len || (i == 1 && n >= this_sg_len)) {
+				if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+					info |= TD_R;
+			}
+			td_fill(ohci, info, data, n, urb, cnt);
+			this_sg_len -= n;
+			data_len -= n;
+			data += n;
+			cnt++;
+
+			if (this_sg_len <= 0) {
+				if (--i <= 0 || data_len <= 0)
+					break;
+				sg = sg_next(sg);
+				data = sg_dma_address(sg);
+				this_sg_len = min_t(int, sg_dma_len(sg),
+						data_len);
+			}
+		}
+		if ((urb->transfer_flags & URB_ZERO_PACKET)
+				&& cnt < urb_priv->length) {
+			td_fill (ohci, info, 0, 0, urb, cnt);
+			cnt++;
+		}
+		/* maybe kickstart bulk list */
+		if (urb_priv->ed->type == PIPE_BULK) {
+			wmb ();
+			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
+		}
+		break;
+
+	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
+	 * any DATA phase works normally, and the STATUS ack is special.
+	 */
+	case PIPE_CONTROL:
+		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
+		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
+		if (data_len > 0) {
+			info = TD_CC | TD_R | TD_T_DATA1;
+			info |= is_out ? TD_DP_OUT : TD_DP_IN;
+			/* NOTE:  mishandles transfers >8K, some >4K */
+			td_fill (ohci, info, data, data_len, urb, cnt++);
+		}
+		info = (is_out || data_len == 0)
+			? TD_CC | TD_DP_IN | TD_T_DATA1
+			: TD_CC | TD_DP_OUT | TD_T_DATA1;
+		td_fill (ohci, info, data, 0, urb, cnt++);
+		/* maybe kickstart control list */
+		wmb ();
+		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
+		break;
+
+	/* ISO has no retransmit, so no toggle; and it uses special TDs.
+	 * Each TD could handle multiple consecutive frames (interval 1);
+	 * we could often reduce the number of TDs here.
+	 */
+	case PIPE_ISOCHRONOUS:
+		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
+				cnt++) {
+			int	frame = urb->start_frame;
+
+			// FIXME scheduling should handle frame counter
+			// roll-around ... exotic case (and OHCI has
+			// a 2^16 iso range, vs other HCs max of 2^10)
+			frame += cnt * urb->interval;
+			frame &= 0xffff;
+			td_fill (ohci, TD_CC | TD_ISO | frame,
+				data + urb->iso_frame_desc [cnt].offset,
+				urb->iso_frame_desc [cnt].length, urb, cnt);
+		}
+		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+			if (quirk_amdiso(ohci))
+				usb_amd_quirk_pll_disable();
+			if (quirk_amdprefetch(ohci))
+				sb800_prefetch(dev, 1);
+		}
+		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
+			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
+		break;
+	}
+
+	/* start periodic dma if needed */
+	if (periodic) {
+		wmb ();
+		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
+		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+	}
+
+	// ASSERT (urb_priv->length == cnt);
+}
+
+/*-------------------------------------------------------------------------*
+ * Done List handling functions
+ *-------------------------------------------------------------------------*/
+
+/* calculate transfer length/status and update the urb */
+static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
+{
+	u32	tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
+	int	cc = 0;
+	int	status = -EINPROGRESS;
+
+	list_del (&td->td_list);
+
+	/* ISO ... drivers see per-TD length/status */
+	if (tdINFO & TD_ISO) {
+		u16	tdPSW = ohci_hwPSW(ohci, td, 0);
+		int	dlen = 0;
+
+		/* NOTE:  assumes FC in tdINFO == 0, and that
+		 * only the first of 0..MAXPSW psws is used.
+		 */
+
+		cc = (tdPSW >> 12) & 0xF;
+		if (tdINFO & TD_CC)	/* hc didn't touch? */
+			return status;
+
+		if (usb_pipeout (urb->pipe))
+			dlen = urb->iso_frame_desc [td->index].length;
+		else {
+			/* short reads are always OK for ISO */
+			if (cc == TD_DATAUNDERRUN)
+				cc = TD_CC_NOERROR;
+			dlen = tdPSW & 0x3ff;
+		}
+		urb->actual_length += dlen;
+		urb->iso_frame_desc [td->index].actual_length = dlen;
+		urb->iso_frame_desc [td->index].status = cc_to_error [cc];
+
+		if (cc != TD_CC_NOERROR)
+			ohci_dbg(ohci,
+				"urb %p iso td %p (%d) len %d cc %d\n",
+				urb, td, 1 + td->index, dlen, cc);
+
+	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
+	 * except that "setup" bytes aren't counted and "short" transfers
+	 * might not be reported as errors.
+	 */
+	} else {
+		int	type = usb_pipetype (urb->pipe);
+		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);
+
+		cc = TD_CC_GET (tdINFO);
+
+		/* update packet status if needed (short is normally ok) */
+		if (cc == TD_DATAUNDERRUN
+				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
+			cc = TD_CC_NOERROR;
+		if (cc != TD_CC_NOERROR && cc < 0x0E)
+			status = cc_to_error[cc];
+
+		/* count all non-empty packets except control SETUP packet */
+		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
+			if (td->hwCBP == 0)
+				urb->actual_length += tdBE - td->data_dma + 1;
+			else
+				urb->actual_length +=
+					  hc32_to_cpup (ohci, &td->hwCBP)
+					- td->data_dma;
+		}
+
+		if (cc != TD_CC_NOERROR && cc < 0x0E)
+			ohci_dbg(ohci,
+				"urb %p td %p (%d) cc %d, len=%d/%d\n",
+				urb, td, 1 + td->index, cc,
+				urb->actual_length,
+				urb->transfer_buffer_length);
+	}
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
+{
+	struct urb		*urb = td->urb;
+	urb_priv_t		*urb_priv = urb->hcpriv;
+	struct ed		*ed = td->ed;
+	struct list_head	*tmp = td->td_list.next;
+	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
+
+	/* clear ed halt; this is the td that caused it, but keep it inactive
+	 * until its urb->complete() has a chance to clean up.
+	 */
+	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
+	wmb ();
+	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
+
+	/* Get rid of all later tds from this urb.  We don't have
+	 * to be careful: no errors and nothing was transferred.
+	 * Also patch the ed so it looks as if those tds completed normally.
+	 */
+	while (tmp != &ed->td_list) {
+		struct td	*next;
+
+		next = list_entry (tmp, struct td, td_list);
+		tmp = next->td_list.next;
+
+		if (next->urb != urb)
+			break;
+
+		/* NOTE: if multi-td control DATA segments get supported,
+		 * this urb had one of them, this td wasn't the last td
+		 * in that segment (TD_R clear), this ed halted because
+		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
+		 * then we need to leave the control STATUS packet queued
+		 * and clear ED_SKIP.
+		 */
+
+		list_del(&next->td_list);
+		urb_priv->td_cnt++;
+		ed->hwHeadP = next->hwNextTD | toggle;
+	}
+
+	/* help for troubleshooting:  report anything that
+	 * looks odd ... that doesn't include protocol stalls
+	 * (or maybe some other things)
+	 */
+	switch (cc) {
+	case TD_DATAUNDERRUN:
+		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
+			break;
+		/* fallthrough */
+	case TD_CC_STALL:
+		if (usb_pipecontrol (urb->pipe))
+			break;
+		/* fallthrough */
+	default:
+		ohci_dbg (ohci,
+			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
+			urb, urb->dev->devpath,
+			usb_pipeendpoint (urb->pipe),
+			usb_pipein (urb->pipe) ? "in" : "out",
+			hc32_to_cpu (ohci, td->hwINFO),
+			cc, cc_to_error [cc]);
+	}
+}
+
+/* Add a TD to the done list */
+static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
+{
+	struct td	*td2, *td_prev;
+	struct ed	*ed;
+
+	if (td->next_dl_td)
+		return;		/* Already on the list */
+
+	/* Add all the TDs going back until we reach one that's on the list */
+	ed = td->ed;
+	td2 = td_prev = td;
+	list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
+		if (td2->next_dl_td)
+			break;
+		td2->next_dl_td = td_prev;
+		td_prev = td2;
+	}
+
+	if (ohci->dl_end)
+		ohci->dl_end->next_dl_td = td_prev;
+	else
+		ohci->dl_start = td_prev;
+
+	/*
+	 * Make td->next_dl_td point to td itself, to mark the fact
+	 * that td is on the done list.
+	 */
+	ohci->dl_end = td->next_dl_td = td;
+
+	/* Did we just add the latest pending TD? */
+	td2 = ed->pending_td;
+	if (td2 && td2->next_dl_td)
+		ed->pending_td = NULL;
+}
+
+/* Get the entries on the hardware done queue and put them on our list */
+static void update_done_list(struct ohci_hcd *ohci)
+{
+	u32		td_dma;
+	struct td	*td = NULL;
+
+	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
+	ohci->hcca->done_head = 0;
+	wmb();
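+	/* done_head is zeroed before WDH is acked (in the IRQ handler),
+	 * so the controller's next writeback is distinguishable. */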
+
+	/* get TD from hc's singly linked list, and
+	 * add to ours.  ed->td_list changes later.
+	 */
+	while (td_dma) {
+		int		cc;
+
+		td = dma_to_td (ohci, td_dma);
+		if (!td) {
+			ohci_err (ohci, "bad entry %8x\n", td_dma);
+			break;
+		}
+
+		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
+		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));
+
+		/* Non-iso endpoints can halt on error; un-halt,
+		 * and dequeue any other TDs from this urb.
+		 * No other TD could have caused the halt.
+		 */
+		if (cc != TD_CC_NOERROR
+				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
+			ed_halted(ohci, td, cc);
+
+		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
+		add_to_done_list(ohci, td);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
+static void finish_unlinks(struct ohci_hcd *ohci)
+{
+	unsigned	tick = ohci_frame_no(ohci);
+	struct ed	*ed, **last;
+
+rescan_all:
+	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
+		struct list_head	*entry, *tmp;
+		int			completed, modified;
+		__hc32			*prev;
+
+		/* only take off EDs that the HC isn't using, accounting for
+		 * frame counter wraps and EDs with partially retired TDs
+		 */
+		if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
+				tick_before(tick, ed->tick)) {
+skip_ed:
+			last = &ed->ed_next;
+			continue;
+		}
+		if (!list_empty(&ed->td_list)) {
+			struct td	*td;
+			u32		head;
+
+			td = list_first_entry(&ed->td_list, struct td, td_list);
+
+			/* INTR_WDH may need to clean up first */
+			head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
+			if (td->td_dma != head &&
+					ohci->rh_state == OHCI_RH_RUNNING)
+				goto skip_ed;
+
+			/* Don't mess up anything already on the done list */
+			if (td->next_dl_td)
+				goto skip_ed;
+		}
+
+		/* ED's now officially unlinked, hc doesn't see */
+		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+		ed->hwNextED = 0;
+		wmb();
+		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+
+		/* reentrancy:  if we drop the schedule lock, someone might
+		 * have modified this list.  normally it's just prepending
+		 * entries (which we'd ignore), but paranoia won't hurt.
+		 */
+		*last = ed->ed_next;
+		ed->ed_next = NULL;
+		modified = 0;
+
+		/* unlink urbs as requested, but rescan the list after
+		 * we call a completion since it might have unlinked
+		 * another (earlier) urb
+		 *
+		 * When we get here, the HC doesn't see this ed.  But it
+		 * must not be rescheduled until all completed URBs have
+		 * been given back to the driver.
+		 */
+rescan_this:
+		completed = 0;
+		prev = &ed->hwHeadP;
+		list_for_each_safe (entry, tmp, &ed->td_list) {
+			struct td	*td;
+			struct urb	*urb;
+			urb_priv_t	*urb_priv;
+			__hc32		savebits;
+			u32		tdINFO;
+
+			td = list_entry (entry, struct td, td_list);
+			urb = td->urb;
+			urb_priv = td->urb->hcpriv;
+
+			if (!urb->unlinked) {
+				prev = &td->hwNextTD;
+				continue;
+			}
+
+			/* patch pointer hc uses */
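+			/* keep the flag bits carried in the low bits of the
+			 * pointer (toggle carry and halt, in hwHeadP) */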
+			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
+			*prev = td->hwNextTD | savebits;
+
+			/* If this was unlinked, the TD may not have been
+			 * retired ... so manually save the data toggle.
+			 * The controller ignores the value we save for
+			 * control and ISO endpoints.
+			 */
+			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
+			if ((tdINFO & TD_T) == TD_T_DATA0)
+				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
+			else if ((tdINFO & TD_T) == TD_T_DATA1)
+				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
+
+			/* HC may have partly processed this TD */
+			td_done (ohci, urb, td);
+			urb_priv->td_cnt++;
+
+			/* if URB is done, clean up */
+			if (urb_priv->td_cnt >= urb_priv->length) {
+				modified = completed = 1;
+				finish_urb(ohci, urb, 0);
+			}
+		}
+		if (completed && !list_empty (&ed->td_list))
+			goto rescan_this;
+
+		/*
+		 * If no TDs are queued, ED is now idle.
+		 * Otherwise, if the HC is running, reschedule.
+		 * If the HC isn't running, add ED back to the
+		 * start of the list for later processing.
+		 */
+		if (list_empty(&ed->td_list)) {
+			ed->state = ED_IDLE;
+			list_del(&ed->in_use_list);
+		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
+			ed_schedule(ohci, ed);
+		} else {
+			ed->ed_next = ohci->ed_rm_list;
+			ohci->ed_rm_list = ed;
+			/* Don't loop on the same ED */
+			if (last == &ohci->ed_rm_list)
+				last = &ed->ed_next;
+		}
+
+		if (modified)
+			goto rescan_all;
+	}
+
+	/* maybe reenable control and bulk lists */
+	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
+		u32	command = 0, control = 0;
+
+		if (ohci->ed_controltail) {
+			command |= OHCI_CLF;
+			if (quirk_zfmicro(ohci))
+				mdelay(1);
+			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
+				control |= OHCI_CTRL_CLE;
+				ohci_writel (ohci, 0,
+					&ohci->regs->ed_controlcurrent);
+			}
+		}
+		if (ohci->ed_bulktail) {
+			command |= OHCI_BLF;
+			if (quirk_zfmicro(ohci))
+				mdelay(1);
+			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
+				control |= OHCI_CTRL_BLE;
+				ohci_writel (ohci, 0,
+					&ohci->regs->ed_bulkcurrent);
+			}
+		}
+
+		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
+		if (control) {
+			ohci->hc_control |= control;
+			if (quirk_zfmicro(ohci))
+				mdelay(1);
+			ohci_writel (ohci, ohci->hc_control,
+					&ohci->regs->control);
+		}
+		if (command) {
+			if (quirk_zfmicro(ohci))
+				mdelay(1);
+			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
+		}
+	}
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Take back a TD from the host controller */
+static void takeback_td(struct ohci_hcd *ohci, struct td *td)
+{
+	struct urb	*urb = td->urb;
+	urb_priv_t	*urb_priv = urb->hcpriv;
+	struct ed	*ed = td->ed;
+	int		status;
+
+	/* update URB's length and status from TD */
+	status = td_done(ohci, urb, td);
+	urb_priv->td_cnt++;
+
+	/* If all this urb's TDs are done, call complete() */
+	if (urb_priv->td_cnt >= urb_priv->length)
+		finish_urb(ohci, urb, status);
+
+	/* clean schedule:  unlink EDs that are no longer busy */
+	if (list_empty(&ed->td_list)) {
+		if (ed->state == ED_OPER)
+			start_ed_unlink(ohci, ed);
+
+	/* ... reenabling halted EDs only after fault cleanup */
+	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
+			== cpu_to_hc32(ohci, ED_SKIP)) {
+		td = list_entry(ed->td_list.next, struct td, td_list);
+		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
+			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
+			/* ... hc may need waking-up */
+			switch (ed->type) {
+			case PIPE_CONTROL:
+				ohci_writel(ohci, OHCI_CLF,
+						&ohci->regs->cmdstatus);
+				break;
+			case PIPE_BULK:
+				ohci_writel(ohci, OHCI_BLF,
+						&ohci->regs->cmdstatus);
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * Process normal completions (error or success) and clean the schedules.
+ *
+ * This is the main path for handing urbs back to drivers.  The only other
+ * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
+ * instead of scanning the (re-reversed) donelist as this does.
+ */
+static void process_done_list(struct ohci_hcd *ohci)
+{
+	struct td	*td;
+
+	while (ohci->dl_start) {
+		td = ohci->dl_start;
+		if (td == ohci->dl_end)
+			ohci->dl_start = ohci->dl_end = NULL;
+		else
+			ohci->dl_start = td->next_dl_td;
+
+		takeback_td(ohci, td);
+	}
+}
+
+/*
+ * TD takeback and URB giveback must be single-threaded.
+ * This routine takes care of it all.
+ */
+static void ohci_work(struct ohci_hcd *ohci)
+{
+	if (ohci->working) {
+		ohci->restart_work = 1;
+		return;
+	}
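+	/* Reentrant calls (e.g. from a completion handler) take the branch
+	 * above and set restart_work, which makes this thread loop. */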
+	ohci->working = 1;
+
+ restart:
+	process_done_list(ohci);
+	if (ohci->ed_rm_list)
+		finish_unlinks(ohci);
+
+	if (ohci->restart_work) {
+		ohci->restart_work = 0;
+		goto restart;
+	}
+	ohci->working = 0;
+}
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
new file mode 100644
index 0000000..4511e27
--- /dev/null
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * USB Bus Glue for Samsung S3C2410
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * Modified for S3C2410 from ohci-sa1111.c, ohci-omap.c and ohci-lh7a40.c
+ *	by Ben Dooks, <ben@simtec.co.uk>
+ *	Copyright (C) 2004 Simtec Electronics
+ *
+ * Thanks to basprog@mail.ru for updates to newer kernels
+ *
+ * This file is licenced under the GPL.
+*/
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/usb-ohci-s3c2410.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+
+#define valid_port(idx) ((idx) == 1 || (idx) == 2)
+
+#define DRIVER_DESC "OHCI S3C2410 driver"
+
+static const char hcd_name[] = "ohci-s3c2410";
+
+/* clock devices associated with the hcd */
+static struct clk *clk;
+static struct clk *usb_clk;
+
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
+
+/* forward definitions */
+
+static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
+
+/* conversion functions */
+
+static struct s3c2410_hcd_info *to_s3c2410_info(struct usb_hcd *hcd)
+{
+	return dev_get_platdata(hcd->self.controller);
+}
+
+static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
+{
+	struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
+
+	dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
+
+	clk_prepare_enable(usb_clk);
+	mdelay(2);			/* let the bus clock stabilise */
+
+	clk_prepare_enable(clk);
+
+	if (info != NULL) {
+		info->hcd	= hcd;
+		info->report_oc = s3c2410_hcd_oc;
+
+		if (info->enable_oc != NULL)
+			(info->enable_oc)(info, 1);
+	}
+}
+
+static void s3c2410_stop_hc(struct platform_device *dev)
+{
+	struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
+
+	dev_dbg(&dev->dev, "s3c2410_stop_hc:\n");
+
+	if (info != NULL) {
+		info->report_oc = NULL;
+		info->hcd	= NULL;
+
+		if (info->enable_oc != NULL)
+			(info->enable_oc)(info, 0);
+	}
+
+	clk_disable_unprepare(clk);
+	clk_disable_unprepare(usb_clk);
+}
+
+/* ohci_s3c2410_hub_status_data
+ *
+ * update the status data from the hub with anything that
+ * has been detected by our system
+*/
+
+static int
+ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
+	struct s3c2410_hcd_port *port;
+	int orig;
+	int portno;
+
+	orig = ohci_hub_status_data(hcd, buf);
+
+	if (info == NULL)
+		return orig;
+
+	port = &info->port[0];
+
+	/* mark any changed port as changed */
+
+	for (portno = 0; portno < 2; port++, portno++) {
+		if (port->oc_changed == 1 &&
+		    port->flags & S3C_HCDFLG_USED) {
+			dev_dbg(hcd->self.controller,
+				"oc change on port %d\n", portno);
+
+			if (orig < 1)
+				orig = 1;
+
+			buf[0] |= 1<<(portno+1);
+		}
+	}
+
+	return orig;
+}
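+
+/*
+ * Note on the scheme above: per the USB hub class, the status-change
+ * report is a bitmap with bit 0 for the hub itself and bit N for port
+ * N, and the return value is the length of that bitmap in bytes (0 if
+ * nothing changed).  Hence "buf[0] |= 1 << (portno + 1)" flags port
+ * portno+1, and orig is bumped to 1 so the extra bit gets reported
+ * even when the generic code saw no changes of its own.
+ */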
+
+/* s3c2410_usb_set_power
+ *
+ * configure the power on a port, by calling the platform device
+ * routine registered with the platform device
+*/
+
+static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info,
+				  int port, int to)
+{
+	if (info == NULL)
+		return;
+
+	if (info->power_control != NULL) {
+		info->port[port-1].power = to;
+		(info->power_control)(port-1, to);
+	}
+}
+
+/* ohci_s3c2410_hub_control
+ *
+ * look at control requests to the hub, and see if we need
+ * to take any action or over-ride the results from the
+ * request.
+*/
+
+static int ohci_s3c2410_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength)
+{
+	struct s3c2410_hcd_info *info = to_s3c2410_info(hcd);
+	struct usb_hub_descriptor *desc;
+	int ret = -EINVAL;
+	u32 *data = (u32 *)buf;
+
+	dev_dbg(hcd->self.controller,
+		"s3c2410_hub_control(%p,0x%04x,0x%04x,0x%04x,%p,%04x)\n",
+		hcd, typeReq, wValue, wIndex, buf, wLength);
+
+	/* if we are only a humble host without any special capabilities,
+	 * process the request straight away and exit */
+
+	if (info == NULL) {
+		ret = ohci_hub_control(hcd, typeReq, wValue,
+				       wIndex, buf, wLength);
+		goto out;
+	}
+
+	/* check the request to see if it needs handling */
+
+	switch (typeReq) {
+	case SetPortFeature:
+		if (wValue == USB_PORT_FEAT_POWER) {
+			dev_dbg(hcd->self.controller, "SetPortFeat: POWER\n");
+			s3c2410_usb_set_power(info, wIndex, 1);
+			goto out;
+		}
+		break;
+
+	case ClearPortFeature:
+		switch (wValue) {
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: C_OVER_CURRENT\n");
+
+			if (valid_port(wIndex)) {
+				info->port[wIndex-1].oc_changed = 0;
+				info->port[wIndex-1].oc_status = 0;
+			}
+
+			goto out;
+
+		case USB_PORT_FEAT_OVER_CURRENT:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: OVER_CURRENT\n");
+
+			if (valid_port(wIndex))
+				info->port[wIndex-1].oc_status = 0;
+
+			goto out;
+
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->self.controller,
+				"ClearPortFeature: POWER\n");
+
+			if (valid_port(wIndex)) {
+				s3c2410_usb_set_power(info, wIndex, 0);
+				return 0;
+			}
+		}
+		break;
+	}
+
+	ret = ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+	if (ret)
+		goto out;
+
+	switch (typeReq) {
+	case GetHubDescriptor:
+
+		/* update the hub's descriptor */
+
+		desc = (struct usb_hub_descriptor *)buf;
+
+		if (info->power_control == NULL)
+			return ret;
+
+		dev_dbg(hcd->self.controller, "wHubCharacteristics 0x%04x\n",
+			desc->wHubCharacteristics);
+
+		/* remove the old configurations for power-switching, and
+		 * over-current protection, and insert our new configuration
+		 */
+
+		desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_LPSM);
+		desc->wHubCharacteristics |= cpu_to_le16(
+			HUB_CHAR_INDV_PORT_LPSM);
+
+		if (info->enable_oc) {
+			desc->wHubCharacteristics &= ~cpu_to_le16(
+				HUB_CHAR_OCPM);
+			desc->wHubCharacteristics |=  cpu_to_le16(
+				HUB_CHAR_INDV_PORT_OCPM);
+		}
+
+		dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n",
+			desc->wHubCharacteristics);
+
+		return ret;
+
+	case GetPortStatus:
+		/* check port status */
+
+		dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex);
+
+		if (valid_port(wIndex)) {
+			if (info->port[wIndex-1].oc_changed)
+				*data |= cpu_to_le32(RH_PS_OCIC);
+
+			if (info->port[wIndex-1].oc_status)
+				*data |= cpu_to_le32(RH_PS_POCI);
+		}
+	}
+
+ out:
+	return ret;
+}
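+
+/*
+ * The flow above is the usual pattern for glue-level hub_control
+ * overrides: requests the board logic owns (port power, over-current
+ * acknowledgements) are handled before delegating to ohci_hub_control(),
+ * and the results of GetHubDescriptor/GetPortStatus are patched up
+ * afterwards so the generic stack sees per-port power switching and
+ * over-current reporting backed by the platform callbacks.
+ */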
+
+/* s3c2410_hcd_oc
+ *
+ * handle an over-current report
+*/
+
+static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
+{
+	struct s3c2410_hcd_port *port;
+	struct usb_hcd *hcd;
+	unsigned long flags;
+	int portno;
+
+	if (info == NULL)
+		return;
+
+	port = &info->port[0];
+	hcd = info->hcd;
+
+	local_irq_save(flags);
+
+	for (portno = 0; portno < 2; port++, portno++) {
+		if (port_oc & (1<<portno) &&
+		    port->flags & S3C_HCDFLG_USED) {
+			port->oc_status = 1;
+			port->oc_changed = 1;
+
+			/* ok, once over-current is detected,
+			   the port needs to be powered down */
+			s3c2410_usb_set_power(info, portno+1, 0);
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/*
+ * ohci_hcd_s3c2410_remove - shutdown processing for HCD
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of ohci_hcd_s3c2410_probe(), first invoking
+ * the HCD's stop() method.  It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+*/
+
+static int
+ohci_hcd_s3c2410_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+	usb_remove_hcd(hcd);
+	s3c2410_stop_hc(dev);
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+/**
+ * ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = NULL;
+	struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
+	int retval;
+
+	s3c2410_usb_set_power(info, 1, 1);
+	s3c2410_usb_set_power(info, 2, 1);
+
+	hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx");
+	if (hcd == NULL)
+		return -ENOMEM;
+
+	hcd->rsrc_start = dev->resource[0].start;
+	hcd->rsrc_len	= resource_size(&dev->resource[0]);
+
+	hcd->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err_put;
+	}
+
+	clk = devm_clk_get(&dev->dev, "usb-host");
+	if (IS_ERR(clk)) {
+		dev_err(&dev->dev, "cannot get usb-host clock\n");
+		retval = PTR_ERR(clk);
+		goto err_put;
+	}
+
+	usb_clk = devm_clk_get(&dev->dev, "usb-bus-host");
+	if (IS_ERR(usb_clk)) {
+		dev_err(&dev->dev, "cannot get usb-bus-host clock\n");
+		retval = PTR_ERR(usb_clk);
+		goto err_put;
+	}
+
+	s3c2410_start_hc(dev, hcd);
+
+	retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
+	if (retval != 0)
+		goto err_ioremap;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+ err_ioremap:
+	s3c2410_stop_hc(dev);
+
+ err_put:
+	usb_put_hcd(hcd);
+	return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+	int rc = 0;
+
+	rc = ohci_suspend(hcd, do_wakeup);
+	if (rc)
+		return rc;
+
+	s3c2410_stop_hc(pdev);
+
+	return rc;
+}
+
+static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+
+	s3c2410_start_hc(pdev, hcd);
+
+	ohci_resume(hcd, false);
+
+	return 0;
+}
+#else
+#define ohci_hcd_s3c2410_drv_suspend	NULL
+#define ohci_hcd_s3c2410_drv_resume	NULL
+#endif
+
+static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
+	.suspend	= ohci_hcd_s3c2410_drv_suspend,
+	.resume		= ohci_hcd_s3c2410_drv_resume,
+};
+
+static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = {
+	{ .compatible = "samsung,s3c2410-ohci" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
+
+static struct platform_driver ohci_hcd_s3c2410_driver = {
+	.probe		= ohci_hcd_s3c2410_probe,
+	.remove		= ohci_hcd_s3c2410_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "s3c2410-ohci",
+		.pm	= &ohci_hcd_s3c2410_pm_ops,
+		.of_match_table	= ohci_hcd_s3c2410_dt_ids,
+	},
+};
+
+static int __init ohci_s3c2410_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+	ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
+
+	/*
+	 * The Samsung HW has some unusual quirks, which require
+	 * Samsung-specific workarounds. We override certain hc_driver
+	 * functions here to achieve that. We explicitly do not enhance
+	 * ohci_driver_overrides to allow this more easily, since this
+	 * is an unusual case, and we don't want to encourage others to
+	 * override these functions by making it too easy.
+	 */
+
+	ohci_s3c2410_hc_driver.hub_status_data	= ohci_s3c2410_hub_status_data;
+	ohci_s3c2410_hc_driver.hub_control	= ohci_s3c2410_hub_control;
+
+	return platform_driver_register(&ohci_hcd_s3c2410_driver);
+}
+module_init(ohci_s3c2410_init);
+
+static void __exit ohci_s3c2410_cleanup(void)
+{
+	platform_driver_unregister(&ohci_hcd_s3c2410_driver);
+}
+module_exit(ohci_s3c2410_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c2410-ohci");
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
new file mode 100644
index 0000000..ebec9a7
--- /dev/null
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * SA1111 Bus Glue
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <asm/mach-types.h>
+#include <asm/hardware/sa1111.h>
+
+#ifndef CONFIG_SA1111
+#error "This file is SA-1111 bus glue.  CONFIG_SA1111 must be defined."
+#endif
+
+#define USB_STATUS	0x0118
+#define USB_RESET	0x011c
+#define USB_IRQTEST	0x0120
+
+#define USB_RESET_FORCEIFRESET	(1 << 0)
+#define USB_RESET_FORCEHCRESET	(1 << 1)
+#define USB_RESET_CLKGENRESET	(1 << 2)
+#define USB_RESET_SIMSCALEDOWN	(1 << 3)
+#define USB_RESET_USBINTTEST	(1 << 4)
+#define USB_RESET_SLEEPSTBYEN	(1 << 5)
+#define USB_RESET_PWRSENSELOW	(1 << 6)
+#define USB_RESET_PWRCTRLLOW	(1 << 7)
+
+#define USB_STATUS_IRQHCIRMTWKUP  (1 <<  7)
+#define USB_STATUS_IRQHCIBUFFACC  (1 <<  8)
+#define USB_STATUS_NIRQHCIM       (1 <<  9)
+#define USB_STATUS_NHCIMFCLR      (1 << 10)
+#define USB_STATUS_USBPWRSENSE    (1 << 11)
+
+#if 0
+static void dump_hci_status(struct usb_hcd *hcd, const char *label)
+{
+	unsigned long status = readl_relaxed(hcd->regs + USB_STATUS);
+
+	printk(KERN_DEBUG "%s USB_STATUS = { %s%s%s%s%s}\n", label,
+	     ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
+	     ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
+	     ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
+	     ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
+	     ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
+}
+#endif
+
+static int ohci_sa1111_reset(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci_hcd_init(ohci);
+	return ohci_init(ohci);
+}
+
+static int ohci_sa1111_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	int ret;
+
+	ret = ohci_run(ohci);
+	if (ret < 0) {
+		ohci_err(ohci, "can't start\n");
+		ohci_stop(hcd);
+	}
+	return ret;
+}
+
+static const struct hc_driver ohci_sa1111_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"SA-1111 OHCI",
+	.hcd_priv_size =	sizeof(struct ohci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		ohci_sa1111_reset,
+	.start =		ohci_sa1111_start,
+	.stop =			ohci_stop,
+	.shutdown =		ohci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	ohci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+static int sa1111_start_hc(struct sa1111_dev *dev)
+{
+	unsigned int usb_rst = 0;
+	int ret;
+
+	dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
+
+	if (machine_is_xp860() ||
+	    machine_is_assabet() ||
+	    machine_is_pfs168() ||
+	    machine_is_badge4())
+		usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
+
+	/*
+	 * Configure the power sense and control lines.  Place the USB
+	 * host controller in reset.
+	 */
+	writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+		      dev->mapbase + USB_RESET);
+
+	/*
+	 * Now, carefully enable the USB clock, and take
+	 * the USB host controller out of reset.
+	 */
+	ret = sa1111_enable_device(dev);
+	if (ret == 0) {
+		udelay(11);
+		writel_relaxed(usb_rst, dev->mapbase + USB_RESET);
+	}
+
+	return ret;
+}
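+
+/*
+ * Note on the sequencing above: the interface and HC reset bits are
+ * asserted while the SA-1111 USB clock is still off, the clock is then
+ * enabled via sa1111_enable_device(), and only after a short delay
+ * (presumably to let the clock run for a few cycles) are the reset
+ * bits released by writing back just the board-specific usb_rst bits.
+ */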
+
+static void sa1111_stop_hc(struct sa1111_dev *dev)
+{
+	unsigned int usb_rst;
+
+	dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
+
+	/*
+	 * Put the USB host controller into reset.
+	 */
+	usb_rst = readl_relaxed(dev->mapbase + USB_RESET);
+	writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+		      dev->mapbase + USB_RESET);
+
+	/*
+	 * Stop the USB clock.
+	 */
+	sa1111_disable_device(dev);
+}
+
+/**
+ * ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it.
+ */
+static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
+{
+	struct usb_hcd *hcd;
+	int ret, irq;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * We don't call dma_set_mask_and_coherent() here because the
+	 * DMA mask has already been appropriately set up by the core
+	 * SA-1111 bus code (which includes bug workarounds).
+	 */
+
+	hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = dev->res.start;
+	hcd->rsrc_len = resource_size(&dev->res);
+
+	irq = sa1111_get_irq(dev, 1);
+	if (irq <= 0) {
+		ret = irq ? : -ENXIO;
+		goto err1;
+	}
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		dev_dbg(&dev->dev, "request_mem_region failed\n");
+		ret = -EBUSY;
+		goto err1;
+	}
+
+	hcd->regs = dev->mapbase;
+
+	ret = sa1111_start_hc(dev);
+	if (ret)
+		goto err2;
+
+	ret = usb_add_hcd(hcd, irq, 0);
+	if (ret == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return ret;
+	}
+
+	sa1111_stop_hc(dev);
+ err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ err1:
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+/**
+ * ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
+ * @dev: USB Host Controller being removed
+ *
+ * Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
+ * the HCD's stop() method.
+ */
+static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
+{
+	struct usb_hcd *hcd = sa1111_get_drvdata(dev);
+
+	usb_remove_hcd(hcd);
+	sa1111_stop_hc(dev);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+static void ohci_hcd_sa1111_shutdown(struct device *_dev)
+{
+	struct sa1111_dev *dev = to_sa1111_device(_dev);
+	struct usb_hcd *hcd = sa1111_get_drvdata(dev);
+
+	if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+		hcd->driver->shutdown(hcd);
+		sa1111_stop_hc(dev);
+	}
+}
+
+static struct sa1111_driver ohci_hcd_sa1111_driver = {
+	.drv = {
+		.name	= "sa1111-ohci",
+		.owner	= THIS_MODULE,
+		.shutdown = ohci_hcd_sa1111_shutdown,
+	},
+	.devid		= SA1111_DEVID_USB,
+	.probe		= ohci_hcd_sa1111_probe,
+	.remove		= ohci_hcd_sa1111_remove,
+};
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
new file mode 100644
index 0000000..c9233cd
--- /dev/null
+++ b/drivers/usb/host/ohci-sm501.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2005 David Brownell
+ * (C) Copyright 2002 Hewlett-Packard Company
+ * (C) Copyright 2008 Magnus Damm
+ *
+ * SM501 Bus Glue - based on ohci-omap.c
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/sm501.h>
+#include <linux/sm501-regs.h>
+
+static int ohci_sm501_init(struct usb_hcd *hcd)
+{
+	return ohci_init(hcd_to_ohci(hcd));
+}
+
+static int ohci_sm501_start(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	int ret;
+
+	ret = ohci_run(hcd_to_ohci(hcd));
+	if (ret < 0) {
+		dev_err(dev, "can't start %s", hcd->self.bus_name);
+		ohci_stop(hcd);
+	}
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ohci_sm501_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"SM501 OHCI",
+	.hcd_priv_size =	sizeof(struct ohci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		ohci_sm501_init,
+	.start =		ohci_sm501_start,
+	.stop =			ohci_stop,
+	.shutdown =		ohci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	ohci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
+{
+	const struct hc_driver *driver = &ohci_sm501_hc_driver;
+	struct device *dev = &pdev->dev;
+	struct resource	*res, *mem;
+	int retval, irq;
+	struct usb_hcd *hcd = NULL;
+
+	irq = retval = platform_get_irq(pdev, 0);
+	if (retval < 0)
+		goto err0;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (mem == NULL) {
+		dev_err(dev, "no resource definition for memory\n");
+		retval = -ENOENT;
+		goto err0;
+	}
+
+	if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
+		dev_err(dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto err0;
+	}
+
+	/* The sm501 chip is equipped with local memory that may be used
+	 * by on-chip devices such as the video controller and the usb host.
+	 * This driver uses dma_declare_coherent_memory() to make sure
+	 * usb allocations with dma_alloc_coherent() allocate from
+	 * this local memory. The dma_handle returned by dma_alloc_coherent()
+	 * will be an offset starting from 0 for the first local memory byte.
+	 *
+	 * So as long as data is allocated using dma_alloc_coherent() all is
+	 * fine. This is however not always the case - buffers may be allocated
+	 * using kmalloc() - so the usb core needs to be told that it must copy
+	 * data into our local memory if the buffers happen to be placed in
+	 * regular memory. The HCD_LOCAL_MEM flag does just that.
+	 */
+
+	retval = dma_declare_coherent_memory(dev, mem->start,
+					 mem->start - mem->parent->start,
+					 resource_size(mem),
+					 DMA_MEMORY_EXCLUSIVE);
+	if (retval) {
+		dev_err(dev, "cannot declare coherent memory\n");
+		goto err1;
+	}
+
+	/* allocate, reserve and remap resources for registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "no resource definition for registers\n");
+		retval = -ENOENT;
+		goto err2;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, pdev->name)) {
+		dev_err(dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto err3;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (hcd->regs == NULL) {
+		dev_err(dev, "cannot remap registers\n");
+		retval = -ENXIO;
+		goto err4;
+	}
+
+	ohci_hcd_init(hcd_to_ohci(hcd));
+
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (retval)
+		goto err5;
+	device_wakeup_enable(hcd->self.controller);
+
+	/* enable power and unmask interrupts */
+
+	sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 1);
+	sm501_modify_reg(dev->parent, SM501_IRQ_MASK, 1 << 6, 0);
+
+	return 0;
+err5:
+	iounmap(hcd->regs);
+err4:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err3:
+	usb_put_hcd(hcd);
+err2:
+	dma_release_declared_memory(dev);
+err1:
+	release_mem_region(mem->start, resource_size(mem));
+err0:
+	return retval;
+}
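+
+/*
+ * Illustrative sketch (hypothetical, not part of this driver): with the
+ * dma_declare_coherent_memory() call made in the probe above, a plain
+ * dma_alloc_coherent() on this device is satisfied from the SM501's
+ * local memory, and the returned handle is an offset from the start of
+ * that memory rather than a system-RAM bus address.
+ */
+#if 0
+static void *sm501_local_alloc(struct device *dev, size_t len,
+			       dma_addr_t *handle)
+{
+	/* allocates from the declared local memory, not system RAM */
+	return dma_alloc_coherent(dev, len, handle, GFP_KERNEL);
+}
+#endif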
+
+static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct resource	*mem;
+
+	usb_remove_hcd(hcd);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+	dma_release_declared_memory(&pdev->dev);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (mem)
+		release_mem_region(mem->start, resource_size(mem));
+
+	/* mask interrupts and disable power */
+
+	sm501_modify_reg(pdev->dev.parent, SM501_IRQ_MASK, 0, 1 << 6);
+	sm501_unit_power(pdev->dev.parent, SM501_GATE_USB_HOST, 0);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_PM
+static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_hcd  *hcd = platform_get_drvdata(pdev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
+	return ret;
+}
+
+static int ohci_sm501_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct usb_hcd	*hcd = platform_get_drvdata(pdev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 1);
+	ohci_resume(hcd, false);
+	return 0;
+}
+#else
+#define ohci_sm501_suspend NULL
+#define ohci_sm501_resume NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Driver definition to register with the SM501 bus
+ */
+static struct platform_driver ohci_hcd_sm501_driver = {
+	.probe		= ohci_hcd_sm501_drv_probe,
+	.remove		= ohci_hcd_sm501_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.suspend	= ohci_sm501_suspend,
+	.resume		= ohci_sm501_resume,
+	.driver		= {
+		.name	= "sm501-usb",
+	},
+};
+MODULE_ALIAS("platform:sm501-usb");
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
new file mode 100644
index 0000000..69fa046
--- /dev/null
+++ b/drivers/usb/host/ohci-spear.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* OHCI HCD (Host Controller Driver) for USB.
+*
+* Copyright (C) 2010 ST Microelectronics.
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ohci-*.c drivers
+*/
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define DRIVER_DESC "OHCI SPEAr driver"
+
+static const char hcd_name[] = "SPEAr-ohci";
+struct spear_ohci {
+	struct clk *clk;
+};
+
+#define to_spear_ohci(hcd)     (struct spear_ohci *)(hcd_to_ohci(hcd)->priv)
+
+static struct hc_driver __read_mostly ohci_spear_hc_driver;
+
+static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
+{
+	const struct hc_driver *driver = &ohci_spear_hc_driver;
+	struct ohci_hcd *ohci;
+	struct usb_hcd *hcd = NULL;
+	struct clk *usbh_clk;
+	struct spear_ohci *sohci_p;
+	struct resource *res;
+	int retval, irq;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto fail;
+	}
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval)
+		goto fail;
+
+	usbh_clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(usbh_clk)) {
+		dev_err(&pdev->dev, "Error getting interface clock\n");
+		retval = PTR_ERR(usbh_clk);
+		goto fail;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		retval = PTR_ERR(hcd->regs);
+		goto err_put_hcd;
+	}
+
+	hcd->rsrc_start = pdev->resource[0].start;
+	hcd->rsrc_len = resource_size(res);
+
+	sohci_p = to_spear_ohci(hcd);
+	sohci_p->clk = usbh_clk;
+
+	clk_prepare_enable(sohci_p->clk);
+
+	ohci = hcd_to_ohci(hcd);
+
+	retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
+	if (retval == 0) {
+		device_wakeup_enable(hcd->self.controller);
+		return retval;
+	}
+
+	clk_disable_unprepare(sohci_p->clk);
+err_put_hcd:
+	usb_put_hcd(hcd);
+fail:
+	dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+	return retval;
+}
+
+static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+
+	usb_remove_hcd(hcd);
+	if (sohci_p->clk)
+		clk_disable_unprepare(sohci_p->clk);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+#if defined(CONFIG_PM)
+static int spear_ohci_hcd_drv_suspend(struct platform_device *pdev,
+		pm_message_t message)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+	bool do_wakeup = device_may_wakeup(&pdev->dev);
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	clk_disable_unprepare(sohci_p->clk);
+
+	return ret;
+}
+
+static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	struct spear_ohci *sohci_p = to_spear_ohci(hcd);
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	clk_prepare_enable(sohci_p->clk);
+	ohci_resume(hcd, false);
+	return 0;
+}
+#endif
+
+static const struct of_device_id spear_ohci_id_table[] = {
+	{ .compatible = "st,spear600-ohci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, spear_ohci_id_table);
+
+/* Driver definition to register with the platform bus */
+static struct platform_driver spear_ohci_hcd_driver = {
+	.probe =	spear_ohci_hcd_drv_probe,
+	.remove =	spear_ohci_hcd_drv_remove,
+#ifdef CONFIG_PM
+	.suspend =	spear_ohci_hcd_drv_suspend,
+	.resume =	spear_ohci_hcd_drv_resume,
+#endif
+	.driver = {
+		.name = "spear-ohci",
+		.of_match_table = spear_ohci_id_table,
+	},
+};
+
+static const struct ohci_driver_overrides spear_overrides __initconst = {
+	.extra_priv_size = sizeof(struct spear_ohci),
+};
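+
+/*
+ * Note: extra_priv_size makes ohci_init_driver() size hcd_priv so that
+ * struct spear_ohci sits in the priv[] area right after struct ohci_hcd,
+ * which is exactly what the to_spear_ohci() macro above digs out.
+ */
+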
+static int __init ohci_spear_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
+	return platform_driver_register(&spear_ohci_hcd_driver);
+}
+module_init(ohci_spear_init);
+
+static void __exit ohci_spear_cleanup(void)
+{
+	platform_driver_unregister(&spear_ohci_hcd_driver);
+}
+module_exit(ohci_spear_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Deepak Sikri");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spear-ohci");
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
new file mode 100644
index 0000000..992807c
--- /dev/null
+++ b/drivers/usb/host/ohci-st.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ST OHCI driver
+ *
+ * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Derived from ohci-platform.c
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ohci.h"
+
+#define USB_MAX_CLKS 3
+
+struct st_ohci_platform_priv {
+	struct clk *clks[USB_MAX_CLKS];
+	struct clk *clk48;
+	struct reset_control *rst;
+	struct reset_control *pwr;
+	struct phy *phy;
+};
+
+#define DRIVER_DESC "OHCI STMicroelectronics driver"
+
+#define hcd_to_ohci_priv(h) \
+	((struct st_ohci_platform_priv *)hcd_to_ohci(h)->priv)
+
+static const char hcd_name[] = "ohci-st";
+
+static int st_ohci_platform_power_on(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+	int clk, ret;
+
+	ret = reset_control_deassert(priv->pwr);
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(priv->rst);
+	if (ret)
+		goto err_assert_power;
+
+	/* some SoCs don't have a dedicated 48MHz clock, but those that do
+	   need the rate to be explicitly set */
+	if (priv->clk48) {
+		ret = clk_set_rate(priv->clk48, 48000000);
+		if (ret)
+			goto err_assert_reset;
+	}
+
+	for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++) {
+		ret = clk_prepare_enable(priv->clks[clk]);
+		if (ret)
+			goto err_disable_clks;
+	}
+
+	ret = phy_init(priv->phy);
+	if (ret)
+		goto err_disable_clks;
+
+	ret = phy_power_on(priv->phy);
+	if (ret)
+		goto err_exit_phy;
+
+	return 0;
+
+err_exit_phy:
+	phy_exit(priv->phy);
+err_disable_clks:
+	while (--clk >= 0)
+		clk_disable_unprepare(priv->clks[clk]);
+err_assert_reset:
+	reset_control_assert(priv->rst);
+err_assert_power:
+	reset_control_assert(priv->pwr);
+
+	return ret;
+}
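+
+/*
+ * The error unwinding above is the usual reverse-order pattern: clk is
+ * left pointing one past the last clock that was successfully enabled,
+ * so "while (--clk >= 0)" disables exactly the clocks that were brought
+ * up before the error is handed back.
+ */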
+
+static void st_ohci_platform_power_off(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+
+	int clk;
+
+	reset_control_assert(priv->pwr);
+
+	reset_control_assert(priv->rst);
+
+	phy_power_off(priv->phy);
+
+	phy_exit(priv->phy);
+
+	for (clk = USB_MAX_CLKS - 1; clk >= 0; clk--)
+		if (priv->clks[clk])
+			clk_disable_unprepare(priv->clks[clk]);
+}
+
+static struct hc_driver __read_mostly ohci_platform_hc_driver;
+
+static const struct ohci_driver_overrides platform_overrides __initconst = {
+	.product_desc =		"ST OHCI controller",
+	.extra_priv_size =	sizeof(struct st_ohci_platform_priv),
+};
+
+static struct usb_ohci_pdata ohci_platform_defaults = {
+	.power_on =		st_ohci_platform_power_on,
+	.power_suspend =	st_ohci_platform_power_off,
+	.power_off =		st_ohci_platform_power_off,
+};
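+
+/*
+ * These callbacks are published through usb_ohci_pdata (installed as
+ * the device's platform_data in the probe below) so that remove and
+ * the PM hooks can retrieve the same power_on/power_suspend/power_off
+ * routines via dev_get_platdata() instead of hard-coding them.
+ */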
+
+static int st_ohci_platform_probe(struct platform_device *dev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res_mem;
+	struct usb_ohci_pdata *pdata = &ohci_platform_defaults;
+	struct st_ohci_platform_priv *priv;
+	struct ohci_hcd *ohci;
+	int err, irq, clk = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		dev_err(&dev->dev, "no irq provided\n");
+		return irq;
+	}
+
+	res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!res_mem) {
+		dev_err(&dev->dev, "no memory resource provided\n");
+		return -ENXIO;
+	}
+
+	hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
+			dev_name(&dev->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	platform_set_drvdata(dev, hcd);
+	dev->dev.platform_data = pdata;
+	priv = hcd_to_ohci_priv(hcd);
+	ohci = hcd_to_ohci(hcd);
+
+	priv->phy = devm_phy_get(&dev->dev, "usb");
+	if (IS_ERR(priv->phy)) {
+		err = PTR_ERR(priv->phy);
+		goto err_put_hcd;
+	}
+
+	for (clk = 0; clk < USB_MAX_CLKS; clk++) {
+		priv->clks[clk] = of_clk_get(dev->dev.of_node, clk);
+		if (IS_ERR(priv->clks[clk])) {
+			err = PTR_ERR(priv->clks[clk]);
+			if (err == -EPROBE_DEFER)
+				goto err_put_clks;
+			priv->clks[clk] = NULL;
+			break;
+		}
+	}
+
+	/* some SoCs don't have a dedicated 48MHz clock, but those that
+	   do need the rate to be explicitly set */
+	priv->clk48 = devm_clk_get(&dev->dev, "clk48");
+	if (IS_ERR(priv->clk48)) {
+		dev_info(&dev->dev, "48MHz clk not found\n");
+		priv->clk48 = NULL;
+	}
+
+	priv->pwr =
+		devm_reset_control_get_optional_shared(&dev->dev, "power");
+	if (IS_ERR(priv->pwr)) {
+		err = PTR_ERR(priv->pwr);
+		goto err_put_clks;
+	}
+
+	priv->rst =
+		devm_reset_control_get_optional_shared(&dev->dev, "softreset");
+	if (IS_ERR(priv->rst)) {
+		err = PTR_ERR(priv->rst);
+		goto err_put_clks;
+	}
+
+	if (pdata->power_on) {
+		err = pdata->power_on(dev);
+		if (err < 0)
+			goto err_power;
+	}
+
+	hcd->rsrc_start = res_mem->start;
+	hcd->rsrc_len = resource_size(res_mem);
+
+	hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+	if (IS_ERR(hcd->regs)) {
+		err = PTR_ERR(hcd->regs);
+		goto err_power;
+	}
+	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (err)
+		goto err_power;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	platform_set_drvdata(dev, hcd);
+
+	return err;
+
+err_power:
+	if (pdata->power_off)
+		pdata->power_off(dev);
+
+err_put_clks:
+	while (--clk >= 0)
+		clk_put(priv->clks[clk]);
+err_put_hcd:
+	if (pdata == &ohci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	usb_put_hcd(hcd);
+
+	return err;
+}
+
+static int st_ohci_platform_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
+	struct st_ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+	int clk;
+
+	usb_remove_hcd(hcd);
+
+	if (pdata->power_off)
+		pdata->power_off(dev);
+
+
+	for (clk = 0; clk < USB_MAX_CLKS && priv->clks[clk]; clk++)
+		clk_put(priv->clks[clk]);
+
+	usb_put_hcd(hcd);
+
+	if (pdata == &ohci_platform_defaults)
+		dev->dev.platform_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int st_ohci_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev->platform_data;
+	struct platform_device *pdev = to_platform_device(dev);
+	bool do_wakeup = device_may_wakeup(dev);
+	int ret;
+
+	ret = ohci_suspend(hcd, do_wakeup);
+	if (ret)
+		return ret;
+
+	if (pdata->power_suspend)
+		pdata->power_suspend(pdev);
+
+	return ret;
+}
+
+static int st_ohci_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	int err;
+
+	if (pdata->power_on) {
+		err = pdata->power_on(pdev);
+		if (err < 0)
+			return err;
+	}
+
+	ohci_resume(hcd, false);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(st_ohci_pm_ops, st_ohci_suspend, st_ohci_resume);
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id st_ohci_platform_ids[] = {
+	{ .compatible = "st,st-ohci-300x", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st_ohci_platform_ids);
+
+static struct platform_driver ohci_platform_driver = {
+	.probe		= st_ohci_platform_probe,
+	.remove		= st_ohci_platform_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name	= "st-ohci",
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &st_ohci_pm_ops,
+#endif
+		.of_match_table = st_ohci_platform_ids,
+	}
+};
+
+static int __init ohci_platform_init(void)
+{
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+	ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
+	return platform_driver_register(&ohci_platform_driver);
+}
+module_init(ohci_platform_init);
+
+static void __exit ohci_platform_cleanup(void)
+{
+	platform_driver_unregister(&ohci_platform_driver);
+}
+module_exit(ohci_platform_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
new file mode 100644
index 0000000..a631dbb
--- /dev/null
+++ b/drivers/usb/host/ohci-tmio.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Bus glue for Toshiba Mobile IO (TMIO) Controller's OHCI core
+ * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
+ * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
+ *
+ * This is known to work with the following variants:
+ *	TC6393XB revision 3	(32kB SRAM)
+ *
+ * The TMIO's OHCI core DMAs through a small internal buffer that
+ * is directly addressable by the CPU.
+ *
+ * Written from sparse documentation from Toshiba and Sharp's driver
+ * for the 2.4 kernel,
+ *	usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/dma-mapping.h>
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * USB Host Controller Configuration Register
+ */
+#define CCR_REVID	0x08	/* b Revision ID				*/
+#define CCR_BASE	0x10	/* l USB Control Register Base Address Low	*/
+#define CCR_ILME	0x40	/* b Internal Local Memory Enable		*/
+#define CCR_PM		0x4c	/* w Power Management			*/
+#define CCR_INTC	0x50	/* b INT Control				*/
+#define CCR_LMW1L	0x54	/* w Local Memory Window 1 LMADRS Low	*/
+#define CCR_LMW1H	0x56	/* w Local Memory Window 1 LMADRS High	*/
+#define CCR_LMW1BL	0x58	/* w Local Memory Window 1 Base Address Low	*/
+#define CCR_LMW1BH	0x5A	/* w Local Memory Window 1 Base Address High	*/
+#define CCR_LMW2L	0x5C	/* w Local Memory Window 2 LMADRS Low	*/
+#define CCR_LMW2H	0x5E	/* w Local Memory Window 2 LMADRS High	*/
+#define CCR_LMW2BL	0x60	/* w Local Memory Window 2 Base Address Low	*/
+#define CCR_LMW2BH	0x62	/* w Local Memory Window 2 Base Address High	*/
+#define CCR_MISC	0xFC	/* b MISC					*/
+
+#define CCR_PM_GKEN      0x0001
+#define CCR_PM_CKRNEN    0x0002
+#define CCR_PM_USBPW1    0x0004
+#define CCR_PM_USBPW2    0x0008
+#define CCR_PM_USBPW3    0x0010
+#define CCR_PM_PMEE      0x0100
+#define CCR_PM_PMES      0x8000
+
+/*-------------------------------------------------------------------------*/
+
+struct tmio_hcd {
+	void __iomem		*ccr;
+	spinlock_t		lock; /* protects RMW cycles */
+};
+
+#define hcd_to_tmio(hcd)	((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
+
+/*-------------------------------------------------------------------------*/
+
+static void tmio_write_pm(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	u16 pm;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
+	     CCR_PM_PMEE | CCR_PM_PMES;
+
+	tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+	spin_unlock_irqrestore(&tmio->lock, flags);
+}
+
+static void tmio_stop_hc(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	u16 pm;
+
+	pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
+	/* the cases fall through, accumulating a power-down bit per port */
+	switch (ohci->num_ports) {
+		default:
+			dev_err(&dev->dev, "Unsupported number of ports: %d\n", ohci->num_ports);
+			/* fall through */
+		case 3:
+			pm |= CCR_PM_USBPW3;
+			/* fall through */
+		case 2:
+			pm |= CCR_PM_USBPW2;
+			/* fall through */
+		case 1:
+			pm |= CCR_PM_USBPW1;
+	}
+	tmio_iowrite8(0, tmio->ccr + CCR_INTC);
+	tmio_iowrite8(0, tmio->ccr + CCR_ILME);
+	tmio_iowrite16(0, tmio->ccr + CCR_BASE);
+	tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
+	tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+}
+
+static void tmio_start_hc(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long base = hcd->rsrc_start;
+
+	tmio_write_pm(dev);
+	tmio_iowrite16(base, tmio->ccr + CCR_BASE);
+	tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
+	tmio_iowrite8(1, tmio->ccr + CCR_ILME);
+	tmio_iowrite8(2, tmio->ccr + CCR_INTC);
+
+	dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
+			tmio_ioread8(tmio->ccr + CCR_REVID),
+			(u64) hcd->rsrc_start, hcd->irq);
+}
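+
+/*
+ * Reading the sequence above against the register list at the top of
+ * this file: the OHCI MMIO base address is programmed into CCR_BASE
+ * (low/high halves), CCR_ILME enables the internal local memory the
+ * OHCI core DMAs through, and the CCR_INTC write appears to unmask the
+ * controller's interrupt output.
+ */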
+
+static int ohci_tmio_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	int ret;
+
+	if ((ret = ohci_init(ohci)) < 0)
+		return ret;
+
+	if ((ret = ohci_run(ohci)) < 0) {
+		dev_err(hcd->self.controller, "can't start %s\n",
+			hcd->self.bus_name);
+		ohci_stop(hcd);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct hc_driver ohci_tmio_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"TMIO OHCI USB Host Controller",
+	.hcd_priv_size =	sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
+
+	/* generic hardware linkage */
+	.irq =			ohci_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
+
+	/* basic lifecycle operations */
+	.start =		ohci_tmio_start,
+	.stop =			ohci_stop,
+	.shutdown =		ohci_shutdown,
+
+	/* managing i/o requests and associated device resources */
+	.urb_enqueue =		ohci_urb_enqueue,
+	.urb_dequeue =		ohci_urb_dequeue,
+	.endpoint_disable =	ohci_endpoint_disable,
+
+	/* scheduling support */
+	.get_frame_number =	ohci_get_frame,
+
+	/* root hub support */
+	.hub_status_data =	ohci_hub_status_data,
+	.hub_control =		ohci_hub_control,
+#ifdef	CONFIG_PM
+	.bus_suspend =		ohci_bus_suspend,
+	.bus_resume =		ohci_bus_resume,
+#endif
+	.start_port_reset =	ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver ohci_hcd_tmio_driver;
+
+static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+	struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
+	int irq = platform_get_irq(dev, 0);
+	struct tmio_hcd *tmio;
+	struct ohci_hcd *ohci;
+	struct usb_hcd *hcd;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (!cell)
+		return -EINVAL;
+
+	hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto err_usb_create_hcd;
+	}
+
+	hcd->rsrc_start = regs->start;
+	hcd->rsrc_len = resource_size(regs);
+
+	tmio = hcd_to_tmio(hcd);
+
+	spin_lock_init(&tmio->lock);
+
+	tmio->ccr = ioremap(config->start, resource_size(config));
+	if (!tmio->ccr) {
+		ret = -ENOMEM;
+		goto err_ioremap_ccr;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		ret = -ENOMEM;
+		goto err_ioremap_regs;
+	}
+
+	ret = dma_declare_coherent_memory(&dev->dev, sram->start, sram->start,
+				resource_size(sram), DMA_MEMORY_EXCLUSIVE);
+	if (ret)
+		goto err_dma_declare;
+
+	if (cell->enable) {
+		ret = cell->enable(dev);
+		if (ret)
+			goto err_enable;
+	}
+
+	tmio_start_hc(dev);
+	ohci = hcd_to_ohci(hcd);
+	ohci_hcd_init(ohci);
+
+	ret = usb_add_hcd(hcd, irq, 0);
+	if (ret)
+		goto err_add_hcd;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_add_hcd:
+	tmio_stop_hc(dev);
+	if (cell->disable)
+		cell->disable(dev);
+err_enable:
+	dma_release_declared_memory(&dev->dev);
+err_dma_declare:
+	iounmap(hcd->regs);
+err_ioremap_regs:
+	iounmap(tmio->ccr);
+err_ioremap_ccr:
+	usb_put_hcd(hcd);
+err_usb_create_hcd:
+
+	return ret;
+}
+
+static int ohci_hcd_tmio_drv_remove(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+
+	usb_remove_hcd(hcd);
+	tmio_stop_hc(dev);
+	if (cell->disable)
+		cell->disable(dev);
+	dma_release_declared_memory(&dev->dev);
+	iounmap(hcd->regs);
+	iounmap(tmio->ccr);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long flags;
+	u8 misc;
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+	misc |= 1 << 3; /* USSUSP */
+	tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+	spin_unlock_irqrestore(&tmio->lock, flags);
+
+	if (cell->suspend) {
+		ret = cell->suspend(dev);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+	unsigned long flags;
+	u8 misc;
+	int ret;
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	if (cell->resume) {
+		ret = cell->resume(dev);
+		if (ret)
+			return ret;
+	}
+
+	tmio_start_hc(dev);
+
+	spin_lock_irqsave(&tmio->lock, flags);
+
+	misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+	misc &= ~(1 << 3); /* USSUSP */
+	tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+	spin_unlock_irqrestore(&tmio->lock, flags);
+
+	ohci_resume(hcd, false);
+
+	return 0;
+}
+#else
+#define ohci_hcd_tmio_drv_suspend NULL
+#define ohci_hcd_tmio_drv_resume NULL
+#endif
+
+static struct platform_driver ohci_hcd_tmio_driver = {
+	.probe		= ohci_hcd_tmio_drv_probe,
+	.remove		= ohci_hcd_tmio_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.suspend	= ohci_hcd_tmio_drv_suspend,
+	.resume		= ohci_hcd_tmio_drv_resume,
+	.driver		= {
+		.name	= "tmio-ohci",
+	},
+};
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
new file mode 100644
index 0000000..ef4813b
--- /dev/null
+++ b/drivers/usb/host/ohci.h
@@ -0,0 +1,741 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licenced under the GPL.
+ */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given OHCI_BIG_ENDIAN), depending on the
+ * host controller implementation.
+ */
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+
+/*
+ * OHCI Endpoint Descriptor (ED) ... holds TD queue
+ * See OHCI spec, section 4.2
+ *
+ * This is a "Queue Head" for those transfers, which is why
+ * both EHCI and UHCI call similar structures a "QH".
+ */
+struct ed {
+	/* first fields are hardware-specified */
+	__hc32			hwINFO;      /* endpoint config bitmap */
+	/* info bits defined by hcd */
+#define ED_DEQUEUE	(1 << 27)
+	/* info bits defined by the hardware */
+#define ED_ISO		(1 << 15)
+#define ED_SKIP		(1 << 14)
+#define ED_LOWSPEED	(1 << 13)
+#define ED_OUT		(0x01 << 11)
+#define ED_IN		(0x02 << 11)
+	__hc32			hwTailP;	/* tail of TD list */
+	__hc32			hwHeadP;	/* head of TD list (hc r/w) */
+#define ED_C		(0x02)			/* toggle carry */
+#define ED_H		(0x01)			/* halted */
+	__hc32			hwNextED;	/* next ED in list */
+
+	/* rest are purely for the driver's use */
+	dma_addr_t		dma;		/* addr of ED */
+	struct td		*dummy;		/* next TD to activate */
+
+	/* host's view of schedule */
+	struct ed		*ed_next;	/* on schedule or rm_list */
+	struct ed		*ed_prev;	/* for non-interrupt EDs */
+	struct list_head	td_list;	/* "shadow list" of our TDs */
+	struct list_head	in_use_list;
+
+	/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
+	 * usually:  OPER --> UNLINK --> (IDLE | OPER) --> ...
+	 */
+	u8			state;		/* ED_{IDLE,UNLINK,OPER} */
+#define ED_IDLE		0x00		/* NOT linked to HC */
+#define ED_UNLINK	0x01		/* being unlinked from hc */
+#define ED_OPER		0x02		/* IS linked to hc */
+
+	u8			type;		/* PIPE_{BULK,...} */
+
+	/* periodic scheduling params (for intr and iso) */
+	u8			branch;
+	u16			interval;
+	u16			load;
+	u16			last_iso;	/* iso only */
+
+	/* HC may see EDs on rm_list until next frame (frame_no == tick) */
+	u16			tick;
+
+	/* Detect TDs not added to the done queue */
+	unsigned		takeback_wdh_cnt;
+	struct td		*pending_td;
+#define	OKAY_TO_TAKEBACK(ohci, ed)			\
+		((int) (ohci->wdh_cnt - ed->takeback_wdh_cnt) >= 0)
+
+} __attribute__ ((aligned(16)));
+
+#define ED_MASK	((u32)~0x0f)		/* strip hw status in low addr bits */
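+
+/*
+ * Illustrative note: ED_H (halted) and ED_C (toggle carry) live in the
+ * low bits of hwHeadP, so ED_MASK must strip them before the value is
+ * used as a TD address.  A hypothetical fragment, assuming the
+ * hc32_to_cpu() byte-order helper that pairs with cpu_to_hc32():
+ */
+#if 0
+	u32 head = hc32_to_cpu(ohci, ed->hwHeadP);
+	dma_addr_t next_td = head & ED_MASK;	/* TD the HC runs next */
+	int halted = head & ED_H;		/* HC stopped this queue */
+#endif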
+
+
+/*
+ * OHCI Transfer Descriptor (TD) ... one per transfer segment
+ * See OHCI spec, sections 4.3.1 (general = control/bulk/interrupt)
+ * and 4.3.2 (iso)
+ */
+struct td {
+	/* first fields are hardware-specified */
+	__hc32		hwINFO;		/* transfer info bitmask */
+
+	/* hwINFO bits for both general and iso tds: */
+#define TD_CC       0xf0000000			/* condition code */
+#define TD_CC_GET(td_p) ((td_p >>28) & 0x0f)
+//#define TD_CC_SET(td_p, cc) (td_p) = ((td_p) & 0x0fffffff) | (((cc) & 0x0f) << 28)
+#define TD_DI       0x00E00000			/* frames before interrupt */
+#define TD_DI_SET(X) (((X) & 0x07)<< 21)
+	/* these two bits are available for definition/use by HCDs in both
+	 * general and iso tds ... others are available for only one type
+	 */
+#define TD_DONE     0x00020000			/* retired to donelist */
+#define TD_ISO      0x00010000			/* copy of ED_ISO */
+
+	/* hwINFO bits for general tds: */
+#define TD_EC       0x0C000000			/* error count */
+#define TD_T        0x03000000			/* data toggle state */
+#define TD_T_DATA0  0x02000000				/* DATA0 */
+#define TD_T_DATA1  0x03000000				/* DATA1 */
+#define TD_T_TOGGLE 0x00000000				/* uses ED_C */
+#define TD_DP       0x00180000			/* direction/pid */
+#define TD_DP_SETUP 0x00000000			/* SETUP pid */
+#define TD_DP_IN    0x00100000				/* IN pid */
+#define TD_DP_OUT   0x00080000				/* OUT pid */
+							/* 0x00180000 rsvd */
+#define TD_R        0x00040000			/* round: short packets OK? */
+
+	/* (no hwINFO #defines yet for iso tds) */
+
+	__hc32		hwCBP;		/* Current Buffer Pointer (or 0) */
+	__hc32		hwNextTD;	/* Next TD Pointer */
+	__hc32		hwBE;		/* Memory Buffer End Pointer */
+
+	/* PSW is only for ISO.  Only 1 PSW entry is used, but on
+	 * big-endian PPC hardware that's the second entry.
+	 */
+#define MAXPSW	2
+	__hc16		hwPSW [MAXPSW];
+
+	/* rest are purely for the driver's use */
+	__u8		index;
+	struct ed	*ed;
+	struct td	*td_hash;	/* dma-->td hashtable */
+	struct td	*next_dl_td;
+	struct urb	*urb;
+
+	dma_addr_t	td_dma;		/* addr of this TD */
+	dma_addr_t	data_dma;	/* addr of data it points to */
+
+	struct list_head td_list;	/* "shadow list", TDs on same ED */
+} __attribute__ ((aligned(32)));	/* c/b/i need 16; only iso needs 32 */
+
+#define TD_MASK	((u32)~0x1f)		/* strip hw status in low addr bits */
+
+/*
+ * Hardware transfer status codes -- CC from td->hwINFO or td->hwPSW
+ */
+#define TD_CC_NOERROR      0x00
+#define TD_CC_CRC          0x01
+#define TD_CC_BITSTUFFING  0x02
+#define TD_CC_DATATOGGLEM  0x03
+#define TD_CC_STALL        0x04
+#define TD_DEVNOTRESP      0x05
+#define TD_PIDCHECKFAIL    0x06
+#define TD_UNEXPECTEDPID   0x07
+#define TD_DATAOVERRUN     0x08
+#define TD_DATAUNDERRUN    0x09
+    /* 0x0A, 0x0B reserved for hardware */
+#define TD_BUFFEROVERRUN   0x0C
+#define TD_BUFFERUNDERRUN  0x0D
+    /* 0x0E, 0x0F reserved for HCD */
+#define TD_NOTACCESSED     0x0F
+
+
+/* map OHCI TD status codes (CC) to errno values */
+static const int cc_to_error [16] = {
+	/* No  Error  */               0,
+	/* CRC Error  */               -EILSEQ,
+	/* Bit Stuff  */               -EPROTO,
+	/* Data Togg  */               -EILSEQ,
+	/* Stall      */               -EPIPE,
+	/* DevNotResp */               -ETIME,
+	/* PIDCheck   */               -EPROTO,
+	/* UnExpPID   */               -EPROTO,
+	/* DataOver   */               -EOVERFLOW,
+	/* DataUnder  */               -EREMOTEIO,
+	/* (for hw)   */               -EIO,
+	/* (for hw)   */               -EIO,
+	/* BufferOver */               -ECOMM,
+	/* BuffUnder  */               -ENOSR,
+	/* (for HCD)  */               -EALREADY,
+	/* (for HCD)  */               -EALREADY
+};
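+
+/*
+ * Hypothetical sketch of how the table above is meant to be used: pull
+ * the condition code out of a retired TD's hwINFO and map it to an
+ * errno (the function name below is illustrative only).
+ */
+#if 0
+static int td_cc_to_errno(struct ohci_hcd *ohci, struct td *td)
+{
+	u32 info = hc32_to_cpu(ohci, td->hwINFO);
+
+	return cc_to_error[TD_CC_GET(info)];
+}
+#endif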
+
+
+/*
+ * The HCCA (Host Controller Communications Area) is a 256 byte
+ * structure defined in section 4.4.1 of the OHCI spec. The HC is
+ * told the base address of it.  It must be 256-byte aligned.
+ */
+struct ohci_hcca {
+#define NUM_INTS 32
+	__hc32	int_table [NUM_INTS];	/* periodic schedule */
+
+	/*
+	 * OHCI defines u16 frame_no, followed by u16 zero pad.
+	 * Since some processors can't do 16 bit bus accesses,
+	 * portable access must be a 32 bits wide.
+	 */
+	__hc32	frame_no;		/* current frame number */
+	__hc32	done_head;		/* info returned for an interrupt */
+	u8	reserved_for_hc [116];
+	u8	what [4];		/* spec only identifies 252 bytes :) */
+} __attribute__ ((aligned(256)));
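+
+/*
+ * Note on done_head: on a writeback-done-head interrupt the HC stores
+ * the address of the most recently retired TD here (TDs are chained in
+ * reverse completion order through hwNextTD, which is why the driver
+ * re-reverses the list before processing).  Per the OHCI spec, bit 0 of
+ * the written value is set when other HcInterruptStatus bits are also
+ * pending, so it must be masked off before use as an address.
+ */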
+
+/*
+ * This is the structure of the OHCI controller's memory mapped I/O region.
+ * You must use readl() and writel() (in <asm/io.h>) to access these fields!!
+ * Layout is in section 7 (and appendix B) of the spec.
+ */
+struct ohci_regs {
+	/* control and status registers (section 7.1) */
+	__hc32	revision;
+	__hc32	control;
+	__hc32	cmdstatus;
+	__hc32	intrstatus;
+	__hc32	intrenable;
+	__hc32	intrdisable;
+
+	/* memory pointers (section 7.2) */
+	__hc32	hcca;
+	__hc32	ed_periodcurrent;
+	__hc32	ed_controlhead;
+	__hc32	ed_controlcurrent;
+	__hc32	ed_bulkhead;
+	__hc32	ed_bulkcurrent;
+	__hc32	donehead;
+
+	/* frame counters (section 7.3) */
+	__hc32	fminterval;
+	__hc32	fmremaining;
+	__hc32	fmnumber;
+	__hc32	periodicstart;
+	__hc32	lsthresh;
+
+	/* Root hub ports (section 7.4) */
+	struct	ohci_roothub_regs {
+		__hc32	a;
+		__hc32	b;
+		__hc32	status;
+#define MAX_ROOT_PORTS	15	/* maximum OHCI root hub ports (RH_A_NDP) */
+		__hc32	portstatus [MAX_ROOT_PORTS];
+	} roothub;
+
+	/* and optional "legacy support" registers (appendix B) at 0x0100 */
+
+} __attribute__ ((aligned(32)));
+
+
+/* OHCI CONTROL AND STATUS REGISTER MASKS */
+
+/*
+ * HcControl (control) register masks
+ */
+#define OHCI_CTRL_CBSR	(3 << 0)	/* control/bulk service ratio */
+#define OHCI_CTRL_PLE	(1 << 2)	/* periodic list enable */
+#define OHCI_CTRL_IE	(1 << 3)	/* isochronous enable */
+#define OHCI_CTRL_CLE	(1 << 4)	/* control list enable */
+#define OHCI_CTRL_BLE	(1 << 5)	/* bulk list enable */
+#define OHCI_CTRL_HCFS	(3 << 6)	/* host controller functional state */
+#define OHCI_CTRL_IR	(1 << 8)	/* interrupt routing */
+#define OHCI_CTRL_RWC	(1 << 9)	/* remote wakeup connected */
+#define OHCI_CTRL_RWE	(1 << 10)	/* remote wakeup enable */
+
+/* pre-shifted values for HCFS */
+#	define OHCI_USB_RESET	(0 << 6)
+#	define OHCI_USB_RESUME	(1 << 6)
+#	define OHCI_USB_OPER	(2 << 6)
+#	define OHCI_USB_SUSPEND	(3 << 6)
+
+/*
+ * HcCommandStatus (cmdstatus) register masks
+ */
+#define OHCI_HCR	(1 << 0)	/* host controller reset */
+#define OHCI_CLF	(1 << 1)	/* control list filled */
+#define OHCI_BLF	(1 << 2)	/* bulk list filled */
+#define OHCI_OCR	(1 << 3)	/* ownership change request */
+#define OHCI_SOC	(3 << 16)	/* scheduling overrun count */
+
+/*
+ * masks used with interrupt registers:
+ * HcInterruptStatus (intrstatus)
+ * HcInterruptEnable (intrenable)
+ * HcInterruptDisable (intrdisable)
+ */
+#define OHCI_INTR_SO	(1 << 0)	/* scheduling overrun */
+#define OHCI_INTR_WDH	(1 << 1)	/* writeback of done_head */
+#define OHCI_INTR_SF	(1 << 2)	/* start frame */
+#define OHCI_INTR_RD	(1 << 3)	/* resume detect */
+#define OHCI_INTR_UE	(1 << 4)	/* unrecoverable error */
+#define OHCI_INTR_FNO	(1 << 5)	/* frame number overflow */
+#define OHCI_INTR_RHSC	(1 << 6)	/* root hub status change */
+#define OHCI_INTR_OC	(1 << 30)	/* ownership change */
+#define OHCI_INTR_MIE	(1 << 31)	/* master interrupt enable */
+
+
+/* OHCI ROOT HUB REGISTER MASKS */
+
+/* roothub.portstatus [i] bits */
+#define RH_PS_CCS            0x00000001		/* current connect status */
+#define RH_PS_PES            0x00000002		/* port enable status*/
+#define RH_PS_PSS            0x00000004		/* port suspend status */
+#define RH_PS_POCI           0x00000008		/* port over current indicator */
+#define RH_PS_PRS            0x00000010		/* port reset status */
+#define RH_PS_PPS            0x00000100		/* port power status */
+#define RH_PS_LSDA           0x00000200		/* low speed device attached */
+#define RH_PS_CSC            0x00010000		/* connect status change */
+#define RH_PS_PESC           0x00020000		/* port enable status change */
+#define RH_PS_PSSC           0x00040000		/* port suspend status change */
+#define RH_PS_OCIC           0x00080000		/* over current indicator change */
+#define RH_PS_PRSC           0x00100000		/* port reset status change */
+
+/* roothub.status bits */
+#define RH_HS_LPS	     0x00000001		/* local power status */
+#define RH_HS_OCI	     0x00000002		/* over current indicator */
+#define RH_HS_DRWE	     0x00008000		/* device remote wakeup enable */
+#define RH_HS_LPSC	     0x00010000		/* local power status change */
+#define RH_HS_OCIC	     0x00020000		/* over current indicator change */
+#define RH_HS_CRWE	     0x80000000		/* clear remote wakeup enable */
+
+/* roothub.b masks */
+#define RH_B_DR		0x0000ffff		/* device removable flags */
+#define RH_B_PPCM	0xffff0000		/* port power control mask */
+
+/* roothub.a masks */
+#define	RH_A_NDP	(0xff << 0)		/* number of downstream ports */
+#define	RH_A_PSM	(1 << 8)		/* power switching mode */
+#define	RH_A_NPS	(1 << 9)		/* no power switching */
+#define	RH_A_DT		(1 << 10)		/* device type (mbz) */
+#define	RH_A_OCPM	(1 << 11)		/* over current protection mode */
+#define	RH_A_NOCP	(1 << 12)		/* no over current protection */
+#define	RH_A_POTPGT	(0xff << 24)		/* power on to power good time */
+
+
+/* hcd-private per-urb state */
+typedef struct urb_priv {
+	struct ed		*ed;
+	u16			length;		// # tds in this request
+	u16			td_cnt;		// tds already serviced
+	struct list_head	pending;
+	struct td		*td [0];	// all TDs in this request
+
+} urb_priv_t;
+
+#define TD_HASH_SIZE    64    /* power'o'two */
+// sizeof (struct td) ~= 64 == 2^6 ...
+#define TD_HASH_FUNC(td_dma) ((td_dma ^ (td_dma >> 6)) % TD_HASH_SIZE)
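+
+/*
+ * Editor's note (illustrative; the real lookup lives in the .c files):
+ * a td is recovered from a hardware-written dma pointer by indexing
+ * td_hash and walking the per-bucket chain, roughly:
+ *
+ *	td = ohci->td_hash[TD_HASH_FUNC(td_dma)];
+ *	while (td && td->td_dma != td_dma)
+ *		td = td->td_hash;
+ */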
+
+
+/*
+ * This is the full ohci controller description
+ *
+ * Note how the "proper" USB information is just
+ * a subset of what the full implementation needs. (Linus)
+ */
+
+enum ohci_rh_state {
+	OHCI_RH_HALTED,
+	OHCI_RH_SUSPENDED,
+	OHCI_RH_RUNNING
+};
+
+struct ohci_hcd {
+	spinlock_t		lock;
+
+	/*
+	 * I/O memory used to communicate with the HC (dma-consistent)
+	 */
+	struct ohci_regs __iomem *regs;
+
+	/*
+	 * main memory used to communicate with the HC (dma-consistent).
+	 * hcd adds to schedule for a live hc any time, but removals finish
+	 * only at the start of the next frame.
+	 */
+	struct ohci_hcca	*hcca;
+	dma_addr_t		hcca_dma;
+
+	struct ed		*ed_rm_list;		/* to be removed */
+
+	struct ed		*ed_bulktail;		/* last in bulk list */
+	struct ed		*ed_controltail;	/* last in ctrl list */
+	struct ed		*periodic [NUM_INTS];	/* shadow int_table */
+
+	void (*start_hnp)(struct ohci_hcd *ohci);
+
+	/*
+	 * memory management for queue data structures
+	 */
+	struct dma_pool		*td_cache;
+	struct dma_pool		*ed_cache;
+	struct td		*td_hash [TD_HASH_SIZE];
+	struct td		*dl_start, *dl_end;	/* the done list */
+	struct list_head	pending;
+	struct list_head	eds_in_use;	/* all EDs with at least 1 TD */
+
+	/*
+	 * driver state
+	 */
+	enum ohci_rh_state	rh_state;
+	int			num_ports;
+	int			load [NUM_INTS];
+	u32			hc_control;	/* copy of hc control reg */
+	unsigned long		next_statechange;	/* suspend/resume */
+	u32			fminterval;		/* saved register */
+	unsigned		autostop:1;	/* rh auto stopping/stopped */
+	unsigned		working:1;
+	unsigned		restart_work:1;
+
+	unsigned long		flags;		/* for HC bugs */
+#define	OHCI_QUIRK_AMD756	0x01			/* erratum #4 */
+#define	OHCI_QUIRK_SUPERIO	0x02			/* natsemi */
+#define	OHCI_QUIRK_INITRESET	0x04			/* SiS, OPTi, ... */
+#define	OHCI_QUIRK_BE_DESC	0x08			/* BE descriptors */
+#define	OHCI_QUIRK_BE_MMIO	0x10			/* BE registers */
+#define	OHCI_QUIRK_ZFMICRO	0x20			/* Compaq ZFMicro chipset*/
+#define	OHCI_QUIRK_NEC		0x40			/* lost interrupts */
+#define	OHCI_QUIRK_FRAME_NO	0x80			/* no big endian frame_no shift */
+#define	OHCI_QUIRK_HUB_POWER	0x100			/* distrust firmware power/oc setup */
+#define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
+#define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
+#define	OHCI_QUIRK_GLOBAL_SUSPEND	0x800		/* must suspend ports */
+#define	OHCI_QUIRK_QEMU		0x1000			/* relax timing expectations */
+
+	// there are also chip quirks/bugs in init logic
+
+	unsigned		prev_frame_no;
+	unsigned		wdh_cnt, prev_wdh_cnt;
+	u32			prev_donehead;
+	struct timer_list	io_watchdog;
+
+	struct work_struct	nec_work;	/* Worker for NEC quirk */
+
+	struct dentry		*debug_dir;
+
+	/* platform-specific data -- must come last */
+	unsigned long           priv[0] __aligned(sizeof(s64));
+
+};
+
+#ifdef CONFIG_USB_PCI
+static inline int quirk_nec(struct ohci_hcd *ohci)
+{
+	return ohci->flags & OHCI_QUIRK_NEC;
+}
+static inline int quirk_zfmicro(struct ohci_hcd *ohci)
+{
+	return ohci->flags & OHCI_QUIRK_ZFMICRO;
+}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+	return ohci->flags & OHCI_QUIRK_AMD_PLL;
+}
+static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
+{
+	return ohci->flags & OHCI_QUIRK_AMD_PREFETCH;
+}
+#else
+static inline int quirk_nec(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+static inline int quirk_zfmicro(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+#endif
+
+/* convert between an hcd pointer and the corresponding ohci_hcd */
+static inline struct ohci_hcd *hcd_to_ohci (struct usb_hcd *hcd)
+{
+	return (struct ohci_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
+{
+	return container_of ((void *) ohci, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define ohci_dbg(ohci, fmt, args...) \
+	dev_dbg (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
+#define ohci_err(ohci, fmt, args...) \
+	dev_err (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
+#define ohci_info(ohci, fmt, args...) \
+	dev_info (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
+#define ohci_warn(ohci, fmt, args...) \
+	dev_warn (ohci_to_hcd(ohci)->self.controller , fmt , ## args )
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * While most USB host controllers implement their registers and
+ * in-memory communication descriptors in little-endian format,
+ * a minority (notably the IBM STB04XXX and the Motorola MPC5200
+ * processors) implement them in big endian format.
+ *
+ * In addition some more exotic implementations like the Toshiba
+ * Spider (aka SCC) cell southbridge are "mixed" endian, that is,
+ * they have a different endianness for registers vs. in-memory
+ * descriptors.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ * That leads to some tricky Kconfig rules, however. There are
+ * different defaults based on some arch/ppc platforms, though
+ * the basic rules are:
+ *
+ * Controller type              Kconfig options needed
+ * ---------------              ----------------------
+ * little endian                CONFIG_USB_OHCI_LITTLE_ENDIAN
+ *
+ * fully big endian             CONFIG_USB_OHCI_BIG_ENDIAN_DESC _and_
+ *                              CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+ *
+ * mixed endian                 CONFIG_USB_OHCI_LITTLE_ENDIAN _and_
+ *                              CONFIG_USB_OHCI_BIG_ENDIAN_{MMIO,DESC}
+ *
+ * (If you have a mixed endian controller, you -must- also define
+ * CONFIG_USB_OHCI_LITTLE_ENDIAN or things will not work when building
+ * both your mixed endian and a fully big endian controller support in
+ * the same kernel image).
+ */
+
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
+#define big_endian_desc(ohci)	(ohci->flags & OHCI_QUIRK_BE_DESC)
+#else
+#define big_endian_desc(ohci)	1		/* only big endian */
+#endif
+#else
+#define big_endian_desc(ohci)	0		/* only little endian */
+#endif
+
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
+#define big_endian_mmio(ohci)	(ohci->flags & OHCI_QUIRK_BE_MMIO)
+#else
+#define big_endian_mmio(ohci)	1		/* only big endian */
+#endif
+#else
+#define big_endian_mmio(ohci)	0		/* only little endian */
+#endif
+
+/*
+ * Big-endian read/write functions are arch-specific.
+ * Other arches can be added if/when they're needed.
+ *
+ */
+static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci,
+					__hc32 __iomem * regs)
+{
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	return big_endian_mmio(ohci) ?
+		readl_be (regs) :
+		readl (regs);
+#else
+	return readl (regs);
+#endif
+}
+
+static inline void _ohci_writel (const struct ohci_hcd *ohci,
+				 const unsigned int val, __hc32 __iomem *regs)
+{
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	big_endian_mmio(ohci) ?
+		writel_be (val, regs) :
+		writel (val, regs);
+#else
+		writel (val, regs);
+#endif
+}
+
+#define ohci_readl(o,r)		_ohci_readl(o,r)
+#define ohci_writel(o,v,r)	_ohci_writel(o,v,r)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* cpu to ohci */
+static inline __hc16 cpu_to_hc16 (const struct ohci_hcd *ohci, const u16 x)
+{
+	return big_endian_desc(ohci) ?
+		(__force __hc16)cpu_to_be16(x) :
+		(__force __hc16)cpu_to_le16(x);
+}
+
+static inline __hc16 cpu_to_hc16p (const struct ohci_hcd *ohci, const u16 *x)
+{
+	return big_endian_desc(ohci) ?
+		cpu_to_be16p(x) :
+		cpu_to_le16p(x);
+}
+
+static inline __hc32 cpu_to_hc32 (const struct ohci_hcd *ohci, const u32 x)
+{
+	return big_endian_desc(ohci) ?
+		(__force __hc32)cpu_to_be32(x) :
+		(__force __hc32)cpu_to_le32(x);
+}
+
+static inline __hc32 cpu_to_hc32p (const struct ohci_hcd *ohci, const u32 *x)
+{
+	return big_endian_desc(ohci) ?
+		cpu_to_be32p(x) :
+		cpu_to_le32p(x);
+}
+
+/* ohci to cpu */
+static inline u16 hc16_to_cpu (const struct ohci_hcd *ohci, const __hc16 x)
+{
+	return big_endian_desc(ohci) ?
+		be16_to_cpu((__force __be16)x) :
+		le16_to_cpu((__force __le16)x);
+}
+
+static inline u16 hc16_to_cpup (const struct ohci_hcd *ohci, const __hc16 *x)
+{
+	return big_endian_desc(ohci) ?
+		be16_to_cpup((__force __be16 *)x) :
+		le16_to_cpup((__force __le16 *)x);
+}
+
+static inline u32 hc32_to_cpu (const struct ohci_hcd *ohci, const __hc32 x)
+{
+	return big_endian_desc(ohci) ?
+		be32_to_cpu((__force __be32)x) :
+		le32_to_cpu((__force __le32)x);
+}
+
+static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
+{
+	return big_endian_desc(ohci) ?
+		be32_to_cpup((__force __be32 *)x) :
+		le32_to_cpup((__force __le32 *)x);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The HCCA frame number is 16 bits, but is accessed as 32 bits since not all
+ * hardware handles 16 bit reads.  Depending on the SoC implementation, the
+ * frame number can wind up in either bits [31:16] (default) or
+ * [15:0] (OHCI_QUIRK_FRAME_NO) on big endian hosts.
+ *
+ * Somewhat similarly, the 16-bit PSW fields in a transfer descriptor are
+ * reordered on BE.
+ */
+
+static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
+{
+	u32 tmp;
+	if (big_endian_desc(ohci)) {
+		tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
+		if (!(ohci->flags & OHCI_QUIRK_FRAME_NO))
+			tmp >>= 16;
+	} else
+		tmp = le32_to_cpup((__force __le32 *)&ohci->hcca->frame_no);
+
+	return (u16)tmp;
+}
+
+static inline __hc16 *ohci_hwPSWp(const struct ohci_hcd *ohci,
+                                 const struct td *td, int index)
+{
+	return (__hc16 *)(big_endian_desc(ohci) ?
+			&td->hwPSW[index ^ 1] : &td->hwPSW[index]);
+}
+
+static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
+                               const struct td *td, int index)
+{
+	return hc16_to_cpup(ohci, ohci_hwPSWp(ohci, td, index));
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define	FI			0x2edf		/* 12000 bits per frame (-1) */
+#define	FSMP(fi)		(0x7fff & ((6 * ((fi) - 210)) / 7))
+#define	FIT			(1 << 31)
+#define LSTHRESH		0x628		/* lowspeed bit threshold */
+
+static inline void periodic_reinit (struct ohci_hcd *ohci)
+{
+	u32	fi = ohci->fminterval & 0x03fff;
+	u32	fit = ohci_readl(ohci, &ohci->regs->fminterval) & FIT;
+
+	ohci_writel (ohci, (fit ^ FIT) | ohci->fminterval,
+						&ohci->regs->fminterval);
+	ohci_writel (ohci, ((9 * fi) / 10) & 0x3fff,
+						&ohci->regs->periodicstart);
+}
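+
+/*
+ * Worked example (editor's note): with the default fi = FI = 0x2edf
+ * (11999, i.e. a 12000-bit frame minus one), periodicstart is set to
+ * (9 * 11999) / 10 = 10799, reserving roughly the first 90% of each
+ * frame for the periodic schedule.
+ */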
+
+/* AMD-756 (D2 rev) reports corrupt register contents in some cases.
+ * The erratum (#4) description is incorrect.  AMD's workaround waits
+ * till some bits (mostly reserved) are clear; ok for all revs.
+ */
+#define read_roothub(hc, register, mask) ({ \
+	u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
+	if (temp == -1) \
+		hc->rh_state = OHCI_RH_HALTED; \
+	else if (hc->flags & OHCI_QUIRK_AMD756) \
+		while (temp & mask) \
+			temp = ohci_readl (hc, &hc->regs->roothub.register); \
+	temp; })
+
+static inline u32 roothub_a (struct ohci_hcd *hc)
+	{ return read_roothub (hc, a, 0xfc0fe000); }
+static inline u32 roothub_b (struct ohci_hcd *hc)
+	{ return ohci_readl (hc, &hc->regs->roothub.b); }
+static inline u32 roothub_status (struct ohci_hcd *hc)
+	{ return ohci_readl (hc, &hc->regs->roothub.status); }
+static inline u32 roothub_portstatus (struct ohci_hcd *hc, int i)
+	{ return read_roothub (hc, portstatus [i], 0xffe0fce0); }
+
+/* Declarations of things exported for use by ohci platform drivers */
+
+struct ohci_driver_overrides {
+	const char	*product_desc;
+	size_t		extra_priv_size;
+	int		(*reset)(struct usb_hcd *hcd);
+};
+
+extern void	ohci_init_driver(struct hc_driver *drv,
+				const struct ohci_driver_overrides *over);
+extern int	ohci_restart(struct ohci_hcd *ohci);
+extern int	ohci_setup(struct usb_hcd *hcd);
+extern int	ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);
+extern int	ohci_resume(struct usb_hcd *hcd, bool hibernated);
+extern int	ohci_hub_control(struct usb_hcd	*hcd, u16 typeReq, u16 wValue,
+				 u16 wIndex, char *buf, u16 wLength);
+extern int	ohci_hub_status_data(struct usb_hcd *hcd, char *buf);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
new file mode 100644
index 0000000..c5e6e8d
--- /dev/null
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -0,0 +1,3901 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
+ *
+ * This code is *strongly* based on EHCI-HCD code by David Brownell since
+ * the chip is quasi-EHCI compatible.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "oxu210hp.h"
+
+#define DRIVER_VERSION "0.0.50"
+
+/*
+ * Main defines
+ */
+
+#define oxu_dbg(oxu, fmt, args...) \
+		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_err(oxu, fmt, args...) \
+		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_info(oxu, fmt, args...) \
+		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define DEBUG
+#endif
+
+static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
+{
+	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
+}
+
+static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
+{
+	return (struct oxu_hcd *) (hcd->hcd_priv);
+}
+
+/*
+ * Debug stuff
+ */
+
+#undef OXU_URB_TRACE
+#undef OXU_VERBOSE_DEBUG
+
+#ifdef OXU_VERBOSE_DEBUG
+#define oxu_vdbg			oxu_dbg
+#else
+#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
+#endif
+
+#ifdef DEBUG
+
+static int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", status,
+		(status & STS_ASS) ? " Async" : "",
+		(status & STS_PSS) ? " Periodic" : "",
+		(status & STS_RECL) ? " Recl" : "",
+		(status & STS_HALT) ? " Halt" : "",
+		(status & STS_IAA) ? " IAA" : "",
+		(status & STS_FATAL) ? " FATAL" : "",
+		(status & STS_FLR) ? " FLR" : "",
+		(status & STS_PCD) ? " PCD" : "",
+		(status & STS_ERR) ? " ERR" : "",
+		(status & STS_INT) ? " INT" : ""
+		);
+}
+
+static int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
+		label, label[0] ? " " : "", enable,
+		(enable & STS_IAA) ? " IAA" : "",
+		(enable & STS_FATAL) ? " FATAL" : "",
+		(enable & STS_FLR) ? " FLR" : "",
+		(enable & STS_PCD) ? " PCD" : "",
+		(enable & STS_ERR) ? " ERR" : "",
+		(enable & STS_INT) ? " INT" : ""
+		);
+}
+
+static const char *const fls_strings[] =
+    { "1024", "512", "256", "??" };
+
+static int dbg_command_buf(char *buf, unsigned len,
+				const char *label, u32 command)
+{
+	return scnprintf(buf, len,
+		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+		label, label[0] ? " " : "", command,
+		(command & CMD_PARK) ? "park" : "(park)",
+		CMD_PARK_CNT(command),
+		(command >> 16) & 0x3f,
+		(command & CMD_LRESET) ? " LReset" : "",
+		(command & CMD_IAAD) ? " IAAD" : "",
+		(command & CMD_ASE) ? " Async" : "",
+		(command & CMD_PSE) ? " Periodic" : "",
+		fls_strings[(command >> 2) & 0x3],
+		(command & CMD_RESET) ? " Reset" : "",
+		(command & CMD_RUN) ? "RUN" : "HALT"
+		);
+}
+
+static int dbg_port_buf(char *buf, unsigned len, const char *label,
+				int port, u32 status)
+{
+	char	*sig;
+
+	/* signaling state */
+	switch (status & (3 << 10)) {
+	case 0 << 10:
+		sig = "se0";
+		break;
+	case 1 << 10:
+		sig = "k";	/* low speed */
+		break;
+	case 2 << 10:
+		sig = "j";
+		break;
+	default:
+		sig = "?";
+		break;
+	}
+
+	return scnprintf(buf, len,
+		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", port, status,
+		(status & PORT_POWER) ? " POWER" : "",
+		(status & PORT_OWNER) ? " OWNER" : "",
+		sig,
+		(status & PORT_RESET) ? " RESET" : "",
+		(status & PORT_SUSPEND) ? " SUSPEND" : "",
+		(status & PORT_RESUME) ? " RESUME" : "",
+		(status & PORT_OCC) ? " OCC" : "",
+		(status & PORT_OC) ? " OC" : "",
+		(status & PORT_PEC) ? " PEC" : "",
+		(status & PORT_PE) ? " PE" : "",
+		(status & PORT_CSC) ? " CSC" : "",
+		(status & PORT_CONNECT) ? " CONNECT" : ""
+	    );
+}
+
+#else
+
+static inline int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{ return 0; }
+
+#endif /* DEBUG */
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(oxu, label, status) { \
+	char _buf[80]; \
+	dbg_status_buf(_buf, sizeof _buf, label, status); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_cmd(oxu, label, command) { \
+	char _buf[80]; \
+	dbg_command_buf(_buf, sizeof _buf, label, command); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_port(oxu, label, port, status) { \
+	char _buf[80]; \
+	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+/*
+ * Module parameters
+ */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh;			/* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* Initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* For flakey hardware, ignore overcurrent indicators */
+static bool ignore_oc;
+module_param(ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
+
+
+static void ehci_work(struct oxu_hcd *oxu);
+static int oxu_hub_control(struct usb_hcd *hcd,
+				u16 typeReq, u16 wValue, u16 wIndex,
+				char *buf, u16 wLength);
+
+/*
+ * Local functions
+ */
+
+/* Low level read/write registers functions */
+static inline u32 oxu_readl(void *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
+static inline void oxu_writel(void *base, u32 reg, u32 val)
+{
+	writel(val, base + reg);
+}
+
+static inline void timer_action_done(struct oxu_hcd *oxu,
+					enum ehci_timer_action action)
+{
+	clear_bit(action, &oxu->actions);
+}
+
+static inline void timer_action(struct oxu_hcd *oxu,
+					enum ehci_timer_action action)
+{
+	if (!test_and_set_bit(action, &oxu->actions)) {
+		unsigned long t;
+
+		switch (action) {
+		case TIMER_IAA_WATCHDOG:
+			t = EHCI_IAA_JIFFIES;
+			break;
+		case TIMER_IO_WATCHDOG:
+			t = EHCI_IO_JIFFIES;
+			break;
+		case TIMER_ASYNC_OFF:
+			t = EHCI_ASYNC_JIFFIES;
+			break;
+		case TIMER_ASYNC_SHRINK:
+		default:
+			t = EHCI_SHRINK_JIFFIES;
+			break;
+		}
+		t += jiffies;
+		/* all timings except IAA watchdog can be overridden.
+		 * async queue SHRINK often precedes IAA.  while it's ready
+		 * to go OFF neither can matter, and afterwards the IO
+		 * watchdog stops unless there's still periodic traffic.
+		 */
+		if (action != TIMER_IAA_WATCHDOG
+				&& t > oxu->watchdog.expires
+				&& timer_pending(&oxu->watchdog))
+			return;
+		mod_timer(&oxu->watchdog, t);
+	}
+}
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes: "usec" microseconds have
+ * passed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown:  shutting down the bridge before the devices using it.
+ */
+static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
+					u32 mask, u32 done, int usec)
+{
+	u32 result;
+
+	do {
+		result = readl(ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+
+/* Force HC to halt state from unknown (EHCI spec section 2.3) */
+static int ehci_halt(struct oxu_hcd *oxu)
+{
+	u32	temp = readl(&oxu->regs->status);
+
+	/* disable any irqs left enabled by previous code */
+	writel(0, &oxu->regs->intr_enable);
+
+	if ((temp & STS_HALT) != 0)
+		return 0;
+
+	temp = readl(&oxu->regs->command);
+	temp &= ~CMD_RUN;
+	writel(temp, &oxu->regs->command);
+	return handshake(oxu, &oxu->regs->status,
+			  STS_HALT, STS_HALT, 16 * 125);
+}
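+
+/*
+ * Editor's note: the 16 * 125 microsecond timeout above is sixteen
+ * microframes (2 ms), the worst case the EHCI spec allows for the
+ * controller to halt after CMD_RUN is cleared.
+ */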
+
+/* Put TDI/ARC silicon into EHCI mode */
+static void tdi_reset(struct oxu_hcd *oxu)
+{
+	u32 __iomem *reg_ptr;
+	u32 tmp;
+
+	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
+	tmp = readl(reg_ptr);
+	tmp |= 0x3;
+	writel(tmp, reg_ptr);
+}
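+
+/*
+ * Editor's note (assumption based on other TDI/ARC-derived EHCI glue,
+ * not stated in this file): offset 0x68 is conventionally the USBMODE
+ * register, and writing 0x3 into its low bits selects host-controller
+ * mode.
+ */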
+
+/* Reset a non-running (STS_HALT == 1) controller */
+static int ehci_reset(struct oxu_hcd *oxu)
+{
+	int	retval;
+	u32	command = readl(&oxu->regs->command);
+
+	command |= CMD_RESET;
+	dbg_cmd(oxu, "reset", command);
+	writel(command, &oxu->regs->command);
+	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+	oxu->next_statechange = jiffies;
+	retval = handshake(oxu, &oxu->regs->command,
+			    CMD_RESET, 0, 250 * 1000);
+
+	if (retval)
+		return retval;
+
+	tdi_reset(oxu);
+
+	return retval;
+}
+
+/* Idle the controller (from running) */
+static void ehci_quiesce(struct oxu_hcd *oxu)
+{
+	u32	temp;
+
+#ifdef DEBUG
+	BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state));
+#endif
+
+	/* wait for any schedule enables/disables to take effect */
+	temp = readl(&oxu->regs->command) << 10;
+	temp &= STS_ASS | STS_PSS;
+	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+				temp, 16 * 125) != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return;
+	}
+
+	/* then disable anything that's still active */
+	temp = readl(&oxu->regs->command);
+	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
+	writel(temp, &oxu->regs->command);
+
+	/* hardware can take 16 microframes to turn off ... */
+	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+				0, 16 * 125) != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return;
+	}
+}
+
+static int check_reset_complete(struct oxu_hcd *oxu, int index,
+				u32 __iomem *status_reg, int port_status)
+{
+	if (!(port_status & PORT_CONNECT)) {
+		oxu->reset_done[index] = 0;
+		return port_status;
+	}
+
+	/* if reset finished and it's still not enabled -- handoff */
+	if (!(port_status & PORT_PE)) {
+		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
+				index+1);
+		return port_status;
+	} else
+		oxu_dbg(oxu, "port %d high speed\n", index + 1);
+
+	return port_status;
+}
+
+static void ehci_hub_descriptor(struct oxu_hcd *oxu,
+				struct usb_hub_descriptor *desc)
+{
+	int ports = HCS_N_PORTS(oxu->hcs_params);
+	u16 temp;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+	temp = HUB_CHAR_INDV_PORT_OCPM;	/* per-port overcurrent reporting */
+	if (HCS_PPC(oxu->hcs_params))
+		temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
+	else
+		temp |= HUB_CHAR_NO_LPSM; /* no power switching */
+	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
+}
+
+
+/* Allocate an OXU210HP on-chip memory data buffer
+ *
+ * An on-chip memory data buffer is required for each OXU210HP USB transfer.
+ * Each transfer descriptor has one or more on-chip memory data buffers.
+ *
+ * Data buffers are allocated from a fixed-size pool of data blocks.
+ * To minimise fragmentation and give reasonable memory utilisation,
+ * data buffers are allocated with sizes that are power-of-2 multiples
+ * of the block size, starting on an address that is a multiple of the
+ * allocated size.
+ *
+ * FIXME: callers of this function require a buffer to be allocated for
+ * len=0. This is a waste of on-chip memory and should be fixed. Then this
+ * function should be changed to not allocate a buffer for len=0.
+ */
+static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
+{
+	int n_blocks;	/* minimum blocks needed to hold len */
+	int a_blocks;	/* blocks allocated */
+	int i, j;
+
+	/* Don't allocate buffers bigger than supported */
+	if (len > BUFFER_SIZE * BUFFER_NUM) {
+		oxu_err(oxu, "buffer too big (%d)\n", len);
+		return -ENOMEM;
+	}
+
+	spin_lock(&oxu->mem_lock);
+
+	/* Number of blocks needed to hold len */
+	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
+
+	/* Round the number of blocks up to the power of 2 */
+	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
+		;
+
+	/* Find a suitable available data buffer */
+	for (i = 0; i < BUFFER_NUM;
+			i += max(a_blocks, (int)oxu->db_used[i])) {
+
+		/* Check all the required blocks are available */
+		for (j = 0; j < a_blocks; j++)
+			if (oxu->db_used[i + j])
+				break;
+
+		if (j != a_blocks)
+			continue;
+
+		/* Allocate blocks found! */
+		qtd->buffer = (void *) &oxu->mem->db_pool[i];
+		qtd->buffer_dma = virt_to_phys(qtd->buffer);
+
+		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
+		oxu->db_used[i] = a_blocks;
+
+		spin_unlock(&oxu->mem_lock);
+
+		return 0;
+	}
+
+	/* Failed */
+
+	spin_unlock(&oxu->mem_lock);
+
+	return -ENOMEM;
+}
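+
+/*
+ * Worked example (editor's note, with a hypothetical BUFFER_SIZE of
+ * 1024): len = 3000 gives n_blocks = 3, which rounds up to a_blocks =
+ * 4; since the scan index only advances in steps of at least a_blocks,
+ * the 4-block buffer stays naturally aligned within the pool.
+ */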
+
+static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
+							 / BUFFER_SIZE;
+	oxu->db_used[index] = 0;
+	qtd->qtd_buffer_len = 0;
+	qtd->buffer_dma = 0;
+	qtd->buffer = NULL;
+
+	spin_unlock(&oxu->mem_lock);
+}
+
+static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
+{
+	memset(qtd, 0, sizeof *qtd);
+	qtd->qtd_dma = dma;
+	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+	qtd->hw_next = EHCI_LIST_END;
+	qtd->hw_alt_next = EHCI_LIST_END;
+	INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+	int index;
+
+	if (qtd->buffer)
+		oxu_buf_free(oxu, qtd);
+
+	spin_lock(&oxu->mem_lock);
+
+	index = qtd - &oxu->mem->qtd_pool[0];
+	oxu->qtd_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+}
+
+static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct ehci_qtd *qtd = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < QTD_NUM; i++)
+		if (!oxu->qtd_used[i])
+			break;
+
+	if (i < QTD_NUM) {
+		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
+		memset(qtd, 0, sizeof *qtd);
+
+		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+		qtd->hw_next = EHCI_LIST_END;
+		qtd->hw_alt_next = EHCI_LIST_END;
+		INIT_LIST_HEAD(&qtd->qtd_list);
+
+		qtd->qtd_dma = virt_to_phys(qtd);
+
+		oxu->qtd_used[i] = 1;
+	}
+
+	spin_unlock(&oxu->mem_lock);
+
+	return qtd;
+}
+
+static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = qh - &oxu->mem->qh_pool[0];
+	oxu->qh_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+}
+
+static void qh_destroy(struct kref *kref)
+{
+	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
+	struct oxu_hcd *oxu = qh->oxu;
+
+	/* clean qtds first, and know this is not linked */
+	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+		oxu_dbg(oxu, "unused qh not empty!\n");
+		BUG();
+	}
+	if (qh->dummy)
+		oxu_qtd_free(oxu, qh->dummy);
+	oxu_qh_free(oxu, qh);
+}
+
+static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct ehci_qh *qh = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < QHEAD_NUM; i++)
+		if (!oxu->qh_used[i])
+			break;
+
+	if (i < QHEAD_NUM) {
+		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
+		memset(qh, 0, sizeof *qh);
+
+		kref_init(&qh->kref);
+		qh->oxu = oxu;
+		qh->qh_dma = virt_to_phys(qh);
+		INIT_LIST_HEAD(&qh->qtd_list);
+
+		/* dummy td enables safe urb queuing */
+		qh->dummy = ehci_qtd_alloc(oxu);
+		if (qh->dummy == NULL) {
+			oxu_dbg(oxu, "no dummy td\n");
+			oxu->qh_used[i] = 0;
+			qh = NULL;
+			goto unlock;
+		}
+
+		oxu->qh_used[i] = 1;
+	}
+unlock:
+	spin_unlock(&oxu->mem_lock);
+
+	return qh;
+}
+
+/* to share a qh (cpu threads, or hc) */
+static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
+{
+	kref_get(&qh->kref);
+	return qh;
+}
+
+static inline void qh_put(struct ehci_qh *qh)
+{
+	kref_put(&qh->kref, qh_destroy);
+}
+
+static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = murb - &oxu->murb_pool[0];
+	oxu->murb_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+}
+
+static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct oxu_murb *murb = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < MURB_NUM; i++)
+		if (!oxu->murb_used[i])
+			break;
+
+	if (i < MURB_NUM) {
+		murb = &(oxu->murb_pool)[i];
+
+		oxu->murb_used[i] = 1;
+	}
+
+	spin_unlock(&oxu->mem_lock);
+
+	return murb;
+}
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+static void ehci_mem_cleanup(struct oxu_hcd *oxu)
+{
+	kfree(oxu->murb_pool);
+	oxu->murb_pool = NULL;
+
+	if (oxu->async)
+		qh_put(oxu->async);
+	oxu->async = NULL;
+
+	del_timer(&oxu->urb_timer);
+
+	oxu->periodic = NULL;
+
+	/* shadow periodic table */
+	kfree(oxu->pshadow);
+	oxu->pshadow = NULL;
+}
+
+/* Remember to add cleanup code (above) if you add anything here.
+ */
+static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
+{
+	int i;
+
+	for (i = 0; i < oxu->periodic_size; i++)
+		oxu->mem->frame_list[i] = EHCI_LIST_END;
+	for (i = 0; i < QHEAD_NUM; i++)
+		oxu->qh_used[i] = 0;
+	for (i = 0; i < QTD_NUM; i++)
+		oxu->qtd_used[i] = 0;
+
+	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
+	if (!oxu->murb_pool)
+		goto fail;
+
+	for (i = 0; i < MURB_NUM; i++)
+		oxu->murb_used[i] = 0;
+
+	oxu->async = oxu_qh_alloc(oxu);
+	if (!oxu->async)
+		goto fail;
+
+	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
+	oxu->periodic_dma = virt_to_phys(oxu->periodic);
+
+	for (i = 0; i < oxu->periodic_size; i++)
+		oxu->periodic[i] = EHCI_LIST_END;
+
+	/* software shadow of hardware table */
+	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
+	if (oxu->pshadow != NULL)
+		return 0;
+
+fail:
+	oxu_dbg(oxu, "couldn't init memory\n");
+	ehci_mem_cleanup(oxu);
+	return -ENOMEM;
+}
+
+/* Fill a qtd, returning how much of the buffer we were able to queue up.
+ */
+static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
+				int token, int maxpacket)
+{
+	int i, count;
+	u64 addr = buf;
+
+	/* one buffer entry per 4K ... first might be short or unaligned */
+	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
+	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
+	if (likely(len < count))		/* ... iff needed */
+		count = len;
+	else {
+		buf +=  0x1000;
+		buf &= ~0x0fff;
+
+		/* per-qtd limit: from 16K to 20K (best alignment) */
+		for (i = 1; count < len && i < 5; i++) {
+			addr = buf;
+			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
+			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
+			buf += 0x1000;
+			if ((count + 0x1000) < len)
+				count += 0x1000;
+			else
+				count = len;
+		}
+
+		/* short packets may only terminate transfers */
+		if (count != len)
+			count -= (count % maxpacket);
+	}
+	qtd->hw_token = cpu_to_le32((count << 16) | token);
+	qtd->length = count;
+
+	return count;
+}
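+
+/*
+ * Worked example (editor's note): for buf 2 KB into a page and
+ * len = 12288, the first entry covers the remaining 2048 bytes of that
+ * page and three page-sized entries bring count up to len, so the
+ * transfer fits one qtd; only if all five entries fill short of len is
+ * count trimmed back to a maxpacket multiple.
+ */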
+
+static inline void qh_update(struct oxu_hcd *oxu,
+				struct ehci_qh *qh, struct ehci_qtd *qtd)
+{
+	/* writes to an active overlay are unsafe */
+	BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
+	qh->hw_alt_next = EHCI_LIST_END;
+
+	/* Except for control endpoints, we make hardware maintain data
+	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+	 * ever clear it.
+	 */
+	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+		unsigned	is_out, epnum;
+
+		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
+		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
+			usb_settoggle(qh->dev, epnum, is_out, 1);
+		}
+	}
+
+	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
+	wmb();
+	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* If it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	struct ehci_qtd *qtd;
+
+	if (list_empty(&qh->qtd_list))
+		qtd = qh->dummy;
+	else {
+		qtd = list_entry(qh->qtd_list.next,
+				struct ehci_qtd, qtd_list);
+		/* first qtd may already be partially processed */
+		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
+			qtd = NULL;
+	}
+
+	if (qtd)
+		qh_update(oxu, qh, qtd);
+}
+
+static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
+				size_t length, u32 token)
+{
+	/* count IN/OUT bytes, not SETUP (even short packets) */
+	if (likely(QTD_PID(token) != 2))
+		urb->actual_length += length - QTD_LENGTH(token);
+
+	/* don't modify error codes */
+	if (unlikely(urb->status != -EINPROGRESS))
+		return;
+
+	/* force cleanup after short read; not always an error */
+	if (unlikely(IS_SHORT_READ(token)))
+		urb->status = -EREMOTEIO;
+
+	/* serious "can't proceed" faults reported by the hardware */
+	if (token & QTD_STS_HALT) {
+		if (token & QTD_STS_BABBLE) {
+			/* FIXME "must" disable babbling device's port too */
+			urb->status = -EOVERFLOW;
+		} else if (token & QTD_STS_MMF) {
+			/* fs/ls interrupt xfer missed the complete-split */
+			urb->status = -EPROTO;
+		} else if (token & QTD_STS_DBE) {
+			urb->status = (QTD_PID(token) == 1) /* IN ? */
+				? -ENOSR  /* hc couldn't read data */
+				: -ECOMM; /* hc couldn't write data */
+		} else if (token & QTD_STS_XACT) {
+			/* timeout, bad crc, wrong PID, etc; retried */
+			if (QTD_CERR(token))
+				urb->status = -EPIPE;
+			else {
+				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
+					urb->dev->devpath,
+					usb_pipeendpoint(urb->pipe),
+					usb_pipein(urb->pipe) ? "in" : "out");
+				urb->status = -EPROTO;
+			}
+		/* CERR nonzero + no errors + halt --> stall */
+		} else if (QTD_CERR(token))
+			urb->status = -EPIPE;
+		else	/* unknown */
+			urb->status = -EPROTO;
+
+		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			token, urb->status);
+	}
+}
+
+static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
+__releases(oxu->lock)
+__acquires(oxu->lock)
+{
+	if (likely(urb->hcpriv != NULL)) {
+		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
+
+		/* S-mask in a QH means it's an interrupt urb */
+		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
+
+			/* ... update hc-wide periodic stats (for usbfs) */
+			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
+		}
+		qh_put(qh);
+	}
+
+	urb->hcpriv = NULL;
+	switch (urb->status) {
+	case -EINPROGRESS:		/* success */
+		urb->status = 0;
+	default:			/* fault */
+		break;
+	case -EREMOTEIO:		/* fault or normal */
+		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+			urb->status = 0;
+		break;
+	case -ECONNRESET:		/* canceled */
+	case -ENOENT:
+		break;
+	}
+
+#ifdef OXU_URB_TRACE
+	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
+		__func__, urb->dev->devpath, urb,
+		usb_pipeendpoint(urb->pipe),
+		usb_pipein(urb->pipe) ? "in" : "out",
+		urb->status,
+		urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+	/* complete() can reenter this HCD */
+	spin_unlock(&oxu->lock);
+	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
+	spin_lock(&oxu->lock);
+}
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
+
+/* Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current.  Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	struct ehci_qtd *last = NULL, *end = qh->dummy;
+	struct ehci_qtd	*qtd, *tmp;
+	int stopped;
+	unsigned count = 0;
+	int do_status = 0;
+	u8 state;
+	struct oxu_murb *murb = NULL;
+
+	if (unlikely(list_empty(&qh->qtd_list)))
+		return count;
+
+	/* completions (or tasks on other cpus) must never clobber HALT
+	 * till we've gone through and cleaned everything up, even when
+	 * they add urbs to this qh's queue or mark them for unlinking.
+	 *
+	 * NOTE:  unlinking expects to be done in queue order.
+	 */
+	state = qh->qh_state;
+	qh->qh_state = QH_STATE_COMPLETING;
+	stopped = (state == QH_STATE_IDLE);
+
+	/* remove de-activated QTDs from front of queue.
+	 * after faults (including short reads), cleanup this urb
+	 * then let the queue advance.
+	 * if queue is stopped, handles unlinks.
+	 */
+	list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
+		struct urb *urb;
+		u32 token = 0;
+
+		urb = qtd->urb;
+
+		/* Clean up any state from previous QTD ...*/
+		if (last) {
+			if (likely(last->urb != urb)) {
+				if (last->urb->complete == NULL) {
+					murb = (struct oxu_murb *) last->urb;
+					last->urb = murb->main;
+					if (murb->last) {
+						ehci_urb_done(oxu, last->urb);
+						count++;
+					}
+					oxu_murb_free(oxu, murb);
+				} else {
+					ehci_urb_done(oxu, last->urb);
+					count++;
+				}
+			}
+			oxu_qtd_free(oxu, last);
+			last = NULL;
+		}
+
+		/* ignore urbs submitted during completions we reported */
+		if (qtd == end)
+			break;
+
+		/* hardware copies qtd out of qh overlay */
+		rmb();
+		token = le32_to_cpu(qtd->hw_token);
+
+		/* always clean up qtds the hc de-activated */
+		if ((token & QTD_STS_ACTIVE) == 0) {
+
+			if ((token & QTD_STS_HALT) != 0) {
+				stopped = 1;
+
+			/* magic dummy for some short reads; qh won't advance.
+			 * that silicon quirk can kick in with this dummy too.
+			 */
+			} else if (IS_SHORT_READ(token) &&
+					!(qtd->hw_alt_next & EHCI_LIST_END)) {
+				stopped = 1;
+				goto halt;
+			}
+
+		/* stop scanning when we reach qtds the hc is using */
+		} else if (likely(!stopped &&
+				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
+			break;
+
+		} else {
+			stopped = 1;
+
+			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
+				urb->status = -ESHUTDOWN;
+
+			/* ignore active urbs unless some previous qtd
+			 * for the urb faulted (including short read) or
+			 * its urb was canceled.  we may patch qh or qtds.
+			 */
+			if (likely(urb->status == -EINPROGRESS))
+				continue;
+
+			/* issue status after short control reads */
+			if (unlikely(do_status != 0)
+					&& QTD_PID(token) == 0 /* OUT */) {
+				do_status = 0;
+				continue;
+			}
+
+			/* token in overlay may be most current */
+			if (state == QH_STATE_IDLE
+					&& cpu_to_le32(qtd->qtd_dma)
+						== qh->hw_current)
+				token = le32_to_cpu(qh->hw_token);
+
+			/* force halt for unlinked or blocked qh, so we'll
+			 * patch the qh later and so that completions can't
+			 * activate it while we "know" it's stopped.
+			 */
+			if ((HALT_BIT & qh->hw_token) == 0) {
+halt:
+				qh->hw_token |= HALT_BIT;
+				wmb();
+			}
+		}
+
+		/* Remove it from the queue */
+		qtd_copy_status(oxu, urb->complete ?
+					urb : ((struct oxu_murb *) urb)->main,
+				qtd->length, token);
+		if ((usb_pipein(qtd->urb->pipe)) &&
+				(NULL != qtd->transfer_buffer))
+			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
+		do_status = (urb->status == -EREMOTEIO)
+				&& usb_pipecontrol(urb->pipe);
+
+		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+			last = list_entry(qtd->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			last->hw_next = qtd->hw_next;
+		}
+		list_del(&qtd->qtd_list);
+		last = qtd;
+	}
+
+	/* last urb's completion might still need calling */
+	if (likely(last != NULL)) {
+		if (last->urb->complete == NULL) {
+			murb = (struct oxu_murb *) last->urb;
+			last->urb = murb->main;
+			if (murb->last) {
+				ehci_urb_done(oxu, last->urb);
+				count++;
+			}
+			oxu_murb_free(oxu, murb);
+		} else {
+			ehci_urb_done(oxu, last->urb);
+			count++;
+		}
+		oxu_qtd_free(oxu, last);
+	}
+
+	/* restore original state; caller must unlink or relink */
+	qh->qh_state = state;
+
+	/* be sure the hardware's done with the qh before refreshing
+	 * it after fault cleanup, or recovering from silicon wrongly
+	 * overlaying the dummy qtd (which reduces DMA chatter).
+	 */
+	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+		switch (state) {
+		case QH_STATE_IDLE:
+			qh_refresh(oxu, qh);
+			break;
+		case QH_STATE_LINKED:
+			/* should be rare for periodic transfers,
+			 * except maybe high bandwidth ...
+			 */
+			if ((cpu_to_le32(QH_SMASK)
+					& qh->hw_info2) != 0) {
+				intr_deschedule(oxu, qh);
+				(void) qh_schedule(oxu, qh);
+			} else
+				unlink_async(oxu, qh);
+			break;
+		/* otherwise, unlink already started */
+		}
+	}
+
+	return count;
+}
+
+/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
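+
+/*
+ * Worked example (editor's note): a high-bandwidth interrupt endpoint
+ * advertising wMaxPacketSize = 0x1400 yields hb_mult() = 3 and
+ * max_packet() = 0x400, i.e. three 1024-byte transactions per
+ * microframe.
+ */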
+
+/* Reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(struct oxu_hcd *oxu,
+				struct urb *urb, struct list_head *head)
+{
+	struct ehci_qtd	*qtd, *temp;
+
+	list_for_each_entry_safe(qtd, temp, head, qtd_list) {
+		list_del(&qtd->qtd_list);
+		oxu_qtd_free(oxu, qtd);
+	}
+}
+
+/* Create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
+						struct urb *urb,
+						struct list_head *head,
+						gfp_t flags)
+{
+	struct ehci_qtd	*qtd, *qtd_prev;
+	dma_addr_t buf;
+	int len, maxpacket;
+	int is_input;
+	u32 token;
+	void *transfer_buf = NULL;
+	int ret;
+
+	/*
+	 * URBs map to sequences of QTDs: one logical transaction
+	 */
+	qtd = ehci_qtd_alloc(oxu);
+	if (unlikely(!qtd))
+		return NULL;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+	/* for split transactions, SplitXState initialized to zero */
+
+	len = urb->transfer_buffer_length;
+	is_input = usb_pipein(urb->pipe);
+	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
+		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
+
+	if (usb_pipecontrol(urb->pipe)) {
+		/* SETUP pid */
+		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
+		if (ret)
+			goto cleanup;
+
+		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+		memcpy(qtd->buffer, qtd->urb->setup_packet,
+				sizeof(struct usb_ctrlrequest));
+
+		/* ... and always at least one more pid */
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc(oxu);
+		if (unlikely(!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+
+		/* for zero length DATA stages, STATUS is always IN */
+		if (len == 0)
+			token |= (1 /* "in" */ << 8);
+	}
+
+	/*
+	 * Data transfer stage: buffer setup
+	 */
+
+	ret = oxu_buf_alloc(oxu, qtd, len);
+	if (ret)
+		goto cleanup;
+
+	buf = qtd->buffer_dma;
+	transfer_buf = urb->transfer_buffer;
+
+	if (!is_input)
+		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
+
+	if (is_input)
+		token |= (1 /* "in" */ << 8);
+	/* else it's already initted to "out" pid (0 << 8) */
+
+	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last one may be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+	for (;;) {
+		int this_qtd_len;
+
+		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
+		qtd->transfer_buffer = transfer_buf;
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+		transfer_buf += this_qtd_len;
+		if (is_input)
+			qtd->hw_alt_next = oxu->async->hw_alt_next;
+
+		/* qh makes control packets use qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+			token ^= QTD_TOGGLE;
+
+		if (likely(len <= 0))
+			break;
+
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc(oxu);
+		if (unlikely(!qtd))
+			goto cleanup;
+		if (likely(len > 0)) {
+			ret = oxu_buf_alloc(oxu, qtd, len);
+			if (ret)
+				goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/* unless the bulk/interrupt caller wants a chance to clean
+	 * up after short reads, hc should advance qh past this urb
+	 */
+	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+				|| usb_pipecontrol(urb->pipe)))
+		qtd->hw_alt_next = EHCI_LIST_END;
+
+	/*
+	 * control requests may need a terminating data "status" ack;
+	 * bulk ones may need a terminating short packet (zero length).
+	 */
+	if (likely(urb->transfer_buffer_length != 0)) {
+		int	one_more = 0;
+
+		if (usb_pipecontrol(urb->pipe)) {
+			one_more = 1;
+			token ^= 0x0100;	/* "in" <--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+		} else if (usb_pipebulk(urb->pipe)
+				&& (urb->transfer_flags & URB_ZERO_PACKET)
+				&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = ehci_qtd_alloc(oxu);
+			if (unlikely(!qtd))
+				goto cleanup;
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+			list_add_tail(&qtd->qtd_list, head);
+
+			/* never any data in such packets */
+			qtd_fill(qtd, 0, 0, token, 0);
+		}
+	}
+
+	/* by default, enable interrupt on urb completion */
+	qtd->hw_token |= cpu_to_le32(QTD_IOC);
+	return head;
+
+cleanup:
+	qtd_list_free(oxu, urb, head);
+	return NULL;
+}
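+
+/*
+ * Editor's summary of the list built above for a control-IN request:
+ * a SETUP qtd (8 bytes, DATA0), one or more IN data qtds starting at
+ * DATA1, and a zero-length OUT status qtd forced to DATA1, with only
+ * the final qtd carrying QTD_IOC.
+ */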
+
+/* Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled.  For highspeed, that's
+ * just one microframe in the s-mask.  For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
+				struct urb *urb, gfp_t flags)
+{
+	struct ehci_qh *qh = oxu_qh_alloc(oxu);
+	u32 info1 = 0, info2 = 0;
+	int is_input, type;
+	int maxp = 0;
+
+	if (!qh)
+		return qh;
+
+	/*
+	 * init endpoint/device data for this QH
+	 */
+	info1 |= usb_pipeendpoint(urb->pipe) << 8;
+	info1 |= usb_pipedevice(urb->pipe) << 0;
+
+	is_input = usb_pipein(urb->pipe);
+	type = usb_pipetype(urb->pipe);
+	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+	/* Compute interrupt scheduling parameters just once, and save.
+	 * - allowing for high bandwidth, how many nsec/uframe are used?
+	 * - split transactions need a second CSPLIT uframe; same question
+	 * - splits also need a schedule gap (for full/low speed I/O)
+	 * - qh has a polling interval
+	 *
+	 * For control/bulk requests, the HC or TT handles these.
+	 */
+	if (type == PIPE_INTERRUPT) {
+		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+								is_input, 0,
+				hb_mult(maxp) * max_packet(maxp)));
+		qh->start = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->c_usecs = 0;
+			qh->gap_uf = 0;
+
+			qh->period = urb->interval >> 3;
+			if (qh->period == 0 && urb->interval != 1) {
+				/* NOTE interval 2 or 4 uframes could work.
+				 * But interval 1 scheduling is simpler, and
+				 * includes high bandwidth.
+				 */
+				oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
+					urb->interval);
+				goto done;
+			}
+		} else {
+			struct usb_tt	*tt = urb->dev->tt;
+			int		think_time;
+
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, maxp) / (125 * 1000);
+
+			/* FIXME this just approximates SPLIT/CSPLIT times */
+			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
+				qh->c_usecs = qh->usecs + HS_USECS(0);
+				qh->usecs = HS_USECS(1);
+			} else {		/* SPLIT+DATA, gap, CSPLIT */
+				qh->usecs += HS_USECS(1);
+				qh->c_usecs = HS_USECS(0);
+			}
+
+			think_time = tt ? tt->think_time : 0;
+			qh->tt_usecs = NS_TO_US(think_time +
+					usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, max_packet(maxp)));
+			qh->period = urb->interval;
+		}
+	}
+
+	/* support for tt scheduling, and access to toggles */
+	qh->dev = urb->dev;
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= (1 << 12);	/* EPS "low" */
+		/* FALL THROUGH */
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT)
+			info1 |= (EHCI_TUNE_RL_TT << 28);
+		if (type == PIPE_CONTROL) {
+			info1 |= (1 << 27);	/* for TT */
+			info1 |= 1 << 14;	/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (EHCI_TUNE_MULT_TT << 30);
+		info2 |= urb->dev->ttport << 23;
+
+		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+		break;
+
+	case USB_SPEED_HIGH:		/* no TT involved */
+		info1 |= (2 << 12);	/* EPS "high" */
+		if (type == PIPE_CONTROL) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
+			info1 |= 1 << 14;	/* toggle from qtd */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else if (type == PIPE_BULK) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else {		/* PIPE_INTERRUPT */
+			info1 |= max_packet(maxp) << 16;
+			info2 |= hb_mult(maxp) << 30;
+		}
+		break;
+	default:
+		oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
+done:
+		qh_put(qh);
+		return NULL;
+	}
+
+	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+	/* init as live, toggle clear, advance to dummy */
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_info1 = cpu_to_le32(info1);
+	qh->hw_info2 = cpu_to_le32(info2);
+	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+	qh_refresh(oxu, qh);
+	return qh;
+}
+
+/* Move qh (and its qtds) onto async queue; maybe enable queue.
+ */
+static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	__le32 dma = QH_NEXT(qh->qh_dma);
+	struct ehci_qh *head;
+
+	/* (re)start the async schedule? */
+	head = oxu->async;
+	timer_action_done(oxu, TIMER_ASYNC_OFF);
+	if (!head->qh_next.qh) {
+		u32	cmd = readl(&oxu->regs->command);
+
+		if (!(cmd & CMD_ASE)) {
+			/* in case a clear of CMD_ASE didn't take yet */
+			(void)handshake(oxu, &oxu->regs->status,
+					STS_ASS, 0, 150);
+			cmd |= CMD_ASE | CMD_RUN;
+			writel(cmd, &oxu->regs->command);
+			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+			/* posted write need not be known to HC yet ... */
+		}
+	}
+
+	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	if (qh->qh_state == QH_STATE_IDLE)
+		qh_refresh(oxu, qh);
+
+	/* splice right after start */
+	qh->qh_next = head->qh_next;
+	qh->hw_next = head->hw_next;
+	wmb();
+
+	head->qh_next.qh = qh;
+	head->hw_next = dma;
+
+	qh->qh_state = QH_STATE_LINKED;
+	/* qtd completions reported later by interrupt */
+}
+
+#define	QH_ADDR_MASK	cpu_to_le32(0x7f)
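+/* the low 7 bits of hw_info1 hold the USB device address */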
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns NULL if it can't allocate the QH it needs.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
+				struct urb *urb, struct list_head *qtd_list,
+				int epnum, void	**ptr)
+{
+	struct ehci_qh *qh;
+
+	qh = (struct ehci_qh *) *ptr;
+	if (unlikely(qh == NULL)) {
+		/* can't sleep here, we have oxu->lock... */
+		qh = qh_make(oxu, urb, GFP_ATOMIC);
+		*ptr = qh;
+	}
+	if (likely(qh != NULL)) {
+		struct ehci_qtd	*qtd;
+
+		if (unlikely(list_empty(qtd_list)))
+			qtd = NULL;
+		else
+			qtd = list_entry(qtd_list->next, struct ehci_qtd,
+					qtd_list);
+
+		/* control qh may need patching ... */
+		if (unlikely(epnum == 0)) {
+
+			/* usb_reset_device() briefly reverts to address 0 */
+			if (usb_pipedevice(urb->pipe) == 0)
+				qh->hw_info1 &= ~QH_ADDR_MASK;
+		}
+
+		/* just one way to queue requests: swap with the dummy qtd.
+		 * only hc or qh_refresh() ever modify the overlay.
+		 */
+		if (likely(qtd != NULL)) {
+			struct ehci_qtd	*dummy;
+			dma_addr_t dma;
+			__le32 token;
+
+			/* to avoid racing the HC, use the dummy td instead of
+			 * the first td of our list (becomes new dummy).  both
+			 * tds stay deactivated until we're done, when the
+			 * HC is allowed to fetch the old dummy (4.10.2).
+			 */
+			token = qtd->hw_token;
+			qtd->hw_token = HALT_BIT;
+			wmb();
+			dummy = qh->dummy;
+
+			dma = dummy->qtd_dma;
+			*dummy = *qtd;
+			dummy->qtd_dma = dma;
+
+			list_del(&qtd->qtd_list);
+			list_add(&dummy->qtd_list, qtd_list);
+			list_splice(qtd_list, qh->qtd_list.prev);
+
+			ehci_qtd_init(qtd, qtd->qtd_dma);
+			qh->dummy = qtd;
+
+			/* hc must see the new dummy at list end */
+			dma = qtd->qtd_dma;
+			qtd = list_entry(qh->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			qtd->hw_next = QTD_NEXT(dma);
+
+			/* let the hc process these next qtds */
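+			/* 0x80 is the ACTIVE bit: write the token with it
+			 * clear first, then set it, so the HC never sees a
+			 * partially initialized qtd
+			 */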
+			dummy->hw_token = (token & ~(0x80));
+			wmb();
+			dummy->hw_token = token;
+
+			urb->hcpriv = qh_get(qh);
+		}
+	}
+	return qh;
+}
+
+static int submit_async(struct oxu_hcd	*oxu, struct urb *urb,
+			struct list_head *qtd_list, gfp_t mem_flags)
+{
+	struct ehci_qtd	*qtd;
+	int epnum;
+	unsigned long flags;
+	struct ehci_qh *qh = NULL;
+	int rc = 0;
+
+	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+	epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef OXU_URB_TRACE
+	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+		__func__, urb->dev->devpath, urb,
+		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+		urb->transfer_buffer_length,
+		qtd, urb->ep->hcpriv);
+#endif
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
+		rc = -ESHUTDOWN;
+		goto done;
+	}
+
+	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	if (unlikely(qh == NULL)) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Control/bulk operations through TTs don't need scheduling,
+	 * the HC and TT handle it when the TT has a buffer ready.
+	 */
+	if (likely(qh->qh_state == QH_STATE_IDLE))
+		qh_link_async(oxu, qh_get(qh));
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	if (unlikely(qh == NULL))
+		qtd_list_free(oxu, urb, qtd_list);
+	return rc;
+}
+
+/* The async qh for the qtds being reclaimed is now unlinked from the HC */
+
+static void end_unlink_async(struct oxu_hcd *oxu)
+{
+	struct ehci_qh *qh = oxu->reclaim;
+	struct ehci_qh *next;
+
+	timer_action_done(oxu, TIMER_IAA_WATCHDOG);
+
+	qh->qh_state = QH_STATE_IDLE;
+	qh->qh_next.qh = NULL;
+	qh_put(qh);			/* refcount from reclaim */
+
+	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
+	next = qh->reclaim;
+	oxu->reclaim = next;
+	oxu->reclaim_ready = 0;
+	qh->reclaim = NULL;
+
+	qh_completions(oxu, qh);
+
+	if (!list_empty(&qh->qtd_list)
+			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+		qh_link_async(oxu, qh);
+	else {
+		qh_put(qh);		/* refcount from async list */
+
+		/* it's not free to turn the async schedule on/off; leave it
+		 * active but idle for a while once it empties.
+		 */
+		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
+				&& oxu->async->qh_next.qh == NULL)
+			timer_action(oxu, TIMER_ASYNC_OFF);
+	}
+
+	if (next) {
+		oxu->reclaim = NULL;
+		start_unlink_async(oxu, next);
+	}
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own oxu->lock */
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int cmd = readl(&oxu->regs->command);
+	struct ehci_qh *prev;
+
+#ifdef DEBUG
+	assert_spin_locked(&oxu->lock);
+	BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
+				&& qh->qh_state != QH_STATE_UNLINK_WAIT));
+#endif
+
+	/* stop async schedule right now? */
+	if (unlikely(qh == oxu->async)) {
+		/* can't get here without STS_ASS set */
+		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
+				&& !oxu->reclaim) {
+			/* ... and CMD_IAAD clear */
+			writel(cmd & ~CMD_ASE, &oxu->regs->command);
+			wmb();
+			/* handshake later, if we need to */
+			timer_action_done(oxu, TIMER_ASYNC_OFF);
+		}
+		return;
+	}
+
+	qh->qh_state = QH_STATE_UNLINK;
+	oxu->reclaim = qh = qh_get(qh);
+
+	prev = oxu->async;
+	while (prev->qh_next.qh != qh)
+		prev = prev->qh_next.qh;
+
+	prev->hw_next = qh->hw_next;
+	prev->qh_next = qh->qh_next;
+	wmb();
+
+	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
+		/* if (unlikely(qh->reclaim != 0))
+		 *	this will recurse, probably not much
+		 */
+		end_unlink_async(oxu);
+		return;
+	}
+
+	oxu->reclaim_ready = 0;
+	cmd |= CMD_IAAD;
+	writel(cmd, &oxu->regs->command);
+	(void) readl(&oxu->regs->command);
+	timer_action(oxu, TIMER_IAA_WATCHDOG);
+}
+
+static void scan_async(struct oxu_hcd *oxu)
+{
+	struct ehci_qh *qh;
+	enum ehci_timer_action action = TIMER_IO_WATCHDOG;
+
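+	/* bump the scan stamp, skipping 0 on wraparound */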
+	if (!++(oxu->stamp))
+		oxu->stamp++;
+	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
+rescan:
+	qh = oxu->async->qh_next.qh;
+	if (likely(qh != NULL)) {
+		do {
+			/* clean any finished work for this qh */
+			if (!list_empty(&qh->qtd_list)
+					&& qh->stamp != oxu->stamp) {
+				int temp;
+
+				/* unlinks could happen here; completion
+				 * reporting drops the lock.  rescan using
+				 * the latest schedule, but don't rescan
+				 * qhs we already finished (no looping).
+				 */
+				qh = qh_get(qh);
+				qh->stamp = oxu->stamp;
+				temp = qh_completions(oxu, qh);
+				qh_put(qh);
+				if (temp != 0)
+					goto rescan;
+			}
+
+			/* unlink idle entries, reducing HC PCI usage as well
+			 * as HCD schedule-scanning costs.  delay for any qh
+			 * we just scanned, there's a not-unusual case that it
+			 * doesn't stay idle for long.
+			 * (plus, avoids some kind of re-activation race.)
+			 */
+			if (list_empty(&qh->qtd_list)) {
+				if (qh->stamp == oxu->stamp)
+					action = TIMER_ASYNC_SHRINK;
+				else if (!oxu->reclaim
+					    && qh->qh_state == QH_STATE_LINKED)
+					start_unlink_async(oxu, qh);
+			}
+
+			qh = qh->qh_next.qh;
+		} while (qh);
+	}
+	if (action == TIMER_ASYNC_SHRINK)
+		timer_action(oxu, TIMER_ASYNC_SHRINK);
+}
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd/sitd
+ * @tag: hardware tag for type of this record
+ */
+static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
+						__le32 tag)
+{
+	switch (tag) {
+	default:
+	case Q_TYPE_QH:
+		return &periodic->qh->qh_next;
+	}
+}
+
+/* caller must hold oxu->lock */
+static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
+{
+	union ehci_shadow *prev_p = &oxu->pshadow[frame];
+	__le32 *hw_p = &oxu->periodic[frame];
+	union ehci_shadow here = *prev_p;
+
+	/* find predecessor of "ptr"; hw and shadow lists are in sync */
+	while (here.ptr && here.ptr != ptr) {
+		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
+		hw_p = here.hw_next;
+		here = *prev_p;
+	}
+	/* an interrupt entry (at list end) could have been shared */
+	if (!here.ptr)
+		return;
+
+	/* update shadow and hardware lists ... the old "next" pointers
+	 * from ptr may still be in use, the caller updates them.
+	 */
+	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
+	*hw_p = *here.hw_next;
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short periodic_usecs(struct oxu_hcd *oxu,
+					unsigned frame, unsigned uframe)
+{
+	__le32 *hw_p = &oxu->periodic[frame];
+	union ehci_shadow *q = &oxu->pshadow[frame];
+	unsigned usecs = 0;
+
+	while (q->ptr) {
+		switch (Q_NEXT_TYPE(*hw_p)) {
+		case Q_TYPE_QH:
+		default:
+			/* is it in the S-mask? */
+			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
+				usecs += q->qh->usecs;
+			/* ... or C-mask? */
+			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
+				usecs += q->qh->c_usecs;
+			hw_p = &q->qh->hw_next;
+			q = &q->qh->qh_next;
+			break;
+		}
+	}
+#ifdef DEBUG
+	if (usecs > 100)
+		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
+						frame * 8 + uframe, usecs);
+#endif
+	return usecs;
+}
+
+static int enable_periodic(struct oxu_hcd *oxu)
+{
+	u32 cmd;
+	int status;
+
+	/* did clearing PSE take effect yet?
+	 * takes effect only at frame boundaries,
+	 * so wait up to 9 uframes (1.125 msec)
+	 */
+	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
+	if (status != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		usb_hc_died(oxu_to_hcd(oxu));
+		return status;
+	}
+
+	cmd = readl(&oxu->regs->command) | CMD_PSE;
+	writel(cmd, &oxu->regs->command);
+	/* posted write ... PSS happens later */
+	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+
+	/* make sure ehci_work scans these */
+	oxu->next_uframe = readl(&oxu->regs->frame_index)
+		% (oxu->periodic_size << 3);
+	return 0;
+}
+
+static int disable_periodic(struct oxu_hcd *oxu)
+{
+	u32 cmd;
+	int status;
+
+	/* did setting PSE take effect yet?
+	 * takes effect only at frame boundaries...
+	 */
+	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
+	if (status != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		usb_hc_died(oxu_to_hcd(oxu));
+		return status;
+	}
+
+	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
+	writel(cmd, &oxu->regs->command);
+	/* posted write ... */
+
+	oxu->next_uframe = -1;
+	return 0;
+}
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; oxu 0.96+)
+ */
+static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned i;
+	unsigned period = qh->period;
+
+	dev_dbg(&qh->dev->dev,
+		"link qh%d-%04x/%p start %d [%d/%d us]\n",
+		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		qh, qh->start, qh->usecs, qh->c_usecs);
+
+	/* high bandwidth, or otherwise every microframe */
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->start; i < oxu->periodic_size; i += period) {
+		union ehci_shadow	*prev = &oxu->pshadow[i];
+		__le32			*hw_p = &oxu->periodic[i];
+		union ehci_shadow	here = *prev;
+		__le32			type = 0;
+
+		/* skip the iso nodes at list head */
+		while (here.ptr) {
+			type = Q_NEXT_TYPE(*hw_p);
+			if (type == Q_TYPE_QH)
+				break;
+			prev = periodic_next_shadow(prev, type);
+			hw_p = &here.qh->hw_next;
+			here = *prev;
+		}
+
+		/* sorting each branch by period (slow-->fast)
+		 * enables sharing interior tree nodes
+		 */
+		while (here.ptr && qh != here.qh) {
+			if (qh->period > here.qh->period)
+				break;
+			prev = &here.qh->qh_next;
+			hw_p = &here.qh->hw_next;
+			here = *prev;
+		}
+		/* link in this qh, unless some earlier pass did that */
+		if (qh != here.qh) {
+			qh->qh_next = here;
+			if (here.qh)
+				qh->hw_next = *hw_p;
+			wmb();
+			prev->qh = qh;
+			*hw_p = QH_NEXT(qh->qh_dma);
+		}
+	}
+	qh->qh_state = QH_STATE_LINKED;
+	qh_get(qh);
+
+	/* update per-qh bandwidth for usbfs */
+	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	/* maybe enable periodic schedule processing */
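+	/* periodic_sched counts linked periodic QHs; start the
+	 * hardware schedule only on the 0 -> 1 transition
+	 */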
+	if (!oxu->periodic_sched++)
+		return enable_periodic(oxu);
+
+	return 0;
+}
+
+static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned i;
+	unsigned period;
+
+	/* FIXME:
+	 *   IF this isn't high speed
+	 *   and this qh is active in the current uframe
+	 *   (and overlay token SplitXstate is false?)
+	 * THEN
+	 *   qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
+	 */
+
+	/* high bandwidth, or otherwise part of every microframe */
+	period = qh->period;
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->start; i < oxu->periodic_size; i += period)
+		periodic_unlink(oxu, i, qh);
+
+	/* update per-qh bandwidth for usbfs */
+	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	dev_dbg(&qh->dev->dev,
+		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+		qh->period,
+		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		qh, qh->start, qh->usecs, qh->c_usecs);
+
+	/* qh->qh_next still "live" to HC */
+	qh->qh_state = QH_STATE_UNLINK;
+	qh->qh_next.ptr = NULL;
+	qh_put(qh);
+
+	/* maybe turn off periodic schedule */
+	oxu->periodic_sched--;
+	if (!oxu->periodic_sched)
+		(void) disable_periodic(oxu);
+}
+
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned wait;
+
+	qh_unlink_periodic(oxu, qh);
+
+	/* simple/paranoid:  always delay, expecting the HC needs to read
+	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
+	 * expect hub_wq to clean up after any CSPLITs we won't issue.
+	 * active high speed queues may need bigger delays...
+	 */
+	if (list_empty(&qh->qtd_list)
+		|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
+		wait = 2;
+	else
+		wait = 55;	/* worst case: 3 * 1024 */
+
+	udelay(wait);
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_next = EHCI_LIST_END;
+	wmb();
+}
+
+static int check_period(struct oxu_hcd *oxu,
+			unsigned frame, unsigned uframe,
+			unsigned period, unsigned usecs)
+{
+	int claimed;
+
+	/* complete split running into next frame?
+	 * given FSTN support, we could sometimes check...
+	 */
+	if (uframe >= 8)
+		return 0;
+
+	/*
+	 * 80% periodic == 100 usec/uframe available
+	 * convert "usecs we need" to "max already claimed"
+	 */
+	usecs = 100 - usecs;
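+	/* e.g. needing 30 usecs means at most 70 usecs may already be claimed */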
+
+	/* we "know" 2 and 4 uframe intervals were rejected; so
+	 * for period 0, check _every_ microframe in the schedule.
+	 */
+	if (unlikely(period == 0)) {
+		do {
+			for (uframe = 0; uframe < 7; uframe++) {
+				claimed = periodic_usecs(oxu, frame, uframe);
+				if (claimed > usecs)
+					return 0;
+			}
+		} while ((frame += 1) < oxu->periodic_size);
+
+	/* just check the specified uframe, at that period */
+	} else {
+		do {
+			claimed = periodic_usecs(oxu, frame, uframe);
+			if (claimed > usecs)
+				return 0;
+		} while ((frame += period) < oxu->periodic_size);
+	}
+
+	return 1;
+}
+
+static int check_intr_schedule(struct oxu_hcd	*oxu,
+				unsigned frame, unsigned uframe,
+				const struct ehci_qh *qh, __le32 *c_maskp)
+{
+	int retval = -ENOSPC;
+
+	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
+		goto done;
+
+	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = 0;
+		goto done;
+	}
+
+done:
+	return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int		status;
+	unsigned	uframe;
+	__le32		c_mask;
+	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */
+
+	qh_refresh(oxu, qh);
+	qh->hw_next = EHCI_LIST_END;
+	frame = qh->start;
+
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
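+		/* ffs() is 1-based; the decrement below yields the 0-based uframe */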
+		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
+		status = check_intr_schedule(oxu, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
+		status = -ENOSPC;
+	}
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		/* "normal" case, uframing flexible except with splits */
+		if (qh->period) {
+			frame = qh->period - 1;
+			do {
+				for (uframe = 0; uframe < 8; uframe++) {
+					status = check_intr_schedule(oxu,
+							frame, uframe, qh,
+							&c_mask);
+					if (status == 0)
+						break;
+				}
+			} while (status && frame--);
+
+		/* qh->period == 0 means every uframe */
+		} else {
+			frame = 0;
+			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
+		}
+		if (status)
+			goto done;
+		qh->start = frame;
+
+		/* reset S-frame and (maybe) C-frame masks */
+		qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
+		qh->hw_info2 |= qh->period
+			? cpu_to_le32(1 << uframe)
+			: cpu_to_le32(QH_SMASK);
+		qh->hw_info2 |= c_mask;
+	} else
+		oxu_dbg(oxu, "reused qh %p schedule\n", qh);
+
+	/* stuff into the periodic schedule */
+	status = qh_link_periodic(oxu, qh);
+done:
+	return status;
+}
+
+static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
+			struct list_head *qtd_list, gfp_t mem_flags)
+{
+	unsigned epnum;
+	unsigned long flags;
+	struct ehci_qh *qh;
+	int status = 0;
+	struct list_head	empty;
+
+	/* get endpoint and transfer/schedule data */
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+
+	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
+		status = -ESHUTDOWN;
+		goto done;
+	}
+
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD(&empty);
+	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
+	if (qh == NULL) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		status = qh_schedule(oxu, qh);
+		if (status != 0)
+			goto done;
+	}
+
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	BUG_ON(qh == NULL);
+
+	/* ... update usbfs periodic stats */
+	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
+
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	if (status)
+		qtd_list_free(oxu, urb, qtd_list);
+
+	return status;
+}
+
+static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
+						gfp_t mem_flags)
+{
+	oxu_dbg(oxu, "iso support is missing!\n");
+	return -ENOSYS;
+}
+
+static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
+						gfp_t mem_flags)
+{
+	oxu_dbg(oxu, "split iso support is missing!\n");
+	return -ENOSYS;
+}
+
+static void scan_periodic(struct oxu_hcd *oxu)
+{
+	unsigned frame, clock, now_uframe, mod;
+	unsigned modified;
+
+	mod = oxu->periodic_size << 3;
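+	/* mod: length of the periodic schedule, in uframes */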
+
+	/*
+	 * When running, scan from last scan point up to "now"
+	 * else clean up by scanning everything that's left.
+	 * Touches as few pages as possible:  cache-friendly.
+	 */
+	now_uframe = oxu->next_uframe;
+	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+		clock = readl(&oxu->regs->frame_index);
+	else
+		clock = now_uframe + mod - 1;
+	clock %= mod;
+
+	for (;;) {
+		union ehci_shadow	q, *q_p;
+		__le32			type, *hw_p;
+		unsigned		uframes;
+
+		/* don't scan past the live uframe */
+		frame = now_uframe >> 3;
+		if (frame == (clock >> 3))
+			uframes = now_uframe & 0x07;
+		else {
+			/* safe to scan the whole frame at once */
+			now_uframe |= 0x07;
+			uframes = 8;
+		}
+
+restart:
+		/* scan each element in frame's queue for completions */
+		q_p = &oxu->pshadow[frame];
+		hw_p = &oxu->periodic[frame];
+		q.ptr = q_p->ptr;
+		type = Q_NEXT_TYPE(*hw_p);
+		modified = 0;
+
+		while (q.ptr != NULL) {
+			union ehci_shadow temp;
+
+			switch (type) {
+			case Q_TYPE_QH:
+				/* handle any completions */
+				temp.qh = qh_get(q.qh);
+				type = Q_NEXT_TYPE(q.qh->hw_next);
+				q = q.qh->qh_next;
+				modified = qh_completions(oxu, temp.qh);
+				if (unlikely(list_empty(&temp.qh->qtd_list)))
+					intr_deschedule(oxu, temp.qh);
+				qh_put(temp.qh);
+				break;
+			default:
+				oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
+					type, frame, q.ptr);
+				q.ptr = NULL;
+			}
+
+			/* assume completion callbacks modify the queue */
+			if (unlikely(modified))
+				goto restart;
+		}
+
+		/* Stop when we catch up to the HC */
+
+		/* FIXME:  this assumes we won't get lapped when
+		 * latencies climb; that should be rare, but...
+		 * detect it, and just go all the way around.
+		 * FLR might help detect this case, so long as latencies
+		 * don't exceed periodic_size msec (default 1.024 sec).
+		 */
+
+		/* FIXME: likewise assumes HC doesn't halt mid-scan */
+
+		if (now_uframe == clock) {
+			unsigned	now;
+
+			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+				break;
+			oxu->next_uframe = now_uframe;
+			now = readl(&oxu->regs->frame_index) % mod;
+			if (now_uframe == now)
+				break;
+
+			/* rescan the rest of this frame, then ... */
+			clock = now;
+		} else {
+			now_uframe++;
+			now_uframe %= mod;
+		}
+	}
+}
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
+{
+	int port = HCS_N_PORTS(oxu->hcs_params);
+
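+	/* writing just the RWC bits clears those change flags and writes
+	 * zero to everything else, including PORT_POWER and the
+	 * wake-enable bits
+	 */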
+	while (port--)
+		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
+}
+
+static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
+{
+	unsigned port;
+
+	if (!HCS_PPC(oxu->hcs_params))
+		return;
+
+	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
+		(void) oxu_hub_control(oxu_to_hcd(oxu),
+				is_on ? SetPortFeature : ClearPortFeature,
+				USB_PORT_FEAT_POWER,
+				port--, NULL, 0);
+	msleep(20);
+}
+
+/* Called from some interrupts, timers, and so on.
+ * It calls driver completion functions, after dropping oxu->lock.
+ */
+static void ehci_work(struct oxu_hcd *oxu)
+{
+	timer_action_done(oxu, TIMER_IO_WATCHDOG);
+	if (oxu->reclaim_ready)
+		end_unlink_async(oxu);
+
+	/* another CPU may drop oxu->lock during a schedule scan while
+	 * it reports urb completions.  this flag guards against bogus
+	 * attempts at re-entrant schedule scanning.
+	 */
+	if (oxu->scanning)
+		return;
+	oxu->scanning = 1;
+	scan_async(oxu);
+	if (oxu->next_uframe != -1)
+		scan_periodic(oxu);
+	oxu->scanning = 0;
+
+	/* the IO watchdog guards against hardware or driver bugs that
+	 * misplace IRQs, and should let us run completely without IRQs.
+	 * such lossage has been observed on both VT6202 and VT8235.
+	 */
+	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
+			(oxu->async->qh_next.ptr != NULL ||
+			 oxu->periodic_sched != 0))
+		timer_action(oxu, TIMER_IO_WATCHDOG);
+}
+
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	/* if we need to use IAA and it's busy, defer */
+	if (qh->qh_state == QH_STATE_LINKED
+			&& oxu->reclaim
+			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
+		struct ehci_qh		*last;
+
+		for (last = oxu->reclaim;
+				last->reclaim;
+				last = last->reclaim)
+			continue;
+		qh->qh_state = QH_STATE_UNLINK_WAIT;
+		last->reclaim = qh;
+
+	/* bypass IAA if the hc can't care */
+	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
+		end_unlink_async(oxu);
+
+	/* something else might have unlinked the qh by now */
+	if (qh->qh_state == QH_STATE_LINKED)
+		start_unlink_async(oxu, qh);
+}
+
+/*
+ * USB host controller methods
+ */
+
+static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 status, pcd_status = 0;
+	int bh;
+
+	spin_lock(&oxu->lock);
+
+	status = readl(&oxu->regs->status);
+
+	/* e.g. cardbus physical eject */
+	if (status == ~(u32) 0) {
+		oxu_dbg(oxu, "device removed\n");
+		goto dead;
+	}
+
+	/* Shared IRQ? */
+	status &= INTR_MASK;
+	if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
+		spin_unlock(&oxu->lock);
+		return IRQ_NONE;
+	}
+
+	/* clear (just) interrupts */
+	writel(status, &oxu->regs->status);
+	readl(&oxu->regs->command);	/* unblock posted write */
+	bh = 0;
+
+#ifdef OXU_VERBOSE_DEBUG
+	/* unrequested/ignored: Frame List Rollover */
+	dbg_status(oxu, "irq", status);
+#endif
+
+	/* INT, ERR, and IAA interrupt rates can be throttled */
+
+	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+	if (likely((status & (STS_INT|STS_ERR)) != 0))
+		bh = 1;
+
+	/* complete the unlinking of some qh [4.15.2.3] */
+	if (status & STS_IAA) {
+		oxu->reclaim_ready = 1;
+		bh = 1;
+	}
+
+	/* remote wakeup [4.3.1] */
+	if (status & STS_PCD) {
+		unsigned i = HCS_N_PORTS(oxu->hcs_params);
+		pcd_status = status;
+
+		/* resume root hub? */
+		if (!(readl(&oxu->regs->command) & CMD_RUN))
+			usb_hcd_resume_root_hub(hcd);
+
+		while (i--) {
+			int pstatus = readl(&oxu->regs->port_status[i]);
+
+			if (pstatus & PORT_OWNER)
+				continue;
+			if (!(pstatus & PORT_RESUME)
+					|| oxu->reset_done[i] != 0)
+				continue;
+
+			/* start USB_RESUME_TIMEOUT resume signaling from this
+			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
+			 * stop that signaling.
+			 */
+			oxu->reset_done[i] = jiffies +
+				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+		}
+	}
+
+	/* PCI errors [4.15.2.4] */
+	if (unlikely((status & STS_FATAL) != 0)) {
+		/* bogus "fatal" IRQs appear on some chips... why?  */
+		status = readl(&oxu->regs->status);
+		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
+		dbg_status(oxu, "fatal", status);
+		if (status & STS_HALT) {
+			oxu_err(oxu, "fatal error\n");
+dead:
+			ehci_reset(oxu);
+			writel(0, &oxu->regs->configured_flag);
+			usb_hc_died(hcd);
+			/* generic layer kills/unlinks all urbs, then
+			 * uses oxu_stop to clean up the rest
+			 */
+			bh = 1;
+		}
+	}
+
+	if (bh)
+		ehci_work(oxu);
+	spin_unlock(&oxu->lock);
+	if (pcd_status & STS_PCD)
+		usb_hcd_poll_rh_status(hcd);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t oxu_irq(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int ret = IRQ_HANDLED;
+
+	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
+	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
+
+	/* Disable all interrupts */
+	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
+
+	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
+		(!oxu->is_otg && (status & OXU_USBSPHI)))
+		oxu210_hcd_irq(hcd);
+	else
+		ret = IRQ_NONE;
+
+	/* Re-enable all interrupts */
+	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
+
+	return ret;
+}
+
+static void oxu_watchdog(struct timer_list *t)
+{
+	struct oxu_hcd	*oxu = from_timer(oxu, t, watchdog);
+	unsigned long flags;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+
+	/* lost IAA irqs wedge things badly; seen with a vt8235 */
+	if (oxu->reclaim) {
+		u32 status = readl(&oxu->regs->status);
+		if (status & STS_IAA) {
+			oxu_vdbg(oxu, "lost IAA\n");
+			writel(STS_IAA, &oxu->regs->status);
+			oxu->reclaim_ready = 1;
+		}
+	}
+
+	/* stop async processing after it's idled a bit */
+	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
+		start_unlink_async(oxu, oxu->async);
+
+	/* oxu could run by timer, without IRQs ... */
+	ehci_work(oxu);
+
+	spin_unlock_irqrestore(&oxu->lock, flags);
+}
+
+/* One-time init, only for memory state.
+ */
+static int oxu_hcd_init(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp;
+	int retval;
+	u32 hcc_params;
+
+	spin_lock_init(&oxu->lock);
+
+	timer_setup(&oxu->watchdog, oxu_watchdog, 0);
+
+	/*
+	 * hw default: 1K periodic list heads, one per frame.
+	 * periodic_size can shrink by USBCMD update if hcc_params allows.
+	 */
+	oxu->periodic_size = DEFAULT_I_TDPS;
+	retval = ehci_mem_init(oxu, GFP_KERNEL);
+	if (retval < 0)
+		return retval;
+
+	/* controllers may cache some of the periodic schedule ... */
+	hcc_params = readl(&oxu->caps->hcc_params);
+	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
+		oxu->i_thresh = 8;
+	else					/* N microframes cached */
+		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
+
+	oxu->reclaim = NULL;
+	oxu->reclaim_ready = 0;
+	oxu->next_uframe = -1;
+
+	/*
+	 * dedicate a qh for the async ring head, since we couldn't unlink
+	 * a 'real' qh without stopping the async schedule [4.8].  use it
+	 * as the 'reclamation list head' too.
+	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
+	 * from automatically advancing to the next td after short reads.
+	 */
+	oxu->async->qh_next.qh = NULL;
+	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
+	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
+	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
+	oxu->async->hw_qtd_next = EHCI_LIST_END;
+	oxu->async->qh_state = QH_STATE_LINKED;
+	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
+
+	/* clear interrupt enables, set irq latency */
+	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+		log2_irq_thresh = 0;
+	temp = 1 << (16 + log2_irq_thresh);
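+	/* USBCMD bits 23:16 are the interrupt threshold, in uframes:
+	 * this programs 2^log2_irq_thresh uframes between interrupts
+	 */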
+	if (HCC_CANPARK(hcc_params)) {
+		/* HW default park == 3, on hardware that supports it (like
+		 * NVidia and ALI silicon), maximizes throughput on the async
+		 * schedule by avoiding QH fetches between transfers.
+		 *
+		 * With fast usb storage devices and NForce2, "park" seems to
+		 * make problems:  throughput reduction (!), data errors...
+		 */
+		if (park) {
+			park = min(park, (unsigned) 3);
+			temp |= CMD_PARK;
+			temp |= park << 8;
+		}
+		oxu_dbg(oxu, "park %d\n", park);
+	}
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		temp &= ~(3 << 2);
+		temp |= (EHCI_TUNE_FLS << 2);
+	}
+	oxu->command = temp;
+
+	return 0;
+}
+
+/* Called during probe() after chip reset completes.
+ */
+static int oxu_reset(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	spin_lock_init(&oxu->mem_lock);
+	INIT_LIST_HEAD(&oxu->urb_list);
+	oxu->urb_len = 0;
+
+	/* FIXME */
+	hcd->self.controller->dma_mask = NULL;
+
+	if (oxu->is_otg) {
+		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
+		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
+			HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+		oxu->mem = hcd->regs + OXU_SPH_MEM;
+	} else {
+		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
+		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
+			HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+		oxu->mem = hcd->regs + OXU_OTG_MEM;
+	}
+
+	oxu->hcs_params = readl(&oxu->caps->hcs_params);
+	oxu->sbrn = 0x20;
+
+	return oxu_hcd_init(hcd);
+}
+
+static int oxu_run(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int retval;
+	u32 temp, hcc_params;
+
+	hcd->uses_new_polling = 1;
+
+	/* EHCI spec section 4.1 */
+	retval = ehci_reset(oxu);
+	if (retval != 0) {
+		ehci_mem_cleanup(oxu);
+		return retval;
+	}
+	writel(oxu->periodic_dma, &oxu->regs->frame_list);
+	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+	/* hcc_params controls whether oxu->regs->segment must (!!!)
+	 * be used; it constrains QH/ITD/SITD and QTD locations.
+	 * dma_pool consistent memory always uses segment zero.
+	 * streaming mappings for I/O buffers, like pci_map_single(),
+	 * can return segments above 4GB, if the device allows.
+	 *
+	 * NOTE:  the dma mask is visible through dev->dma_mask, so
+	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+	 * host side drivers though.
+	 */
+	hcc_params = readl(&oxu->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params))
+		writel(0, &oxu->regs->segment);
+
+	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
+				CMD_ASE | CMD_RESET);
+	oxu->command |= CMD_RUN;
+	writel(oxu->command, &oxu->regs->command);
+	dbg_cmd(oxu, "init", oxu->command);
+
+	/*
+	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+	 * are explicitly handed to companion controller(s), so no TT is
+	 * involved with the root hub.  (Except where one is integrated,
+	 * and there's no companion controller unless maybe for USB OTG.)
+	 */
+	hcd->state = HC_STATE_RUNNING;
+	writel(FLAG_CF, &oxu->regs->configured_flag);
+	readl(&oxu->regs->command);	/* unblock posted writes */
+
+	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
+	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
+		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
+		temp >> 8, temp & 0xff, DRIVER_VERSION,
+		ignore_oc ? ", overcurrent ignored" : "");
+
+	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
+
+	return 0;
+}
+
+static void oxu_stop(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	/* Turn off port power on all root hub ports. */
+	ehci_port_power(oxu, 0);
+
+	/* no more interrupts ... */
+	del_timer_sync(&oxu->watchdog);
+
+	spin_lock_irq(&oxu->lock);
+	if (HC_IS_RUNNING(hcd->state))
+		ehci_quiesce(oxu);
+
+	ehci_reset(oxu);
+	writel(0, &oxu->regs->intr_enable);
+	spin_unlock_irq(&oxu->lock);
+
+	/* let companion controllers work when we aren't */
+	writel(0, &oxu->regs->configured_flag);
+
+	/* root hub is shut down separately (first, when possible) */
+	spin_lock_irq(&oxu->lock);
+	if (oxu->async)
+		ehci_work(oxu);
+	spin_unlock_irq(&oxu->lock);
+	ehci_mem_cleanup(oxu);
+
+	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
+}
+
+/* Kicks in for silicon on any bus (not just PCI).
+ * This forcibly disables DMA and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void oxu_shutdown(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	(void) ehci_halt(oxu);
+	ehci_turn_off_all_ports(oxu);
+
+	/* make BIOS/etc use companion controller during reboot */
+	writel(0, &oxu->regs->configured_flag);
+
+	/* unblock posted writes */
+	readl(&oxu->regs->configured_flag);
+}
+
+/* Non-error returns are a promise to giveback() the urb later;
+ * we drop ownership so the next owner (or urb unlink) can get it.
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+				gfp_t mem_flags)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	struct list_head qtd_list;
+
+	INIT_LIST_HEAD(&qtd_list);
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return submit_async(oxu, urb, &qtd_list, mem_flags);
+
+	case PIPE_INTERRUPT:
+		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return intr_submit(oxu, urb, &qtd_list, mem_flags);
+
+	case PIPE_ISOCHRONOUS:
+		if (urb->dev->speed == USB_SPEED_HIGH)
+			return itd_submit(oxu, urb, mem_flags);
+		else
+			return sitd_submit(oxu, urb, mem_flags);
+	}
+}
+
+/* This function breaks URBs with large transfer buffers into smaller
+ * chunks and processes the resulting micro URBs in sequence.
+ */
+static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+				gfp_t mem_flags)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int num, rem;
+	int transfer_buffer_length;
+	void *transfer_buffer;
+	struct urb *murb;
+	int i, ret;
+
+	/* If not bulk pipe just enqueue the URB */
+	if (!usb_pipebulk(urb->pipe))
+		return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+	/* Otherwise we should verify the USB transfer buffer size! */
+	transfer_buffer = urb->transfer_buffer;
+	transfer_buffer_length = urb->transfer_buffer_length;
+
+	num = urb->transfer_buffer_length / 4096;
+	rem = urb->transfer_buffer_length % 4096;
+	if (rem != 0)
+		num++;
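+	/* e.g. a 10000 byte transfer gives num = 3 (4096 + 4096 + 1808) */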
+
+	/* If URB is smaller than 4096 bytes just enqueue it! */
+	if (num == 1)
+		return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+	/* Ok, we have more work to do! :) */
+
+	for (i = 0; i < num - 1; i++) {
+		/* Get a free micro URB; poll until one is available */
+
+		do {
+			murb = (struct urb *) oxu_murb_alloc(oxu);
+			if (!murb)
+				schedule();
+		} while (!murb);
+
+		/* Copy the urb */
+		memcpy(murb, urb, sizeof(struct urb));
+
+		murb->transfer_buffer_length = 4096;
+		murb->transfer_buffer = transfer_buffer + i * 4096;
+
+		/* A NULL completion pointer encodes that this is a micro urb */
+		murb->complete = NULL;
+
+		((struct oxu_murb *) murb)->main = urb;
+		((struct oxu_murb *) murb)->last = 0;
+
+		/* Retry until the urb is accepted, guaranteeing it gets
+		 * processed even when resources are temporarily short.
+		 */
+		do {
+			ret  = __oxu_urb_enqueue(hcd, murb, mem_flags);
+			if (ret)
+				schedule();
+		} while (ret);
+	}
+
+	/* The last urb requires special handling */
+
+	/* Get a free micro URB; poll until one is available */
+	do {
+		murb = (struct urb *) oxu_murb_alloc(oxu);
+		if (!murb)
+			schedule();
+	} while (!murb);
+
+	/* Copy the urb */
+	memcpy(murb, urb, sizeof(struct urb));
+
+	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
+	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
+
+	/* A NULL completion pointer encodes that this is a micro urb */
+	murb->complete = NULL;
+
+	((struct oxu_murb *) murb)->main = urb;
+	((struct oxu_murb *) murb)->last = 1;
+
+	do {
+		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
+		if (ret)
+			schedule();
+	} while (ret);
+
+	return ret;
+}
+
+/* Remove from hardware lists.
+ * Completions normally happen asynchronously
+ */
+static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	struct ehci_qh *qh;
+	unsigned long flags;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		qh = (struct ehci_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		unlink_async(oxu, qh);
+		break;
+
+	case PIPE_INTERRUPT:
+		qh = (struct ehci_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		switch (qh->qh_state) {
+		case QH_STATE_LINKED:
+			intr_deschedule(oxu, qh);
+			/* FALL THROUGH */
+		case QH_STATE_IDLE:
+			qh_completions(oxu, qh);
+			break;
+		default:
+			oxu_dbg(oxu, "bogus qh %p state %d\n",
+					qh, qh->qh_state);
+			goto done;
+		}
+
+		/* reschedule QH iff another request is queued */
+		if (!list_empty(&qh->qtd_list)
+				&& HC_IS_RUNNING(hcd->state)) {
+			int status;
+
+			status = qh_schedule(oxu, qh);
+			spin_unlock_irqrestore(&oxu->lock, flags);
+
+			if (status != 0) {
+				/* shouldn't happen often, but ...
+				 * FIXME kill those tds' urbs
+				 */
+				dev_err(hcd->self.controller,
+					"can't reschedule qh %p, err %d\n", qh,
+					status);
+			}
+			return status;
+		}
+		break;
+	}
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return 0;
+}
+
+/* Bulk qh holds the data toggle */
+static void oxu_endpoint_disable(struct usb_hcd *hcd,
+					struct usb_host_endpoint *ep)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	unsigned long		flags;
+	struct ehci_qh		*qh, *tmp;
+
+	/* ASSERT:  any requests/urbs are being unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+
+rescan:
+	spin_lock_irqsave(&oxu->lock, flags);
+	qh = ep->hcpriv;
+	if (!qh)
+		goto done;
+
+	/* endpoints can be iso streams.  for now, we don't
+	 * accelerate iso completions ... so spin a while.
+	 */
+	if (qh->hw_info1 == 0) {
+		oxu_vdbg(oxu, "iso delay\n");
+		goto idle_timeout;
+	}
+
+	if (!HC_IS_RUNNING(hcd->state))
+		qh->qh_state = QH_STATE_IDLE;
+	switch (qh->qh_state) {
+	case QH_STATE_LINKED:
+		for (tmp = oxu->async->qh_next.qh;
+				tmp && tmp != qh;
+				tmp = tmp->qh_next.qh)
+			continue;
+		/* periodic qh self-unlinks on empty */
+		if (!tmp)
+			goto nogood;
+		unlink_async(oxu, qh);
+		/* FALL THROUGH */
+	case QH_STATE_UNLINK:		/* wait for hw to finish? */
+idle_timeout:
+		spin_unlock_irqrestore(&oxu->lock, flags);
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case QH_STATE_IDLE:		/* fully unlinked */
+		if (list_empty(&qh->qtd_list)) {
+			qh_put(qh);
+			break;
+		}
+		/* fall through */
+	default:
+nogood:
+		/* caller was supposed to have unlinked any requests;
+		 * that's not our job.  just leak this memory.
+		 */
+		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
+			qh, ep->desc.bEndpointAddress, qh->qh_state,
+			list_empty(&qh->qtd_list) ? "" : "(has tds)");
+		break;
+	}
+	ep->hcpriv = NULL;
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+}
+
+static int oxu_get_frame(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	return (readl(&oxu->regs->frame_index) >> 3) %
+		oxu->periodic_size;
+}
+
+/* Build "status change" packet (one or two bytes) from HC registers */
+static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp, mask, status = 0;
+	int ports, i, retval = 1;
+	unsigned long flags;
+
+	/* if !PM, root hub timers won't get shut down ... */
+	if (!HC_IS_RUNNING(hcd->state))
+		return 0;
+
+	/* init status to no-changes */
+	buf[0] = 0;
+	ports = HCS_N_PORTS(oxu->hcs_params);
+	if (ports > 7) {
+		buf[1] = 0;
+		retval++;
+	}
+
+	/* Some boards (mostly VIA?) report bogus overcurrent indications,
+	 * causing massive log spam unless we completely ignore them.  It
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
+	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+	 * PORT_POWER; that's surprising, but maybe within-spec.
+	 */
+	if (!ignore_oc)
+		mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	else
+		mask = PORT_CSC | PORT_PEC;
+
+	/* no hub change reports (bit 0) for now (power, ...) */
+
+	/* port N changes (bit N)? */
+	spin_lock_irqsave(&oxu->lock, flags);
+	for (i = 0; i < ports; i++) {
+		temp = readl(&oxu->regs->port_status[i]);
+
+		/*
+		 * Return status information even for ports with OWNER set.
+		 * Otherwise hub_wq wouldn't see the disconnect event when a
+		 * high-speed device is switched over to the companion
+		 * controller by the user.
+		 */
+
+		if (!(temp & PORT_CONNECT))
+			oxu->reset_done[i] = 0;
+		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
+				time_after_eq(jiffies, oxu->reset_done[i]))) {
+			if (i < 7)
+				buf[0] |= 1 << (i + 1);
+			else
+				buf[1] |= 1 << (i - 7);
+			status = STS_PCD;
+		}
+	}
+	/* FIXME autosuspend idle root hubs */
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return status ? retval : 0;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
+						unsigned int portsc)
+{
+	switch ((portsc >> 26) & 3) {
+	case 0:
+		return 0;
+	case 1:
+		return USB_PORT_STAT_LOW_SPEED;
+	case 2:
+	default:
+		return USB_PORT_STAT_HIGH_SPEED;
+	}
+}
+
+#define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
+				u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int ports = HCS_N_PORTS(oxu->hcs_params);
+	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
+	u32 temp, status;
+	unsigned long	flags;
+	int retval = 0;
+	unsigned selector;
+
+	/*
+	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+	 * (track current state ourselves) ... blink for diagnostics,
+	 * power, "this is the one", etc.  EHCI spec supports this.
+	 */
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = readl(status_reg);
+
+		/*
+		 * Even if OWNER is set, so the port is owned by the
+		 * companion controller, hub_wq needs to be able to clear
+		 * the port-change status bits (especially
+		 * USB_PORT_STAT_C_CONNECTION).
+		 */
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			writel(temp & ~PORT_PE, status_reg);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (temp & PORT_RESET)
+				goto error;
+			if (temp & PORT_SUSPEND) {
+				if ((temp & PORT_PE) == 0)
+					goto error;
+				/* resume signaling for 20 msec */
+				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+				writel(temp | PORT_RESUME, status_reg);
+				oxu->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+			}
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			/* we auto-clear this feature */
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(oxu->hcs_params))
+				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
+					  status_reg);
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* GetPortStatus clears reset */
+			break;
+		default:
+			goto error;
+		}
+		readl(&oxu->regs->command);	/* unblock posted write */
+		break;
+	case GetHubDescriptor:
+		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
+			buf);
+		break;
+	case GetHubStatus:
+		/* no hub-wide feature/status flags */
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		status = 0;
+		temp = readl(status_reg);
+
+		/* wPortChange bits */
+		if (temp & PORT_CSC)
+			status |= USB_PORT_STAT_C_CONNECTION << 16;
+		if (temp & PORT_PEC)
+			status |= USB_PORT_STAT_C_ENABLE << 16;
+		if ((temp & PORT_OCC) && !ignore_oc)
+			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+		/* whoever resumes must GetPortStatus to complete it!! */
+		if (temp & PORT_RESUME) {
+
+			/* Remote Wakeup received? */
+			if (!oxu->reset_done[wIndex]) {
+				/* resume signaling for 20 msec */
+				oxu->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+				/* check the port again */
+				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
+						oxu->reset_done[wIndex]);
+			}
+
+			/* resume completed? */
+			else if (time_after_eq(jiffies,
+					oxu->reset_done[wIndex])) {
+				status |= USB_PORT_STAT_C_SUSPEND << 16;
+				oxu->reset_done[wIndex] = 0;
+
+				/* stop resume signaling */
+				temp = readl(status_reg);
+				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
+					status_reg);
+				retval = handshake(oxu, status_reg,
+					   PORT_RESUME, 0, 2000 /* 2msec */);
+				if (retval != 0) {
+					oxu_err(oxu,
+						"port %d resume error %d\n",
+						wIndex + 1, retval);
+					goto error;
+				}
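+				/* clear PORT_SUSPEND, PORT_RESUME and the
+				 * line-status bits (3 << 10) in the local
+				 * copy so the wPortStatus bits computed
+				 * below won't report them
+				 */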
+				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+			}
+		}
+
+		/* whoever resets must GetPortStatus to complete it!! */
+		if ((temp & PORT_RESET)
+				&& time_after_eq(jiffies,
+					oxu->reset_done[wIndex])) {
+			status |= USB_PORT_STAT_C_RESET << 16;
+			oxu->reset_done[wIndex] = 0;
+
+			/* force reset to complete */
+			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
+					status_reg);
+			/* REVISIT:  some hardware needs 550+ usec to clear
+			 * this bit; seems too long to spin routinely...
+			 */
+			retval = handshake(oxu, status_reg,
+					PORT_RESET, 0, 750);
+			if (retval != 0) {
+				oxu_err(oxu, "port %d reset error %d\n",
+					wIndex + 1, retval);
+				goto error;
+			}
+
+			/* see what we found out */
+			temp = check_reset_complete(oxu, wIndex, status_reg,
+					readl(status_reg));
+		}
+
+		/* transfer dedicated ports to the companion hc */
+		if ((temp & PORT_CONNECT) &&
+				test_bit(wIndex, &oxu->companion_ports)) {
+			temp &= ~PORT_RWC_BITS;
+			temp |= PORT_OWNER;
+			writel(temp, status_reg);
+			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
+			temp = readl(status_reg);
+		}
+
+		/*
+		 * Even if OWNER is set, there's no harm letting hub_wq
+		 * see the wPortStatus values (they should all be 0 except
+		 * for PORT_POWER anyway).
+		 */
+
+		if (temp & PORT_CONNECT) {
+			status |= USB_PORT_STAT_CONNECTION;
+			/* status may be from integrated TT */
+			status |= oxu_port_speed(oxu, temp);
+		}
+		if (temp & PORT_PE)
+			status |= USB_PORT_STAT_ENABLE;
+		if (temp & (PORT_SUSPEND|PORT_RESUME))
+			status |= USB_PORT_STAT_SUSPEND;
+		if (temp & PORT_OC)
+			status |= USB_PORT_STAT_OVERCURRENT;
+		if (temp & PORT_RESET)
+			status |= USB_PORT_STAT_RESET;
+		if (temp & PORT_POWER)
+			status |= USB_PORT_STAT_POWER;
+
+#ifndef	OXU_VERBOSE_DEBUG
+	if (status & ~0xffff)	/* only if wPortChange is interesting */
+#endif
+		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
+		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		selector = wIndex >> 8;
+		wIndex &= 0xff;
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = readl(status_reg);
+		if (temp & PORT_OWNER)
+			break;
+
+		temp &= ~PORT_RWC_BITS;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if ((temp & PORT_PE) == 0
+					|| (temp & PORT_RESET) != 0)
+				goto error;
+			if (device_may_wakeup(&hcd->self.root_hub->dev))
+				temp |= PORT_WAKE_BITS;
+			writel(temp | PORT_SUSPEND, status_reg);
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(oxu->hcs_params))
+				writel(temp | PORT_POWER, status_reg);
+			break;
+		case USB_PORT_FEAT_RESET:
+			if (temp & PORT_RESUME)
+				goto error;
+			/* line status bits may report this as low speed,
+			 * which can be fine if this root hub has a
+			 * transaction translator built in.
+			 */
+			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
+			temp |= PORT_RESET;
+			temp &= ~PORT_PE;
+
+			/*
+			 * caller must wait, then call GetPortStatus
+			 * usb 2.0 spec says 50 ms resets on root
+			 */
+			oxu->reset_done[wIndex] = jiffies
+					+ msecs_to_jiffies(50);
+			writel(temp, status_reg);
+			break;
+
+		/* For downstream facing ports (these):  one hub port is put
+		 * into test mode according to USB2 11.24.2.13, then the hub
+		 * must be reset (which for root hub now means rmmod+modprobe,
+		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+		 * about the EHCI-specific stuff.
+		 */
+		case USB_PORT_FEAT_TEST:
+			if (!selector || selector > 5)
+				goto error;
+			ehci_quiesce(oxu);
+			ehci_halt(oxu);
+			temp |= selector << 16;
+			writel(temp, status_reg);
+			break;
+
+		default:
+			goto error;
+		}
+		readl(&oxu->regs->command);	/* unblock posted writes */
+		break;
+
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return retval;
+}
+
+#ifdef CONFIG_PM
+
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int port;
+	int mask;
+
+	oxu_dbg(oxu, "suspend root hub\n");
+
+	if (time_before(jiffies, oxu->next_statechange))
+		msleep(5);
+
+	port = HCS_N_PORTS(oxu->hcs_params);
+	spin_lock_irq(&oxu->lock);
+
+	/* stop schedules, clean any completed work */
+	if (HC_IS_RUNNING(hcd->state)) {
+		ehci_quiesce(oxu);
+		hcd->state = HC_STATE_QUIESCING;
+	}
+	oxu->command = readl(&oxu->regs->command);
+	if (oxu->reclaim)
+		oxu->reclaim_ready = 1;
+	ehci_work(oxu);
+
+	/* Unlike other USB host controller types, EHCI doesn't have
+	 * any notion of "global" or bus-wide suspend.  The driver has
+	 * to manually suspend all the active unsuspended ports, and
+	 * then manually resume them in the bus_resume() routine.
+	 */
+	oxu->bus_suspended = 0;
+	while (port--) {
+		u32 __iomem *reg = &oxu->regs->port_status[port];
+		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
+		u32 t2 = t1;
+
+		/* keep track of which ports we suspend */
+		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
+				!(t1 & PORT_SUSPEND)) {
+			t2 |= PORT_SUSPEND;
+			set_bit(port, &oxu->bus_suspended);
+		}
+
+		/* enable remote wakeup on all ports */
+		if (device_may_wakeup(&hcd->self.root_hub->dev))
+			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
+		else
+			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
+
+		if (t1 != t2) {
+			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
+				port + 1, t1, t2);
+			writel(t2, reg);
+		}
+	}
+
+	/* turn off now-idle HC */
+	del_timer_sync(&oxu->watchdog);
+	ehci_halt(oxu);
+	hcd->state = HC_STATE_SUSPENDED;
+
+	/* allow remote wakeup */
+	mask = INTR_MASK;
+	if (!device_may_wakeup(&hcd->self.root_hub->dev))
+		mask &= ~STS_PCD;
+	writel(mask, &oxu->regs->intr_enable);
+	readl(&oxu->regs->intr_enable);
+
+	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
+	spin_unlock_irq(&oxu->lock);
+	return 0;
+}
+
+/* Caller has locked the root hub, and should reset/reinit on error */
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp;
+	int i;
+
+	if (time_before(jiffies, oxu->next_statechange))
+		msleep(5);
+	spin_lock_irq(&oxu->lock);
+
+	/* Ideally we've got a real resume here, and no port's power
+	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
+	 * could instead be restoring a swsusp snapshot -- so that BIOS was
+	 * the last user of the controller, not reset/pm hardware keeping
+	 * state we gave to it.
+	 */
+	temp = readl(&oxu->regs->intr_enable);
+	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
+
+	/* at least some APM implementations will try to deliver
+	 * IRQs right away, so delay them until we're ready.
+	 */
+	writel(0, &oxu->regs->intr_enable);
+
+	/* re-init operational registers */
+	writel(0, &oxu->regs->segment);
+	writel(oxu->periodic_dma, &oxu->regs->frame_list);
+	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+	/* restore CMD_RUN, framelist size, and irq threshold */
+	writel(oxu->command, &oxu->regs->command);
+
+	/* Some controller/firmware combinations need a delay during which
+	 * they set up the port statuses.  See Bugzilla #8190. */
+	mdelay(8);
+
+	/* manually resume the ports we suspended during bus_suspend() */
+	i = HCS_N_PORTS(oxu->hcs_params);
+	while (i--) {
+		temp = readl(&oxu->regs->port_status[i]);
+		temp &= ~(PORT_RWC_BITS
+			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
+		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+			temp |= PORT_RESUME;
+		}
+		writel(temp, &oxu->regs->port_status[i]);
+	}
+	i = HCS_N_PORTS(oxu->hcs_params);
+	mdelay(20);
+	while (i--) {
+		temp = readl(&oxu->regs->port_status[i]);
+		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+			writel(temp, &oxu->regs->port_status[i]);
+			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
+		}
+	}
+	(void) readl(&oxu->regs->command);
+
+	/* maybe re-activate the schedule(s) */
+	temp = 0;
+	if (oxu->async->qh_next.qh)
+		temp |= CMD_ASE;
+	if (oxu->periodic_sched)
+		temp |= CMD_PSE;
+	if (temp) {
+		oxu->command |= temp;
+		writel(oxu->command, &oxu->regs->command);
+	}
+
+	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Now we can safely re-enable irqs */
+	writel(INTR_MASK, &oxu->regs->intr_enable);
+
+	spin_unlock_irq(&oxu->lock);
+	return 0;
+}
+
+#else
+
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_PM */
+
+static const struct hc_driver oxu_hc_driver = {
+	.description =		"oxu210hp_hcd",
+	.product_desc =		"oxu210hp HCD",
+	.hcd_priv_size =	sizeof(struct oxu_hcd),
+
+	/*
+	 * Generic hardware linkage
+	 */
+	.irq =			oxu_irq,
+	.flags =		HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * Basic lifecycle operations
+	 */
+	.reset =		oxu_reset,
+	.start =		oxu_run,
+	.stop =			oxu_stop,
+	.shutdown =		oxu_shutdown,
+
+	/*
+	 * Managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		oxu_urb_enqueue,
+	.urb_dequeue =		oxu_urb_dequeue,
+	.endpoint_disable =	oxu_endpoint_disable,
+
+	/*
+	 * Scheduling support
+	 */
+	.get_frame_number =	oxu_get_frame,
+
+	/*
+	 * Root hub support
+	 */
+	.hub_status_data =	oxu_hub_status_data,
+	.hub_control =		oxu_hub_control,
+	.bus_suspend =		oxu_bus_suspend,
+	.bus_resume =		oxu_bus_resume,
+};
+
+/*
+ * Module stuff
+ */
+
+static void oxu_configuration(struct platform_device *pdev, void *base)
+{
+	u32 tmp;
+
+	/* Initialize top-level registers.
+	 * This is the first write ever to the chip.
+	 */
+	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
+	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+
+	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
+	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
+
+	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
+					OXU_COMPARATOR | OXU_ASO_OP);
+
+	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
+	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
+
+	/* Clear all top-level interrupt enables */
+	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
+
+	/* Clear all top-level interrupt status bits */
+	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
+
+	/* Enable all needed top-level interrupts except the OTG SPH core */
+	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
+}
+
+static int oxu_verify_id(struct platform_device *pdev, void *base)
+{
+	u32 id;
+	static const char * const bo[] = {
+		"reserved",
+		"128-pin LQFP",
+		"84-pin TFBGA",
+		"reserved",
+	};
+
+	/* Read controller signature register to find a match */
+	id = oxu_readl(base, OXU_DEVICEID);
+	dev_info(&pdev->dev, "device ID %x\n", id);
+	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
+		return -1;
+
+	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
+		id >> OXU_REV_SHIFT,
+		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
+		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
+		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
+
+	return 0;
+}
+
+static struct usb_hcd *oxu_create(struct platform_device *pdev,
+				unsigned long memstart, unsigned long memlen,
+				void *base, int irq, int otg)
+{
+	struct device *dev = &pdev->dev;
+
+	struct usb_hcd *hcd;
+	struct oxu_hcd *oxu;
+	int ret;
+
+	/* Set endian mode and host mode */
+	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
+				OXU_USBMODE,
+				OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
+
+	hcd = usb_create_hcd(&oxu_hc_driver, dev,
+				otg ? "oxu210hp_otg" : "oxu210hp_sph");
+	if (!hcd)
+		return ERR_PTR(-ENOMEM);
+
+	hcd->rsrc_start = memstart;
+	hcd->rsrc_len = memlen;
+	hcd->regs = base;
+	hcd->irq = irq;
+	hcd->state = HC_STATE_HALT;
+
+	oxu = hcd_to_oxu(hcd);
+	oxu->is_otg = otg;
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret < 0) {
+		usb_put_hcd(hcd);
+		return ERR_PTR(ret);
+	}
+
+	device_wakeup_enable(hcd->self.controller);
+	return hcd;
+}
+
+static int oxu_init(struct platform_device *pdev,
+				unsigned long memstart, unsigned long memlen,
+				void *base, int irq)
+{
+	struct oxu_info *info = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd;
+	int ret;
+
+	/* First-time configuration at start-up */
+	oxu_configuration(pdev, base);
+
+	ret = oxu_verify_id(pdev, base);
+	if (ret) {
+		dev_err(&pdev->dev, "no devices found!\n");
+		return -ENODEV;
+	}
+
+	/* Create the OTG controller */
+	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
+	if (IS_ERR(hcd)) {
+		dev_err(&pdev->dev, "cannot create OTG controller!\n");
+		ret = PTR_ERR(hcd);
+		goto error_create_otg;
+	}
+	info->hcd[0] = hcd;
+
+	/* Create the SPH host controller */
+	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
+	if (IS_ERR(hcd)) {
+		dev_err(&pdev->dev, "cannot create SPH controller!\n");
+		ret = PTR_ERR(hcd);
+		goto error_create_sph;
+	}
+	info->hcd[1] = hcd;
+
+	oxu_writel(base, OXU_CHIPIRQEN_SET,
+		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
+
+	return 0;
+
+error_create_sph:
+	usb_remove_hcd(info->hcd[0]);
+	usb_put_hcd(info->hcd[0]);
+
+error_create_otg:
+	return ret;
+}
+
+static int oxu_drv_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void *base;
+	unsigned long memstart, memlen;
+	int irq, ret;
+	struct oxu_info *info;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * Get the platform resources
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	irq = res->start;
+	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base)) {
+		ret = PTR_ERR(base);
+		goto error;
+	}
+	memstart = res->start;
+	memlen = resource_size(res);
+
+	ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
+	if (ret) {
+		dev_err(&pdev->dev, "error setting irq type\n");
+		goto error;
+	}
+
+	/* Allocate a driver data struct to hold useful info for both
+	 * SPH & OTG devices
+	 */
+	info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	platform_set_drvdata(pdev, info);
+
+	ret = oxu_init(pdev, memstart, memlen, base, irq);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "cannot init USB devices\n");
+		goto error;
+	}
+
+	dev_info(&pdev->dev, "devices enabled and running\n");
+
+	return 0;
+
+error:
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+	return ret;
+}
+
+static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
+{
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+}
+
+static int oxu_drv_remove(struct platform_device *pdev)
+{
+	struct oxu_info *info = platform_get_drvdata(pdev);
+
+	oxu_remove(pdev, info->hcd[0]);
+	oxu_remove(pdev, info->hcd[1]);
+
+	return 0;
+}
+
+static void oxu_drv_shutdown(struct platform_device *pdev)
+{
+	oxu_drv_remove(pdev);
+}
+
+#if 0
+/* FIXME: TODO */
+static int oxu_drv_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	return 0;
+}
+
+static int oxu_drv_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	return 0;
+}
+#else
+#define oxu_drv_suspend	NULL
+#define oxu_drv_resume	NULL
+#endif
+
+static struct platform_driver oxu_driver = {
+	.probe		= oxu_drv_probe,
+	.remove		= oxu_drv_remove,
+	.shutdown	= oxu_drv_shutdown,
+	.suspend	= oxu_drv_suspend,
+	.resume		= oxu_drv_resume,
+	.driver = {
+		.name = "oxu210hp-hcd",
+		.bus = &platform_bus_type
+	}
+};
+
+module_platform_driver(oxu_driver);
+
+MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/oxu210hp.h b/drivers/usb/host/oxu210hp.h
new file mode 100644
index 0000000..4370441
--- /dev/null
+++ b/drivers/usb/host/oxu210hp.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Host interface registers
+ */
+
+#define OXU_DEVICEID			0x00
+	#define OXU_REV_MASK		0xffff0000
+	#define OXU_REV_SHIFT		16
+	#define OXU_REV_2100		0x2100
+	#define OXU_BO_SHIFT		8
+	#define OXU_BO_MASK		(0x3 << OXU_BO_SHIFT)
+	#define OXU_MAJ_REV_SHIFT	4
+	#define OXU_MAJ_REV_MASK	(0xf << OXU_MAJ_REV_SHIFT)
+	#define OXU_MIN_REV_SHIFT	0
+	#define OXU_MIN_REV_MASK	(0xf << OXU_MIN_REV_SHIFT)
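+/* These fields are decoded by oxu_verify_id() in oxu210hp-hcd.c: the
+ * revision field, (id & OXU_REV_MASK) >> OXU_REV_SHIFT, must equal
+ * OXU_REV_2100 for the chip to be accepted.
+ */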
+#define OXU_HOSTIFCONFIG		0x04
+#define OXU_SOFTRESET			0x08
+	#define OXU_SRESET		(1 << 0)
+
+#define OXU_PIOBURSTREADCTRL		0x0C
+
+#define OXU_CHIPIRQSTATUS		0x10
+#define OXU_CHIPIRQEN_SET		0x14
+#define OXU_CHIPIRQEN_CLR		0x18
+	#define OXU_USBSPHLPWUI		0x00000080
+	#define OXU_USBOTGLPWUI		0x00000040
+	#define OXU_USBSPHI		0x00000002
+	#define OXU_USBOTGI		0x00000001
+
+#define OXU_CLKCTRL_SET			0x1C
+	#define OXU_SYSCLKEN		0x00000008
+	#define OXU_USBSPHCLKEN		0x00000002
+	#define OXU_USBOTGCLKEN		0x00000001
+
+#define OXU_ASO				0x68
+	#define OXU_SPHPOEN		0x00000100
+	#define OXU_OVRCCURPUPDEN	0x00000800
+	#define OXU_ASO_OP		(1 << 10)
+	#define OXU_COMPARATOR		0x00004000
+
+#define OXU_USBMODE			0x1A8
+	#define OXU_VBPS		0x00000020
+	#define OXU_ES_LITTLE		0x00000000
+	#define OXU_CM_HOST_ONLY	0x00000003
+
+/*
+ * Proper EHCI structs & defines
+ */
+
+/* Magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT		0
+#define EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT	1
+#define EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */
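+
+/* A minimal sketch (not part of the original file) of where these tunables
+ * typically land; it mirrors the EHCI core's QH/qTD setup, with field
+ * positions per the EHCI spec (QH dwords 1 and 2, qTD token):
+ */
+#if 0
+	info1 |= EHCI_TUNE_RL_HS << 28;		/* NAK reload count, QH dword 1 */
+	info2 |= EHCI_TUNE_MULT_HS << 30;	/* transactions/uframe, QH dword 2 */
+	token |= EHCI_TUNE_CERR << 10;		/* qTD error counter, cf. QTD_CERR() */
+#endif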
+
+struct oxu_hcd;
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct ehci_caps {
+	/* these fields are specified as 8 and 16 bit registers,
+	 * but some hosts can't perform 8 or 16 bit PCI accesses.
+	 */
+	u32		hc_capbase;
+#define HC_LENGTH(p)		(((p)>>00)&0x00ff)	/* bits 7:0 */
+#define HC_VERSION(p)		(((p)>>16)&0xffff)	/* bits 31:16 */
+	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
+#define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
+#define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
+#define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
+#define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
+#define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
+#define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
+#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
+
+	u32		hcc_params;      /* HCCPARAMS - offset 0x8 */
+#define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
+#define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
+#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
+#define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
+	u8		portroute[8];	 /* nibbles for routing - offset 0xC */
+} __attribute__ ((packed));
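+
+/* Illustrative sketch only (hypothetical helper, not compiled): decoding
+ * the capability words with the macros above.
+ */
+#if 0
+static void example_dump_caps(struct ehci_caps __iomem *caps)
+{
+	u32 hcs = readl(&caps->hcs_params);
+	u32 hcc = readl(&caps->hcc_params);
+
+	pr_debug("%d ports, %d companion HCs, 64-bit DMA: %d\n",
+		 HCS_N_PORTS(hcs), HCS_N_CC(hcs), !!HCC_64BIT_ADDR(hcc));
+}
+#endif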
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct ehci_regs {
+	/* USBCMD: offset 0x00 */
+	u32		command;
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
+#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
+#define CMD_LRESET	(1<<7)		/* partial reset (no ports, etc) */
+#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
+#define CMD_ASE		(1<<5)		/* async schedule enable */
+#define CMD_PSE		(1<<4)		/* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET	(1<<1)		/* reset HC not bus */
+#define CMD_RUN		(1<<0)		/* start/stop HC */
+
+	/* USBSTS: offset 0x04 */
+	u32		status;
+#define STS_ASS		(1<<15)		/* Async Schedule Status */
+#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
+#define STS_RECL	(1<<13)		/* Reclamation */
+#define STS_HALT	(1<<12)		/* Not running (any reason) */
+/* some bits reserved */
+	/* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA		(1<<5)		/* Interrupted on async advance */
+#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
+#define STS_FLR		(1<<3)		/* frame list rolled over */
+#define STS_PCD		(1<<2)		/* port change detect */
+#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
+#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+	/* USBINTR: offset 0x08 */
+	u32		intr_enable;
+
+	/* FRINDEX: offset 0x0C */
+	u32		frame_index;	/* current microframe number */
+	/* CTRLDSSEGMENT: offset 0x10 */
+	u32		segment;	/* address bits 63:32 if needed */
+	/* PERIODICLISTBASE: offset 0x14 */
+	u32		frame_list;	/* points to periodic list */
+	/* ASYNCLISTADDR: offset 0x18 */
+	u32		async_next;	/* address of next async queue head */
+
+	u32		reserved[9];
+
+	/* CONFIGFLAG: offset 0x40 */
+	u32		configured_flag;
+#define FLAG_CF		(1<<0)		/* true: we'll support "high speed" */
+
+	/* PORTSC: offset 0x44 */
+	u32		port_status[0];	/* up to N_PORTS */
+/* 31:23 reserved */
+#define PORT_WKOC_E	(1<<22)		/* wake on overcurrent (enable) */
+#define PORT_WKDISC_E	(1<<21)		/* wake on disconnect (enable) */
+#define PORT_WKCONN_E	(1<<20)		/* wake on connect (enable) */
+/* 19:16 for port testing */
+#define PORT_LED_OFF	(0<<14)
+#define PORT_LED_AMBER	(1<<14)
+#define PORT_LED_GREEN	(2<<14)
+#define PORT_LED_MASK	(3<<14)
+#define PORT_OWNER	(1<<13)		/* true: companion hc owns this port */
+#define PORT_POWER	(1<<12)		/* true: has power (see PPC) */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
+/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+/* 9 reserved */
+#define PORT_RESET	(1<<8)		/* reset port */
+#define PORT_SUSPEND	(1<<7)		/* suspend port */
+#define PORT_RESUME	(1<<6)		/* resume it */
+#define PORT_OCC	(1<<5)		/* over current change */
+#define PORT_OC		(1<<4)		/* over current active */
+#define PORT_PEC	(1<<3)		/* port enable change */
+#define PORT_PE		(1<<2)		/* port enable */
+#define PORT_CSC	(1<<1)		/* connect status change */
+#define PORT_CONNECT	(1<<0)		/* device connected */
+#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC | PORT_OCC)
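+/* Note: the RWC bits are "write 1 to clear"; mask them off before writing
+ * a port status value back (as oxu_bus_resume() does), or pending change
+ * events would be acknowledged by accident.
+ */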
+} __attribute__ ((packed));
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console.  (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+	u32	control;
+#define DBGP_OWNER	(1<<30)
+#define DBGP_ENABLED	(1<<28)
+#define DBGP_DONE	(1<<16)
+#define DBGP_INUSE	(1<<10)
+#define DBGP_ERRCODE(x)	(((x)>>7)&0x07)
+#	define DBGP_ERR_BAD	1
+#	define DBGP_ERR_SIGNAL	2
+#define DBGP_ERROR	(1<<6)
+#define DBGP_GO		(1<<5)
+#define DBGP_OUT	(1<<4)
+#define DBGP_LEN(x)	(((x)>>0)&0x0f)
+	u32	pids;
+#define DBGP_PID_GET(x)		(((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok)	(((data)<<8)|(tok))
+	u32	data03;
+	u32	data47;
+	u32	address;
+#define DBGP_EPADDR(dev, ep)	(((dev)<<8)|(ep))
+} __attribute__ ((packed));
+
+
+#define	QTD_NEXT(dma)	cpu_to_le32((u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct ehci_qtd {
+	/* first part defined by EHCI spec */
+	__le32			hw_next;		/* see EHCI 3.5.1 */
+	__le32			hw_alt_next;		/* see EHCI 3.5.2 */
+	__le32			hw_token;		/* see EHCI 3.5.3 */
+#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
+#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
+#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
+#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
+#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
+#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
+	__le32			hw_buf[5];		/* see EHCI 3.5.4 */
+	__le32			hw_buf_hi[5];		/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		qtd_dma;		/* qtd address */
+	struct list_head	qtd_list;		/* sw qtd list */
+	struct urb		*urb;			/* qtd's urb */
+	size_t			length;			/* length of buffer */
+
+	u32			qtd_buffer_len;
+	void			*buffer;
+	dma_addr_t		buffer_dma;
+	void			*transfer_buffer;
+	void			*transfer_dma;
+} __attribute__ ((aligned(32)));
+
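+/* Illustrative sketch only (hypothetical, not compiled): decoding a
+ * completed qTD token with the macros above.
+ */
+#if 0
+static int example_qtd_result(__le32 hw_token)
+{
+	u32 token = le32_to_cpu(hw_token);
+
+	if (token & QTD_STS_ACTIVE)
+		return -EINPROGRESS;	/* HC hasn't finished it yet */
+	if (token & QTD_STS_HALT)
+		return -EPIPE;		/* check BABBLE/XACT/DBE for the cause */
+	return QTD_LENGTH(token);	/* bytes left untransferred */
+}
+#endif
+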
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK cpu_to_le32 (~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/* Type tag from {qh, itd, sitd, fstn}->hw_next */
+#define Q_NEXT_TYPE(dma) ((dma) & cpu_to_le32 (3 << 1))
+
+/* values for that type tag */
+#define Q_TYPE_QH	cpu_to_le32 (1 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define	QH_NEXT(dma)	(cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define	EHCI_LIST_END	cpu_to_le32(1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure.  That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule.  Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union ehci_shadow {
+	struct ehci_qh		*qh;		/* Q_TYPE_QH */
+	__le32			*hw_next;	/* (all types) */
+	void			*ptr;
+};
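+
+/* Illustrative sketch only (hypothetical, not compiled): following one
+ * periodic slot through the shadow table.  This driver links only QHs
+ * into its schedules, so the type tag is always Q_TYPE_QH here.
+ */
+#if 0
+static void example_scan_slot(struct oxu_hcd *oxu, unsigned frame)
+{
+	union ehci_shadow here = oxu->pshadow[frame];
+
+	while (here.ptr)	/* a NULL ptr marks the end of the shadow list */
+		here = here.qh->qh_next;
+}
+#endif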
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+struct ehci_qh {
+	/* first part defined by EHCI spec */
+	__le32			hw_next;	 /* see EHCI 3.6.1 */
+	__le32			hw_info1;	/* see EHCI 3.6.2 */
+#define	QH_HEAD		0x00008000
+	__le32			hw_info2;	/* see EHCI 3.6.2 */
+#define	QH_SMASK	0x000000ff
+#define	QH_CMASK	0x0000ff00
+#define	QH_HUBADDR	0x007f0000
+#define	QH_HUBPORT	0x3f800000
+#define	QH_MULT		0xc0000000
+	__le32			hw_current;	 /* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct ehci_qtd) */
+	__le32			hw_qtd_next;
+	__le32			hw_alt_next;
+	__le32			hw_token;
+	__le32			hw_buf[5];
+	__le32			hw_buf_hi[5];
+
+	/* the rest is HCD-private */
+	dma_addr_t		qh_dma;		/* address of qh */
+	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
+	struct list_head	qtd_list;	/* sw qtd list */
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*reclaim;	/* next to reclaim */
+
+	struct oxu_hcd		*oxu;
+	struct kref		kref;
+	unsigned		stamp;
+
+	u8			qh_state;
+#define	QH_STATE_LINKED		1		/* HC sees this */
+#define	QH_STATE_UNLINK		2		/* HC may still see this */
+#define	QH_STATE_IDLE		3		/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
+#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */
+
+	/* periodic schedule info */
+	u8			usecs;		/* intr bandwidth */
+	u8			gap_uf;		/* uframes split/csplit gap */
+	u8			c_usecs;	/* ... split completion bw */
+	u16			tt_usecs;	/* tt downstream bandwidth */
+	unsigned short		period;		/* polling interval */
+	unsigned short		start;		/* where polling starts */
+#define NO_FRAME ((unsigned short)~0)			/* pick new start */
+	struct usb_device	*dev;		/* access to TT */
+} __attribute__ ((aligned(32)));
+
+/*
+ * Proper OXU210HP structs
+ */
+
+#define OXU_OTG_CORE_OFFSET	0x00400
+#define OXU_OTG_CAP_OFFSET	(OXU_OTG_CORE_OFFSET + 0x100)
+#define OXU_SPH_CORE_OFFSET	0x00800
+#define OXU_SPH_CAP_OFFSET	(OXU_SPH_CORE_OFFSET + 0x100)
+
+#define OXU_OTG_MEM		0xE000
+#define OXU_SPH_MEM		0x16000
+
+/* Only the number of elements and the element structure are specified here. */
+/* 2 host controllers are enabled - total size <= 28 kbytes */
+#define	DEFAULT_I_TDPS		1024
+#define QHEAD_NUM		16
+#define QTD_NUM			32
+#define SITD_NUM		8
+#define MURB_NUM		8
+
+#define BUFFER_NUM		8
+#define BUFFER_SIZE		512
+
+struct oxu_info {
+	struct usb_hcd *hcd[2];
+};
+
+struct oxu_buf {
+	u8			buffer[BUFFER_SIZE];
+} __attribute__ ((aligned(BUFFER_SIZE)));
+
+struct oxu_onchip_mem {
+	struct oxu_buf		db_pool[BUFFER_NUM];
+
+	u32			frame_list[DEFAULT_I_TDPS];
+	struct ehci_qh		qh_pool[QHEAD_NUM];
+	struct ehci_qtd		qtd_pool[QTD_NUM];
+} __attribute__ ((aligned(4 << 10)));
+
+#define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */
+
+struct oxu_murb {
+	struct urb		urb;
+	struct urb		*main;
+	u8			last;
+};
+
+struct oxu_hcd {				/* one per controller */
+	unsigned int		is_otg:1;
+
+	u8			qh_used[QHEAD_NUM];
+	u8			qtd_used[QTD_NUM];
+	u8			db_used[BUFFER_NUM];
+	u8			murb_used[MURB_NUM];
+
+	struct oxu_onchip_mem	__iomem *mem;
+	spinlock_t		mem_lock;
+
+	struct timer_list	urb_timer;
+
+	struct ehci_caps __iomem *caps;
+	struct ehci_regs __iomem *regs;
+
+	__u32			hcs_params;	/* cached register copy */
+	spinlock_t		lock;
+
+	/* async schedule support */
+	struct ehci_qh		*async;
+	struct ehci_qh		*reclaim;
+	unsigned		reclaim_ready:1;
+	unsigned		scanning:1;
+
+	/* periodic schedule support */
+	unsigned		periodic_size;
+	__le32			*periodic;	/* hw periodic table */
+	dma_addr_t		periodic_dma;
+	unsigned		i_thresh;	/* uframes HC might cache */
+
+	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
+	int			next_uframe;	/* scan periodic, start here */
+	unsigned		periodic_sched;	/* periodic activity count */
+
+	/* per root hub port */
+	unsigned long		reset_done[EHCI_MAX_ROOT_PORTS];
+	/* bit vectors (one bit per port) */
+	unsigned long		bus_suspended;	/* which ports were
+						 * already suspended at the
+						 * start of a bus suspend
+						 */
+	unsigned long		companion_ports;/* which ports are dedicated
+						 * to the companion controller
+						 */
+
+	struct timer_list	watchdog;
+	unsigned long		actions;
+	unsigned		stamp;
+	unsigned long		next_statechange;
+	u32			command;
+
+	/* SILICON QUIRKS */
+	struct list_head	urb_list;	/* head of the queue of URBs
+						 * that didn't get enough
+						 * resources
+						 */
+	struct oxu_murb		*murb_pool;	/* murb per split big urb */
+	unsigned		urb_len;
+
+	u8			sbrn;		/* packed release number */
+};
+
+#define EHCI_IAA_JIFFIES	(HZ/100)	/* arbitrary; ~10 msec */
+#define EHCI_IO_JIFFIES	 	(HZ/10)		/* io watchdog > irq_thresh */
+#define EHCI_ASYNC_JIFFIES      (HZ/20)		/* async idle timeout */
+#define EHCI_SHRINK_JIFFIES     (HZ/200)	/* async qh unlink delay */
+
+enum ehci_timer_action {
+	TIMER_IO_WATCHDOG,
+	TIMER_IAA_WATCHDOG,
+	TIMER_ASYNC_SHRINK,
+	TIMER_ASYNC_OFF,
+};
+
+#include <linux/oxu210hp.h>
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
new file mode 100644
index 0000000..3625a5c
--- /dev/null
+++ b/drivers/usb/host/pci-quirks.c
@@ -0,0 +1,1270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains code to reset and initialize USB host controllers.
+ * Some of it includes work-arounds for PCI hardware and BIOS quirks.
+ * It may need to run early during booting -- before USB would normally
+ * initialize -- to ensure that Linux doesn't use any legacy modes.
+ *
+ *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *  (and others)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include "pci-quirks.h"
+#include "xhci-ext-caps.h"
+
+
+#define UHCI_USBLEGSUP		0xc0		/* legacy support */
+#define UHCI_USBCMD		0		/* command register */
+#define UHCI_USBINTR		4		/* interrupt register */
+#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
+#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
+#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
+#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
+#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
+#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
+#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
+
+#define OHCI_CONTROL		0x04
+#define OHCI_CMDSTATUS		0x08
+#define OHCI_INTRSTATUS		0x0c
+#define OHCI_INTRENABLE		0x10
+#define OHCI_INTRDISABLE	0x14
+#define OHCI_FMINTERVAL		0x34
+#define OHCI_HCFS		(3 << 6)	/* hc functional state */
+#define OHCI_HCR		(1 << 0)	/* host controller reset */
+#define OHCI_OCR		(1 << 3)	/* ownership change request */
+#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
+#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
+#define OHCI_INTR_OC		(1 << 30)	/* ownership change */
+
+#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
+#define EHCI_USBCMD		0		/* command register */
+#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
+#define EHCI_USBSTS		4		/* status register */
+#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
+#define EHCI_USBINTR		8		/* interrupt register */
+#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
+#define EHCI_USBLEGSUP		0		/* legacy support register */
+#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
+#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
+#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
+#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
+
+/* AMD quirk use */
+#define	AB_REG_BAR_LOW		0xe0
+#define	AB_REG_BAR_HIGH		0xe1
+#define	AB_REG_BAR_SB700	0xf0
+#define	AB_INDX(addr)		((addr) + 0x00)
+#define	AB_DATA(addr)		((addr) + 0x04)
+#define	AX_INDXC		0x30
+#define	AX_DATAC		0x34
+
+#define PT_ADDR_INDX		0xE8
+#define PT_READ_INDX		0xE4
+#define PT_SIG_1_ADDR		0xA520
+#define PT_SIG_2_ADDR		0xA521
+#define PT_SIG_3_ADDR		0xA522
+#define PT_SIG_4_ADDR		0xA523
+#define PT_SIG_1_DATA		0x78
+#define PT_SIG_2_DATA		0x56
+#define PT_SIG_3_DATA		0x34
+#define PT_SIG_4_DATA		0x12
+#define PT4_P1_REG		0xB521
+#define PT4_P2_REG		0xB522
+#define PT2_P1_REG		0xD520
+#define PT2_P2_REG		0xD521
+#define PT1_P1_REG		0xD522
+#define PT1_P2_REG		0xD523
+
+#define	NB_PCIE_INDX_ADDR	0xe0
+#define	NB_PCIE_INDX_DATA	0xe4
+#define	PCIE_P_CNTL		0x10040
+#define	BIF_NB			0x10002
+#define	NB_PIF0_PWRDOWN_0	0x01100012
+#define	NB_PIF0_PWRDOWN_1	0x01100013
+
+#define USB_INTEL_XUSB2PR      0xD0
+#define USB_INTEL_USB2PRM      0xD4
+#define USB_INTEL_USB3_PSSEN   0xD8
+#define USB_INTEL_USB3PRM      0xDC
+
+/* ASMEDIA quirk use */
+#define ASMT_DATA_WRITE0_REG	0xF8
+#define ASMT_DATA_WRITE1_REG	0xFC
+#define ASMT_CONTROL_REG	0xE0
+#define ASMT_CONTROL_WRITE_BIT	0x02
+#define ASMT_WRITEREG_CMD	0x10423
+#define ASMT_FLOWCTL_ADDR	0xFA30
+#define ASMT_FLOWCTL_DATA	0xBA
+#define ASMT_PSEUDO_DATA	0
+
+/*
+ * amd_chipset_gen values represent the different AMD chipset generations
+ */
+enum amd_chipset_gen {
+	NOT_AMD_CHIPSET = 0,
+	AMD_CHIPSET_SB600,
+	AMD_CHIPSET_SB700,
+	AMD_CHIPSET_SB800,
+	AMD_CHIPSET_HUDSON2,
+	AMD_CHIPSET_BOLTON,
+	AMD_CHIPSET_YANGTZE,
+	AMD_CHIPSET_TAISHAN,
+	AMD_CHIPSET_UNKNOWN,
+};
+
+struct amd_chipset_type {
+	enum amd_chipset_gen gen;
+	u8 rev;
+};
+
+static struct amd_chipset_info {
+	struct pci_dev	*nb_dev;
+	struct pci_dev	*smbus_dev;
+	int nb_type;
+	struct amd_chipset_type sb_type;
+	int isoc_reqs;
+	int probe_count;
+	int probe_result;
+} amd_chipset;
+
+static DEFINE_SPINLOCK(amd_lock);
+
+/*
+ * amd_chipset_sb_type_init - initialize amd chipset southbridge type
+ *
+ * AMD FCH/SB generation and revision is identified by SMBus controller
+ * vendor, device and revision IDs.
+ *
+ * Returns: 1 if it is an AMD chipset, 0 otherwise.
+ */
+static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+{
+	u8 rev = 0;
+
+	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
+
+	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+	if (pinfo->smbus_dev) {
+		rev = pinfo->smbus_dev->revision;
+		if (rev >= 0x10 && rev <= 0x1f)
+			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
+		else if (rev >= 0x30 && rev <= 0x3f)
+			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+		else if (rev >= 0x40 && rev <= 0x4f)
+			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+	} else {
+		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+
+		if (pinfo->smbus_dev) {
+			rev = pinfo->smbus_dev->revision;
+			if (rev >= 0x11 && rev <= 0x14)
+				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+			else if (rev >= 0x15 && rev <= 0x18)
+				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+			else if (rev >= 0x39 && rev <= 0x3a)
+				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+		} else {
+			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+							  0x145c, NULL);
+			if (pinfo->smbus_dev) {
+				rev = pinfo->smbus_dev->revision;
+				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+			} else {
+				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+				return 0;
+			}
+		}
+	}
+	pinfo->sb_type.rev = rev;
+	return 1;
+}
+
+void sb800_prefetch(struct device *dev, int on)
+{
+	u16 misc;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pci_read_config_word(pdev, 0x50, &misc);
+	if (on == 0)
+		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
+	else
+		pci_write_config_word(pdev, 0x50, misc | 0x0300);
+}
+EXPORT_SYMBOL_GPL(sb800_prefetch);
+
+int usb_amd_find_chipset_info(void)
+{
+	unsigned long flags;
+	struct amd_chipset_info info;
+	int ret;
+
+	spin_lock_irqsave(&amd_lock, flags);
+
+	/* probe only once */
+	if (amd_chipset.probe_count > 0) {
+		amd_chipset.probe_count++;
+		spin_unlock_irqrestore(&amd_lock, flags);
+		return amd_chipset.probe_result;
+	}
+	memset(&info, 0, sizeof(info));
+	spin_unlock_irqrestore(&amd_lock, flags);
+
+	if (!amd_chipset_sb_type_init(&info)) {
+		ret = 0;
+		goto commit;
+	}
+
+	/* The chipset generations below don't need the AMD PLL quirk */
+	if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
+			info.sb_type.gen == AMD_CHIPSET_SB600 ||
+			info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+			(info.sb_type.gen == AMD_CHIPSET_SB700 &&
+			info.sb_type.rev > 0x3b)) {
+		if (info.smbus_dev) {
+			pci_dev_put(info.smbus_dev);
+			info.smbus_dev = NULL;
+		}
+		ret = 0;
+		goto commit;
+	}
+
+	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+	if (info.nb_dev) {
+		info.nb_type = 1;
+	} else {
+		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+		if (info.nb_dev) {
+			info.nb_type = 2;
+		} else {
+			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+						     0x9600, NULL);
+			if (info.nb_dev)
+				info.nb_type = 3;
+		}
+	}
+
+	ret = info.probe_result = 1;
+	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
+
+commit:
+
+	spin_lock_irqsave(&amd_lock, flags);
+	if (amd_chipset.probe_count > 0) {
+		/* race - someone else was faster - drop devices */
+
+		/* Mark that we were here */
+		amd_chipset.probe_count++;
+		ret = amd_chipset.probe_result;
+
+		spin_unlock_irqrestore(&amd_lock, flags);
+
+		pci_dev_put(info.nb_dev);
+		pci_dev_put(info.smbus_dev);
+
+	} else {
+		/* no race - commit the result */
+		info.probe_count++;
+		amd_chipset = info;
+		spin_unlock_irqrestore(&amd_lock, flags);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
+{
+	/* Make sure the AMD chipset type has already been initialized */
+	usb_amd_find_chipset_info();
+	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
+
+bool usb_amd_hang_symptom_quirk(void)
+{
+	u8 rev;
+
+	usb_amd_find_chipset_info();
+	rev = amd_chipset.sb_type.rev;
+	/* SB600 and old versions of SB700 have the hang symptom bug */
+	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
+			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+			 rev >= 0x3a && rev <= 0x3b);
+}
+EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
+
+bool usb_amd_prefetch_quirk(void)
+{
+	usb_amd_find_chipset_info();
+	/* SB800 needs pre-fetch fix */
+	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
+}
+EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower its power consumption in idle states.
+ *
+ * This USB quirk prevents the link from going into that lower-power state
+ * during isochronous transfers.
+ *
+ * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
+ * some AMD platforms may stutter or break up occasionally.
+ */
+static void usb_amd_quirk_pll(int disable)
+{
+	u32 addr, addr_low, addr_high, val;
+	u32 bit = disable ? 0 : 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_lock, flags);
+
+	if (disable) {
+		amd_chipset.isoc_reqs++;
+		if (amd_chipset.isoc_reqs > 1) {
+			spin_unlock_irqrestore(&amd_lock, flags);
+			return;
+		}
+	} else {
+		amd_chipset.isoc_reqs--;
+		if (amd_chipset.isoc_reqs > 0) {
+			spin_unlock_irqrestore(&amd_lock, flags);
+			return;
+		}
+	}
+
+	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
+			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
+			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
+		outb_p(AB_REG_BAR_LOW, 0xcd6);
+		addr_low = inb_p(0xcd7);
+		outb_p(AB_REG_BAR_HIGH, 0xcd6);
+		addr_high = inb_p(0xcd7);
+		addr = addr_high << 8 | addr_low;
+
+		outl_p(0x30, AB_INDX(addr));
+		outl_p(0x40, AB_DATA(addr));
+		outl_p(0x34, AB_INDX(addr));
+		val = inl_p(AB_DATA(addr));
+	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
+			amd_chipset.sb_type.rev <= 0x3b) {
+		pci_read_config_dword(amd_chipset.smbus_dev,
+					AB_REG_BAR_SB700, &addr);
+		outl(AX_INDXC, AB_INDX(addr));
+		outl(0x40, AB_DATA(addr));
+		outl(AX_DATAC, AB_INDX(addr));
+		val = inl(AB_DATA(addr));
+	} else {
+		spin_unlock_irqrestore(&amd_lock, flags);
+		return;
+	}
+
+	if (disable) {
+		val &= ~0x08;
+		val |= (1 << 4) | (1 << 9);
+	} else {
+		val |= 0x08;
+		val &= ~((1 << 4) | (1 << 9));
+	}
+	outl_p(val, AB_DATA(addr));
+
+	if (!amd_chipset.nb_dev) {
+		spin_unlock_irqrestore(&amd_lock, flags);
+		return;
+	}
+
+	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
+		addr = PCIE_P_CNTL;
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, &val);
+
+		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+		val |= bit | (bit << 3) | (bit << 12);
+		val |= ((!bit) << 4) | ((!bit) << 9);
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, val);
+
+		addr = BIF_NB;
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, &val);
+		val &= ~(1 << 8);
+		val |= bit << 8;
+
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, val);
+	} else if (amd_chipset.nb_type == 2) {
+		addr = NB_PIF0_PWRDOWN_0;
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, val);
+
+		addr = NB_PIF0_PWRDOWN_1;
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_chipset.nb_dev,
+					NB_PCIE_INDX_DATA, val);
+	}
+
+	spin_unlock_irqrestore(&amd_lock, flags);
+}
+
+void usb_amd_quirk_pll_disable(void)
+{
+	usb_amd_quirk_pll(1);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+	unsigned long retry_count;
+	unsigned char value;
+
+	for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+		if (value == 0xff) {
+			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+			return -EIO;
+		}
+
+		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+			return 0;
+
+		udelay(50);
+	}
+
+	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+	return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send command and address to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send data to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
+void usb_amd_quirk_pll_enable(void)
+{
+	usb_amd_quirk_pll(0);
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
+
+void usb_amd_dev_put(void)
+{
+	struct pci_dev *nb, *smbus;
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_lock, flags);
+
+	amd_chipset.probe_count--;
+	if (amd_chipset.probe_count > 0) {
+		spin_unlock_irqrestore(&amd_lock, flags);
+		return;
+	}
+
+	/* save them so we can pci_dev_put() outside of the spinlock */
+	nb    = amd_chipset.nb_dev;
+	smbus = amd_chipset.smbus_dev;
+
+	amd_chipset.nb_dev = NULL;
+	amd_chipset.smbus_dev = NULL;
+	amd_chipset.nb_type = 0;
+	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
+	amd_chipset.isoc_reqs = 0;
+	amd_chipset.probe_result = 0;
+
+	spin_unlock_irqrestore(&amd_lock, flags);
+
+	pci_dev_put(nb);
+	pci_dev_put(smbus);
+}
+EXPORT_SYMBOL_GPL(usb_amd_dev_put);
+
+/*
+ * Check if the port is disabled in BIOS on an AMD Promontory host.
+ * BIOS-disabled ports may wake on connect/disconnect and need a
+ * driver workaround to keep them disabled.
+ * Returns true if the port is marked disabled.
+ */
+bool usb_amd_pt_check_port(struct device *device, int port)
+{
+	unsigned char value, port_shift;
+	struct pci_dev *pdev;
+	u16 reg;
+
+	pdev = to_pci_dev(device);
+	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
+
+	pci_read_config_byte(pdev, PT_READ_INDX, &value);
+	if (value != PT_SIG_1_DATA)
+		return false;
+
+	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
+
+	pci_read_config_byte(pdev, PT_READ_INDX, &value);
+	if (value != PT_SIG_2_DATA)
+		return false;
+
+	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
+
+	pci_read_config_byte(pdev, PT_READ_INDX, &value);
+	if (value != PT_SIG_3_DATA)
+		return false;
+
+	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
+
+	pci_read_config_byte(pdev, PT_READ_INDX, &value);
+	if (value != PT_SIG_4_DATA)
+		return false;
+
+	/* Check the disabled-port setting; if the bit is set, the port is enabled */
+	switch (pdev->device) {
+	case 0x43b9:
+	case 0x43ba:
+	/*
+	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
+	 * PT4_P1_REG bits[7..1] represent USB2.0 ports 6 to 0
+	 * PT4_P2_REG bits[6..0] represent ports 13 to 7
+	 */
+		if (port > 6) {
+			reg = PT4_P2_REG;
+			port_shift = port - 7;
+		} else {
+			reg = PT4_P1_REG;
+			port_shift = port + 1;
+		}
+		break;
+	case 0x43bb:
+	/*
+	 * device is AMD_PROMONTORYA_2(0x43bb)
+	 * PT2_P1_REG bits[7..5] represent USB2.0 ports 2 to 0
+	 * PT2_P2_REG bits[5..0] represent ports 9 to 3
+	 */
+		if (port > 2) {
+			reg = PT2_P2_REG;
+			port_shift = port - 3;
+		} else {
+			reg = PT2_P1_REG;
+			port_shift = port + 5;
+		}
+		break;
+	case 0x43bc:
+	/*
+	 * device is AMD_PROMONTORYA_1(0x43bc)
+	 * PT1_P1_REG[7..4] represent USB2.0 ports 3 to 0
+	 * PT1_P2_REG[5..0] represent ports 9 to 4
+	 */
+		if (port > 3) {
+			reg = PT1_P2_REG;
+			port_shift = port - 4;
+		} else {
+			reg = PT1_P1_REG;
+			port_shift = port + 4;
+		}
+		break;
+	default:
+		return false;
+	}
+	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
+	pci_read_config_byte(pdev, PT_READ_INDX, &value);
+
+	return !(value & BIT(port_shift));
+}
+EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
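+
+/* Usage sketch (hypothetical caller): a host controller driver would
+ * consult this per port before acting on an unexpected wake event, e.g.
+ *
+ *	if (usb_amd_pt_check_port(hcd->self.controller, port))
+ *		return;		-- BIOS disabled the port; keep it off
+ */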
+
+/*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+ */
+void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
+{
+	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
+	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
+	 */
+	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
+
+	/* Reset the HC - this will force us to get a
+	 * new notification of any already connected
+	 * ports due to the virtual disconnect that it
+	 * implies.
+	 */
+	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
+	mb();
+	udelay(5);
+	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
+		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
+
+	/* Just to be safe, disable interrupt requests and
+	 * make sure the controller is stopped.
+	 */
+	outw(0, base + UHCI_USBINTR);
+	outw(0, base + UHCI_USBCMD);
+}
+EXPORT_SYMBOL_GPL(uhci_reset_hc);
+
+/*
+ * Initialize a controller that was newly discovered or has just been
+ * resumed.  In either case we can't be sure of its previous state.
+ *
+ * Returns: 1 if the controller was reset, 0 otherwise.
+ */
+int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
+{
+	u16 legsup;
+	unsigned int cmd, intr;
+
+	/*
+	 * When restarting a suspended controller, we expect all the
+	 * settings to be the same as we left them:
+	 *
+	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
+	 *	Controller is stopped and configured with EGSM set;
+	 *	No interrupts enabled except possibly Resume Detect.
+	 *
+	 * If any of these conditions are violated we do a complete reset.
+	 */
+	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
+	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
+		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
+				__func__, legsup);
+		goto reset_needed;
+	}
+
+	cmd = inw(base + UHCI_USBCMD);
+	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
+			!(cmd & UHCI_USBCMD_EGSM)) {
+		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
+				__func__, cmd);
+		goto reset_needed;
+	}
+
+	intr = inw(base + UHCI_USBINTR);
+	if (intr & (~UHCI_USBINTR_RESUME)) {
+		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
+				__func__, intr);
+		goto reset_needed;
+	}
+	return 0;
+
+reset_needed:
+	dev_dbg(&pdev->dev, "Performing full reset\n");
+	uhci_reset_hc(pdev, base);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
+
+static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
+{
+	u16 cmd;
+	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
+}
+
+#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
+#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
+
+static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
+{
+	unsigned long base = 0;
+	int i;
+
+	if (!pio_enabled(pdev))
+		return;
+
+	for (i = 0; i < PCI_ROM_RESOURCE; i++)
+		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
+			base = pci_resource_start(pdev, i);
+			break;
+		}
+
+	if (base)
+		uhci_check_and_reset_hc(pdev, base);
+}
+
+static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
+{
+	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
+}
+
+static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+{
+	void __iomem *base;
+	u32 control;
+	u32 fminterval = 0;
+	bool no_fminterval = false;
+	int cnt;
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (base == NULL)
+		return;
+
+	/*
+	 * ULi M5237 OHCI controller locks the whole system when accessing
+	 * the OHCI_FMINTERVAL offset.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+		no_fminterval = true;
+
+	control = readl(base + OHCI_CONTROL);
+
+/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
+#ifdef __hppa__
+#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
+#else
+#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
+
+	if (control & OHCI_CTRL_IR) {
+		int wait_time = 500; /* arbitrary; 5 seconds */
+		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
+		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
+		while (wait_time > 0 &&
+				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
+			wait_time -= 10;
+			msleep(10);
+		}
+		if (wait_time <= 0)
+			dev_warn(&pdev->dev,
+				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
+				 readl(base + OHCI_CONTROL));
+	}
+#endif
+
+	/* disable interrupts */
+	writel((u32) ~0, base + OHCI_INTRDISABLE);
+
+	/* Reset the USB bus, if the controller isn't already in RESET */
+	if (control & OHCI_HCFS) {
+		/* Go into RESET, preserving RWC (and possibly IR) */
+		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+		readl(base + OHCI_CONTROL);
+
+		/* drive bus reset for at least 50 ms (7.1.7.5) */
+		msleep(50);
+	}
+
+	/* software reset of the controller, preserving HcFmInterval */
+	if (!no_fminterval)
+		fminterval = readl(base + OHCI_FMINTERVAL);
+
+	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+
+	/* reset requires max 10 us delay */
+	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
+		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (!no_fminterval)
+		writel(fminterval, base + OHCI_FMINTERVAL);
+
+	/* Now the controller is safely in SUSPEND and nothing can wake it up */
+	iounmap(base);
+}
+
+static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
+	{
+		/*  Pegatron Lucid (ExoPC) */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
+			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
+		},
+	},
+	{
+		/*  Pegatron Lucid (Ordissimo AIRIS) */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
+			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+		},
+	},
+	{
+		/*  Pegatron Lucid (Ordissimo) */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
+			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+		},
+	},
+	{
+		/* HASEE E200 */
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+			DMI_MATCH(DMI_BOARD_NAME, "E210"),
+			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+		},
+	},
+	{ }
+};
+
+static void ehci_bios_handoff(struct pci_dev *pdev,
+					void __iomem *op_reg_base,
+					u32 cap, u8 offset)
+{
+	int try_handoff = 1, tried_handoff = 0;
+
+	/*
+	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+	 * the handoff on its unused controller.  Skip it.
+	 *
+	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+	 */
+	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+			pdev->device == 0x27cc)) {
+		if (dmi_check_system(ehci_dmi_nohandoff_table))
+			try_handoff = 0;
+	}
+
+	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
+		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
+
+#if 0
+/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
+ * but that seems dubious in general (the BIOS left it off intentionally)
+ * and is known to prevent some systems from booting.  so we won't do this
+ * unless maybe we can determine when we're on a system that needs SMI forced.
+ */
+		/* BIOS workaround (?): be sure the pre-Linux code
+		 * receives the SMI
+		 */
+		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
+		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
+				       val | EHCI_USBLEGCTLSTS_SOOE);
+#endif
+
+		/* Some systems get upset if this semaphore is
+		 * set for any other reason than forcing a BIOS
+		 * handoff.
+		 */
+		pci_write_config_byte(pdev, offset + 3, 1);
+	}
+
+	/* if boot firmware now owns EHCI, spin till it hands it over. */
+	if (try_handoff) {
+		int msec = 1000;
+		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
+			tried_handoff = 1;
+			msleep(10);
+			msec -= 10;
+			pci_read_config_dword(pdev, offset, &cap);
+		}
+	}
+
+	if (cap & EHCI_USBLEGSUP_BIOS) {
+		/* well, possibly buggy BIOS... try to shut it down,
+		 * and hope nothing goes too wrong
+		 */
+		if (try_handoff)
+			dev_warn(&pdev->dev,
+				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
+				 cap);
+		pci_write_config_byte(pdev, offset + 2, 0);
+	}
+
+	/* just in case, always disable EHCI SMIs */
+	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
+
+	/* If the BIOS ever owned the controller then we can't expect
+	 * any power sessions to remain intact.
+	 */
+	if (tried_handoff)
+		writel(0, op_reg_base + EHCI_CONFIGFLAG);
+}
+
+static void quirk_usb_disable_ehci(struct pci_dev *pdev)
+{
+	void __iomem *base, *op_reg_base;
+	u32	hcc_params, cap, val;
+	u8	offset, cap_length;
+	int	wait_time, count = 256/4;
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (base == NULL)
+		return;
+
+	cap_length = readb(base);
+	op_reg_base = base + cap_length;
+
+	/* EHCI 0.96 and later may have "extended capabilities";
+	 * spec section 5.1 explains the BIOS handoff, e.g. for
+	 * booting from a USB disk or using a USB keyboard.
+	 */
+	hcc_params = readl(base + EHCI_HCC_PARAMS);
+	offset = (hcc_params >> 8) & 0xff;
+	while (offset && --count) {
+		pci_read_config_dword(pdev, offset, &cap);
+
+		switch (cap & 0xff) {
+		case 1:
+			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
+			break;
+		case 0: /* Illegal reserved cap, set cap=0 so we exit */
+			cap = 0; /* fall through */
+		default:
+			dev_warn(&pdev->dev,
+				 "EHCI: unrecognized capability %02x\n",
+				 cap & 0xff);
+		}
+		offset = (cap >> 8) & 0xff;
+	}
+	if (!count)
+		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
+
+	/*
+	 * halt EHCI & disable its interrupts in any case
+	 */
+	val = readl(op_reg_base + EHCI_USBSTS);
+	if ((val & EHCI_USBSTS_HALTED) == 0) {
+		val = readl(op_reg_base + EHCI_USBCMD);
+		val &= ~EHCI_USBCMD_RUN;
+		writel(val, op_reg_base + EHCI_USBCMD);
+
+		wait_time = 2000;
+		do {
+			writel(0x3f, op_reg_base + EHCI_USBSTS);
+			udelay(100);
+			wait_time -= 100;
+			val = readl(op_reg_base + EHCI_USBSTS);
+			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
+				break;
+			}
+		} while (wait_time > 0);
+	}
+	writel(0, op_reg_base + EHCI_USBINTR);
+	writel(0x3f, op_reg_base + EHCI_USBSTS);
+
+	iounmap(base);
+}
+
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+		int wait_usec, int delay_usec)
+{
+	u32	result;
+
+	do {
+		result = readl(ptr);
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(delay_usec);
+		wait_usec -= delay_usec;
+	} while (wait_usec > 0);
+	return -ETIMEDOUT;
+}
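+
+/* Example (as used by quirk_usb_handoff_xhci() below): wait up to one
+ * second, polling every 10 microseconds, for the BIOS to drop its
+ * ownership semaphore:
+ *
+ *	timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+ *			0, 1000000, 10);
+ */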
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports.  These ports can be switched between either
+ * controller.  Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start.  This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over.  The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	u32		ports_available;
+	bool		ehci_found = false;
+	struct pci_dev	*companion = NULL;
+
+	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
+	 * switching ports from EHCI to xHCI
+	 */
+	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
+	    xhci_pdev->subsystem_device == 0x90a8)
+		return;
+
+	/* make sure an intel EHCI controller exists */
+	for_each_pci_dev(companion) {
+		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
+		    companion->vendor == PCI_VENDOR_ID_INTEL) {
+			ehci_found = true;
+			break;
+		}
+	}
+
+	if (!ehci_found)
+		return;
+
+	/* Don't switch the ports over if the user hasn't compiled the xHCI
+	 * driver.  Otherwise they will see "dead" USB ports that don't power
+	 * the devices.
+	 */
+	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
+		dev_warn(&xhci_pdev->dev,
+			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
+		dev_warn(&xhci_pdev->dev,
+				"USB 3.0 devices will work at USB 2.0 speeds.\n");
+		usb_disable_xhci_ports(xhci_pdev);
+		return;
+	}
+
+	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
+	 * indicates the ports that can be changed from the OS.
+	 */
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
+			&ports_available);
+
+	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
+			ports_available);
+
+	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+	 * Register, to turn on SuperSpeed terminations for the
+	 * switchable ports.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			ports_available);
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev,
+		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
+		ports_available);
+
+	/* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
+	 * indicates the USB 2.0 ports to be controlled by the xHCI host.
+	 */
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+			&ports_available);
+
+	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
+			ports_available);
+
+	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+	 * switch the USB 2.0 power and data lines over to the xHCI
+	 * host.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			ports_available);
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev,
+		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
+		ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
+
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+
+/*
+ * PCI quirks for xHCI.
+ *
+ * Takes care of the handoff between the pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 1 second for the BIOS to hand over control.
+ * If we time out, assume the BIOS is broken and take control anyway.
+ */
+static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+	void __iomem *base;
+	int ext_cap_offset;
+	void __iomem *op_reg_base;
+	u32 val;
+	int timeout;
+	int len = pci_resource_len(pdev, 0);
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+	if (base == NULL)
+		return;
+
+	/*
+	 * Find the Legacy Support Capability register -
+	 * this is optional for xHCI host controllers.
+	 */
+	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
+
+	if (!ext_cap_offset)
+		goto hc_init;
+
+	if ((ext_cap_offset + sizeof(val)) > len) {
+		/* We're reading garbage from the controller */
+		dev_warn(&pdev->dev, "xHCI controller failing to respond");
+		goto iounmap;
+	}
+	val = readl(base + ext_cap_offset);
+
+	/* Auto handoff never worked for these devices. Force it and continue */
+	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
+			(pdev->vendor == PCI_VENDOR_ID_RENESAS
+			 && pdev->device == 0x0014)) {
+		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
+		writel(val, base + ext_cap_offset);
+	}
+
+	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
+	if (val & XHCI_HC_BIOS_OWNED) {
+		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+		/* Wait for 1 second with 10 microsecond polling interval */
+		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+				0, 1000000, 10);
+
+		/* Assume a buggy BIOS and take HC ownership anyway */
+		if (timeout) {
+			dev_warn(&pdev->dev,
+				 "xHCI BIOS handoff failed (BIOS bug ?) %08x\n",
+				 val);
+			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+		}
+	}
+
+	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	/* Mask off (turn off) any enabled SMIs */
+	val &= XHCI_LEGACY_DISABLE_SMI;
+	/* Mask all SMI events bits, RW1C */
+	val |= XHCI_LEGACY_SMI_EVENTS;
+	/* Disable any BIOS SMIs and clear all SMI events */
+	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+		usb_enable_intel_xhci_ports(pdev);
+
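+	/* CAPLENGTH (the low byte of the first capability register) gives
+	 * the offset from the MMIO base to the operational registers.
+	 */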
+	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+	/* Wait for the host controller to be ready before writing any
+	 * operational or runtime registers.  Wait 5 seconds and no more.
+	 */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+			5000000, 10);
+	/* Assume a buggy HC and start HC initialization anyway */
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
+			 val);
+	}
+
+	/* Send the halt and disable interrupts command */
+	val = readl(op_reg_base + XHCI_CMD_OFFSET);
+	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+	writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+			XHCI_MAX_HALT_USEC, 125);
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+			 "xHCI HW did not halt within %d usec status = 0x%x\n",
+			 XHCI_MAX_HALT_USEC, val);
+	}
+
+iounmap:
+	iounmap(base);
+}
+
+static void quirk_usb_early_handoff(struct pci_dev *pdev)
+{
+	/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
+	 * This device does not need/support EHCI/OHCI handoff.
+	 */
+	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
+		return;
+	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
+			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
+			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
+			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
+		return;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_warn(&pdev->dev,
+			 "Can't enable PCI device, BIOS handoff failed.\n");
+		return;
+	}
+	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
+		quirk_usb_handoff_uhci(pdev);
+	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
+		quirk_usb_handoff_ohci(pdev);
+	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
+		quirk_usb_disable_ehci(pdev);
+	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+		quirk_usb_handoff_xhci(pdev);
+	pci_disable_device(pdev);
+}
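+/*
+ * Register the handoff for every PCI serial-USB device at FINAL fixup
+ * time; the class shift of 8 ignores the programming-interface byte, so
+ * UHCI/OHCI/EHCI/xHCI controllers all match.
+ */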
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
new file mode 100644
index 0000000..63c6330
--- /dev/null
+++ b/drivers/usb/host/pci-quirks.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_USB_PCI_QUIRKS_H
+#define __LINUX_USB_PCI_QUIRKS_H
+
+#ifdef CONFIG_USB_PCI
+void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
+int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
+int usb_amd_find_chipset_info(void);
+int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
+bool usb_amd_hang_symptom_quirk(void);
+bool usb_amd_prefetch_quirk(void);
+void usb_amd_dev_put(void);
+void usb_amd_quirk_pll_disable(void);
+void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+void sb800_prefetch(struct device *dev, int on);
+bool usb_amd_pt_check_port(struct device *device, int port);
+#else
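+/* Inline stubs so callers build unchanged when CONFIG_USB_PCI is off. */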
+struct pci_dev;
+static inline void usb_amd_quirk_pll_disable(void) {}
+static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
+static inline void usb_amd_dev_put(void) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+static inline void sb800_prefetch(struct device *dev, int on) {}
+static inline bool usb_amd_pt_check_port(struct device *device, int port)
+{
+	return false;
+}
+#endif  /* CONFIG_USB_PCI */
+
+#endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
new file mode 100644
index 0000000..984892d
--- /dev/null
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -0,0 +1,2527 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A66597 HCD (Host Controller Driver)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ * Portions Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Portions Copyright (C) 2004-2005 David Brownell
+ * Portions Copyright (C) 1999 Roman Weissgaerber
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+
+#include "r8a66597.h"
+
+MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:r8a66597_hcd");
+
+#define DRIVER_VERSION	"2009-05-26"
+
+static const char hcd_name[] = "r8a66597_hcd";
+
+static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
+static int r8a66597_get_frame(struct usb_hcd *hcd);
+
+/* this function must be called with interrupt disabled */
+static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+			    unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, INTENB0);
+	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+	r8a66597_bset(r8a66597, 1 << pipenum, reg);
+	r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+/* this function must be called with interrupt disabled */
+static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+			     unsigned long reg)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, INTENB0);
+	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+	r8a66597_bclr(r8a66597, 1 << pipenum, reg);
+	r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void set_devadd_reg(struct r8a66597 *r8a66597, u8 r8a66597_address,
+			   u16 usbspd, u8 upphub, u8 hubport, int port)
+{
+	u16 val;
+	unsigned long devadd_reg = get_devadd_addr(r8a66597_address);
+
+	val = (upphub << 11) | (hubport << 8) | (usbspd << 6) | (port & 0x0001);
+	r8a66597_write(r8a66597, val, devadd_reg);
+}
+
+static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
+{
+	u16 tmp;
+	int i = 0;
+
+	if (r8a66597->pdata->on_chip) {
+		clk_prepare_enable(r8a66597->clk);
+		do {
+			r8a66597_write(r8a66597, SCKE, SYSCFG0);
+			tmp = r8a66597_read(r8a66597, SYSCFG0);
+			if (i++ > 1000) {
+				printk(KERN_ERR "r8a66597: reg access fail.\n");
+				return -ENXIO;
+			}
+		} while ((tmp & SCKE) != SCKE);
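+		/* extra init write required on the on-chip variant; its
+		 * purpose is not documented in this driver */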
+		r8a66597_write(r8a66597, 0x04, 0x02);
+	} else {
+		do {
+			r8a66597_write(r8a66597, USBE, SYSCFG0);
+			tmp = r8a66597_read(r8a66597, SYSCFG0);
+			if (i++ > 1000) {
+				printk(KERN_ERR "r8a66597: reg access fail.\n");
+				return -ENXIO;
+			}
+		} while ((tmp & USBE) != USBE);
+		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
+			      XTAL, SYSCFG0);
+
+		i = 0;
+		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+		do {
+			msleep(1);
+			tmp = r8a66597_read(r8a66597, SYSCFG0);
+			if (i++ > 500) {
+				printk(KERN_ERR "r8a66597: reg access fail.\n");
+				return -ENXIO;
+			}
+		} while ((tmp & SCKE) != SCKE);
+	}
+
+	return 0;
+}
+
+static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
+{
+	r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+	udelay(1);
+
+	if (r8a66597->pdata->on_chip) {
+		clk_disable_unprepare(r8a66597->clk);
+	} else {
+		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
+		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
+		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+	}
+}
+
+static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port)
+{
+	u16 val;
+
+	val = port ? DRPD : DCFM | DRPD;
+	r8a66597_bset(r8a66597, val, get_syscfg_reg(port));
+	r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port));
+
+	r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR, get_dmacfg_reg(port));
+	r8a66597_bclr(r8a66597, DTCHE, get_intenb_reg(port));
+	r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+}
+
+static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
+{
+	u16 val, tmp;
+
+	r8a66597_write(r8a66597, 0, get_intenb_reg(port));
+	r8a66597_write(r8a66597, 0, get_intsts_reg(port));
+
+	r8a66597_port_power(r8a66597, port, 0);
+
+	do {
+		tmp = r8a66597_read(r8a66597, SOFCFG) & EDGESTS;
+		udelay(640);
+	} while (tmp == EDGESTS);
+
+	val = port ? DRPD : DCFM | DRPD;
+	r8a66597_bclr(r8a66597, val, get_syscfg_reg(port));
+	r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
+}
+
+static int enable_controller(struct r8a66597 *r8a66597)
+{
+	int ret, port;
+	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
+
+	ret = r8a66597_clock_enable(r8a66597);
+	if (ret < 0)
+		return ret;
+
+	r8a66597_bset(r8a66597, vif & LDRV, PINCFG);
+	r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+	r8a66597_bset(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+	r8a66597_bset(r8a66597, irq_sense & INTL, SOFCFG);
+	r8a66597_bset(r8a66597, BRDY0, BRDYENB);
+	r8a66597_bset(r8a66597, BEMP0, BEMPENB);
+
+	r8a66597_bset(r8a66597, endian & BIGEND, CFIFOSEL);
+	r8a66597_bset(r8a66597, endian & BIGEND, D0FIFOSEL);
+	r8a66597_bset(r8a66597, endian & BIGEND, D1FIFOSEL);
+	r8a66597_bset(r8a66597, TRNENSEL, SOFCFG);
+
+	r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++)
+		r8a66597_enable_port(r8a66597, port);
+
+	return 0;
+}
+
+static void disable_controller(struct r8a66597 *r8a66597)
+{
+	int port;
+
+	/* disable interrupts */
+	r8a66597_write(r8a66597, 0, INTENB0);
+	r8a66597_write(r8a66597, 0, INTENB1);
+	r8a66597_write(r8a66597, 0, BRDYENB);
+	r8a66597_write(r8a66597, 0, BEMPENB);
+	r8a66597_write(r8a66597, 0, NRDYENB);
+
+	/* clear status */
+	r8a66597_write(r8a66597, 0, BRDYSTS);
+	r8a66597_write(r8a66597, 0, NRDYSTS);
+	r8a66597_write(r8a66597, 0, BEMPSTS);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++)
+		r8a66597_disable_port(r8a66597, port);
+
+	r8a66597_clock_disable(r8a66597);
+}
+
+static int get_parent_r8a66597_address(struct r8a66597 *r8a66597,
+				       struct usb_device *udev)
+{
+	struct r8a66597_device *dev;
+
+	if (udev->parent && udev->parent->devnum != 1)
+		udev = udev->parent;
+
+	dev = dev_get_drvdata(&udev->dev);
+	if (dev)
+		return dev->address;
+	else
+		return 0;
+}
+
+static int is_child_device(char *devpath)
+{
+	return (devpath[2] ? 1 : 0);
+}
+
+static int is_hub_limit(char *devpath)
+{
+	return ((strlen(devpath) >= 4) ? 1 : 0);
+}
+
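+/*
+ * udev->devpath is the dotted topology string ("1", "1.2", ...): the
+ * first character names the root port and the third, when present, the
+ * port on the external hub.  Single-digit parsing is also why hub depth
+ * is limited in is_hub_limit() above.
+ */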
+static void get_port_number(struct r8a66597 *r8a66597,
+			    char *devpath, u16 *root_port, u16 *hub_port)
+{
+	if (root_port) {
+		*root_port = (devpath[0] & 0x0F) - 1;
+		if (*root_port >= r8a66597->max_root_hub)
+			printk(KERN_ERR "r8a66597: Illegal root port number.\n");
+	}
+	if (hub_port)
+		*hub_port = devpath[2] & 0x0F;
+}
+
+static u16 get_r8a66597_usb_speed(enum usb_device_speed speed)
+{
+	u16 usbspd = 0;
+
+	switch (speed) {
+	case USB_SPEED_LOW:
+		usbspd = LSMODE;
+		break;
+	case USB_SPEED_FULL:
+		usbspd = FSMODE;
+		break;
+	case USB_SPEED_HIGH:
+		usbspd = HSMODE;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: unknown speed\n");
+		break;
+	}
+
+	return usbspd;
+}
+
+static void set_child_connect_map(struct r8a66597 *r8a66597, int address)
+{
+	int idx;
+
+	idx = address / 32;
+	r8a66597->child_connect_map[idx] |= 1 << (address % 32);
+}
+
+static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
+{
+	int idx;
+
+	idx = address / 32;
+	r8a66597->child_connect_map[idx] &= ~(1 << (address % 32));
+}
+
+static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
+{
+	u16 pipenum = pipe->info.pipenum;
+	const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
+	const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
+	const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
+
+	if (dma_ch > R8A66597_PIPE_NO_DMA)	/* not using a DMA FIFO? */
+		dma_ch = R8A66597_PIPE_NO_DMA;
+
+	pipe->fifoaddr = fifoaddr[dma_ch];
+	pipe->fifosel = fifosel[dma_ch];
+	pipe->fifoctr = fifoctr[dma_ch];
+
+	if (pipenum == 0)
+		pipe->pipectr = DCPCTR;
+	else
+		pipe->pipectr = get_pipectr_addr(pipenum);
+
+	if (check_bulk_or_isoc(pipenum)) {
+		pipe->pipetre = get_pipetre_addr(pipenum);
+		pipe->pipetrn = get_pipetrn_addr(pipenum);
+	} else {
+		pipe->pipetre = 0;
+		pipe->pipetrn = 0;
+	}
+}
+
+static struct r8a66597_device *
+get_urb_to_r8a66597_dev(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	if (usb_pipedevice(urb->pipe) == 0)
+		return &r8a66597->device0;
+
+	return dev_get_drvdata(&urb->dev->dev);
+}
+
+static int make_r8a66597_device(struct r8a66597 *r8a66597,
+				struct urb *urb, u8 addr)
+{
+	struct r8a66597_device *dev;
+	int usb_address = urb->setup_packet[2];	/* urb->pipe is address 0 */
+
+	dev = kzalloc(sizeof(struct r8a66597_device), GFP_ATOMIC);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	dev_set_drvdata(&urb->dev->dev, dev);
+	dev->udev = urb->dev;
+	dev->address = addr;
+	dev->usb_address = usb_address;
+	dev->state = USB_STATE_ADDRESS;
+	dev->ep_in_toggle = 0;
+	dev->ep_out_toggle = 0;
+	INIT_LIST_HEAD(&dev->device_list);
+	list_add_tail(&dev->device_list, &r8a66597->child_device);
+
+	get_port_number(r8a66597, urb->dev->devpath,
+			&dev->root_port, &dev->hub_port);
+	if (!is_child_device(urb->dev->devpath))
+		r8a66597->root_hub[dev->root_port].dev = dev;
+
+	set_devadd_reg(r8a66597, dev->address,
+		       get_r8a66597_usb_speed(urb->dev->speed),
+		       get_parent_r8a66597_address(r8a66597, urb->dev),
+		       dev->hub_port, dev->root_port);
+
+	return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	u8 addr;	/* R8A66597's address */
+	struct r8a66597_device *dev;
+
+	if (is_hub_limit(urb->dev->devpath)) {
+		dev_err(&urb->dev->dev, "External hub limit reached.\n");
+		return 0;
+	}
+
+	dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+	if (dev && dev->state >= USB_STATE_ADDRESS)
+		return dev->address;
+
+	for (addr = 1; addr <= R8A66597_MAX_DEVICE; addr++) {
+		if (r8a66597->address_map & (1 << addr))
+			continue;
+
+		dev_dbg(&urb->dev->dev, "alloc_address: r8a66597_addr=%d\n", addr);
+		r8a66597->address_map |= 1 << addr;
+
+		if (make_r8a66597_device(r8a66597, urb, addr) < 0)
+			return 0;
+
+		return addr;
+	}
+
+	dev_err(&urb->dev->dev,
+		"cannot communicate with more than 10 USB devices at once (map 0x%x)\n",
+		r8a66597->address_map);
+
+	return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static void free_usb_address(struct r8a66597 *r8a66597,
+			     struct r8a66597_device *dev, int reset)
+{
+	int port;
+
+	if (!dev)
+		return;
+
+	dev_dbg(&dev->udev->dev, "free_addr: addr=%d\n", dev->address);
+
+	dev->state = USB_STATE_DEFAULT;
+	r8a66597->address_map &= ~(1 << dev->address);
+	dev->address = 0;
+	/*
+	 * Only when resetting USB is it necessary to erase drvdata. When
+	 * a USB device behind a hub is disconnected, "dev->udev" has
+	 * already been freed by usb_disconnect(), so we cannot access it.
+	 */
+	if (reset)
+		dev_set_drvdata(&dev->udev->dev, NULL);
+	list_del(&dev->device_list);
+	kfree(dev);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++) {
+		if (r8a66597->root_hub[port].dev == dev) {
+			r8a66597->root_hub[port].dev = NULL;
+			break;
+		}
+	}
+}
+
+static void r8a66597_reg_wait(struct r8a66597 *r8a66597, unsigned long reg,
+			      u16 mask, u16 loop)
+{
+	u16 tmp;
+	int i = 0;
+
+	do {
+		tmp = r8a66597_read(r8a66597, reg);
+		if (i++ > 1000000) {
+			printk(KERN_ERR "r8a66597: register%lx, loop %x "
+			       "is timeout\n", reg, loop);
+			break;
+		}
+		ndelay(1);
+	} while ((tmp & mask) != loop);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_start(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
+	if ((pipe->info.pipenum != 0) && ((tmp & PID_STALL) != 0)) /* stall? */
+		r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
+	r8a66597_mdfy(r8a66597, PID_BUF, PID, pipe->pipectr);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_stop(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
+	if ((tmp & PID_STALL11) != PID_STALL11)	/* force stall? */
+		r8a66597_mdfy(r8a66597, PID_STALL, PID, pipe->pipectr);
+	r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
+	r8a66597_reg_wait(r8a66597, pipe->pipectr, PBUSY, 0);
+}
+
+/* this function must be called with interrupt disabled */
+static void clear_all_buffer(struct r8a66597 *r8a66597,
+			     struct r8a66597_pipe *pipe)
+{
+	u16 tmp;
+
+	if (!pipe || pipe->info.pipenum == 0)
+		return;
+
+	pipe_stop(r8a66597, pipe);
+	r8a66597_bset(r8a66597, ACLRM, pipe->pipectr);
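+	/* dummy reads: give the auto buffer clear time to take effect
+	 * before ACLRM is dropped again */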
+	tmp = r8a66597_read(r8a66597, pipe->pipectr);
+	tmp = r8a66597_read(r8a66597, pipe->pipectr);
+	tmp = r8a66597_read(r8a66597, pipe->pipectr);
+	r8a66597_bclr(r8a66597, ACLRM, pipe->pipectr);
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597,
+				 struct r8a66597_pipe *pipe, int toggle)
+{
+	if (toggle)
+		r8a66597_bset(r8a66597, SQSET, pipe->pipectr);
+	else
+		r8a66597_bset(r8a66597, SQCLR, pipe->pipectr);
+}
+
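+/*
+ * FIFO port access width: the on-chip variant sits on a 32-bit bus,
+ * while external controllers are wired 16 bits wide.
+ */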
+static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
+{
+	if (r8a66597->pdata->on_chip)
+		return MBW_32;
+	else
+		return MBW_16;
+}
+
+/* this function must be called with interrupt disabled */
+static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	unsigned short mbw = mbw_value(r8a66597);
+
+	r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL);
+	r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum);
+}
+
+/* this function must be called with interrupt disabled */
+static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597,
+					 struct r8a66597_pipe *pipe)
+{
+	unsigned short mbw = mbw_value(r8a66597);
+
+	cfifo_change(r8a66597, 0);
+	r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D0FIFOSEL);
+	r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D1FIFOSEL);
+
+	r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, mbw | CURPIPE,
+		      pipe->fifosel);
+	r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum);
+}
+
+static u16 r8a66597_get_pipenum(struct urb *urb, struct usb_host_endpoint *hep)
+{
+	struct r8a66597_pipe *pipe = hep->hcpriv;
+
+	if (usb_pipeendpoint(urb->pipe) == 0)
+		return 0;
+	else
+		return pipe->info.pipenum;
+}
+
+static u16 get_urb_to_r8a66597_addr(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+
+	return (usb_pipedevice(urb->pipe) == 0) ? 0 : dev->address;
+}
+
+static unsigned short *get_toggle_pointer(struct r8a66597_device *dev,
+					  int urb_pipe)
+{
+	if (!dev)
+		return NULL;
+
+	return usb_pipein(urb_pipe) ? &dev->ep_in_toggle : &dev->ep_out_toggle;
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_set(struct r8a66597 *r8a66597,
+			    struct r8a66597_pipe *pipe,
+			    struct urb *urb, int set)
+{
+	struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+	unsigned char endpoint = usb_pipeendpoint(urb->pipe);
+	unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
+
+	if (!toggle)
+		return;
+
+	if (set)
+		*toggle |= 1 << endpoint;
+	else
+		*toggle &= ~(1 << endpoint);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_save(struct r8a66597 *r8a66597,
+			     struct r8a66597_pipe *pipe,
+			     struct urb *urb)
+{
+	if (r8a66597_read(r8a66597, pipe->pipectr) & SQMON)
+		pipe_toggle_set(r8a66597, pipe, urb, 1);
+	else
+		pipe_toggle_set(r8a66597, pipe, urb, 0);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_restore(struct r8a66597 *r8a66597,
+				struct r8a66597_pipe *pipe,
+				struct urb *urb)
+{
+	struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+	unsigned char endpoint = usb_pipeendpoint(urb->pipe);
+	unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
+
+	if (!toggle)
+		return;
+
+	r8a66597_pipe_toggle(r8a66597, pipe, *toggle & (1 << endpoint));
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_buffer_setting(struct r8a66597 *r8a66597,
+				struct r8a66597_pipe_info *info)
+{
+	u16 val = 0;
+
+	if (info->pipenum == 0)
+		return;
+
+	r8a66597_bset(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
+	r8a66597_bclr(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
+	r8a66597_write(r8a66597, info->pipenum, PIPESEL);
+	if (!info->dir_in)
+		val |= R8A66597_DIR;
+	if (info->type == R8A66597_BULK && info->dir_in)
+		val |= R8A66597_DBLB | R8A66597_SHTNAK;
+	val |= info->type | info->epnum;
+	r8a66597_write(r8a66597, val, PIPECFG);
+
+	r8a66597_write(r8a66597, (info->buf_bsize << 10) | (info->bufnum),
+		       PIPEBUF);
+	r8a66597_write(r8a66597, make_devsel(info->address) | info->maxpacket,
+		       PIPEMAXP);
+	r8a66597_write(r8a66597, info->interval, PIPEPERI);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_setting(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+	struct r8a66597_pipe_info *info;
+	struct urb *urb = td->urb;
+
+	if (td->pipenum > 0) {
+		info = &td->pipe->info;
+		cfifo_change(r8a66597, 0);
+		pipe_buffer_setting(r8a66597, info);
+
+		if (!usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+				   usb_pipeout(urb->pipe)) &&
+		    !usb_pipecontrol(urb->pipe)) {
+			r8a66597_pipe_toggle(r8a66597, td->pipe, 0);
+			pipe_toggle_set(r8a66597, td->pipe, urb, 0);
+			clear_all_buffer(r8a66597, td->pipe);
+			usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+				      usb_pipeout(urb->pipe), 1);
+		}
+		pipe_toggle_restore(r8a66597, td->pipe, urb);
+	}
+}
+
+/* this function must be called with interrupt disabled */
+static u16 get_empty_pipenum(struct r8a66597 *r8a66597,
+			     struct usb_endpoint_descriptor *ep)
+{
+	u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min;
+
+	memset(array, 0, sizeof(array));
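+	/*
+	 * The hardware pipes have fixed roles; this driver parks ISO on
+	 * pipes 1-2, bulk on 3-5 and interrupt on 6-9, then picks the
+	 * least-loaded candidate below.
+	 */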
+	switch (usb_endpoint_type(ep)) {
+	case USB_ENDPOINT_XFER_BULK:
+		if (usb_endpoint_dir_in(ep))
+			array[i++] = 4;
+		else {
+			array[i++] = 3;
+			array[i++] = 5;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (usb_endpoint_dir_in(ep)) {
+			array[i++] = 6;
+			array[i++] = 7;
+			array[i++] = 8;
+		} else
+			array[i++] = 9;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (usb_endpoint_dir_in(ep))
+			array[i++] = 2;
+		else
+			array[i++] = 1;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: Illegal type\n");
+		return 0;
+	}
+
+	i = 1;
+	min = array[0];
+	while (array[i] != 0) {
+		if (r8a66597->pipe_cnt[min] > r8a66597->pipe_cnt[array[i]])
+			min = array[i];
+		i++;
+	}
+
+	return min;
+}
+
+static u16 get_r8a66597_type(__u8 type)
+{
+	u16 r8a66597_type;
+
+	switch (type) {
+	case USB_ENDPOINT_XFER_BULK:
+		r8a66597_type = R8A66597_BULK;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		r8a66597_type = R8A66597_INT;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		r8a66597_type = R8A66597_ISO;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: Illegal type\n");
+		r8a66597_type = 0x0000;
+		break;
+	}
+
+	return r8a66597_type;
+}
+
+static u16 get_bufnum(u16 pipenum)
+{
+	u16 bufnum = 0;
+
+	if (pipenum == 0)
+		bufnum = 0;
+	else if (check_bulk_or_isoc(pipenum))
+		bufnum = 8 + (pipenum - 1) * R8A66597_BUF_BSIZE * 2;
+	else if (check_interrupt(pipenum))
+		bufnum = 4 + (pipenum - 6);
+	else
+		printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
+
+	return bufnum;
+}
+
+static u16 get_buf_bsize(u16 pipenum)
+{
+	u16 buf_bsize = 0;
+
+	if (pipenum == 0)
+		buf_bsize = 3;
+	else if (check_bulk_or_isoc(pipenum))
+		buf_bsize = R8A66597_BUF_BSIZE - 1;
+	else if (check_interrupt(pipenum))
+		buf_bsize = 0;
+	else
+		printk(KERN_ERR "r8a66597: Illegal pipenum (%d)\n", pipenum);
+
+	return buf_bsize;
+}
+
+/* this function must be called with interrupt disabled */
+static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597,
+				     struct r8a66597_device *dev,
+				     struct r8a66597_pipe *pipe,
+				     struct urb *urb)
+{
+	int i;
+	struct r8a66597_pipe_info *info = &pipe->info;
+	unsigned short mbw = mbw_value(r8a66597);
+
+	/* pipe DMA is only for external controllers */
+	if (r8a66597->pdata->on_chip)
+		return;
+
+	if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) {
+		for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) {
+			if ((r8a66597->dma_map & (1 << i)) != 0)
+				continue;
+
+			dev_info(&dev->udev->dev,
+				 "address %d, EndpointAddress 0x%02x uses DMA FIFO\n",
+				 usb_pipedevice(urb->pipe),
+				 info->dir_in ?
+					USB_ENDPOINT_DIR_MASK + info->epnum :
+					info->epnum);
+
+			r8a66597->dma_map |= 1 << i;
+			dev->dma_map |= 1 << i;
+			set_pipe_reg_addr(pipe, i);
+
+			cfifo_change(r8a66597, 0);
+			r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum,
+				      mbw | CURPIPE, pipe->fifosel);
+
+			r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE,
+					  pipe->info.pipenum);
+			r8a66597_bset(r8a66597, BCLR, pipe->fifoctr);
+			break;
+		}
+	}
+}
+
+/* this function must be called with interrupt disabled */
+static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
+				 struct usb_host_endpoint *hep,
+				 struct r8a66597_pipe_info *info)
+{
+	struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+	struct r8a66597_pipe *pipe = hep->hcpriv;
+
+	dev_dbg(&dev->udev->dev, "enable_pipe:\n");
+
+	pipe->info = *info;
+	set_pipe_reg_addr(pipe, R8A66597_PIPE_NO_DMA);
+	r8a66597->pipe_cnt[pipe->info.pipenum]++;
+	dev->pipe_cnt[pipe->info.pipenum]++;
+
+	enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
+}
+
+static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
+			      int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
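+	/* IN data was PIO'd into the transfer buffer; flush the dcache so
+	 * other mappings of those pages see the new data */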
+	if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+		void *ptr;
+
+		for (ptr = urb->transfer_buffer;
+		     ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+		     ptr += PAGE_SIZE)
+			flush_dcache_page(virt_to_page(ptr));
+	}
+
+	usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+	spin_unlock(&r8a66597->lock);
+	usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
+	spin_lock(&r8a66597->lock);
+}
+
+/* this function must be called with interrupt disabled */
+static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
+{
+	struct r8a66597_td *td, *next;
+	struct urb *urb;
+	struct list_head *list = &r8a66597->pipe_queue[pipenum];
+
+	if (list_empty(list))
+		return;
+
+	list_for_each_entry_safe(td, next, list, queue) {
+		if (td->address != address)
+			continue;
+
+		urb = td->urb;
+		list_del(&td->queue);
+		kfree(td);
+
+		if (urb)
+			r8a66597_urb_done(r8a66597, urb, -ENODEV);
+
+		break;
+	}
+}
+
+/* this function must be called with interrupt disabled */
+static void disable_r8a66597_pipe_all(struct r8a66597 *r8a66597,
+				      struct r8a66597_device *dev)
+{
+	int check_ep0 = 0;
+	u16 pipenum;
+
+	if (!dev)
+		return;
+
+	for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		if (!dev->pipe_cnt[pipenum])
+			continue;
+
+		if (!check_ep0) {
+			check_ep0 = 1;
+			force_dequeue(r8a66597, 0, dev->address);
+		}
+
+		r8a66597->pipe_cnt[pipenum] -= dev->pipe_cnt[pipenum];
+		dev->pipe_cnt[pipenum] = 0;
+		force_dequeue(r8a66597, pipenum, dev->address);
+	}
+
+	dev_dbg(&dev->udev->dev, "disable_pipe\n");
+
+	r8a66597->dma_map &= ~(dev->dma_map);
+	dev->dma_map = 0;
+}
+
+static u16 get_interval(struct urb *urb, __u8 interval)
+{
+	u16 time = 1;
+	int i;
+
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		if (interval > IITV)
+			time = IITV;
+		else
+			time = interval ? interval - 1 : 0;
+	} else {
+		if (interval > 128) {
+			time = IITV;
+		} else {
+			/* calculate the nearest value for PIPEPERI */
+			for (i = 0; i < 7; i++) {
+				if ((1 << i) < interval &&
+				    ((1 << (i + 1)) > interval))
+					time = 1 << i;
+			}
+		}
+	}
+
+	return time;
+}
+
+static unsigned long get_timer_interval(struct urb *urb, __u8 interval)
+{
+	__u8 i;
+	unsigned long time = 1;
+
+	if (usb_pipeisoc(urb->pipe))
+		return 0;
+
+	if (get_r8a66597_usb_speed(urb->dev->speed) == HSMODE) {
+		for (i = 0; i < (interval - 1); i++)
+			time *= 2;
+		time = time * 125 / 1000;	/* uSOF -> msec */
+	} else {
+		time = interval;
+	}
+
+	return time;
+}
+
+/* this function must be called with interrupt disabled */
+static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
+			   struct usb_host_endpoint *hep,
+			   struct usb_endpoint_descriptor *ep)
+{
+	struct r8a66597_pipe_info info;
+
+	info.pipenum = get_empty_pipenum(r8a66597, ep);
+	info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
+	info.epnum = usb_endpoint_num(ep);
+	info.maxpacket = usb_endpoint_maxp(ep);
+	info.type = get_r8a66597_type(usb_endpoint_type(ep));
+	info.bufnum = get_bufnum(info.pipenum);
+	info.buf_bsize = get_buf_bsize(info.pipenum);
+	if (info.type == R8A66597_BULK) {
+		info.interval = 0;
+		info.timer_interval = 0;
+	} else {
+		info.interval = get_interval(urb, ep->bInterval);
+		info.timer_interval = get_timer_interval(urb, ep->bInterval);
+	}
+	if (usb_endpoint_dir_in(ep))
+		info.dir_in = 1;
+	else
+		info.dir_in = 0;
+
+	enable_r8a66597_pipe(r8a66597, urb, hep, &info);
+}
+
+static void init_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	struct r8a66597_device *dev;
+
+	dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+	dev->state = USB_STATE_CONFIGURED;
+}
+
+static void pipe_irq_enable(struct r8a66597 *r8a66597, struct urb *urb,
+			    u16 pipenum)
+{
+	if (pipenum == 0 && usb_pipeout(urb->pipe))
+		enable_irq_empty(r8a66597, pipenum);
+	else
+		enable_irq_ready(r8a66597, pipenum);
+
+	if (!usb_pipeisoc(urb->pipe))
+		enable_irq_nrdy(r8a66597, pipenum);
+}
+
+static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	disable_irq_ready(r8a66597, pipenum);
+	disable_irq_nrdy(r8a66597, pipenum);
+}
+
+static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
+{
+	mod_timer(&r8a66597->rh_timer,
+			jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
+}
+
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
+					int connect)
+{
+	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+	rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+	rh->scount = R8A66597_MAX_SAMPLING;
+	if (connect)
+		rh->port |= USB_PORT_STAT_CONNECTION;
+	else
+		rh->port &= ~USB_PORT_STAT_CONNECTION;
+	rh->port |= USB_PORT_STAT_C_CONNECTION << 16;
+
+	r8a66597_root_hub_start_polling(r8a66597);
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
+					u16 syssts)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+	if (syssts == SE0) {
+		r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
+		r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+	} else {
+		if (syssts == FS_JSTS)
+			r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port));
+		else if (syssts == LS_JSTS)
+			r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port));
+
+		r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port));
+		r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port));
+
+		if (r8a66597->bus_suspended)
+			usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
+	}
+
+	spin_unlock(&r8a66597->lock);
+	usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
+	spin_lock(&r8a66597->lock);
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
+{
+	u16 speed = get_rh_usb_speed(r8a66597, port);
+	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+	rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED);
+	if (speed == HSMODE)
+		rh->port |= USB_PORT_STAT_HIGH_SPEED;
+	else if (speed == LSMODE)
+		rh->port |= USB_PORT_STAT_LOW_SPEED;
+
+	rh->port &= ~USB_PORT_STAT_RESET;
+	rh->port |= USB_PORT_STAT_ENABLE;
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
+{
+	struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
+
+	disable_r8a66597_pipe_all(r8a66597, dev);
+	free_usb_address(r8a66597, dev, 0);
+
+	start_root_hub_sampling(r8a66597, port, 0);
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_setup_packet(struct r8a66597 *r8a66597,
+				 struct r8a66597_td *td)
+{
+	int i;
+	__le16 *p = (__le16 *)td->urb->setup_packet;
+	unsigned long setup_addr = USBREQ;
+
+	r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket,
+		       DCPMAXP);
+	r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1);
+
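+	/* copy the 8-byte SETUP packet into the four setup registers
+	 * starting at USBREQ, then fire it with SUREQ */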
+	for (i = 0; i < 4; i++) {
+		r8a66597_write(r8a66597, le16_to_cpu(p[i]), setup_addr);
+		setup_addr += 2;
+	}
+	r8a66597_write(r8a66597, SUREQ, DCPCTR);
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_packet_read(struct r8a66597 *r8a66597,
+				struct r8a66597_td *td)
+{
+	struct urb *urb = td->urb;
+
+	if (usb_pipecontrol(urb->pipe)) {
+		r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
+		r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
+		r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+		if (urb->actual_length == 0) {
+			r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+			r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+		}
+		pipe_irq_disable(r8a66597, td->pipenum);
+		pipe_start(r8a66597, td->pipe);
+		pipe_irq_enable(r8a66597, urb, td->pipenum);
+	} else {
+		if (urb->actual_length == 0) {
+			pipe_irq_disable(r8a66597, td->pipenum);
+			pipe_setting(r8a66597, td);
+			pipe_stop(r8a66597, td->pipe);
+			r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
+
+			if (td->pipe->pipetre) {
+				r8a66597_write(r8a66597, TRCLR,
+						td->pipe->pipetre);
+				r8a66597_write(r8a66597,
+						DIV_ROUND_UP
+						  (urb->transfer_buffer_length,
+						   td->maxpacket),
+						td->pipe->pipetrn);
+				r8a66597_bset(r8a66597, TRENB,
+						td->pipe->pipetre);
+			}
+
+			pipe_start(r8a66597, td->pipe);
+			pipe_irq_enable(r8a66597, urb, td->pipenum);
+		}
+	}
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_packet_write(struct r8a66597 *r8a66597,
+				 struct r8a66597_td *td)
+{
+	u16 tmp;
+	struct urb *urb = td->urb;
+
+	if (usb_pipecontrol(urb->pipe)) {
+		pipe_stop(r8a66597, td->pipe);
+		r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
+		r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
+		r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+		if (urb->actual_length == 0) {
+			r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+			r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+		}
+	} else {
+		if (urb->actual_length == 0)
+			pipe_setting(r8a66597, td);
+		if (td->pipe->pipetre)
+			r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre);
+	}
+	r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
+
+	fifo_change_from_pipe(r8a66597, td->pipe);
+	tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+	if (unlikely((tmp & FRDY) == 0))
+		pipe_irq_enable(r8a66597, urb, td->pipenum);
+	else
+		packet_write(r8a66597, td->pipenum);
+	pipe_start(r8a66597, td->pipe);
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_status_packet(struct r8a66597 *r8a66597,
+				  struct r8a66597_td *td)
+{
+	struct urb *urb = td->urb;
+
+	r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+	pipe_stop(r8a66597, td->pipe);
+
+	if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) {
+		r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
+		r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
+		r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+		r8a66597_write(r8a66597, BCLR | BVAL, CFIFOCTR);
+		enable_irq_empty(r8a66597, 0);
+	} else {
+		r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
+		r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
+		r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+		r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+		enable_irq_ready(r8a66597, 0);
+	}
+	enable_irq_nrdy(r8a66597, 0);
+	pipe_start(r8a66597, td->pipe);
+}
+
+static int is_set_address(unsigned char *setup_packet)
+{
+	if (((setup_packet[0] & USB_TYPE_MASK) == USB_TYPE_STANDARD) &&
+			setup_packet[1] == USB_REQ_SET_ADDRESS)
+		return 1;
+	else
+		return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+	BUG_ON(!td);
+
+	switch (td->type) {
+	case USB_PID_SETUP:
+		if (is_set_address(td->urb->setup_packet)) {
+			td->set_address = 1;
+			td->urb->setup_packet[2] = alloc_usb_address(r8a66597,
+								     td->urb);
+			if (td->urb->setup_packet[2] == 0)
+				return -EPIPE;
+		}
+		prepare_setup_packet(r8a66597, td);
+		break;
+	case USB_PID_IN:
+		prepare_packet_read(r8a66597, td);
+		break;
+	case USB_PID_OUT:
+		prepare_packet_write(r8a66597, td);
+		break;
+	case USB_PID_ACK:
+		prepare_status_packet(r8a66597, td);
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: invalid type.\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int check_transfer_finish(struct r8a66597_td *td, struct urb *urb)
+{
+	if (usb_pipeisoc(urb->pipe)) {
+		if (urb->number_of_packets == td->iso_cnt)
+			return 1;
+	}
+
+	/* control or bulk or interrupt */
+	if ((urb->transfer_buffer_length <= urb->actual_length) ||
+	    (td->short_packet) || (td->zero_packet))
+		return 1;
+
+	return 0;
+}
+
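+/*
+ * Arm a per-pipe watchdog so that a stalled IN transfer can be rotated
+ * out in r8a66597_td_timer() and another queued td given a chance.
+ */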
+/* this function must be called with interrupt disabled */
+static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+	unsigned long time;
+
+	BUG_ON(!td);
+
+	if (!list_empty(&r8a66597->pipe_queue[td->pipenum]) &&
+	    !usb_pipecontrol(td->urb->pipe) && usb_pipein(td->urb->pipe)) {
+		r8a66597->timeout_map |= 1 << td->pipenum;
+		switch (usb_pipetype(td->urb->pipe)) {
+		case PIPE_INTERRUPT:
+		case PIPE_ISOCHRONOUS:
+			time = 30;
+			break;
+		default:
+			time = 50;
+			break;
+		}
+
+		mod_timer(&r8a66597->timers[td->pipenum].td,
+			  jiffies + msecs_to_jiffies(time));
+	}
+}
+
+/* this function must be called with interrupt disabled */
+static void finish_request(struct r8a66597 *r8a66597, struct r8a66597_td *td,
+		u16 pipenum, struct urb *urb, int status)
+__releases(r8a66597->lock) __acquires(r8a66597->lock)
+{
+	int restart = 0;
+	struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
+
+	r8a66597->timeout_map &= ~(1 << pipenum);
+
+	if (likely(td)) {
+		if (td->set_address && (status != 0 || urb->unlinked))
+			r8a66597->address_map &= ~(1 << urb->setup_packet[2]);
+
+		pipe_toggle_save(r8a66597, td->pipe, urb);
+		list_del(&td->queue);
+		kfree(td);
+	}
+
+	if (!list_empty(&r8a66597->pipe_queue[pipenum]))
+		restart = 1;
+
+	if (likely(urb)) {
+		if (usb_pipeisoc(urb->pipe))
+			urb->start_frame = r8a66597_get_frame(hcd);
+
+		r8a66597_urb_done(r8a66597, urb, status);
+	}
+
+	if (restart) {
+		td = r8a66597_get_td(r8a66597, pipenum);
+		if (unlikely(!td))
+			return;
+
+		start_transfer(r8a66597, td);
+		set_td_timer(r8a66597, td);
+	}
+}
+
+static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	u16 tmp;
+	int rcv_len, bufsize, urb_len, size;
+	u16 *buf;
+	struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+	struct urb *urb;
+	int finish = 0;
+	int status = 0;
+
+	if (unlikely(!td))
+		return;
+	urb = td->urb;
+
+	fifo_change_from_pipe(r8a66597, td->pipe);
+	tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+	if (unlikely((tmp & FRDY) == 0)) {
+		pipe_stop(r8a66597, td->pipe);
+		pipe_irq_disable(r8a66597, pipenum);
+		printk(KERN_ERR "r8a66597: in fifo not ready (%d)\n", pipenum);
+		finish_request(r8a66597, td, pipenum, td->urb, -EPIPE);
+		return;
+	}
+
+	/* prepare parameters */
+	rcv_len = tmp & DTLN;
+	if (usb_pipeisoc(urb->pipe)) {
+		buf = (u16 *)(urb->transfer_buffer +
+				urb->iso_frame_desc[td->iso_cnt].offset);
+		urb_len = urb->iso_frame_desc[td->iso_cnt].length;
+	} else {
+		buf = (void *)urb->transfer_buffer + urb->actual_length;
+		urb_len = urb->transfer_buffer_length - urb->actual_length;
+	}
+	bufsize = min(urb_len, (int) td->maxpacket);
+	if (rcv_len <= bufsize) {
+		size = rcv_len;
+	} else {
+		size = bufsize;
+		status = -EOVERFLOW;
+		finish = 1;
+	}
+
+	/* update parameters */
+	urb->actual_length += size;
+	if (rcv_len == 0)
+		td->zero_packet = 1;
+	if (rcv_len < bufsize)
+		td->short_packet = 1;
+	if (usb_pipeisoc(urb->pipe)) {
+		urb->iso_frame_desc[td->iso_cnt].actual_length = size;
+		urb->iso_frame_desc[td->iso_cnt].status = status;
+		td->iso_cnt++;
+		finish = 0;
+	}
+
+	/* check transfer finish */
+	if (finish || check_transfer_finish(td, urb)) {
+		pipe_stop(r8a66597, td->pipe);
+		pipe_irq_disable(r8a66597, pipenum);
+		finish = 1;
+	}
+
+	/* read fifo */
+	if (urb->transfer_buffer) {
+		if (size == 0)
+			r8a66597_write(r8a66597, BCLR, td->pipe->fifoctr);
+		else
+			r8a66597_read_fifo(r8a66597, td->pipe->fifoaddr,
+					   buf, size);
+	}
+
+	if (finish && pipenum != 0)
+		finish_request(r8a66597, td, pipenum, urb, status);
+}
+
+static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	u16 tmp;
+	int bufsize, size;
+	u16 *buf;
+	struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+	struct urb *urb;
+
+	if (unlikely(!td))
+		return;
+	urb = td->urb;
+
+	fifo_change_from_pipe(r8a66597, td->pipe);
+	tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+	if (unlikely((tmp & FRDY) == 0)) {
+		pipe_stop(r8a66597, td->pipe);
+		pipe_irq_disable(r8a66597, pipenum);
+		printk(KERN_ERR "r8a66597: out fifo not ready (%d)\n", pipenum);
+		finish_request(r8a66597, td, pipenum, urb, -EPIPE);
+		return;
+	}
+
+	/* prepare parameters */
+	bufsize = td->maxpacket;
+	if (usb_pipeisoc(urb->pipe)) {
+		buf = (u16 *)(urb->transfer_buffer +
+				urb->iso_frame_desc[td->iso_cnt].offset);
+		size = min(bufsize,
+			   (int)urb->iso_frame_desc[td->iso_cnt].length);
+	} else {
+		buf = (u16 *)(urb->transfer_buffer + urb->actual_length);
+		size = min_t(u32, bufsize,
+			   urb->transfer_buffer_length - urb->actual_length);
+	}
+
+	/* write fifo */
+	if (pipenum > 0)
+		r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS);
+	if (urb->transfer_buffer) {
+		r8a66597_write_fifo(r8a66597, td->pipe, buf, size);
+		if (!usb_pipebulk(urb->pipe) || td->maxpacket != size)
+			r8a66597_write(r8a66597, BVAL, td->pipe->fifoctr);
+	}
+
+	/* update parameters */
+	urb->actual_length += size;
+	if (usb_pipeisoc(urb->pipe)) {
+		urb->iso_frame_desc[td->iso_cnt].actual_length = size;
+		urb->iso_frame_desc[td->iso_cnt].status = 0;
+		td->iso_cnt++;
+	}
+
+	/* check transfer finish */
+	if (check_transfer_finish(td, urb)) {
+		disable_irq_ready(r8a66597, pipenum);
+		enable_irq_empty(r8a66597, pipenum);
+		if (!usb_pipeisoc(urb->pipe))
+			enable_irq_nrdy(r8a66597, pipenum);
+	} else
+		pipe_irq_enable(r8a66597, urb, pipenum);
+}
+
+static void check_next_phase(struct r8a66597 *r8a66597, int status)
+{
+	struct r8a66597_td *td = r8a66597_get_td(r8a66597, 0);
+	struct urb *urb;
+	u8 finish = 0;
+
+	if (unlikely(!td))
+		return;
+	urb = td->urb;
+
+	switch (td->type) {
+	case USB_PID_IN:
+	case USB_PID_OUT:
+		if (check_transfer_finish(td, urb))
+			td->type = USB_PID_ACK;
+		break;
+	case USB_PID_SETUP:
+		if (urb->transfer_buffer_length == urb->actual_length)
+			td->type = USB_PID_ACK;
+		else if (usb_pipeout(urb->pipe))
+			td->type = USB_PID_OUT;
+		else
+			td->type = USB_PID_IN;
+		break;
+	case USB_PID_ACK:
+		finish = 1;
+		break;
+	}
+
+	if (finish || status != 0 || urb->unlinked)
+		finish_request(r8a66597, td, 0, urb, status);
+	else
+		start_transfer(r8a66597, td);
+}
+
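+/* Map the pipe's final PID state to a URB status: a pipe parked at
+ * PID_NAK is reported as -ECONNRESET, anything else as a stall
+ * (-EPIPE).
+ */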
+static int get_urb_error(struct r8a66597 *r8a66597, u16 pipenum)
+{
+	struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+
+	if (td) {
+		u16 pid = r8a66597_read(r8a66597, td->pipe->pipectr) & PID;
+
+		if (pid == PID_NAK)
+			return -ECONNRESET;
+		else
+			return -EPIPE;
+	}
+	return 0;
+}
+
+static void irq_pipe_ready(struct r8a66597 *r8a66597)
+{
+	u16 check;
+	u16 pipenum;
+	u16 mask;
+	struct r8a66597_td *td;
+
+	mask = r8a66597_read(r8a66597, BRDYSTS)
+	       & r8a66597_read(r8a66597, BRDYENB);
+	r8a66597_write(r8a66597, ~mask, BRDYSTS);
+	if (mask & BRDY0) {
+		td = r8a66597_get_td(r8a66597, 0);
+		if (td && td->type == USB_PID_IN)
+			packet_read(r8a66597, 0);
+		else
+			pipe_irq_disable(r8a66597, 0);
+		check_next_phase(r8a66597, 0);
+	}
+
+	for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		check = 1 << pipenum;
+		if (mask & check) {
+			td = r8a66597_get_td(r8a66597, pipenum);
+			if (unlikely(!td))
+				continue;
+
+			if (td->type == USB_PID_IN)
+				packet_read(r8a66597, pipenum);
+			else if (td->type == USB_PID_OUT)
+				packet_write(r8a66597, pipenum);
+		}
+	}
+}
+
+static void irq_pipe_empty(struct r8a66597 *r8a66597)
+{
+	u16 tmp;
+	u16 check;
+	u16 pipenum;
+	u16 mask;
+	struct r8a66597_td *td;
+
+	mask = r8a66597_read(r8a66597, BEMPSTS)
+	       & r8a66597_read(r8a66597, BEMPENB);
+	r8a66597_write(r8a66597, ~mask, BEMPSTS);
+	if (mask & BEMP0) {
+		cfifo_change(r8a66597, 0);
+		td = r8a66597_get_td(r8a66597, 0);
+		if (td && td->type != USB_PID_OUT)
+			disable_irq_empty(r8a66597, 0);
+		check_next_phase(r8a66597, 0);
+	}
+
+	for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		check = 1 << pipenum;
+		if (mask & check) {
+			td = r8a66597_get_td(r8a66597, pipenum);
+			if (unlikely(!td))
+				continue;
+
+			tmp = r8a66597_read(r8a66597, td->pipe->pipectr);
+			if ((tmp & INBUFM) == 0) {
+				disable_irq_empty(r8a66597, pipenum);
+				pipe_irq_disable(r8a66597, pipenum);
+				finish_request(r8a66597, td, pipenum, td->urb,
+						0);
+			}
+		}
+	}
+}
+
+static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
+{
+	u16 check;
+	u16 pipenum;
+	u16 mask;
+	int status;
+
+	mask = r8a66597_read(r8a66597, NRDYSTS)
+	       & r8a66597_read(r8a66597, NRDYENB);
+	r8a66597_write(r8a66597, ~mask, NRDYSTS);
+	if (mask & NRDY0) {
+		cfifo_change(r8a66597, 0);
+		status = get_urb_error(r8a66597, 0);
+		pipe_irq_disable(r8a66597, 0);
+		check_next_phase(r8a66597, status);
+	}
+
+	for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		check = 1 << pipenum;
+		if (mask & check) {
+			struct r8a66597_td *td;
+			td = r8a66597_get_td(r8a66597, pipenum);
+			if (unlikely(!td))
+				continue;
+
+			status = get_urb_error(r8a66597, pipenum);
+			pipe_irq_disable(r8a66597, pipenum);
+			pipe_stop(r8a66597, td->pipe);
+			finish_request(r8a66597, td, pipenum, td->urb, status);
+		}
+	}
+}
+
+static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	u16 intsts0, intsts1, intsts2;
+	u16 intenb0, intenb1, intenb2;
+	u16 mask0, mask1, mask2;
+	int status;
+
+	spin_lock(&r8a66597->lock);
+
+	intsts0 = r8a66597_read(r8a66597, INTSTS0);
+	intsts1 = r8a66597_read(r8a66597, INTSTS1);
+	intsts2 = r8a66597_read(r8a66597, INTSTS2);
+	intenb0 = r8a66597_read(r8a66597, INTENB0);
+	intenb1 = r8a66597_read(r8a66597, INTENB1);
+	intenb2 = r8a66597_read(r8a66597, INTENB2);
+
+	mask2 = intsts2 & intenb2;
+	mask1 = intsts1 & intenb1;
+	mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY);
+	if (mask2) {
+		if (mask2 & ATTCH) {
+			r8a66597_write(r8a66597, ~ATTCH, INTSTS2);
+			r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
+
+			/* start usb bus sampling */
+			start_root_hub_sampling(r8a66597, 1, 1);
+		}
+		if (mask2 & DTCH) {
+			r8a66597_write(r8a66597, ~DTCH, INTSTS2);
+			r8a66597_bclr(r8a66597, DTCHE, INTENB2);
+			r8a66597_usb_disconnect(r8a66597, 1);
+		}
+		if (mask2 & BCHG) {
+			r8a66597_write(r8a66597, ~BCHG, INTSTS2);
+			r8a66597_bclr(r8a66597, BCHGE, INTENB2);
+			usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
+		}
+	}
+
+	if (mask1) {
+		if (mask1 & ATTCH) {
+			r8a66597_write(r8a66597, ~ATTCH, INTSTS1);
+			r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
+
+			/* start usb bus sampling */
+			start_root_hub_sampling(r8a66597, 0, 1);
+		}
+		if (mask1 & DTCH) {
+			r8a66597_write(r8a66597, ~DTCH, INTSTS1);
+			r8a66597_bclr(r8a66597, DTCHE, INTENB1);
+			r8a66597_usb_disconnect(r8a66597, 0);
+		}
+		if (mask1 & BCHG) {
+			r8a66597_write(r8a66597, ~BCHG, INTSTS1);
+			r8a66597_bclr(r8a66597, BCHGE, INTENB1);
+			usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
+		}
+
+		if (mask1 & SIGN) {
+			r8a66597_write(r8a66597, ~SIGN, INTSTS1);
+			status = get_urb_error(r8a66597, 0);
+			check_next_phase(r8a66597, status);
+		}
+		if (mask1 & SACK) {
+			r8a66597_write(r8a66597, ~SACK, INTSTS1);
+			check_next_phase(r8a66597, 0);
+		}
+	}
+	if (mask0) {
+		if (mask0 & BRDY)
+			irq_pipe_ready(r8a66597);
+		if (mask0 & BEMP)
+			irq_pipe_empty(r8a66597);
+		if (mask0 & NRDY)
+			irq_pipe_nrdy(r8a66597);
+	}
+
+	spin_unlock(&r8a66597->lock);
+	return IRQ_HANDLED;
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
+{
+	u16 tmp;
+	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+	if (rh->port & USB_PORT_STAT_RESET) {
+		unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+		tmp = r8a66597_read(r8a66597, dvstctr_reg);
+		if ((tmp & USBRST) == USBRST) {
+			r8a66597_mdfy(r8a66597, UACT, USBRST | UACT,
+				      dvstctr_reg);
+			r8a66597_root_hub_start_polling(r8a66597);
+		} else
+			r8a66597_usb_connect(r8a66597, port);
+	}
+
+	if (!(rh->port & USB_PORT_STAT_CONNECTION)) {
+		r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
+		r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+	}
+
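+	/* Debounce: the line state must read back unchanged for
+	 * R8A66597_MAX_SAMPLING consecutive polls before the change is
+	 * acted on in r8a66597_check_syssts().
+	 */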
+	if (rh->scount > 0) {
+		tmp = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+		if (tmp == rh->old_syssts) {
+			rh->scount--;
+			if (rh->scount == 0)
+				r8a66597_check_syssts(r8a66597, port, tmp);
+			else
+				r8a66597_root_hub_start_polling(r8a66597);
+		} else {
+			rh->scount = R8A66597_MAX_SAMPLING;
+			rh->old_syssts = tmp;
+			r8a66597_root_hub_start_polling(r8a66597);
+		}
+	}
+}
+
+static void r8a66597_interval_timer(struct timer_list *t)
+{
+	struct r8a66597_timers *timers = from_timer(timers, t, interval);
+	struct r8a66597 *r8a66597 = timers->r8a66597;
+	unsigned long flags;
+	u16 pipenum;
+	struct r8a66597_td *td;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+
+	for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		if (!(r8a66597->interval_map & (1 << pipenum)))
+			continue;
+		if (timer_pending(&r8a66597->timers[pipenum].interval))
+			continue;
+
+		td = r8a66597_get_td(r8a66597, pipenum);
+		if (td)
+			start_transfer(r8a66597, td);
+	}
+
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static void r8a66597_td_timer(struct timer_list *t)
+{
+	struct r8a66597_timers *timers = from_timer(timers, t, td);
+	struct r8a66597 *r8a66597 = timers->r8a66597;
+	unsigned long flags;
+	u16 pipenum;
+	struct r8a66597_td *td, *new_td = NULL;
+	struct r8a66597_pipe *pipe;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+		if (!(r8a66597->timeout_map & (1 << pipenum)))
+			continue;
+		if (timer_pending(&r8a66597->timers[pipenum].td))
+			continue;
+
+		td = r8a66597_get_td(r8a66597, pipenum);
+		if (!td) {
+			r8a66597->timeout_map &= ~(1 << pipenum);
+			continue;
+		}
+
+		if (td->urb->actual_length) {
+			set_td_timer(r8a66597, td);
+			break;
+		}
+
+		pipe = td->pipe;
+		pipe_stop(r8a66597, pipe);
+
+		/* Select a different address or endpoint */
+		new_td = td;
+		do {
+			list_move_tail(&new_td->queue,
+				       &r8a66597->pipe_queue[pipenum]);
+			new_td = r8a66597_get_td(r8a66597, pipenum);
+			if (!new_td) {
+				new_td = td;
+				break;
+			}
+		} while (td != new_td && td->address == new_td->address &&
+			td->pipe->info.epnum == new_td->pipe->info.epnum);
+
+		start_transfer(r8a66597, new_td);
+
+		if (td == new_td)
+			r8a66597->timeout_map &= ~(1 << pipenum);
+		else
+			set_td_timer(r8a66597, new_td);
+		break;
+	}
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static void r8a66597_timer(struct timer_list *t)
+{
+	struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
+	unsigned long flags;
+	int port;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++)
+		r8a66597_root_hub_control(r8a66597, port);
+
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static int check_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+
+	if (dev && dev->address && dev->state != USB_STATE_CONFIGURED &&
+	    (urb->dev->state == USB_STATE_CONFIGURED))
+		return 1;
+	else
+		return 0;
+}
+
+static int r8a66597_start(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+
+	hcd->state = HC_STATE_RUNNING;
+	return enable_controller(r8a66597);
+}
+
+static void r8a66597_stop(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+
+	disable_controller(r8a66597);
+}
+
+static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb)
+{
+	unsigned int usb_address = usb_pipedevice(urb->pipe);
+	u16 root_port, hub_port;
+
+	if (usb_address == 0) {
+		get_port_number(r8a66597, urb->dev->devpath,
+				&root_port, &hub_port);
+		set_devadd_reg(r8a66597, 0,
+			       get_r8a66597_usb_speed(urb->dev->speed),
+			       get_parent_r8a66597_address(r8a66597, urb->dev),
+			       hub_port, root_port);
+	}
+}
+
+static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
+					    struct urb *urb,
+					    struct usb_host_endpoint *hep)
+{
+	struct r8a66597_td *td;
+	u16 pipenum;
+
+	td = kzalloc(sizeof(struct r8a66597_td), GFP_ATOMIC);
+	if (td == NULL)
+		return NULL;
+
+	pipenum = r8a66597_get_pipenum(urb, hep);
+	td->pipenum = pipenum;
+	td->pipe = hep->hcpriv;
+	td->urb = urb;
+	td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
+	td->maxpacket = usb_maxpacket(urb->dev, urb->pipe,
+				      !usb_pipein(urb->pipe));
+	if (usb_pipecontrol(urb->pipe))
+		td->type = USB_PID_SETUP;
+	else if (usb_pipein(urb->pipe))
+		td->type = USB_PID_IN;
+	else
+		td->type = USB_PID_OUT;
+	INIT_LIST_HEAD(&td->queue);
+
+	return td;
+}
+
+static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
+				struct urb *urb,
+				gfp_t mem_flags)
+{
+	struct usb_host_endpoint *hep = urb->ep;
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	struct r8a66597_td *td = NULL;
+	int ret, request = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
+		ret = -ENODEV;
+		goto error_not_linked;
+	}
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto error_not_linked;
+
+	if (!hep->hcpriv) {
+		hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
+				GFP_ATOMIC);
+		if (!hep->hcpriv) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		set_pipe_reg_addr(hep->hcpriv, R8A66597_PIPE_NO_DMA);
+		if (usb_pipeendpoint(urb->pipe))
+			init_pipe_info(r8a66597, urb, hep, &hep->desc);
+	}
+
+	if (unlikely(check_pipe_config(r8a66597, urb)))
+		init_pipe_config(r8a66597, urb);
+
+	set_address_zero(r8a66597, urb);
+	td = r8a66597_make_td(r8a66597, urb, hep);
+	if (td == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
+		request = 1;
+	list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
+	urb->hcpriv = td;
+
+	if (request) {
+		if (td->pipe->info.timer_interval) {
+			r8a66597->interval_map |= 1 << td->pipenum;
+			mod_timer(&r8a66597->timers[td->pipenum].interval,
+				  jiffies + msecs_to_jiffies(
+					td->pipe->info.timer_interval));
+		} else {
+			ret = start_transfer(r8a66597, td);
+			if (ret < 0) {
+				list_del(&td->queue);
+				kfree(td);
+			}
+		}
+	} else
+		set_td_timer(r8a66597, td);
+
+error:
+	if (ret)
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+error_not_linked:
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+	return ret;
+}
+
+static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+		int status)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	struct r8a66597_td *td;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto done;
+
+	if (urb->hcpriv) {
+		td = urb->hcpriv;
+		pipe_stop(r8a66597, td->pipe);
+		pipe_irq_disable(r8a66597, td->pipenum);
+		disable_irq_empty(r8a66597, td->pipenum);
+		finish_request(r8a66597, td, td->pipenum, urb, status);
+	}
+ done:
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+	return rc;
+}
+
+static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
+				      struct usb_host_endpoint *hep)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
+	struct r8a66597_td *td;
+	struct urb *urb = NULL;
+	u16 pipenum;
+	unsigned long flags;
+
+	if (pipe == NULL)
+		return;
+	pipenum = pipe->info.pipenum;
+
+	if (pipenum == 0) {
+		kfree(hep->hcpriv);
+		hep->hcpriv = NULL;
+		return;
+	}
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	pipe_stop(r8a66597, pipe);
+	pipe_irq_disable(r8a66597, pipenum);
+	disable_irq_empty(r8a66597, pipenum);
+	td = r8a66597_get_td(r8a66597, pipenum);
+	if (td)
+		urb = td->urb;
+	finish_request(r8a66597, td, pipenum, urb, -ESHUTDOWN);
+	kfree(hep->hcpriv);
+	hep->hcpriv = NULL;
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static int r8a66597_get_frame(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
+}
+
+static void collect_usb_address_map(struct usb_device *udev, unsigned long *map)
+{
+	int chix;
+	struct usb_device *childdev;
+
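+	/* only configured devices behind an external hub are tracked; the
+	 * root hub is devnum 1, so its direct children are handled by the
+	 * port logic instead */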
+	if (udev->state == USB_STATE_CONFIGURED &&
+	    udev->parent && udev->parent->devnum > 1 &&
+	    udev->parent->descriptor.bDeviceClass == USB_CLASS_HUB)
+		map[udev->devnum/32] |= (1 << (udev->devnum % 32));
+
+	usb_hub_for_each_child(udev, chix, childdev)
+		collect_usb_address_map(childdev, map);
+}
+
+/* this function must be called with interrupt disabled */
+static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
+						   int addr)
+{
+	struct r8a66597_device *dev;
+	struct list_head *list = &r8a66597->child_device;
+
+	list_for_each_entry(dev, list, device_list) {
+		if (dev->usb_address != addr)
+			continue;
+
+		return dev;
+	}
+
+	printk(KERN_ERR "r8a66597: get_r8a66597_device fail.(%d)\n", addr);
+	return NULL;
+}
+
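+/*
+ * XOR the previous connect bitmap against the freshly collected one; every
+ * changed bit is a device behind an external hub that was attached (bit
+ * now set) or detached (bit now clear).  Detached devices give up their
+ * pipes and their r8a66597 USB address.
+ */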
+static void update_usb_address_map(struct r8a66597 *r8a66597,
+				   struct usb_device *root_hub,
+				   unsigned long *map)
+{
+	int i, j, addr;
+	unsigned long diff;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++) {
+		diff = r8a66597->child_connect_map[i] ^ map[i];
+		if (!diff)
+			continue;
+
+		for (j = 0; j < 32; j++) {
+			if (!(diff & (1 << j)))
+				continue;
+
+			addr = i * 32 + j;
+			if (map[i] & (1 << j))
+				set_child_connect_map(r8a66597, addr);
+			else {
+				struct r8a66597_device *dev;
+
+				spin_lock_irqsave(&r8a66597->lock, flags);
+				dev = get_r8a66597_device(r8a66597, addr);
+				disable_r8a66597_pipe_all(r8a66597, dev);
+				free_usb_address(r8a66597, dev, 0);
+				put_child_connect_map(r8a66597, addr);
+				spin_unlock_irqrestore(&r8a66597->lock, flags);
+			}
+		}
+	}
+}
+
+static void r8a66597_check_detect_child(struct r8a66597 *r8a66597,
+					struct usb_hcd *hcd)
+{
+	struct usb_bus *bus;
+	unsigned long now_map[4];
+
+	memset(now_map, 0, sizeof(now_map));
+
+	mutex_lock(&usb_bus_idr_lock);
+	bus = idr_find(&usb_bus_idr, hcd->self.busnum);
+	if (bus && bus->root_hub) {
+		collect_usb_address_map(bus->root_hub, now_map);
+		update_usb_address_map(r8a66597, bus->root_hub, now_map);
+	}
+	mutex_unlock(&usb_bus_idr_lock);
+}
+
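+/*
+ * The upper 16 bits of root_hub[i].port hold the wPortChange bits; any
+ * pending change reports port i+1 in the hub status bitmap.
+ */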
+static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	unsigned long flags;
+	int i;
+
+	r8a66597_check_detect_child(r8a66597, hcd);
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+
+	*buf = 0;	/* initialize (no change) */
+
+	for (i = 0; i < r8a66597->max_root_hub; i++) {
+		if (r8a66597->root_hub[i].port & 0xffff0000)
+			*buf |= 1 << (i + 1);
+	}
+
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+	return (*buf != 0);
+}
+
+static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
+				    struct usb_hub_descriptor *desc)
+{
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bHubContrCurrent = 0;
+	desc->bNbrPorts = r8a66597->max_root_hub;
+	desc->bDescLength = 9;
+	desc->bPwrOn2PwrGood = 0;
+	desc->wHubCharacteristics =
+		cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_NO_OCPM);
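+	/* every port reports a non-removable device; the trailing byte is
+	 * the legacy PortPwrCtrlMask, which must read as all ones */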
+	desc->u.hs.DeviceRemovable[0] =
+		((1 << r8a66597->max_root_hub) - 1) << 1;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+}
+
+static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+				u16 wIndex, char *buf, u16 wLength)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	int ret;
+	int port = (wIndex & 0x00FF) - 1;
+	struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+	unsigned long flags;
+
+	ret = 0;
+
+	spin_lock_irqsave(&r8a66597->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (wIndex > r8a66597->max_root_hub)
+			goto error;
+		if (wLength != 0)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			rh->port &= ~USB_PORT_STAT_POWER;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			break;
+		case USB_PORT_FEAT_POWER:
+			r8a66597_port_power(r8a66597, port, 0);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_SUSPEND:
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_RESET:
+			break;
+		default:
+			goto error;
+		}
+		rh->port &= ~(1 << wValue);
+		break;
+	case GetHubDescriptor:
+		r8a66597_hub_descriptor(r8a66597,
+					(struct usb_hub_descriptor *)buf);
+		break;
+	case GetHubStatus:
+		*buf = 0x00;
+		break;
+	case GetPortStatus:
+		if (wIndex > r8a66597->max_root_hub)
+			goto error;
+		*(__le32 *)buf = cpu_to_le32(rh->port);
+		break;
+	case SetPortFeature:
+		if (wIndex > r8a66597->max_root_hub)
+			goto error;
+		if (wLength != 0)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			break;
+		case USB_PORT_FEAT_POWER:
+			r8a66597_port_power(r8a66597, port, 1);
+			rh->port |= USB_PORT_STAT_POWER;
+			break;
+		case USB_PORT_FEAT_RESET: {
+			struct r8a66597_device *dev = rh->dev;
+
+			rh->port |= USB_PORT_STAT_RESET;
+
+			disable_r8a66597_pipe_all(r8a66597, dev);
+			free_usb_address(r8a66597, dev, 1);
+
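+			/* drive USBRST for 50 msec; the root-hub timer
+			 * then completes the reset sequence */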
+			r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
+				      get_dvstctr_reg(port));
+			mod_timer(&r8a66597->rh_timer,
+				  jiffies + msecs_to_jiffies(50));
+			}
+			break;
+		default:
+			goto error;
+		}
+		rh->port |= 1 << wValue;
+		break;
+	default:
+error:
+		ret = -EPIPE;
+		break;
+	}
+
+	spin_unlock_irqrestore(&r8a66597->lock, flags);
+	return ret;
+}
+
+#if defined(CONFIG_PM)
+static int r8a66597_bus_suspend(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	int port;
+
+	dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++) {
+		struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+		unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+		if (!(rh->port & USB_PORT_STAT_ENABLE))
+			continue;
+
+		dev_dbg(&rh->dev->udev->dev, "suspend port = %d\n", port);
+		r8a66597_bclr(r8a66597, UACT, dvstctr_reg);	/* suspend */
+		rh->port |= USB_PORT_STAT_SUSPEND;
+
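+		/* when remote wakeup is allowed, watch for a bus change so
+		 * the device can signal resume while the port sleeps */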
+		if (rh->dev->udev->do_remote_wakeup) {
+			msleep(3);	/* wait for the last SOF */
+			r8a66597_bset(r8a66597, RWUPE, dvstctr_reg);
+			r8a66597_write(r8a66597, ~BCHG, get_intsts_reg(port));
+			r8a66597_bset(r8a66597, BCHGE, get_intenb_reg(port));
+		}
+	}
+
+	r8a66597->bus_suspended = 1;
+
+	return 0;
+}
+
+static int r8a66597_bus_resume(struct usb_hcd *hcd)
+{
+	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+	int port;
+
+	dev_dbg(&r8a66597->device0.udev->dev, "%s\n", __func__);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++) {
+		struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+		unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+		if (!(rh->port & USB_PORT_STAT_SUSPEND))
+			continue;
+
+		dev_dbg(&rh->dev->udev->dev, "resume port = %d\n", port);
+		rh->port &= ~USB_PORT_STAT_SUSPEND;
+		rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+		r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+		msleep(USB_RESUME_TIMEOUT);
+		r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+	}
+
+	return 0;
+}
+#else
+#define	r8a66597_bus_suspend	NULL
+#define	r8a66597_bus_resume	NULL
+#endif
+
+static const struct hc_driver r8a66597_hc_driver = {
+	.description =		hcd_name,
+	.hcd_priv_size =	sizeof(struct r8a66597),
+	.irq =			r8a66597_irq,
+
+	/*
+	 * generic hardware linkage
+	 */
+	.flags =		HCD_USB2,
+
+	.start =		r8a66597_start,
+	.stop =			r8a66597_stop,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		r8a66597_urb_enqueue,
+	.urb_dequeue =		r8a66597_urb_dequeue,
+	.endpoint_disable =	r8a66597_endpoint_disable,
+
+	/*
+	 * periodic schedule support
+	 */
+	.get_frame_number =	r8a66597_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	r8a66597_hub_status_data,
+	.hub_control =		r8a66597_hub_control,
+	.bus_suspend =		r8a66597_bus_suspend,
+	.bus_resume =		r8a66597_bus_resume,
+};
+
+#if defined(CONFIG_PM)
+static int r8a66597_suspend(struct device *dev)
+{
+	struct r8a66597		*r8a66597 = dev_get_drvdata(dev);
+	int port;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	disable_controller(r8a66597);
+
+	for (port = 0; port < r8a66597->max_root_hub; port++) {
+		struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+		rh->port = 0x00000000;
+	}
+
+	return 0;
+}
+
+static int r8a66597_resume(struct device *dev)
+{
+	struct r8a66597		*r8a66597 = dev_get_drvdata(dev);
+	struct usb_hcd		*hcd = r8a66597_to_hcd(r8a66597);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	enable_controller(r8a66597);
+	usb_root_hub_lost_power(hcd->self.root_hub);
+
+	return 0;
+}
+
+static const struct dev_pm_ops r8a66597_dev_pm_ops = {
+	.suspend = r8a66597_suspend,
+	.resume = r8a66597_resume,
+	.poweroff = r8a66597_suspend,
+	.restore = r8a66597_resume,
+};
+
+#define R8A66597_DEV_PM_OPS	(&r8a66597_dev_pm_ops)
+#else	/* if defined(CONFIG_PM) */
+#define R8A66597_DEV_PM_OPS	NULL
+#endif
+
+static int r8a66597_remove(struct platform_device *pdev)
+{
+	struct r8a66597		*r8a66597 = platform_get_drvdata(pdev);
+	struct usb_hcd		*hcd = r8a66597_to_hcd(r8a66597);
+
+	del_timer_sync(&r8a66597->rh_timer);
+	usb_remove_hcd(hcd);
+	iounmap(r8a66597->reg);
+	if (r8a66597->pdata->on_chip)
+		clk_put(r8a66597->clk);
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+static int r8a66597_probe(struct platform_device *pdev)
+{
+	char clk_name[8];
+	struct resource *res = NULL, *ires;
+	int irq = -1;
+	void __iomem *reg = NULL;
+	struct usb_hcd *hcd = NULL;
+	struct r8a66597 *r8a66597;
+	int ret = 0;
+	int i;
+	unsigned long irq_trigger;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (pdev->dev.dma_mask) {
+		ret = -EINVAL;
+		dev_err(&pdev->dev, "dma not supported\n");
+		goto clean_up;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "platform_get_resource error.\n");
+		goto clean_up;
+	}
+
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev,
+			"platform_get_resource IORESOURCE_IRQ error.\n");
+		goto clean_up;
+	}
+
+	irq = ires->start;
+	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+
+	reg = ioremap(res->start, resource_size(res));
+	if (reg == NULL) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap error.\n");
+		goto clean_up;
+	}
+
+	if (pdev->dev.platform_data == NULL) {
+		dev_err(&pdev->dev, "no platform data\n");
+		ret = -ENODEV;
+		goto clean_up;
+	}
+
+	/* initialize hcd */
+	hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
+	if (!hcd) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to create hcd\n");
+		goto clean_up;
+	}
+	r8a66597 = hcd_to_r8a66597(hcd);
+	memset(r8a66597, 0, sizeof(struct r8a66597));
+	platform_set_drvdata(pdev, r8a66597);
+	r8a66597->pdata = dev_get_platdata(&pdev->dev);
+	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
+
+	if (r8a66597->pdata->on_chip) {
+		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
+		r8a66597->clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(r8a66597->clk)) {
+			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
+				clk_name);
+			ret = PTR_ERR(r8a66597->clk);
+			goto clean_up2;
+		}
+		r8a66597->max_root_hub = 1;
+	} else
+		r8a66597->max_root_hub = 2;
+
+	spin_lock_init(&r8a66597->lock);
+	timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
+	r8a66597->reg = reg;
+
+	/* make sure no interrupts are pending */
+	ret = r8a66597_clock_enable(r8a66597);
+	if (ret < 0)
+		goto clean_up3;
+	disable_controller(r8a66597);
+
+	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
+		INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
+		r8a66597->timers[i].r8a66597 = r8a66597;
+		timer_setup(&r8a66597->timers[i].td, r8a66597_td_timer, 0);
+		timer_setup(&r8a66597->timers[i].interval,
+			    r8a66597_interval_timer, 0);
+	}
+	INIT_LIST_HEAD(&r8a66597->child_device);
+
+	hcd->rsrc_start = res->start;
+	hcd->has_tt = 1;
+
+	ret = usb_add_hcd(hcd, irq, irq_trigger);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to add hcd\n");
+		goto clean_up3;
+	}
+	device_wakeup_enable(hcd->self.controller);
+
+	return 0;
+
+clean_up3:
+	if (r8a66597->pdata->on_chip)
+		clk_put(r8a66597->clk);
+clean_up2:
+	usb_put_hcd(hcd);
+
+clean_up:
+	if (reg)
+		iounmap(reg);
+
+	return ret;
+}
+
+static struct platform_driver r8a66597_driver = {
+	.probe =	r8a66597_probe,
+	.remove =	r8a66597_remove,
+	.driver		= {
+		.name = hcd_name,
+		.pm	= R8A66597_DEV_PM_OPS,
+	},
+};
+
+module_platform_driver(r8a66597_driver);
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
new file mode 100644
index 0000000..51973a9
--- /dev/null
+++ b/drivers/usb/host/r8a66597.h
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A66597 HCD (Host Controller Driver)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ * Portions Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Portions Copyright (C) 2004-2005 David Brownell
+ * Portions Copyright (C) 1999 Roman Weissgaerber
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ */
+
+#ifndef __R8A66597_H__
+#define __R8A66597_H__
+
+#include <linux/clk.h>
+#include <linux/usb/r8a66597.h>
+
+#define R8A66597_MAX_NUM_PIPE		10
+#define R8A66597_BUF_BSIZE		8
+#define R8A66597_MAX_DEVICE		10
+#define R8A66597_MAX_ROOT_HUB		2
+#define R8A66597_MAX_SAMPLING		5
+#define R8A66597_RH_POLL_TIME		10
+#define R8A66597_MAX_DMA_CHANNEL	2
+#define R8A66597_PIPE_NO_DMA		R8A66597_MAX_DMA_CHANNEL
+#define check_bulk_or_isoc(pipenum)	((pipenum >= 1 && pipenum <= 5))
+#define check_interrupt(pipenum)	((pipenum >= 6 && pipenum <= 9))
+#define make_devsel(addr)		(addr << 12)
+
+struct r8a66597_pipe_info {
+	unsigned long timer_interval;
+	u16 pipenum;
+	u16 address;	/* R8A66597 HCD usb address */
+	u16 epnum;
+	u16 maxpacket;
+	u16 type;
+	u16 bufnum;
+	u16 buf_bsize;
+	u16 interval;
+	u16 dir_in;
+};
+
+struct r8a66597_pipe {
+	struct r8a66597_pipe_info info;
+
+	unsigned long fifoaddr;
+	unsigned long fifosel;
+	unsigned long fifoctr;
+	unsigned long pipectr;
+	unsigned long pipetre;
+	unsigned long pipetrn;
+};
+
+struct r8a66597_td {
+	struct r8a66597_pipe *pipe;
+	struct urb *urb;
+	struct list_head queue;
+
+	u16 type;
+	u16 pipenum;
+	int iso_cnt;
+
+	u16 address;		/* R8A66597's USB address */
+	u16 maxpacket;
+
+	unsigned zero_packet:1;
+	unsigned short_packet:1;
+	unsigned set_address:1;
+};
+
+struct r8a66597_device {
+	u16	address;	/* R8A66597's USB address */
+	u16	hub_port;
+	u16	root_port;
+
+	unsigned short ep_in_toggle;
+	unsigned short ep_out_toggle;
+	unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE];
+	unsigned char dma_map;
+
+	enum usb_device_state state;
+
+	struct usb_device *udev;
+	int usb_address;
+	struct list_head device_list;
+};
+
+struct r8a66597_root_hub {
+	u32 port;
+	u16 old_syssts;
+	int scount;
+
+	struct r8a66597_device	*dev;
+};
+
+struct r8a66597;
+
+struct r8a66597_timers {
+	struct timer_list td;
+	struct timer_list interval;
+	struct r8a66597 *r8a66597;
+};
+
+struct r8a66597 {
+	spinlock_t lock;
+	void __iomem *reg;
+	struct clk *clk;
+	struct r8a66597_platdata	*pdata;
+	struct r8a66597_device		device0;
+	struct r8a66597_root_hub	root_hub[R8A66597_MAX_ROOT_HUB];
+	struct list_head		pipe_queue[R8A66597_MAX_NUM_PIPE];
+
+	struct timer_list rh_timer;
+	struct r8a66597_timers timers[R8A66597_MAX_NUM_PIPE];
+
+	unsigned short address_map;
+	unsigned short timeout_map;
+	unsigned short interval_map;
+	unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE];
+	unsigned char dma_map;
+	unsigned int max_root_hub;
+
+	struct list_head child_device;
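+	/* one bit per devnum, diffed on each root-hub poll */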
+	unsigned long child_connect_map[4];
+
+	unsigned bus_suspended:1;
+	unsigned irq_sense_low:1;
+};
+
+static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
+{
+	return (struct r8a66597 *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *r8a66597_to_hcd(struct r8a66597 *r8a66597)
+{
+	return container_of((void *)r8a66597, struct usb_hcd, hcd_priv);
+}
+
+static inline struct r8a66597_td *r8a66597_get_td(struct r8a66597 *r8a66597,
+						  u16 pipenum)
+{
+	if (unlikely(list_empty(&r8a66597->pipe_queue[pipenum])))
+		return NULL;
+
+	return list_entry(r8a66597->pipe_queue[pipenum].next,
+			  struct r8a66597_td, queue);
+}
+
+static inline struct urb *r8a66597_get_urb(struct r8a66597 *r8a66597,
+					   u16 pipenum)
+{
+	struct r8a66597_td *td;
+
+	td = r8a66597_get_td(r8a66597, pipenum);
+	return (td ? td->urb : NULL);
+}
+
+static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
+{
+	return ioread16(r8a66597->reg + offset);
+}
+
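+/*
+ * The on-chip variant has a 32-bit FIFO: move longwords, then fix up a
+ * 1-3 byte tail from one final 32-bit read.  The external-bus variant
+ * uses 16-bit accesses, rounding the length up to a halfword count.
+ */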
+static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
+				      unsigned long offset, u16 *buf,
+				      int len)
+{
+	void __iomem *fifoaddr = r8a66597->reg + offset;
+	unsigned long count;
+
+	if (r8a66597->pdata->on_chip) {
+		count = len / 4;
+		ioread32_rep(fifoaddr, buf, count);
+
+		if (len & 0x00000003) {
+			unsigned long tmp = ioread32(fifoaddr);
+			memcpy((unsigned char *)buf + count * 4, &tmp,
+			       len & 0x03);
+		}
+	} else {
+		len = (len + 1) / 2;
+		ioread16_rep(fifoaddr, buf, len);
+	}
+}
+
+static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
+				  unsigned long offset)
+{
+	iowrite16(val, r8a66597->reg + offset);
+}
+
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+				 u16 val, u16 pat, unsigned long offset)
+{
+	u16 tmp;
+
+	tmp = r8a66597_read(r8a66597, offset);
+	tmp = (tmp & ~pat) | val;
+	r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset)	\
+			r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset)	\
+			r8a66597_mdfy(r8a66597, val, 0, offset)
+
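+/*
+ * Writes mirror the read path: longword accesses on-chip, with the tail
+ * bytes steered onto the proper lane depending on BIGEND; 16-bit accesses
+ * otherwise.  Boards with WR0 shorted to WR1 must drop to an 8-bit bus
+ * width (clear MBW_16) around the final odd byte.
+ */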
+static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
+				       struct r8a66597_pipe *pipe, u16 *buf,
+				       int len)
+{
+	void __iomem *fifoaddr = r8a66597->reg + pipe->fifoaddr;
+	unsigned long count;
+	unsigned char *pb;
+	int i;
+
+	if (r8a66597->pdata->on_chip) {
+		count = len / 4;
+		iowrite32_rep(fifoaddr, buf, count);
+
+		if (len & 0x00000003) {
+			pb = (unsigned char *)buf + count * 4;
+			for (i = 0; i < (len & 0x00000003); i++) {
+				if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND)
+					iowrite8(pb[i], fifoaddr + i);
+				else
+					iowrite8(pb[i], fifoaddr + 3 - i);
+			}
+		}
+	} else {
+		int odd = len & 0x0001;
+
+		len = len / 2;
+		iowrite16_rep(fifoaddr, buf, len);
+		if (unlikely(odd)) {
+			buf = &buf[len];
+			if (r8a66597->pdata->wr0_shorted_to_wr1)
+				r8a66597_bclr(r8a66597, MBW_16, pipe->fifosel);
+			iowrite8((unsigned char)*buf, fifoaddr);
+			if (r8a66597->pdata->wr0_shorted_to_wr1)
+				r8a66597_bset(r8a66597, MBW_16, pipe->fifosel);
+		}
+	}
+}
+
+static inline unsigned long get_syscfg_reg(int port)
+{
+	return port == 0 ? SYSCFG0 : SYSCFG1;
+}
+
+static inline unsigned long get_syssts_reg(int port)
+{
+	return port == 0 ? SYSSTS0 : SYSSTS1;
+}
+
+static inline unsigned long get_dvstctr_reg(int port)
+{
+	return port == 0 ? DVSTCTR0 : DVSTCTR1;
+}
+
+static inline unsigned long get_dmacfg_reg(int port)
+{
+	return port == 0 ? DMA0CFG : DMA1CFG;
+}
+
+static inline unsigned long get_intenb_reg(int port)
+{
+	return port == 0 ? INTENB1 : INTENB2;
+}
+
+static inline unsigned long get_intsts_reg(int port)
+{
+	return port == 0 ? INTSTS1 : INTSTS2;
+}
+
+static inline u16 get_rh_usb_speed(struct r8a66597 *r8a66597, int port)
+{
+	unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+	return r8a66597_read(r8a66597, dvstctr_reg) & RHST;
+}
+
+static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
+				       int power)
+{
+	unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+	if (r8a66597->pdata->port_power) {
+		r8a66597->pdata->port_power(port, power);
+	} else {
+		if (power)
+			r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+		else
+			r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+	}
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+	u16 clock = 0;
+
+	switch (pdata->xtal) {
+	case R8A66597_PLATDATA_XTAL_12MHZ:
+		clock = XTAL12;
+		break;
+	case R8A66597_PLATDATA_XTAL_24MHZ:
+		clock = XTAL24;
+		break;
+	case R8A66597_PLATDATA_XTAL_48MHZ:
+		clock = XTAL48;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+		break;
+	}
+
+	return clock;
+}
+
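+/* pipes 1..9 keep their registers at fixed strides from the PIPE1 base */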
+#define get_pipectr_addr(pipenum)	(PIPE1CTR + (pipenum - 1) * 2)
+#define get_pipetre_addr(pipenum)	(PIPE1TRE + (pipenum - 1) * 4)
+#define get_pipetrn_addr(pipenum)	(PIPE1TRN + (pipenum - 1) * 4)
+#define get_devadd_addr(address)	(DEVADD0 + address * 2)
+
+#define enable_irq_ready(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define disable_irq_ready(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define enable_irq_empty(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define disable_irq_empty(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define enable_irq_nrdy(r8a66597, pipenum)	\
+	enable_pipe_irq(r8a66597, pipenum, NRDYENB)
+#define disable_irq_nrdy(r8a66597, pipenum)	\
+	disable_pipe_irq(r8a66597, pipenum, NRDYENB)
+
+#endif	/* __R8A66597_H__ */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
new file mode 100644
index 0000000..5b061e5
--- /dev/null
+++ b/drivers/usb/host/sl811-hcd.c
@@ -0,0 +1,1806 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SL811HS HCD (Host Controller Driver) for USB.
+ *
+ * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * Periodic scheduling is based on Roman's OHCI code
+ * 	Copyright (C) 1999 Roman Weissgaerber
+ *
+ * The SL811HS controller handles host side USB (like the SL11H, but with
+ * another register set and SOF generation) as well as peripheral side USB
+ * (like the SL811S).  This driver version doesn't implement the Gadget API
+ * for the peripheral role, or OTG (that'd need much external circuitry).
+ *
+ * For documentation, see the SL811HS spec and the "SL811HS Embedded Host"
+ * document (providing significant pieces missing from that spec); plus
+ * the SL811S spec if you want peripheral side info.
+ */
+
+/*
+ * Status:  Passed basic stress testing, works with hubs, mice, keyboards,
+ * and usb-storage.
+ *
+ * TODO:
+ * - usb suspend/resume triggered by sl811
+ * - various issues noted in the code
+ * - performance work; use both register banks; ...
+ * - use urb->iso_frame_desc[] with ISO transfers
+ */
+
+#undef	VERBOSE
+#undef	PACKET_TRACE
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/sl811.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include "sl811.h"
+
+
+MODULE_DESCRIPTION("SL811HS USB Host Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sl811-hcd");
+
+#define DRIVER_VERSION	"19 May 2005"
+
+/* for now, use only one transfer register bank */
+#undef	USE_B
+
+// #define	QUIRK2
+#define	QUIRK3
+
+static const char hcd_name[] = "sl811-hcd";
+
+/*-------------------------------------------------------------------------*/
+
+static void port_power(struct sl811 *sl811, int is_on)
+{
+	struct usb_hcd	*hcd = sl811_to_hcd(sl811);
+
+	/* hub is inactive unless the port is powered */
+	if (is_on) {
+		if (sl811->port1 & USB_PORT_STAT_POWER)
+			return;
+
+		sl811->port1 = USB_PORT_STAT_POWER;
+		sl811->irq_enable = SL11H_INTMASK_INSRMV;
+	} else {
+		sl811->port1 = 0;
+		sl811->irq_enable = 0;
+		hcd->state = HC_STATE_HALT;
+	}
+	sl811->ctrl1 = 0;
+	sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
+	sl811_write(sl811, SL11H_IRQ_STATUS, ~0);
+
+	if (sl811->board && sl811->board->port_power) {
+		/* switch VBUS, at 500mA unless hub power budget gets set */
+		dev_dbg(hcd->self.controller, "power %s\n",
+			is_on ? "on" : "off");
+		sl811->board->port_power(hcd->self.controller, is_on);
+	}
+
+	/* reset as thoroughly as we can */
+	if (sl811->board && sl811->board->reset)
+		sl811->board->reset(hcd->self.controller);
+	else {
+		sl811_write(sl811, SL11H_CTLREG1, SL11H_CTL1MASK_SE0);
+		mdelay(20);
+	}
+
+	sl811_write(sl811, SL11H_IRQ_ENABLE, 0);
+	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+	sl811_write(sl811, SL811HS_CTLREG2, SL811HS_CTL2_INIT);
+	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
+
+	// if !is_on, put into lowpower mode now
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* This is a PIO-only HCD.  Queueing appends URBs to the endpoint's queue,
+ * and may start I/O.  Endpoint queues are scanned during completion irq
+ * handlers (one per packet: ACK, NAK, faults, etc) and urb cancellation.
+ *
+ * Using an external DMA engine to copy a packet at a time could work,
+ * though setup/teardown costs may be too big to make it worthwhile.
+ */
+
+/* SETUP starts a new control request.  Devices are not allowed to
+ * STALL or NAK these; they must cancel any pending control requests.
+ */
+static void setup_packet(
+	struct sl811		*sl811,
+	struct sl811h_ep	*ep,
+	struct urb		*urb,
+	u8			bank,
+	u8			control
+)
+{
+	u8			addr;
+	u8			len;
+	void __iomem		*data_reg;
+
+	addr = SL811HS_PACKET_BUF(bank == 0);
+	len = sizeof(struct usb_ctrlrequest);
+	data_reg = sl811->data_reg;
+	sl811_write_buf(sl811, addr, urb->setup_packet, len);
+
+	/* autoincrementing */
+	sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
+	writeb(len, data_reg);
+	writeb(SL_SETUP /* | ep->epnum */, data_reg);
+	writeb(usb_pipedevice(urb->pipe), data_reg);
+
+	/* always OUT/data0 */
+	sl811_write(sl811, bank + SL11H_HOSTCTLREG,
+			control | SL11H_HCTLMASK_OUT);
+	ep->length = 0;
+	PACKET("SETUP qh%p\n", ep);
+}
+
+/* STATUS finishes control requests, often after IN or OUT data packets */
+static void status_packet(
+	struct sl811		*sl811,
+	struct sl811h_ep	*ep,
+	struct urb		*urb,
+	u8			bank,
+	u8			control
+)
+{
+	int			do_out;
+	void __iomem		*data_reg;
+
+	do_out = urb->transfer_buffer_length && usb_pipein(urb->pipe);
+	data_reg = sl811->data_reg;
+
+	/* autoincrementing */
+	sl811_write(sl811, bank + SL11H_BUFADDRREG, 0);
+	writeb(0, data_reg);
+	writeb((do_out ? SL_OUT : SL_IN) /* | ep->epnum */, data_reg);
+	writeb(usb_pipedevice(urb->pipe), data_reg);
+
+	/* always data1; sometimes IN */
+	control |= SL11H_HCTLMASK_TOGGLE;
+	if (do_out)
+		control |= SL11H_HCTLMASK_OUT;
+	sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
+	ep->length = 0;
+	PACKET("STATUS%s/%s qh%p\n", ep->nak_count ? "/retry" : "",
+			do_out ? "out" : "in", ep);
+}
+
+/* IN packets can be used with any type of endpoint.  Here we just
+ * start the transfer; data from the peripheral may arrive later.
+ * urb->iso_frame_desc is currently ignored here...
+ */
+static void in_packet(
+	struct sl811		*sl811,
+	struct sl811h_ep	*ep,
+	struct urb		*urb,
+	u8			bank,
+	u8			control
+)
+{
+	u8			addr;
+	u8			len;
+	void __iomem		*data_reg;
+
+	/* avoid losing data on overflow */
+	len = ep->maxpacket;
+	addr = SL811HS_PACKET_BUF(bank == 0);
+	if (!(control & SL11H_HCTLMASK_ISOCH)
+			&& usb_gettoggle(urb->dev, ep->epnum, 0))
+		control |= SL11H_HCTLMASK_TOGGLE;
+	data_reg = sl811->data_reg;
+
+	/* autoincrementing */
+	sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
+	writeb(len, data_reg);
+	writeb(SL_IN | ep->epnum, data_reg);
+	writeb(usb_pipedevice(urb->pipe), data_reg);
+
+	sl811_write(sl811, bank + SL11H_HOSTCTLREG, control);
+	ep->length = min_t(u32, len,
+			urb->transfer_buffer_length - urb->actual_length);
+	PACKET("IN%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
+			!!usb_gettoggle(urb->dev, ep->epnum, 0), ep, len);
+}
+
+/* OUT packets can be used with any type of endpoint.
+ * urb->iso_frame_desc is currently ignored here...
+ */
+static void out_packet(
+	struct sl811		*sl811,
+	struct sl811h_ep	*ep,
+	struct urb		*urb,
+	u8			bank,
+	u8			control
+)
+{
+	void			*buf;
+	u8			addr;
+	u8			len;
+	void __iomem		*data_reg;
+
+	buf = urb->transfer_buffer + urb->actual_length;
+	prefetch(buf);
+
+	len = min_t(u32, ep->maxpacket,
+			urb->transfer_buffer_length - urb->actual_length);
+
+	if (!(control & SL11H_HCTLMASK_ISOCH)
+			&& usb_gettoggle(urb->dev, ep->epnum, 1))
+		control |= SL11H_HCTLMASK_TOGGLE;
+	addr = SL811HS_PACKET_BUF(bank == 0);
+	data_reg = sl811->data_reg;
+
+	sl811_write_buf(sl811, addr, buf, len);
+
+	/* autoincrementing */
+	sl811_write(sl811, bank + SL11H_BUFADDRREG, addr);
+	writeb(len, data_reg);
+	writeb(SL_OUT | ep->epnum, data_reg);
+	writeb(usb_pipedevice(urb->pipe), data_reg);
+
+	sl811_write(sl811, bank + SL11H_HOSTCTLREG,
+			control | SL11H_HCTLMASK_OUT);
+	ep->length = len;
+	PACKET("OUT%s/%d qh%p len%d\n", ep->nak_count ? "/retry" : "",
+			!!usb_gettoggle(urb->dev, ep->epnum, 1), ep, len);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* caller updates on-chip enables later */
+
+static inline void sofirq_on(struct sl811 *sl811)
+{
+	if (sl811->irq_enable & SL11H_INTMASK_SOFINTR)
+		return;
+	dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq on\n");
+	sl811->irq_enable |= SL11H_INTMASK_SOFINTR;
+}
+
+static inline void sofirq_off(struct sl811 *sl811)
+{
+	if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR))
+		return;
+	dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq off\n");
+	sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* pick the next endpoint for a transaction, and issue it.
+ * frames start with periodic transfers (after whatever is pending
+ * from the previous frame), and the rest of the time is async
+ * transfers, scheduled round-robin.
+ */
+static struct sl811h_ep	*start(struct sl811 *sl811, u8 bank)
+{
+	struct sl811h_ep	*ep;
+	struct urb		*urb;
+	int			fclock;
+	u8			control;
+
+	/* use endpoint at schedule head */
+	if (sl811->next_periodic) {
+		ep = sl811->next_periodic;
+		sl811->next_periodic = ep->next;
+	} else {
+		if (sl811->next_async)
+			ep = sl811->next_async;
+		else if (!list_empty(&sl811->async))
+			ep = container_of(sl811->async.next,
+					struct sl811h_ep, schedule);
+		else {
+			/* could set up the first fullspeed periodic
+			 * transfer for the next frame ...
+			 */
+			return NULL;
+		}
+
+#ifdef USE_B
+		if ((bank && sl811->active_b == ep) || sl811->active_a == ep)
+			return NULL;
+#endif
+
+		if (ep->schedule.next == &sl811->async)
+			sl811->next_async = NULL;
+		else
+			sl811->next_async = container_of(ep->schedule.next,
+					struct sl811h_ep, schedule);
+	}
+
+	if (unlikely(list_empty(&ep->hep->urb_list))) {
+		dev_dbg(sl811_to_hcd(sl811)->self.controller,
+			"empty %p queue?\n", ep);
+		return NULL;
+	}
+
+	urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);
+	control = ep->defctrl;
+
+	/* if this frame doesn't have enough time left to transfer this
+	 * packet, wait till the next frame.  too-simple algorithm...
+	 */
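+	/* the SOF countdown register holds a coarse (1/64) count, so
+	 * <<6 approximates the full-speed bit times left in the frame */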
+	fclock = sl811_read(sl811, SL11H_SOFTMRREG) << 6;
+	fclock -= 100;		/* setup takes not much time */
+	if (urb->dev->speed == USB_SPEED_LOW) {
+		if (control & SL11H_HCTLMASK_PREAMBLE) {
+			/* also note erratum 1: some hubs won't work */
+			fclock -= 800;
+		}
+		fclock -= ep->maxpacket << 8;
+
+		/* erratum 2: AFTERSOF only works for fullspeed */
+		if (fclock < 0) {
+			if (ep->period)
+				sl811->stat_overrun++;
+			sofirq_on(sl811);
+			return NULL;
+		}
+	} else {
+		fclock -= 12000 / 19;	/* 19 64-byte packets/msec */
+		if (fclock < 0) {
+			if (ep->period)
+				sl811->stat_overrun++;
+			control |= SL11H_HCTLMASK_AFTERSOF;
+
+		/* throttle bulk/control irq noise */
+		} else if (ep->nak_count)
+			control |= SL11H_HCTLMASK_AFTERSOF;
+	}
+
+
+	switch (ep->nextpid) {
+	case USB_PID_IN:
+		in_packet(sl811, ep, urb, bank, control);
+		break;
+	case USB_PID_OUT:
+		out_packet(sl811, ep, urb, bank, control);
+		break;
+	case USB_PID_SETUP:
+		setup_packet(sl811, ep, urb, bank, control);
+		break;
+	case USB_PID_ACK:		/* for control status */
+		status_packet(sl811, ep, urb, bank, control);
+		break;
+	default:
+		dev_dbg(sl811_to_hcd(sl811)->self.controller,
+			"bad ep%p pid %02x\n", ep, ep->nextpid);
+		ep = NULL;
+	}
+	return ep;
+}
+
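+/* an armed packet gets at least 2 msec (and at least 2 jiffies) before
+ * checkdone() or urb_dequeue treat its DONE irq as lost */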
+#define MIN_JIFFIES	((msecs_to_jiffies(2) > 1) ? msecs_to_jiffies(2) : 2)
+
+static inline void start_transfer(struct sl811 *sl811)
+{
+	if (sl811->port1 & USB_PORT_STAT_SUSPEND)
+		return;
+	if (sl811->active_a == NULL) {
+		sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
+		if (sl811->active_a != NULL)
+			sl811->jiffies_a = jiffies + MIN_JIFFIES;
+	}
+#ifdef USE_B
+	if (sl811->active_b == NULL) {
+		sl811->active_b = start(sl811, SL811_EP_B(SL811_HOST_BUF));
+		if (sl811->active_b != NULL)
+			sl811->jiffies_b = jiffies + MIN_JIFFIES;
+	}
+#endif
+}
+
+static void finish_request(
+	struct sl811		*sl811,
+	struct sl811h_ep	*ep,
+	struct urb		*urb,
+	int			status
+) __releases(sl811->lock) __acquires(sl811->lock)
+{
+	unsigned		i;
+
+	if (usb_pipecontrol(urb->pipe))
+		ep->nextpid = USB_PID_SETUP;
+
+	usb_hcd_unlink_urb_from_ep(sl811_to_hcd(sl811), urb);
+	spin_unlock(&sl811->lock);
+	usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb, status);
+	spin_lock(&sl811->lock);
+
+	/* leave active endpoints in the schedule */
+	if (!list_empty(&ep->hep->urb_list))
+		return;
+
+	/* async deschedule? */
+	if (!list_empty(&ep->schedule)) {
+		list_del_init(&ep->schedule);
+		if (ep == sl811->next_async)
+			sl811->next_async = NULL;
+		return;
+	}
+
+	/* periodic deschedule */
+	dev_dbg(sl811_to_hcd(sl811)->self.controller,
+		"deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+	for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
+		struct sl811h_ep	*temp;
+		struct sl811h_ep	**prev = &sl811->periodic[i];
+
+		while (*prev && ((temp = *prev) != ep))
+			prev = &temp->next;
+		if (*prev)
+			*prev = ep->next;
+		sl811->load[i] -= ep->load;
+	}
+	ep->branch = PERIODIC_SIZE;
+	sl811->periodic_count--;
+	sl811_to_hcd(sl811)->self.bandwidth_allocated
+		-= ep->load / ep->period;
+	if (ep == sl811->next_periodic)
+		sl811->next_periodic = ep->next;
+
+	/* we might turn SOFs back on again for the async schedule */
+	if (sl811->periodic_count == 0)
+		sofirq_off(sl811);
+}
+
+static void
+done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
+{
+	u8			status;
+	struct urb		*urb;
+	int			urbstat = -EINPROGRESS;
+
+	if (unlikely(!ep))
+		return;
+
+	status = sl811_read(sl811, bank + SL11H_PKTSTATREG);
+
+	urb = container_of(ep->hep->urb_list.next, struct urb, urb_list);
+
+	/* we can safely ignore NAKs */
+	if (status & SL11H_STATMASK_NAK) {
+		// PACKET("...NAK_%02x qh%p\n", bank, ep);
+		if (!ep->period)
+			ep->nak_count++;
+		ep->error_count = 0;
+
+	/* ACK advances transfer, toggle, and maybe queue */
+	} else if (status & SL11H_STATMASK_ACK) {
+		struct usb_device	*udev = urb->dev;
+		int			len;
+		unsigned char		*buf;
+
+		/* urb->iso_frame_desc is currently ignored here... */
+
+		ep->nak_count = ep->error_count = 0;
+		switch (ep->nextpid) {
+		case USB_PID_OUT:
+			// PACKET("...ACK/out_%02x qh%p\n", bank, ep);
+			urb->actual_length += ep->length;
+			usb_dotoggle(udev, ep->epnum, 1);
+			if (urb->actual_length
+					== urb->transfer_buffer_length) {
+				if (usb_pipecontrol(urb->pipe))
+					ep->nextpid = USB_PID_ACK;
+
+				/* some bulk protocols terminate OUT transfers
+				 * by a short packet, using ZLPs not padding.
+				 */
+				else if (ep->length < ep->maxpacket
+						|| !(urb->transfer_flags
+							& URB_ZERO_PACKET))
+					urbstat = 0;
+			}
+			break;
+		case USB_PID_IN:
+			// PACKET("...ACK/in_%02x qh%p\n", bank, ep);
+			buf = urb->transfer_buffer + urb->actual_length;
+			prefetchw(buf);
+			len = ep->maxpacket - sl811_read(sl811,
+						bank + SL11H_XFERCNTREG);
+			if (len > ep->length) {
+				len = ep->length;
+				urbstat = -EOVERFLOW;
+			}
+			urb->actual_length += len;
+			sl811_read_buf(sl811, SL811HS_PACKET_BUF(bank == 0),
+					buf, len);
+			usb_dotoggle(udev, ep->epnum, 0);
+			if (urbstat == -EINPROGRESS &&
+					(len < ep->maxpacket ||
+						urb->actual_length ==
+						urb->transfer_buffer_length)) {
+				if (usb_pipecontrol(urb->pipe))
+					ep->nextpid = USB_PID_ACK;
+				else
+					urbstat = 0;
+			}
+			break;
+		case USB_PID_SETUP:
+			// PACKET("...ACK/setup_%02x qh%p\n", bank, ep);
+			if (urb->transfer_buffer_length == urb->actual_length)
+				ep->nextpid = USB_PID_ACK;
+			else if (usb_pipeout(urb->pipe)) {
+				usb_settoggle(udev, 0, 1, 1);
+				ep->nextpid = USB_PID_OUT;
+			} else {
+				usb_settoggle(udev, 0, 0, 1);
+				ep->nextpid = USB_PID_IN;
+			}
+			break;
+		case USB_PID_ACK:
+			// PACKET("...ACK/status_%02x qh%p\n", bank, ep);
+			urbstat = 0;
+			break;
+		}
+
+	/* STALL stops all transfers */
+	} else if (status & SL11H_STATMASK_STALL) {
+		PACKET("...STALL_%02x qh%p\n", bank, ep);
+		ep->nak_count = ep->error_count = 0;
+		urbstat = -EPIPE;
+
+	/* error? retry, until "3 strikes" */
+	} else if (++ep->error_count >= 3) {
+		if (status & SL11H_STATMASK_TMOUT)
+			urbstat = -ETIME;
+		else if (status & SL11H_STATMASK_OVF)
+			urbstat = -EOVERFLOW;
+		else
+			urbstat = -EPROTO;
+		ep->error_count = 0;
+		PACKET("...3STRIKES_%02x %02x qh%p stat %d\n",
+				bank, status, ep, urbstat);
+	}
+
+	if (urbstat != -EINPROGRESS || urb->unlinked)
+		finish_request(sl811, ep, urb, urbstat);
+}
+
+static inline u8 checkdone(struct sl811 *sl811)
+{
+	u8	ctl;
+	u8	irqstat = 0;
+
+	if (sl811->active_a && time_before_eq(sl811->jiffies_a, jiffies)) {
+		ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG));
+		if (ctl & SL11H_HCTLMASK_ARM)
+			sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
+		dev_dbg(sl811_to_hcd(sl811)->self.controller,
+			"%s DONE_A: ctrl %02x sts %02x\n",
+			(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
+			ctl,
+			sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
+		irqstat |= SL11H_INTMASK_DONE_A;
+	}
+#ifdef	USE_B
+	if (sl811->active_b && time_before_eq(sl811->jiffies_b, jiffies)) {
+		ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG));
+		if (ctl & SL11H_HCTLMASK_ARM)
+			sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
+		dev_dbg(sl811_to_hcd(sl811)->self.controller,
+			"%s DONE_B: ctrl %02x sts %02x\n",
+			(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
+			ctl,
+			sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
+		irqstat |= SL11H_INTMASK_DONE_B;
+	}
+#endif
+	return irqstat;
+}
+
+static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
+{
+	struct sl811	*sl811 = hcd_to_sl811(hcd);
+	u8		irqstat;
+	irqreturn_t	ret = IRQ_NONE;
+	unsigned	retries = 5;
+
+	spin_lock(&sl811->lock);
+
+retry:
+	irqstat = sl811_read(sl811, SL11H_IRQ_STATUS) & ~SL11H_INTMASK_DP;
+	if (irqstat) {
+		sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
+		irqstat &= sl811->irq_enable;
+	}
+
+#ifdef	QUIRK2
+	/* this may no longer be necessary ... */
+	if (irqstat == 0) {
+		irqstat = checkdone(sl811);
+		if (irqstat)
+			sl811->stat_lost++;
+	}
+#endif
+
+	/* USB packets, not necessarily handled in the order they're
+	 * issued ... that's fine if they're different endpoints.
+	 */
+	if (irqstat & SL11H_INTMASK_DONE_A) {
+		done(sl811, sl811->active_a, SL811_EP_A(SL811_HOST_BUF));
+		sl811->active_a = NULL;
+		sl811->stat_a++;
+	}
+#ifdef USE_B
+	if (irqstat & SL11H_INTMASK_DONE_B) {
+		done(sl811, sl811->active_b, SL811_EP_B(SL811_HOST_BUF));
+		sl811->active_b = NULL;
+		sl811->stat_b++;
+	}
+#endif
+	if (irqstat & SL11H_INTMASK_SOFINTR) {
+		unsigned index;
+
+		index = sl811->frame++ % (PERIODIC_SIZE - 1);
+		sl811->stat_sof++;
+
+		/* be graceful about almost-inevitable periodic schedule
+		 * overruns:  continue the previous frame's transfers iff
+		 * this one has nothing scheduled.
+		 */
+		if (sl811->next_periodic) {
+			// dev_err(hcd->self.controller, "overrun to slot %d\n", index);
+			sl811->stat_overrun++;
+		}
+		if (sl811->periodic[index])
+			sl811->next_periodic = sl811->periodic[index];
+	}
+
+	/* hub_wq manages debouncing and wakeup */
+	if (irqstat & SL11H_INTMASK_INSRMV) {
+		sl811->stat_insrmv++;
+
+		/* most stats are reset for each VBUS session */
+		sl811->stat_wake = 0;
+		sl811->stat_sof = 0;
+		sl811->stat_a = 0;
+		sl811->stat_b = 0;
+		sl811->stat_lost = 0;
+
+		sl811->ctrl1 = 0;
+		sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+		sl811->irq_enable = SL11H_INTMASK_INSRMV;
+		sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
+
+		/* usbcore nukes other pending transactions on disconnect */
+		if (sl811->active_a) {
+			sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
+			finish_request(sl811, sl811->active_a,
+				container_of(sl811->active_a
+						->hep->urb_list.next,
+					struct urb, urb_list),
+				-ESHUTDOWN);
+			sl811->active_a = NULL;
+		}
+#ifdef	USE_B
+		if (sl811->active_b) {
+			sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
+			finish_request(sl811, sl811->active_b,
+				container_of(sl811->active_b
+						->hep->urb_list.next,
+					struct urb, urb_list),
+				-ESHUTDOWN);
+			sl811->active_b = NULL;
+		}
+#endif
+
+		/* port status seems weird until after reset, so
+		 * force the reset and make hub_wq clean up later.
+		 */
+		if (irqstat & SL11H_INTMASK_RD)
+			sl811->port1 &= ~USB_PORT_STAT_CONNECTION;
+		else
+			sl811->port1 |= USB_PORT_STAT_CONNECTION;
+
+		sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16;
+
+	} else if (irqstat & SL11H_INTMASK_RD) {
+		if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
+			dev_dbg(hcd->self.controller, "wakeup\n");
+			sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
+			sl811->stat_wake++;
+		} else
+			irqstat &= ~SL11H_INTMASK_RD;
+	}
+
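+	/* rescan while the chip keeps raising fresh events, but bound
+	 * the work done in a single pass through the handler */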
+	if (irqstat) {
+		if (sl811->port1 & USB_PORT_STAT_ENABLE)
+			start_transfer(sl811);
+		ret = IRQ_HANDLED;
+		if (retries--)
+			goto retry;
+	}
+
+	if (sl811->periodic_count == 0 && list_empty(&sl811->async))
+		sofirq_off(sl811);
+	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
+
+	spin_unlock(&sl811->lock);
+
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* usb 1.1 says max 90% of a frame is available for periodic transfers.
+ * this driver doesn't promise that much since it's got to handle an
+ * IRQ per packet; irq handling latencies also use up that time.
+ *
+ * NOTE:  the periodic schedule is a sparse tree, with the load for
+ * each branch minimized.  see fig 3.5 in the OHCI spec for example.
+ */
+#define	MAX_PERIODIC_LOAD	500	/* out of 1000 usec */
+
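+/* e.g. a period-4 endpoint goes into whichever of slots 0..3 carries the
+ * least load, and the caller then links it at every 4th slot from there */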
+static int balance(struct sl811 *sl811, u16 period, u16 load)
+{
+	int	i, branch = -ENOSPC;
+
+	/* search for the least loaded schedule branch of that period
+	 * which has enough bandwidth left unreserved.
+	 */
+	for (i = 0; i < period ; i++) {
+		if (branch < 0 || sl811->load[branch] > sl811->load[i]) {
+			int	j;
+
+			for (j = i; j < PERIODIC_SIZE; j += period) {
+				if ((sl811->load[j] + load)
+						> MAX_PERIODIC_LOAD)
+					break;
+			}
+			if (j < PERIODIC_SIZE)
+				continue;
+			branch = i;
+		}
+	}
+	return branch;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int sl811h_urb_enqueue(
+	struct usb_hcd		*hcd,
+	struct urb		*urb,
+	gfp_t			mem_flags
+) {
+	struct sl811		*sl811 = hcd_to_sl811(hcd);
+	struct usb_device	*udev = urb->dev;
+	unsigned int		pipe = urb->pipe;
+	int			is_out = !usb_pipein(pipe);
+	int			type = usb_pipetype(pipe);
+	int			epnum = usb_pipeendpoint(pipe);
+	struct sl811h_ep	*ep = NULL;
+	unsigned long		flags;
+	int			i;
+	int			retval;
+	struct usb_host_endpoint	*hep = urb->ep;
+
+#ifndef CONFIG_USB_SL811_HCD_ISO
+	if (type == PIPE_ISOCHRONOUS)
+		return -ENOSPC;
+#endif
+
+	/* avoid all allocations within spinlocks */
+	if (!hep->hcpriv) {
+		ep = kzalloc(sizeof *ep, mem_flags);
+		if (ep == NULL)
+			return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&sl811->lock, flags);
+
+	/* don't submit to a dead or disabled port */
+	if (!(sl811->port1 & USB_PORT_STAT_ENABLE)
+			|| !HC_IS_RUNNING(hcd->state)) {
+		retval = -ENODEV;
+		kfree(ep);
+		goto fail_not_linked;
+	}
+	retval = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (retval) {
+		kfree(ep);
+		goto fail_not_linked;
+	}
+
+	if (hep->hcpriv) {
+		kfree(ep);
+		ep = hep->hcpriv;
+	} else if (!ep) {
+		retval = -ENOMEM;
+		goto fail;
+
+	} else {
+		INIT_LIST_HEAD(&ep->schedule);
+		ep->udev = udev;
+		ep->epnum = epnum;
+		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+		ep->defctrl = SL11H_HCTLMASK_ARM | SL11H_HCTLMASK_ENABLE;
+		usb_settoggle(udev, epnum, is_out, 0);
+
+		if (type == PIPE_CONTROL)
+			ep->nextpid = USB_PID_SETUP;
+		else if (is_out)
+			ep->nextpid = USB_PID_OUT;
+		else
+			ep->nextpid = USB_PID_IN;
+
+		if (ep->maxpacket > H_MAXPACKET) {
+			/* iso packets up to 240 bytes could work... */
+			dev_dbg(hcd->self.controller,
+				"dev %d ep%d maxpacket %d\n", udev->devnum,
+				epnum, ep->maxpacket);
+			retval = -EINVAL;
+			kfree(ep);
+			goto fail;
+		}
+
+		if (udev->speed == USB_SPEED_LOW) {
+			/* send preamble for external hub? */
+			if (!(sl811->ctrl1 & SL11H_CTL1MASK_LSPD))
+				ep->defctrl |= SL11H_HCTLMASK_PREAMBLE;
+		}
+		switch (type) {
+		case PIPE_ISOCHRONOUS:
+		case PIPE_INTERRUPT:
+			if (urb->interval > PERIODIC_SIZE)
+				urb->interval = PERIODIC_SIZE;
+			ep->period = urb->interval;
+			ep->branch = PERIODIC_SIZE;
+			if (type == PIPE_ISOCHRONOUS)
+				ep->defctrl |= SL11H_HCTLMASK_ISOCH;
+			ep->load = usb_calc_bus_time(udev->speed, !is_out,
+				(type == PIPE_ISOCHRONOUS),
+				usb_maxpacket(udev, pipe, is_out))
+					/ 1000;
+			break;
+		}
+
+		ep->hep = hep;
+		hep->hcpriv = ep;
+	}
+
+	/* maybe put endpoint into schedule */
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		if (list_empty(&ep->schedule))
+			list_add_tail(&ep->schedule, &sl811->async);
+		break;
+	case PIPE_ISOCHRONOUS:
+	case PIPE_INTERRUPT:
+		urb->interval = ep->period;
+		if (ep->branch < PERIODIC_SIZE) {
+			/* NOTE:  the phase is correct here, but the value
+			 * needs offsetting by the transfer queue depth.
+			 * All current drivers ignore start_frame, so this
+			 * is unlikely to ever matter...
+			 */
+			urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+						+ ep->branch;
+			break;
+		}
+
+		retval = balance(sl811, ep->period, ep->load);
+		if (retval < 0)
+			goto fail;
+		ep->branch = retval;
+		retval = 0;
+		urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+					+ ep->branch;
+
+		/* sort each schedule branch by period (slow before fast)
+		 * to share the faster parts of the tree without needing
+		 * dummy/placeholder nodes
+		 */
+		dev_dbg(hcd->self.controller, "schedule qh%d/%p branch %d\n",
+			ep->period, ep, ep->branch);
+		for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
+			struct sl811h_ep	**prev = &sl811->periodic[i];
+			struct sl811h_ep	*here = *prev;
+
+			while (here && ep != here) {
+				if (ep->period > here->period)
+					break;
+				prev = &here->next;
+				here = *prev;
+			}
+			if (ep != here) {
+				ep->next = here;
+				*prev = ep;
+			}
+			sl811->load[i] += ep->load;
+		}
+		sl811->periodic_count++;
+		hcd->self.bandwidth_allocated += ep->load / ep->period;
+		sofirq_on(sl811);
+	}
+
+	urb->hcpriv = hep;
+	start_transfer(sl811);
+	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
+fail:
+	if (retval)
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+fail_not_linked:
+	spin_unlock_irqrestore(&sl811->lock, flags);
+	return retval;
+}
+
+static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct sl811		*sl811 = hcd_to_sl811(hcd);
+	struct usb_host_endpoint *hep;
+	unsigned long		flags;
+	struct sl811h_ep	*ep;
+	int			retval;
+
+	spin_lock_irqsave(&sl811->lock, flags);
+	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (retval)
+		goto fail;
+
+	hep = urb->hcpriv;
+	ep = hep->hcpriv;
+	if (ep) {
+		/* finish right away if this urb can't be active ...
+		 * note that some drivers wrongly expect delays
+		 */
+		if (ep->hep->urb_list.next != &urb->urb_list) {
+			/* not front of queue?  never active */
+
+		/* for active transfers, we expect an IRQ */
+		} else if (sl811->active_a == ep) {
+			if (time_before_eq(sl811->jiffies_a, jiffies)) {
+				/* happens a lot with lowspeed?? */
+				dev_dbg(hcd->self.controller,
+					"giveup on DONE_A: ctrl %02x sts %02x\n",
+					sl811_read(sl811,
+						SL811_EP_A(SL11H_HOSTCTLREG)),
+					sl811_read(sl811,
+						SL811_EP_A(SL11H_PKTSTATREG)));
+				sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
+						0);
+				sl811->active_a = NULL;
+			} else
+				urb = NULL;
+#ifdef	USE_B
+		} else if (sl811->active_b == ep) {
+			if (time_before_eq(sl811->jiffies_a, jiffies)) {
+				/* happens a lot with lowspeed?? */
+				dev_dbg(hcd->self.controller,
+					"giveup on DONE_B: ctrl %02x sts %02x\n",
+					sl811_read(sl811,
+						SL811_EP_B(SL11H_HOSTCTLREG)),
+					sl811_read(sl811,
+						SL811_EP_B(SL11H_PKTSTATREG)));
+				sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG),
+						0);
+				sl811->active_b = NULL;
+			} else
+				urb = NULL;
+#endif
+		} else {
+			/* front of queue for inactive endpoint */
+		}
+
+		if (urb)
+			finish_request(sl811, ep, urb, 0);
+		else
+			dev_dbg(sl811_to_hcd(sl811)->self.controller,
+				"dequeue, urb %p active %s; wait4irq\n", urb,
+				(sl811->active_a == ep) ? "A" : "B");
+	} else
+		retval = -EINVAL;
+ fail:
+	spin_unlock_irqrestore(&sl811->lock, flags);
+	return retval;
+}
+
+static void
+sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+	struct sl811h_ep	*ep = hep->hcpriv;
+
+	if (!ep)
+		return;
+
+	/* assume we'd just wait for the irq */
+	if (!list_empty(&hep->urb_list))
+		msleep(3);
+	if (!list_empty(&hep->urb_list))
+		dev_warn(hcd->self.controller, "ep %p not empty?\n", ep);
+
+	kfree(ep);
+	hep->hcpriv = NULL;
+}
+
+static int
+sl811h_get_frame(struct usb_hcd *hcd)
+{
+	struct sl811 *sl811 = hcd_to_sl811(hcd);
+
+	/* wrong except while periodic transfers are scheduled;
+	 * never matches the on-the-wire frame;
+	 * subject to overruns.
+	 */
+	return sl811->frame;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* the virtual root hub timer IRQ checks for hub status */
+static int
+sl811h_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct sl811 *sl811 = hcd_to_sl811(hcd);
+#ifdef	QUIRK3
+	unsigned long flags;
+
+	/* non-SMP HACK: use the root hub timer as an i/o watchdog;
+	 * this seems essential when SOF IRQs aren't in use...
+	 */
+	local_irq_save(flags);
+	if (!timer_pending(&sl811->timer)) {
+		if (sl811h_irq( /* ~0, */ hcd) != IRQ_NONE)
+			sl811->stat_lost++;
+	}
+	local_irq_restore(flags);
+#endif
+
+	if (!(sl811->port1 & (0xffff << 16)))
+		return 0;
+
+	/* tell hub_wq port 1 changed */
+	*buf = (1 << 1);
+	return 1;
+}
+
+static void
+sl811h_hub_descriptor (
+	struct sl811			*sl811,
+	struct usb_hub_descriptor	*desc
+) {
+	u16		temp = 0;
+
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = 1;
+	desc->bDescLength = 9;
+
+	/* per-port power switching (gang of one!), or none */
+	desc->bPwrOn2PwrGood = 0;
+	if (sl811->board && sl811->board->port_power) {
+		desc->bPwrOn2PwrGood = sl811->board->potpg;
+		if (!desc->bPwrOn2PwrGood)
+			desc->bPwrOn2PwrGood = 10;
+		temp = HUB_CHAR_INDV_PORT_LPSM;
+	} else
+		temp = HUB_CHAR_NO_LPSM;
+
+	/* no overcurrent errors detection/handling */
+	temp |= HUB_CHAR_NO_OCPM;
+
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+
+	/* ports removable, and legacy PortPwrCtrlMask */
+	desc->u.hs.DeviceRemovable[0] = 0 << 1;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+}
+
+static void
+sl811h_timer(struct timer_list *t)
+{
+	struct sl811 	*sl811 = from_timer(sl811, t, timer);
+	unsigned long	flags;
+	u8		irqstat;
+	u8		signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
+	const u32	mask = USB_PORT_STAT_CONNECTION
+				| USB_PORT_STAT_ENABLE
+				| USB_PORT_STAT_LOW_SPEED;
+
+	spin_lock_irqsave(&sl811->lock, flags);
+
+	/* stop special signaling */
+	sl811->ctrl1 &= ~SL11H_CTL1MASK_FORCE;
+	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+	udelay(3);
+
+	irqstat = sl811_read(sl811, SL11H_IRQ_STATUS);
+
+	switch (signaling) {
+	case SL11H_CTL1MASK_SE0:
+		dev_dbg(sl811_to_hcd(sl811)->self.controller, "end reset\n");
+		sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
+				 | USB_PORT_STAT_POWER;
+		sl811->ctrl1 = 0;
+		/* don't wrongly ack RD */
+		if (irqstat & SL11H_INTMASK_INSRMV)
+			irqstat &= ~SL11H_INTMASK_RD;
+		break;
+	case SL11H_CTL1MASK_K:
+		dev_dbg(sl811_to_hcd(sl811)->self.controller, "end resume\n");
+		sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
+		break;
+	default:
+		dev_dbg(sl811_to_hcd(sl811)->self.controller,
+			"odd timer signaling: %02x\n", signaling);
+		break;
+	}
+	sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
+
+	if (irqstat & SL11H_INTMASK_RD) {
+		/* usbcore nukes all pending transactions on disconnect */
+		if (sl811->port1 & USB_PORT_STAT_CONNECTION)
+			sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16)
+					| (USB_PORT_STAT_C_ENABLE << 16);
+		sl811->port1 &= ~mask;
+		sl811->irq_enable = SL11H_INTMASK_INSRMV;
+	} else {
+		sl811->port1 |= mask;
+		if (irqstat & SL11H_INTMASK_DP)
+			sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED;
+		sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
+	}
+
+	if (sl811->port1 & USB_PORT_STAT_CONNECTION) {
+		u8	ctrl2 = SL811HS_CTL2_INIT;
+
+		sl811->irq_enable |= SL11H_INTMASK_DONE_A;
+#ifdef USE_B
+		sl811->irq_enable |= SL11H_INTMASK_DONE_B;
+#endif
+		if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) {
+			sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
+			ctrl2 |= SL811HS_CTL2MASK_DSWAP;
+		}
+
+		/* start SOFs flowing, kickstarting with A registers */
+		sl811->ctrl1 |= SL11H_CTL1MASK_SOF_ENA;
+		sl811_write(sl811, SL11H_SOFLOWREG, 0xe0);
+		sl811_write(sl811, SL811HS_CTLREG2, ctrl2);
+
+		/* autoincrementing */
+		sl811_write(sl811, SL811_EP_A(SL11H_BUFLNTHREG), 0);
+		writeb(SL_SOF, sl811->data_reg);
+		writeb(0, sl811->data_reg);
+		sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG),
+				SL11H_HCTLMASK_ARM);
+
+		/* hub_wq provides debounce delay */
+	} else {
+		sl811->ctrl1 = 0;
+	}
+	sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+	/* reenable irqs */
+	sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
+	spin_unlock_irqrestore(&sl811->lock, flags);
+}
+
+static int
+sl811h_hub_control(
+	struct usb_hcd	*hcd,
+	u16		typeReq,
+	u16		wValue,
+	u16		wIndex,
+	char		*buf,
+	u16		wLength
+) {
+	struct sl811	*sl811 = hcd_to_sl811(hcd);
+	int		retval = 0;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&sl811->lock, flags);
+
+	switch (typeReq) {
+	case ClearHubFeature:
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (wIndex != 1 || wLength != 0)
+			goto error;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
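+			/* disabling the port clears everything but power */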
+			sl811->port1 &= USB_PORT_STAT_POWER;
+			sl811->ctrl1 = 0;
+			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+			sl811->irq_enable = SL11H_INTMASK_INSRMV;
+			sl811_write(sl811, SL11H_IRQ_ENABLE,
+						sl811->irq_enable);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (!(sl811->port1 & USB_PORT_STAT_SUSPEND))
+				break;
+
+			/* 20 msec of resume/K signaling, other irqs blocked */
+			dev_dbg(hcd->self.controller, "start resume...\n");
+			sl811->irq_enable = 0;
+			sl811_write(sl811, SL11H_IRQ_ENABLE,
+						sl811->irq_enable);
+			sl811->ctrl1 |= SL11H_CTL1MASK_K;
+			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+			mod_timer(&sl811->timer, jiffies
+					+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+			break;
+		case USB_PORT_FEAT_POWER:
+			port_power(sl811, 0);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_SUSPEND:
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_RESET:
+			break;
+		default:
+			goto error;
+		}
+		sl811->port1 &= ~(1 << wValue);
+		break;
+	case GetHubDescriptor:
+		sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
+		break;
+	case GetHubStatus:
+		put_unaligned_le32(0, buf);
+		break;
+	case GetPortStatus:
+		if (wIndex != 1)
+			goto error;
+		put_unaligned_le32(sl811->port1, buf);
+
+#ifndef	VERBOSE
+	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
+#endif
+		dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
+			sl811->port1);
+		break;
+	case SetPortFeature:
+		if (wIndex != 1 || wLength != 0)
+			goto error;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if (sl811->port1 & USB_PORT_STAT_RESET)
+				goto error;
+			if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
+				goto error;
+
+			dev_dbg(hcd->self.controller, "suspend...\n");
+			sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA;
+			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+			break;
+		case USB_PORT_FEAT_POWER:
+			port_power(sl811, 1);
+			break;
+		case USB_PORT_FEAT_RESET:
+			if (sl811->port1 & USB_PORT_STAT_SUSPEND)
+				goto error;
+			if (!(sl811->port1 & USB_PORT_STAT_POWER))
+				break;
+
+			/* 50 msec of reset/SE0 signaling, irqs blocked */
+			sl811->irq_enable = 0;
+			sl811_write(sl811, SL11H_IRQ_ENABLE,
+						sl811->irq_enable);
+			sl811->ctrl1 = SL11H_CTL1MASK_SE0;
+			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+			sl811->port1 |= USB_PORT_STAT_RESET;
+			mod_timer(&sl811->timer, jiffies
+					+ msecs_to_jiffies(50));
+			break;
+		default:
+			goto error;
+		}
+		sl811->port1 |= 1 << wValue;
+		break;
+
+	default:
+error:
+		/* "protocol stall" on error */
+		retval = -EPIPE;
+	}
+
+	spin_unlock_irqrestore(&sl811->lock, flags);
+	return retval;
+}
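
Throughout this function the whole root-hub port state lives in the single sl811->port1 word: the low 16 bits mirror wPortStatus (the USB_PORT_STAT_* bits), the high 16 bits mirror wPortChange (the USB_PORT_STAT_C_* bits shifted up). That is why GetPortStatus can answer with one put_unaligned_le32() and why SetPortFeature/ClearPortFeature toggle `1 << wValue` directly. A minimal sketch of the packing; these helper names are hypothetical and not part of the driver:

static inline u16 sl811_wportstatus(u32 port1)
{
	return (u16)port1;		/* low half: wPortStatus */
}

static inline u16 sl811_wportchange(u32 port1)
{
	return (u16)(port1 >> 16);	/* high half: wPortChange */
}
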
+
+#ifdef	CONFIG_PM
+
+static int
+sl811h_bus_suspend(struct usb_hcd *hcd)
+{
+	// SOFs off
+	dev_dbg(hcd->self.controller, "%s\n", __func__);
+	return 0;
+}
+
+static int
+sl811h_bus_resume(struct usb_hcd *hcd)
+{
+	// SOFs on
+	dev_dbg(hcd->self.controller, "%s\n", __func__);
+	return 0;
+}
+
+#else
+
+#define	sl811h_bus_suspend	NULL
+#define	sl811h_bus_resume	NULL
+
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+static void dump_irq(struct seq_file *s, char *label, u8 mask)
+{
+	seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask,
+		(mask & SL11H_INTMASK_DONE_A) ? " done_a" : "",
+		(mask & SL11H_INTMASK_DONE_B) ? " done_b" : "",
+		(mask & SL11H_INTMASK_SOFINTR) ? " sof" : "",
+		(mask & SL11H_INTMASK_INSRMV) ? " ins/rmv" : "",
+		(mask & SL11H_INTMASK_RD) ? " rd" : "",
+		(mask & SL11H_INTMASK_DP) ? " dp" : "");
+}
+
+static int sl811h_debug_show(struct seq_file *s, void *unused)
+{
+	struct sl811		*sl811 = s->private;
+	struct sl811h_ep	*ep;
+	unsigned		i;
+
+	seq_printf(s, "%s\n%s version %s\nportstatus[1] = %08x\n",
+		sl811_to_hcd(sl811)->product_desc,
+		hcd_name, DRIVER_VERSION,
+		sl811->port1);
+
+	seq_printf(s, "insert/remove: %ld\n", sl811->stat_insrmv);
+	seq_printf(s, "current session:  done_a %ld done_b %ld "
+			"wake %ld sof %ld overrun %ld lost %ld\n\n",
+		sl811->stat_a, sl811->stat_b,
+		sl811->stat_wake, sl811->stat_sof,
+		sl811->stat_overrun, sl811->stat_lost);
+
+	spin_lock_irq(&sl811->lock);
+
+	if (sl811->ctrl1 & SL11H_CTL1MASK_SUSPEND)
+		seq_printf(s, "(suspended)\n\n");
+	else {
+		u8	t = sl811_read(sl811, SL11H_CTLREG1);
+
+		seq_printf(s, "ctrl1 %02x%s%s%s%s\n", t,
+			(t & SL11H_CTL1MASK_SOF_ENA) ? " sofgen" : "",
+			({char *s; switch (t & SL11H_CTL1MASK_FORCE) {
+			case SL11H_CTL1MASK_NORMAL: s = ""; break;
+			case SL11H_CTL1MASK_SE0: s = " se0/reset"; break;
+			case SL11H_CTL1MASK_K: s = " k/resume"; break;
+			default: s = "j"; break;
+			} s; }),
+			(t & SL11H_CTL1MASK_LSPD) ? " lowspeed" : "",
+			(t & SL11H_CTL1MASK_SUSPEND) ? " suspend" : "");
+
+		dump_irq(s, "irq_enable",
+				sl811_read(sl811, SL11H_IRQ_ENABLE));
+		dump_irq(s, "irq_status",
+				sl811_read(sl811, SL11H_IRQ_STATUS));
+		seq_printf(s, "frame clocks remaining:  %d\n",
+				sl811_read(sl811, SL11H_SOFTMRREG) << 6);
+	}
+
+	seq_printf(s, "A: qh%p ctl %02x sts %02x\n", sl811->active_a,
+		sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG)),
+		sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
+	seq_printf(s, "B: qh%p ctl %02x sts %02x\n", sl811->active_b,
+		sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG)),
+		sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
+	seq_printf(s, "\n");
+	list_for_each_entry (ep, &sl811->async, schedule) {
+		struct urb		*urb;
+
+		seq_printf(s, "%s%sqh%p, ep%d%s, maxpacket %d"
+					" nak %d err %d\n",
+			(ep == sl811->active_a) ? "(A) " : "",
+			(ep == sl811->active_b) ? "(B) " : "",
+			ep, ep->epnum,
+			({ char *s; switch (ep->nextpid) {
+			case USB_PID_IN: s = "in"; break;
+			case USB_PID_OUT: s = "out"; break;
+			case USB_PID_SETUP: s = "setup"; break;
+			case USB_PID_ACK: s = "status"; break;
+			default: s = "?"; break;
+			} s;}),
+			ep->maxpacket,
+			ep->nak_count, ep->error_count);
+		list_for_each_entry (urb, &ep->hep->urb_list, urb_list) {
+			seq_printf(s, "  urb%p, %d/%d\n", urb,
+				urb->actual_length,
+				urb->transfer_buffer_length);
+		}
+	}
+	if (!list_empty(&sl811->async))
+		seq_printf(s, "\n");
+
+	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
+
+	for (i = 0; i < PERIODIC_SIZE; i++) {
+		ep = sl811->periodic[i];
+		if (!ep)
+			continue;
+		seq_printf(s, "%2d [%3d]:\n", i, sl811->load[i]);
+
+		/* DUMB: prints shared entries multiple times */
+		do {
+			seq_printf(s,
+				"   %s%sqh%d/%p (%sdev%d ep%d%s max %d) "
+					"err %d\n",
+				(ep == sl811->active_a) ? "(A) " : "",
+				(ep == sl811->active_b) ? "(B) " : "",
+				ep->period, ep,
+				(ep->udev->speed == USB_SPEED_FULL)
+					? "" : "ls ",
+				ep->udev->devnum, ep->epnum,
+				(ep->epnum == 0) ? ""
+					: ((ep->nextpid == USB_PID_IN)
+						? "in"
+						: "out"),
+				ep->maxpacket, ep->error_count);
+			ep = ep->next;
+		} while (ep);
+	}
+
+	spin_unlock_irq(&sl811->lock);
+	seq_printf(s, "\n");
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(sl811h_debug);
+
+/* expect just one sl811 per system */
+static void create_debug_file(struct sl811 *sl811)
+{
+	sl811->debug_file = debugfs_create_file("sl811h", S_IRUGO,
+						usb_debug_root, sl811,
+						&sl811h_debug_fops);
+}
+
+static void remove_debug_file(struct sl811 *sl811)
+{
+	debugfs_remove(sl811->debug_file);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+sl811h_stop(struct usb_hcd *hcd)
+{
+	struct sl811	*sl811 = hcd_to_sl811(hcd);
+	unsigned long	flags;
+
+	del_timer_sync(&hcd->rh_timer);
+
+	spin_lock_irqsave(&sl811->lock, flags);
+	port_power(sl811, 0);
+	spin_unlock_irqrestore(&sl811->lock, flags);
+}
+
+static int
+sl811h_start(struct usb_hcd *hcd)
+{
+	struct sl811		*sl811 = hcd_to_sl811(hcd);
+
+	/* chip has been reset, VBUS power is off */
+	hcd->state = HC_STATE_RUNNING;
+
+	if (sl811->board) {
+		if (!device_can_wakeup(hcd->self.controller))
+			device_init_wakeup(hcd->self.controller,
+				sl811->board->can_wakeup);
+		hcd->power_budget = sl811->board->power * 2;
+	}
+
+	/* enable power and interrupts */
+	port_power(sl811, 1);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver sl811h_hc_driver = {
+	.description =		hcd_name,
+	.hcd_priv_size =	sizeof(struct sl811),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			sl811h_irq,
+	.flags =		HCD_USB11 | HCD_MEMORY,
+
+	/* Basic lifecycle operations */
+	.start =		sl811h_start,
+	.stop =			sl811h_stop,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		sl811h_urb_enqueue,
+	.urb_dequeue =		sl811h_urb_dequeue,
+	.endpoint_disable =	sl811h_endpoint_disable,
+
+	/*
+	 * periodic schedule support
+	 */
+	.get_frame_number =	sl811h_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data =	sl811h_hub_status_data,
+	.hub_control =		sl811h_hub_control,
+	.bus_suspend =		sl811h_bus_suspend,
+	.bus_resume =		sl811h_bus_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int
+sl811h_remove(struct platform_device *dev)
+{
+	struct usb_hcd		*hcd = platform_get_drvdata(dev);
+	struct sl811		*sl811 = hcd_to_sl811(hcd);
+	struct resource		*res;
+
+	remove_debug_file(sl811);
+	usb_remove_hcd(hcd);
+
+	/* some platforms may use IORESOURCE_IO */
+	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	if (res)
+		iounmap(sl811->data_reg);
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res)
+		iounmap(sl811->addr_reg);
+
+	usb_put_hcd(hcd);
+	return 0;
+}
+
+static int
+sl811h_probe(struct platform_device *dev)
+{
+	struct usb_hcd		*hcd;
+	struct sl811		*sl811;
+	struct resource		*addr, *data, *ires;
+	int			irq;
+	void __iomem		*addr_reg;
+	void __iomem		*data_reg;
+	int			retval;
+	u8			tmp, ioaddr = 0;
+	unsigned long		irqflags;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/* basic sanity checks first.  board-specific init logic should
+	 * have initialized these three resources and probably board
+	 * specific platform_data.  we don't probe for IRQs, and do only
+	 * minimal sanity checking.
+	 */
+	ires = platform_get_resource(dev, IORESOURCE_IRQ, 0);
+	if (dev->num_resources < 3 || !ires)
+		return -ENODEV;
+
+	irq = ires->start;
+	irqflags = ires->flags & IRQF_TRIGGER_MASK;
+
+	/* refuse to confuse usbcore */
+	if (dev->dev.dma_mask) {
+		dev_dbg(&dev->dev, "no we won't dma\n");
+		return -EINVAL;
+	}
+
+	/* the chip may be wired for either kind of addressing */
+	addr = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	data = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	retval = -EBUSY;
+	if (!addr || !data) {
+		addr = platform_get_resource(dev, IORESOURCE_IO, 0);
+		data = platform_get_resource(dev, IORESOURCE_IO, 1);
+		if (!addr || !data)
+			return -ENODEV;
+		ioaddr = 1;
+		/*
+		 * NOTE: 64-bit resource->start is getting truncated
+		 * to avoid compiler warning, assuming that ->start
+		 * is always 32-bit for this case
+		 */
+		addr_reg = (void __iomem *) (unsigned long) addr->start;
+		data_reg = (void __iomem *) (unsigned long) data->start;
+	} else {
+		addr_reg = ioremap(addr->start, 1);
+		if (addr_reg == NULL) {
+			retval = -ENOMEM;
+			goto err2;
+		}
+
+		data_reg = ioremap(data->start, 1);
+		if (data_reg == NULL) {
+			retval = -ENOMEM;
+			goto err4;
+		}
+	}
+
+	/* allocate and initialize hcd */
+	hcd = usb_create_hcd(&sl811h_hc_driver, &dev->dev, dev_name(&dev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto err5;
+	}
+	hcd->rsrc_start = addr->start;
+	sl811 = hcd_to_sl811(hcd);
+
+	spin_lock_init(&sl811->lock);
+	INIT_LIST_HEAD(&sl811->async);
+	sl811->board = dev_get_platdata(&dev->dev);
+	timer_setup(&sl811->timer, sl811h_timer, 0);
+	sl811->addr_reg = addr_reg;
+	sl811->data_reg = data_reg;
+
+	spin_lock_irq(&sl811->lock);
+	port_power(sl811, 0);
+	spin_unlock_irq(&sl811->lock);
+	msleep(200);
+
+	tmp = sl811_read(sl811, SL11H_HWREVREG);
+	switch (tmp >> 4) {
+	case 1:
+		hcd->product_desc = "SL811HS v1.2";
+		break;
+	case 2:
+		hcd->product_desc = "SL811HS v1.5";
+		break;
+	default:
+		/* reject case 0, SL11S is less functional */
+		dev_dbg(&dev->dev, "chiprev %02x\n", tmp);
+		retval = -ENXIO;
+		goto err6;
+	}
+
+	/* The chip's IRQ is level triggered, active high.  A requirement
+	 * for platform device setup is to cope with things like signal
+	 * inverters (e.g. CF is active low) or working only with edge
+	 * triggers (e.g. most ARM CPUs).  Initial driver stress testing
+	 * was on a system with single edge triggering, so most sorts of
+	 * triggering arrangement should work.
+	 *
+	 * Use resource IRQ flags if set by platform device setup.
+	 */
+	irqflags |= IRQF_SHARED;
+	retval = usb_add_hcd(hcd, irq, irqflags);
+	if (retval != 0)
+		goto err6;
+
+	device_wakeup_enable(hcd->self.controller);
+
+	create_debug_file(sl811);
+	return retval;
+
+ err6:
+	usb_put_hcd(hcd);
+ err5:
+	if (!ioaddr)
+		iounmap(data_reg);
+ err4:
+	if (!ioaddr)
+		iounmap(addr_reg);
+ err2:
+	dev_dbg(&dev->dev, "init error, %d\n", retval);
+	return retval;
+}
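
sl811h_probe leaves resource discovery entirely to board code: it wants resources 0 and 1 (MEM, or IO as a fallback) for the chip's address and data registers, an IRQ resource whose trigger flags it reuses, and optional sl811_platform_data. A board-file sketch under those assumptions; every name and number here is hypothetical, and the device name must match what this driver registers as hcd_name ("sl811-hcd" in mainline):

static struct resource example_sl811_resources[] = {
	{	/* resource 0: address register window */
		.start	= 0x10000000,
		.end	= 0x10000000,
		.flags	= IORESOURCE_MEM,
	}, {	/* resource 1: data register window */
		.start	= 0x10000001,
		.end	= 0x10000001,
		.flags	= IORESOURCE_MEM,
	}, {	/* level-triggered board interrupt */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct sl811_platform_data example_sl811_pdata = {
	.potpg	= 10,	/* msec from port power to power-good */
	.power	= 250,	/* VBUS budget in 2 mA units == 500 mA */
};

static struct platform_device example_sl811_device = {
	.name		= "sl811-hcd",
	.id		= -1,
	.dev		= { .platform_data = &example_sl811_pdata, },
	.resource	= example_sl811_resources,
	.num_resources	= ARRAY_SIZE(example_sl811_resources),
};
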
+
+#ifdef	CONFIG_PM
+
+/* for this device there's no useful distinction between the controller
+ * and its root hub.
+ */
+
+static int
+sl811h_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(dev);
+	struct sl811	*sl811 = hcd_to_sl811(hcd);
+	int		retval = 0;
+
+	switch (state.event) {
+	case PM_EVENT_FREEZE:
+		retval = sl811h_bus_suspend(hcd);
+		break;
+	case PM_EVENT_SUSPEND:
+	case PM_EVENT_HIBERNATE:
+	case PM_EVENT_PRETHAW:		/* explicitly discard hw state */
+		port_power(sl811, 0);
+		break;
+	}
+	return retval;
+}
+
+static int
+sl811h_resume(struct platform_device *dev)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(dev);
+	struct sl811	*sl811 = hcd_to_sl811(hcd);
+
+	/* with no "check to see if VBUS is still powered" board hook,
+	 * let's assume it'd only be powered to enable remote wakeup.
+	 */
+	if (!sl811->port1 || !device_can_wakeup(&hcd->self.root_hub->dev)) {
+		sl811->port1 = 0;
+		port_power(sl811, 1);
+		usb_root_hub_lost_power(hcd->self.root_hub);
+		return 0;
+	}
+
+	return sl811h_bus_resume(hcd);
+}
+
+#else
+
+#define	sl811h_suspend	NULL
+#define	sl811h_resume	NULL
+
+#endif
+
+
+/* this driver is exported so sl811_cs can depend on it */
+struct platform_driver sl811h_driver = {
+	.probe =	sl811h_probe,
+	.remove =	sl811h_remove,
+
+	.suspend =	sl811h_suspend,
+	.resume =	sl811h_resume,
+	.driver = {
+		.name =	(char *) hcd_name,
+	},
+};
+EXPORT_SYMBOL(sl811h_driver);
+
+module_platform_driver(sl811h_driver);
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
new file mode 100644
index 0000000..2abe51a
--- /dev/null
+++ b/drivers/usb/host/sl811.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SL811HS register declarations and HCD data structures
+ *
+ * Copyright (C) 2004 Psion Teklogix
+ * Copyright (C) 2004 David Brownell
+ * Copyright (C) 2001 Cypress Semiconductor Inc.
+ */
+
+/*
+ * SL811HS has transfer registers, and control registers.  In host/master
+ * mode one set of registers is used; in peripheral/slave mode, another.
+ *  - SL11H only has some "A" transfer registers from 0x00-0x04
+ *  - SL811HS also has "B" registers from 0x08-0x0c
+ *  - SL811S (or HS in slave mode) has four A+B sets, at 00, 10, 20, 30
+ */
+
+#define SL811_EP_A(base)	((base) + 0)
+#define SL811_EP_B(base)	((base) + 8)
+
+#define SL811_HOST_BUF		0x00
+#define SL811_PERIPH_EP0	0x00
+#define SL811_PERIPH_EP1	0x10
+#define SL811_PERIPH_EP2	0x20
+#define SL811_PERIPH_EP3	0x30
+
+
+/* TRANSFER REGISTERS:  host and peripheral sides are similar
+ * except for the control models (master vs slave).
+ */
+#define SL11H_HOSTCTLREG	0
+#	define SL11H_HCTLMASK_ARM	0x01
+#	define SL11H_HCTLMASK_ENABLE	0x02
+#	define SL11H_HCTLMASK_IN	0x00
+#	define SL11H_HCTLMASK_OUT	0x04
+#	define SL11H_HCTLMASK_ISOCH	0x10
+#	define SL11H_HCTLMASK_AFTERSOF	0x20
+#	define SL11H_HCTLMASK_TOGGLE	0x40
+#	define SL11H_HCTLMASK_PREAMBLE	0x80
+#define SL11H_BUFADDRREG	1
+#define SL11H_BUFLNTHREG	2
+#define SL11H_PKTSTATREG	3	/* read */
+#	define SL11H_STATMASK_ACK	0x01
+#	define SL11H_STATMASK_ERROR	0x02
+#	define SL11H_STATMASK_TMOUT	0x04
+#	define SL11H_STATMASK_SEQ	0x08
+#	define SL11H_STATMASK_SETUP	0x10
+#	define SL11H_STATMASK_OVF	0x20
+#	define SL11H_STATMASK_NAK	0x40
+#	define SL11H_STATMASK_STALL	0x80
+#define SL11H_PIDEPREG		3	/* write */
+#	define	SL_SETUP	0xd0
+#	define	SL_IN		0x90
+#	define	SL_OUT		0x10
+#	define	SL_SOF		0x50
+#	define	SL_PREAMBLE	0xc0
+#	define	SL_NAK		0xa0
+#	define	SL_STALL	0xe0
+#	define	SL_DATA0	0x30
+#	define	SL_DATA1	0xb0
+#define SL11H_XFERCNTREG	4	/* read */
+#define SL11H_DEVADDRREG	4	/* write */
+
+
+/* CONTROL REGISTERS:  host and peripheral are very different.
+ */
+#define SL11H_CTLREG1		5
+#	define SL11H_CTL1MASK_SOF_ENA	0x01
+#	define SL11H_CTL1MASK_FORCE	0x18
+#		define SL11H_CTL1MASK_NORMAL	0x00
+#		define SL11H_CTL1MASK_SE0	0x08	/* reset */
+#		define SL11H_CTL1MASK_J		0x10
+#		define SL11H_CTL1MASK_K		0x18	/* resume */
+#	define SL11H_CTL1MASK_LSPD	0x20
+#	define SL11H_CTL1MASK_SUSPEND	0x40
+#define SL11H_IRQ_ENABLE	6
+#	define SL11H_INTMASK_DONE_A	0x01
+#	define SL11H_INTMASK_DONE_B	0x02
+#	define SL11H_INTMASK_SOFINTR	0x10
+#	define SL11H_INTMASK_INSRMV	0x20	/* to/from SE0 */
+#	define SL11H_INTMASK_RD		0x40
+#	define SL11H_INTMASK_DP		0x80	/* only in INTSTATREG */
+#define SL11S_ADDRESS		7
+
+/* 0x08-0x0c are for the B buffer (not in SL11) */
+
+#define SL11H_IRQ_STATUS	0x0D	/* write to ack */
+#define SL11H_HWREVREG		0x0E	/* read */
+#	define SL11H_HWRMASK_HWREV	0xF0
+#define SL11H_SOFLOWREG		0x0E	/* write */
+#define SL11H_SOFTMRREG		0x0F	/* read */
+
+/* a write to this register enables SL811HS features.
+ * HOST flag presumably overrides the chip input signal?
+ */
+#define SL811HS_CTLREG2		0x0F
+#	define SL811HS_CTL2MASK_SOF_MASK	0x3F
+#	define SL811HS_CTL2MASK_DSWAP		0x40
+#	define SL811HS_CTL2MASK_HOST		0x80
+
+#define SL811HS_CTL2_INIT	(SL811HS_CTL2MASK_HOST | 0x2e)
+
+
+/* DATA BUFFERS: registers from 0x10..0xff are for data buffers;
+ * that's 240 bytes, which we'll split evenly between A and B sides.
+ * Only ISO can use more than 64 bytes per packet.
+ * (The SL11S has 0x40..0xff for buffers.)
+ */
+#define H_MAXPACKET	120		/* bytes in A or B fifos */
+
+#define SL11H_DATA_START	0x10
+#define	SL811HS_PACKET_BUF(is_a)	((is_a) \
+		? SL11H_DATA_START \
+		: (SL11H_DATA_START + H_MAXPACKET))
+
+/*-------------------------------------------------------------------------*/
+
+#define	LOG2_PERIODIC_SIZE	5	/* arbitrary; this matches OHCI */
+#define	PERIODIC_SIZE		(1 << LOG2_PERIODIC_SIZE)
+
+struct sl811 {
+	spinlock_t		lock;
+	void __iomem		*addr_reg;
+	void __iomem		*data_reg;
+	struct sl811_platform_data	*board;
+	struct dentry 		*debug_file;
+
+	unsigned long		stat_insrmv;
+	unsigned long		stat_wake;
+	unsigned long		stat_sof;
+	unsigned long		stat_a;
+	unsigned long		stat_b;
+	unsigned long		stat_lost;
+	unsigned long		stat_overrun;
+
+	/* sw model */
+	struct timer_list	timer;
+	struct sl811h_ep	*next_periodic;
+	struct sl811h_ep	*next_async;
+
+	struct sl811h_ep	*active_a;
+	unsigned long		jiffies_a;
+	struct sl811h_ep	*active_b;
+	unsigned long		jiffies_b;
+
+	u32			port1;
+	u8			ctrl1, ctrl2, irq_enable;
+	u16			frame;
+
+	/* async schedule: control, bulk */
+	struct list_head	async;
+
+	/* periodic schedule: interrupt, iso */
+	u16			load[PERIODIC_SIZE];
+	struct sl811h_ep	*periodic[PERIODIC_SIZE];
+	unsigned		periodic_count;
+};
+
+static inline struct sl811 *hcd_to_sl811(struct usb_hcd *hcd)
+{
+	return (struct sl811 *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *sl811_to_hcd(struct sl811 *sl811)
+{
+	return container_of((void *) sl811, struct usb_hcd, hcd_priv);
+}
+
+struct sl811h_ep {
+	struct usb_host_endpoint *hep;
+	struct usb_device	*udev;
+
+	u8			defctrl;
+	u8			maxpacket;
+	u8			epnum;
+	u8			nextpid;
+
+	u16			error_count;
+	u16			nak_count;
+	u16			length;		/* of current packet */
+
+	/* periodic schedule */
+	u16			period;
+	u16			branch;
+	u16			load;
+	struct sl811h_ep	*next;
+
+	/* async schedule */
+	struct list_head	schedule;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* These register utilities should work for the SL811S register API too
+ * NOTE:  caller must hold sl811->lock.
+ */
+
+static inline u8 sl811_read(struct sl811 *sl811, int reg)
+{
+	writeb(reg, sl811->addr_reg);
+	return readb(sl811->data_reg);
+}
+
+static inline void sl811_write(struct sl811 *sl811, int reg, u8 val)
+{
+	writeb(reg, sl811->addr_reg);
+	writeb(val, sl811->data_reg);
+}
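
Both primitives follow the chip's indexed access protocol: latch the register index through addr_reg, then move one byte through data_reg. Since the latch is shared state, the lock must cover the whole pair, and any composite built on top inherits that rule. A read-modify-write sketch under the same locking assumption (this helper is not in the driver):

static inline void
sl811_rmw(struct sl811 *sl811, int reg, u8 clear, u8 set)
{
	/* caller holds sl811->lock, exactly as for sl811_read/sl811_write */
	u8 val = sl811_read(sl811, reg);

	sl811_write(sl811, reg, (val & ~clear) | set);
}
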
+
+static inline void
+sl811_write_buf(struct sl811 *sl811, int addr, const void *buf, size_t count)
+{
+	const u8	*data;
+	void __iomem	*data_reg;
+
+	if (!count)
+		return;
+	writeb(addr, sl811->addr_reg);
+
+	data = buf;
+	data_reg = sl811->data_reg;
+	do {
+		writeb(*data++, data_reg);
+	} while (--count);
+}
+
+static inline void
+sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count)
+{
+	u8 		*data;
+	void __iomem	*data_reg;
+
+	if (!count)
+		return;
+	writeb(addr, sl811->addr_reg);
+
+	data = buf;
+	data_reg = sl811->data_reg;
+	do {
+		*data++ = readb(data_reg);
+	} while (--count);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef PACKET_TRACE
+#    define PACKET(stuff...)	pr_debug("sl811: " stuff)
+#else
+#    define PACKET(stuff...)	do {} while (0)
+#endif
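
With PACKET_TRACE defined, PACKET() compiles to pr_debug(); otherwise it disappears entirely. An illustrative call site (hypothetical, not from this snapshot; `ep` and `len` stand in for whatever the tracing site has on hand):

	PACKET("ep%d %s %d bytes\n", ep->epnum,
			(ep->nextpid == USB_PID_IN) ? "in" : "out", len);
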
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
new file mode 100644
index 0000000..7213637
--- /dev/null
+++ b/drivers/usb/host/sl811_cs.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCMCIA driver for SL811HS (as found in REX-CFU1U)
+ * Filename: sl811_cs.c
+ * Author:   Yukio Yamamoto
+ *
+ *  Port to sl811-hcd and 2.6.x by
+ *    Botond Botyanszki <boti@rocketmail.com>
+ *    Simon Pickering
+ *
+ *  Last update: 2005-05-12
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <linux/usb/sl811.h>
+
+MODULE_AUTHOR("Botond Botyanszki");
+MODULE_DESCRIPTION("REX-CFU1U PCMCIA driver for 2.6");
+MODULE_LICENSE("GPL");
+
+
+/*====================================================================*/
+/* MACROS                                                             */
+/*====================================================================*/
+
+#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)
+
+/*====================================================================*/
+/* VARIABLES                                                          */
+/*====================================================================*/
+
+typedef struct local_info_t {
+	struct pcmcia_device	*p_dev;
+} local_info_t;
+
+static void sl811_cs_release(struct pcmcia_device *link);
+
+/*====================================================================*/
+
+static void release_platform_dev(struct device *dev)
+{
+	dev_dbg(dev, "sl811_cs platform_dev release\n");
+	dev->parent = NULL;
+}
+
+static struct sl811_platform_data platform_data = {
+	.potpg		= 100,
+	.power		= 50,		/* == 100mA */
+	// .reset	= ... FIXME:  invoke CF reset on the card
+};
+
+static struct resource resources[] = {
+	[0] = {
+		.flags	= IORESOURCE_IRQ,
+	},
+	[1] = {
+		// .name   = "address",
+		.flags	= IORESOURCE_IO,
+	},
+	[2] = {
+		// .name   = "data",
+		.flags	= IORESOURCE_IO,
+	},
+};
+
+extern struct platform_driver sl811h_driver;
+
+static struct platform_device platform_dev = {
+	.id			= -1,
+	.dev = {
+		.platform_data = &platform_data,
+		.release       = release_platform_dev,
+	},
+	.resource		= resources,
+	.num_resources		= ARRAY_SIZE(resources),
+};
+
+static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
+			 int irq)
+{
+	if (platform_dev.dev.parent)
+		return -EBUSY;
+	platform_dev.dev.parent = parent;
+
+	/* finish setting up the platform device */
+	resources[0].start = irq;
+
+	resources[1].start = base_addr;
+	resources[1].end = base_addr;
+
+	resources[2].start = base_addr + 1;
+	resources[2].end   = base_addr + 1;
+
+	/* The driver core will probe for us.  We know sl811-hcd has been
+	 * initialized already because of the link order dependency created
+	 * by referencing "sl811h_driver".
+	 */
+	platform_dev.name = sl811h_driver.driver.name;
+	return platform_device_register(&platform_dev);
+}
+
+/*====================================================================*/
+
+static void sl811_cs_detach(struct pcmcia_device *link)
+{
+	dev_dbg(&link->dev, "sl811_cs_detach\n");
+
+	sl811_cs_release(link);
+
+	/* This points to the parent local_info_t struct */
+	kfree(link->priv);
+}
+
+static void sl811_cs_release(struct pcmcia_device *link)
+{
+	dev_dbg(&link->dev, "sl811_cs_release\n");
+
+	pcmcia_disable_device(link);
+	platform_device_unregister(&platform_dev);
+}
+
+static int sl811_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+	if (p_dev->config_index == 0)
+		return -EINVAL;
+
+	return pcmcia_request_io(p_dev);
+}
+
+
+static int sl811_cs_config(struct pcmcia_device *link)
+{
+	struct device		*parent = &link->dev;
+	int			ret;
+
+	dev_dbg(&link->dev, "sl811_cs_config\n");
+
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
+		CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO;
+
+	if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
+		goto failed;
+
+	/* require an IRQ and two registers */
+	if (resource_size(link->resource[0]) < 2)
+		goto failed;
+
+	if (!link->irq)
+		goto failed;
+
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		goto failed;
+
+	if (sl811_hc_init(parent, link->resource[0]->start, link->irq)
+			< 0) {
+failed:
+		printk(KERN_WARNING "sl811_cs_config failed\n");
+		sl811_cs_release(link);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int sl811_cs_probe(struct pcmcia_device *link)
+{
+	local_info_t *local;
+
+	local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
+	if (!local)
+		return -ENOMEM;
+	local->p_dev = link;
+	link->priv = local;
+
+	return sl811_cs_config(link);
+}
+
+static const struct pcmcia_device_id sl811_ids[] = {
+	PCMCIA_DEVICE_MANF_CARD(0xc015, 0x0001), /* RATOC USB HOST CF+ Card */
+	PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, sl811_ids);
+
+static struct pcmcia_driver sl811_cs_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "sl811_cs",
+	.probe		= sl811_cs_probe,
+	.remove		= sl811_cs_detach,
+	.id_table	= sl811_ids,
+};
+module_pcmcia_driver(sl811_cs_driver);
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
new file mode 100644
index 0000000..0169877
--- /dev/null
+++ b/drivers/usb/host/ssb-hcd.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sonics Silicon Backplane
+ * Broadcom USB-core driver  (SSB bus glue)
+ *
+ * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Based on ssb-ohci driver
+ * Copyright 2007 Michael Buesch <m@bues.ch>
+ *
+ * Derived from the OHCI-PCI driver
+ * Copyright 1999 Roman Weissgaerber
+ * Copyright 2000-2002 David Brownell
+ * Copyright 1999 Linus Torvalds
+ * Copyright 1999 Gregory P. Smith
+ *
+ * Derived from the USBcore related parts of Broadcom-SB
+ * Copyright 2005-2011 Broadcom Corporation
+ */
+#include <linux/ssb/ssb.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Common USB driver for SSB Bus");
+MODULE_LICENSE("GPL");
+
+#define SSB_HCD_TMSLOW_HOSTMODE	(1 << 29)
+
+struct ssb_hcd_device {
+	struct platform_device *ehci_dev;
+	struct platform_device *ohci_dev;
+
+	u32 enable_flags;
+};
+
+static void ssb_hcd_5354wa(struct ssb_device *dev)
+{
+#ifdef CONFIG_SSB_DRIVER_MIPS
+	/* Work around for 5354 failures */
+	if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
+		/* Change syn01 reg */
+		ssb_write32(dev, 0x894, 0x00fe00fe);
+
+		/* Change syn03 reg */
+		ssb_write32(dev, 0x89c, ssb_read32(dev, 0x89c) | 0x1);
+	}
+#endif
+}
+
+static void ssb_hcd_usb20wa(struct ssb_device *dev)
+{
+	if (dev->id.coreid == SSB_DEV_USB20_HOST) {
+		/*
+		 * USB 2.0 special considerations:
+		 *
+		 * In addition to the standard SSB reset sequence, the Host
+		 * Control Register must be programmed to bring the USB core
+		 * and various phy components out of reset.
+		 */
+		ssb_write32(dev, 0x200, 0x7ff);
+
+		/* Change Flush control reg */
+		ssb_write32(dev, 0x400, ssb_read32(dev, 0x400) & ~8);
+		ssb_read32(dev, 0x400);
+
+		/* Change Shim control reg */
+		ssb_write32(dev, 0x304, ssb_read32(dev, 0x304) & ~0x100);
+		ssb_read32(dev, 0x304);
+
+		udelay(1);
+
+		ssb_hcd_5354wa(dev);
+	}
+}
+
+/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
+static u32 ssb_hcd_init_chip(struct ssb_device *dev)
+{
+	u32 flags = 0;
+
+	if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV)
+		/* Put the device into host-mode. */
+		flags |= SSB_HCD_TMSLOW_HOSTMODE;
+
+	ssb_device_enable(dev, flags);
+
+	ssb_hcd_usb20wa(dev);
+
+	return flags;
+}
+
+static const struct usb_ehci_pdata ehci_pdata = {
+};
+
+static const struct usb_ohci_pdata ohci_pdata = {
+};
+
+static struct platform_device *ssb_hcd_create_pdev(struct ssb_device *dev, bool ohci, u32 addr, u32 len)
+{
+	struct platform_device *hci_dev;
+	struct resource hci_res[2];
+	int ret;
+
+	memset(hci_res, 0, sizeof(hci_res));
+
+	hci_res[0].start = addr;
+	hci_res[0].end = hci_res[0].start + len - 1;
+	hci_res[0].flags = IORESOURCE_MEM;
+
+	hci_res[1].start = dev->irq;
+	hci_res[1].flags = IORESOURCE_IRQ;
+
+	hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
+					"ehci-platform", 0);
+	if (!hci_dev)
+		return ERR_PTR(-ENOMEM);
+
+	hci_dev->dev.parent = dev->dev;
+	hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
+
+	ret = platform_device_add_resources(hci_dev, hci_res,
+					    ARRAY_SIZE(hci_res));
+	if (ret)
+		goto err_alloc;
+	if (ohci)
+		ret = platform_device_add_data(hci_dev, &ohci_pdata,
+					       sizeof(ohci_pdata));
+	else
+		ret = platform_device_add_data(hci_dev, &ehci_pdata,
+					       sizeof(ehci_pdata));
+	if (ret)
+		goto err_alloc;
+	ret = platform_device_add(hci_dev);
+	if (ret)
+		goto err_alloc;
+
+	return hci_dev;
+
+err_alloc:
+	platform_device_put(hci_dev);
+	return ERR_PTR(ret);
+}
+
+static int ssb_hcd_probe(struct ssb_device *dev,
+				   const struct ssb_device_id *id)
+{
+	int err, tmp;
+	int start, len;
+	u16 chipid_top;
+	u16 coreid = dev->id.coreid;
+	struct ssb_hcd_device *usb_dev;
+
+	/* USBcores are only connected on embedded devices. */
+	chipid_top = (dev->bus->chip_id & 0xFF00);
+	if (chipid_top != 0x4700 && chipid_top != 0x5300)
+		return -ENODEV;
+
+	/* TODO: Probably need checks here; is the core connected? */
+
+	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
+		return -EOPNOTSUPP;
+
+	usb_dev = devm_kzalloc(dev->dev, sizeof(struct ssb_hcd_device),
+			       GFP_KERNEL);
+	if (!usb_dev)
+		return -ENOMEM;
+
+	/* We currently always attach SSB_DEV_USB11_HOSTDEV
+	 * as HOST OHCI. If we want to attach it as Client device,
+	 * we must branch here and call into the (yet to
+	 * be written) Client mode driver. Same for remove(). */
+	usb_dev->enable_flags = ssb_hcd_init_chip(dev);
+
+	tmp = ssb_read32(dev, SSB_ADMATCH0);
+
+	start = ssb_admatch_base(tmp);
+	len = (coreid == SSB_DEV_USB20_HOST) ? 0x800 : ssb_admatch_size(tmp);
+	usb_dev->ohci_dev = ssb_hcd_create_pdev(dev, true, start, len);
+	if (IS_ERR(usb_dev->ohci_dev))
+		return PTR_ERR(usb_dev->ohci_dev);
+
+	if (coreid == SSB_DEV_USB20_HOST) {
+		start = ssb_admatch_base(tmp) + 0x800; /* ehci core offset */
+		usb_dev->ehci_dev = ssb_hcd_create_pdev(dev, false, start, len);
+		if (IS_ERR(usb_dev->ehci_dev)) {
+			err = PTR_ERR(usb_dev->ehci_dev);
+			goto err_unregister_ohci_dev;
+		}
+	}
+
+	ssb_set_drvdata(dev, usb_dev);
+	return 0;
+
+err_unregister_ohci_dev:
+	platform_device_unregister(usb_dev->ohci_dev);
+	return err;
+}
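
For a USB 2.0 core, the probe above splits one admatch window into two back-to-back children, each a generic platform HCD that binds purely by the device name passed to platform_device_alloc(). The layout follows from the 0x800 offset and length used above:

	/*
	 * base + 0x000 .. base + 0x7ff   OHCI companion ("ohci-platform")
	 * base + 0x800 .. base + 0xfff   EHCI registers ("ehci-platform")
	 *
	 * USB 1.1-only cores instead get the full admatch size for OHCI.
	 */
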
+
+static void ssb_hcd_remove(struct ssb_device *dev)
+{
+	struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
+	struct platform_device *ohci_dev = usb_dev->ohci_dev;
+	struct platform_device *ehci_dev = usb_dev->ehci_dev;
+
+	if (ohci_dev)
+		platform_device_unregister(ohci_dev);
+	if (ehci_dev)
+		platform_device_unregister(ehci_dev);
+
+	ssb_device_disable(dev, 0);
+}
+
+static void ssb_hcd_shutdown(struct ssb_device *dev)
+{
+	ssb_device_disable(dev, 0);
+}
+
+#ifdef CONFIG_PM
+
+static int ssb_hcd_suspend(struct ssb_device *dev, pm_message_t state)
+{
+	ssb_device_disable(dev, 0);
+
+	return 0;
+}
+
+static int ssb_hcd_resume(struct ssb_device *dev)
+{
+	struct ssb_hcd_device *usb_dev = ssb_get_drvdata(dev);
+
+	ssb_device_enable(dev, usb_dev->enable_flags);
+
+	return 0;
+}
+
+#else /* !CONFIG_PM */
+#define ssb_hcd_suspend	NULL
+#define ssb_hcd_resume	NULL
+#endif /* CONFIG_PM */
+
+static const struct ssb_device_id ssb_hcd_table[] = {
+	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
+	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
+	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
+	{},
+};
+MODULE_DEVICE_TABLE(ssb, ssb_hcd_table);
+
+static struct ssb_driver ssb_hcd_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= ssb_hcd_table,
+	.probe		= ssb_hcd_probe,
+	.remove		= ssb_hcd_remove,
+	.shutdown	= ssb_hcd_shutdown,
+	.suspend	= ssb_hcd_suspend,
+	.resume		= ssb_hcd_resume,
+};
+
+static int __init ssb_hcd_init(void)
+{
+	return ssb_driver_register(&ssb_hcd_driver);
+}
+module_init(ssb_hcd_init);
+
+static void __exit ssb_hcd_exit(void)
+{
+	ssb_driver_unregister(&ssb_hcd_driver);
+}
+module_exit(ssb_hcd_exit);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
new file mode 100644
index 0000000..5b8a3d95
--- /dev/null
+++ b/drivers/usb/host/u132-hcd.c
@@ -0,0 +1,3230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* Host Controller Driver for the Elan Digital Systems U132 adapter
+*
+* Copyright (C) 2006 Elan Digital Systems Limited
+* http://www.elandigitalsystems.com
+*
+* Author and Maintainer - Tony Olech - Elan Digital Systems
+* tony.olech@elandigitalsystems.com
+*
+* This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
+* based on various USB host drivers in the 2.6.15 linux kernel
+* with constant reference to the 3rd Edition of Linux Device Drivers
+* published by O'Reilly
+*
+* The U132 adapter is a USB to CardBus adapter specifically designed
+* for PC cards that contain an OHCI host controller. Typical PC cards
+* are the Orange Mobile 3G Option GlobeTrotter Fusion card.
+*
+* The U132 adapter will *NOT* work with PC cards that do not contain
+* an OHCI controller. A simple way to test whether a PC card has an
+* OHCI controller as an interface is to insert the PC card directly
+* into a laptop (or desktop) with a CardBus slot: if "lspci" shows
+* a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
+* then there is a good chance that the U132 adapter will support the
+* PC card. (You also need the specific client driver for the PC card.)
+*
+* Please inform the Author and Maintainer about any PC cards that
+* contain an OHCI Host Controller and work when directly connected to
+* an embedded CardBus slot but do not work when they are connected
+* via an ELAN U132 adapter.
+*
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+	/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
+	 * If you're going to try stuff like this, you need to split
+	 * out shareable stuff (register declarations?) into its own
+	 * file, maybe name <linux/usb/ohci.h>
+	 */
+
+#include "ohci.h"
+#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
+#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
+	OHCI_INTR_WDH)
+MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
+MODULE_DESCRIPTION("U132 USB Host Controller Driver");
+MODULE_LICENSE("GPL");
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
+INT_MODULE_PARM(testing, 0);
+/* Some boards misreport power switching/overcurrent*/
+static bool distrust_firmware = true;
+module_param(distrust_firmware, bool, 0);
+MODULE_PARM_DESC(distrust_firmware,
+	"true to distrust firmware power/overcurrent setup");
+static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
+/*
+* u132_module_lock exists to protect access to global variables
+*
+*/
+static struct mutex u132_module_lock;
+static int u132_exiting;
+static int u132_instances;
+static struct list_head u132_static_list;
+/*
+* end of the global variables protected by u132_module_lock
+*/
+static struct workqueue_struct *workqueue;
+#define MAX_U132_PORTS 7
+#define MAX_U132_ADDRS 128
+#define MAX_U132_UDEVS 4
+#define MAX_U132_ENDPS 100
+#define MAX_U132_RINGS 4
+static const char *cc_to_text[16] = {
+	"No Error ",
+	"CRC Error ",
+	"Bit Stuff ",
+	"Data Togg ",
+	"Stall ",
+	"DevNotResp ",
+	"PIDCheck ",
+	"UnExpPID ",
+	"DataOver ",
+	"DataUnder ",
+	"(for hw) ",
+	"(for hw) ",
+	"BufferOver ",
+	"BuffUnder ",
+	"(for HCD) ",
+	"(for HCD) "
+};
+struct u132_port {
+	struct u132 *u132;
+	int reset;
+	int enable;
+	int power;
+	int Status;
+};
+struct u132_addr {
+	u8 address;
+};
+struct u132_udev {
+	struct kref kref;
+	struct usb_device *usb_device;
+	u8 enumeration;
+	u8 udev_number;
+	u8 usb_addr;
+	u8 portnumber;
+	u8 endp_number_in[16];
+	u8 endp_number_out[16];
+};
+#define ENDP_QUEUE_SHIFT 3
+#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
+#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
+struct u132_urbq {
+	struct list_head urb_more;
+	struct urb *urb;
+};
+struct u132_spin {
+	spinlock_t slock;
+};
+struct u132_endp {
+	struct kref kref;
+	u8 udev_number;
+	u8 endp_number;
+	u8 usb_addr;
+	u8 usb_endp;
+	struct u132 *u132;
+	struct list_head endp_ring;
+	struct u132_ring *ring;
+	unsigned toggle_bits:2;
+	unsigned active:1;
+	unsigned delayed:1;
+	unsigned input:1;
+	unsigned output:1;
+	unsigned pipetype:2;
+	unsigned dequeueing:1;
+	unsigned edset_flush:1;
+	unsigned spare_bits:14;
+	unsigned long jiffies;
+	struct usb_host_endpoint *hep;
+	struct u132_spin queue_lock;
+	u16 queue_size;
+	u16 queue_last;
+	u16 queue_next;
+	struct urb *urb_list[ENDP_QUEUE_SIZE];
+	struct list_head urb_more;
+	struct delayed_work scheduler;
+};
+struct u132_ring {
+	unsigned in_use:1;
+	unsigned length:7;
+	u8 number;
+	struct u132 *u132;
+	struct u132_endp *curr_endp;
+	struct delayed_work scheduler;
+};
+struct u132 {
+	struct kref kref;
+	struct list_head u132_list;
+	struct mutex sw_lock;
+	struct mutex scheduler_lock;
+	struct u132_platform_data *board;
+	struct platform_device *platform_dev;
+	struct u132_ring ring[MAX_U132_RINGS];
+	int sequence_num;
+	int going;
+	int power;
+	int reset;
+	int num_ports;
+	u32 hc_control;
+	u32 hc_fminterval;
+	u32 hc_roothub_status;
+	u32 hc_roothub_a;
+	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
+	int flags;
+	unsigned long next_statechange;
+	struct delayed_work monitor;
+	int num_endpoints;
+	struct u132_addr addr[MAX_U132_ADDRS];
+	struct u132_udev udev[MAX_U132_UDEVS];
+	struct u132_port port[MAX_U132_PORTS];
+	struct u132_endp *endp[MAX_U132_ENDPS];
+};
+
+/*
+* These cannot be inline functions: "member" is a structure member
+* name, and offsetof() needs it as a literal token at expansion time.
+*/
+#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
+	offsetof(struct ohci_regs, member), 0, data);
+#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
+	offsetof(struct ohci_regs, member), 0, data);
+#define u132_read_pcimem(u132, member, data) \
+	usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
+	ohci_regs, member), 0, data);
+#define u132_write_pcimem(u132, member, data) \
+	usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
+	ohci_regs, member), 0, data);
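
Because offsetof() consumes the member name as a token, the only function-style alternative is to compute the offset at every call site. A sketch of that trade-off; this helper is hypothetical and not in the driver:

static inline int u132_read_pcimem_off(struct u132 *u132, int offset, u32 *data)
{
	return usb_ftdi_elan_read_pcimem(u132->platform_dev, offset, 0, data);
}

/* a caller would then spell the offset out itself:
 *	u132_read_pcimem_off(u132, offsetof(struct ohci_regs, control), &ctrl);
 */
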
+static inline struct u132 *udev_to_u132(struct u132_udev *udev)
+{
+	u8 udev_number = udev->udev_number;
+	return container_of(udev, struct u132, udev[udev_number]);
+}
+
+static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
+{
+	return (struct u132 *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
+{
+	return container_of((void *)u132, struct usb_hcd, hcd_priv);
+}
+
+static inline void u132_disable(struct u132 *u132)
+{
+	u132_to_hcd(u132)->state = HC_STATE_HALT;
+}
+
+
+#define kref_to_u132(d) container_of(d, struct u132, kref)
+#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
+#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
+#include "../misc/usb_u132.h"
+static const char hcd_name[] = "u132_hcd";
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
+	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
+	USB_PORT_STAT_C_RESET) << 16)
+static void u132_hcd_delete(struct kref *kref)
+{
+	struct u132 *u132 = kref_to_u132(kref);
+	struct platform_device *pdev = u132->platform_dev;
+	struct usb_hcd *hcd = u132_to_hcd(u132);
+	u132->going += 1;
+	mutex_lock(&u132_module_lock);
+	list_del_init(&u132->u132_list);
+	u132_instances -= 1;
+	mutex_unlock(&u132_module_lock);
+	dev_warn(&u132->platform_dev->dev,
+		"FREEING the hcd=%p and thus the u132=%p going=%d pdev=%p\n",
+		hcd, u132, u132->going, pdev);
+	usb_put_hcd(hcd);
+}
+
+static inline void u132_u132_put_kref(struct u132 *u132)
+{
+	kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static inline void u132_u132_init_kref(struct u132 *u132)
+{
+	kref_init(&u132->kref);
+}
+
+static void u132_udev_delete(struct kref *kref)
+{
+	struct u132_udev *udev = kref_to_u132_udev(kref);
+	udev->udev_number = 0;
+	udev->usb_device = NULL;
+	udev->usb_addr = 0;
+	udev->enumeration = 0;
+}
+
+static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
+{
+	kref_put(&udev->kref, u132_udev_delete);
+}
+
+static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
+{
+	kref_get(&udev->kref);
+}
+
+static inline void u132_udev_init_kref(struct u132 *u132,
+	struct u132_udev *udev)
+{
+	kref_init(&udev->kref);
+}
+
+static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
+{
+	kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
+	unsigned int delta)
+{
+	if (delta > 0) {
+		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
+			return;
+	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
+		return;
+	kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
+	unsigned int delta)
+{
+	kref_get(&u132->kref);
+	u132_ring_requeue_work(u132, ring, delta);
+}
+
+static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
+{
+	if (cancel_delayed_work(&ring->scheduler))
+		kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static void u132_endp_delete(struct kref *kref)
+{
+	struct u132_endp *endp = kref_to_u132_endp(kref);
+	struct u132 *u132 = endp->u132;
+	u8 usb_addr = endp->usb_addr;
+	u8 usb_endp = endp->usb_endp;
+	u8 address = u132->addr[usb_addr].address;
+	struct u132_udev *udev = &u132->udev[address];
+	u8 endp_number = endp->endp_number;
+	struct usb_host_endpoint *hep = endp->hep;
+	struct u132_ring *ring = endp->ring;
+	struct list_head *head = &endp->endp_ring;
+	ring->length -= 1;
+	if (endp == ring->curr_endp) {
+		if (list_empty(head)) {
+			ring->curr_endp = NULL;
+			list_del(head);
+		} else {
+			struct u132_endp *next_endp = list_entry(head->next,
+				struct u132_endp, endp_ring);
+			ring->curr_endp = next_endp;
+			list_del(head);
+		}
+	} else
+		list_del(head);
+	if (endp->input) {
+		udev->endp_number_in[usb_endp] = 0;
+		u132_udev_put_kref(u132, udev);
+	}
+	if (endp->output) {
+		udev->endp_number_out[usb_endp] = 0;
+		u132_udev_put_kref(u132, udev);
+	}
+	u132->endp[endp_number - 1] = NULL;
+	hep->hcpriv = NULL;
+	kfree(endp);
+	u132_u132_put_kref(u132);
+}
+
+static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
+{
+	kref_put(&endp->kref, u132_endp_delete);
+}
+
+static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
+{
+	kref_get(&endp->kref);
+}
+
+static inline void u132_endp_init_kref(struct u132 *u132,
+	struct u132_endp *endp)
+{
+	kref_init(&endp->kref);
+	kref_get(&u132->kref);
+}
+
+static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
+	unsigned int delta)
+{
+	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+		kref_get(&endp->kref);
+}
+
+static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
+{
+	if (cancel_delayed_work(&endp->scheduler))
+		kref_put(&endp->kref, u132_endp_delete);
+}
+
+static inline void u132_monitor_put_kref(struct u132 *u132)
+{
+	kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
+{
+	if (queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_get(&u132->kref);
+}
+
+static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
+{
+	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_put(&u132->kref, u132_hcd_delete);
+}
+
+static void u132_monitor_cancel_work(struct u132 *u132)
+{
+	if (cancel_delayed_work(&u132->monitor))
+		kref_put(&u132->kref, u132_hcd_delete);
+}
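
All of the queue/requeue/cancel helpers above enforce one invariant: a reference on the enclosing object travels with every pending work item, taken when the work is queued and dropped when queueing fails or the work is cancelled before it runs (the handler drops it on its own final pass). The pattern in miniature, as a generic sketch with hypothetical names reusing this file's `workqueue`:

struct refcounted_worker {
	struct kref		kref;
	struct delayed_work	work;
};

static void worker_release(struct kref *kref)
{
	kfree(container_of(kref, struct refcounted_worker, kref));
}

static void worker_queue(struct refcounted_worker *w, unsigned long delay)
{
	kref_get(&w->kref);		/* the ref travels with the work */
	if (!queue_delayed_work(workqueue, &w->work, delay))
		kref_put(&w->kref, worker_release);	/* already queued */
}

static void worker_cancel(struct refcounted_worker *w)
{
	if (cancel_delayed_work(&w->work))	/* it will never run */
		kref_put(&w->kref, worker_release);
}
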
+
+static int read_roothub_info(struct u132 *u132)
+{
+	u32 revision;
+	int retval;
+	retval = u132_read_pcimem(u132, revision, &revision);
+	if (retval) {
+		dev_err(&u132->platform_dev->dev,
+			"error %d accessing device revision\n", retval);
+		return retval;
+	} else if ((revision & 0xFF) == 0x10) {
+		/* revision 0x10 is supported */
+	} else if ((revision & 0xFF) == 0x11) {
+		/* revision 0x11 is supported */
+	} else {
+		dev_err(&u132->platform_dev->dev,
+			"device revision is not valid %08X\n", revision);
+		return -ENODEV;
+	}
+	retval = u132_read_pcimem(u132, control, &u132->hc_control);
+	if (retval) {
+		dev_err(&u132->platform_dev->dev,
+			"error %d accessing device control\n", retval);
+		return retval;
+	}
+	retval = u132_read_pcimem(u132, roothub.status,
+		&u132->hc_roothub_status);
+	if (retval) {
+		dev_err(&u132->platform_dev->dev,
+			"error %d accessing device reg roothub.status\n", retval);
+		return retval;
+	}
+	retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
+	if (retval) {
+		dev_err(&u132->platform_dev->dev,
+			"error %d accessing device reg roothub.a\n", retval);
+		return retval;
+	}
+	{
+		int i;
+
+		for (i = 0; i < u132->num_ports; i++) {
+			retval = u132_read_pcimem(u132, roothub.portstatus[i],
+				&u132->hc_roothub_portstatus[i]);
+			if (retval) {
+				dev_err(&u132->platform_dev->dev,
+					"error %d accessing device roothub.portstatus[%d]\n",
+					retval, i);
+				return retval;
+			}
+		}
+	}
+	return 0;
+}
+
+static void u132_hcd_monitor_work(struct work_struct *work)
+{
+	struct u132 *u132 = container_of(work, struct u132, monitor.work);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		u132_monitor_put_kref(u132);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		u132_monitor_put_kref(u132);
+		return;
+	} else {
+		int retval;
+		mutex_lock(&u132->sw_lock);
+		retval = read_roothub_info(u132);
+		if (retval) {
+			struct usb_hcd *hcd = u132_to_hcd(u132);
+			u132_disable(u132);
+			u132->going = 1;
+			mutex_unlock(&u132->sw_lock);
+			usb_hc_died(hcd);
+			ftdi_elan_gone_away(u132->platform_dev);
+			u132_monitor_put_kref(u132);
+			return;
+		} else {
+			u132_monitor_requeue_work(u132, 500);
+			mutex_unlock(&u132->sw_lock);
+			return;
+		}
+	}
+}
+
+static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
+	struct urb *urb, int status)
+{
+	struct u132_ring *ring;
+	unsigned long irqs;
+	struct usb_hcd *hcd = u132_to_hcd(u132);
+	urb->error_count = 0;
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	endp->queue_next += 1;
+	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
+		endp->active = 0;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+	} else {
+		struct list_head *next = endp->urb_more.next;
+		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
+			urb_more);
+		list_del(next);
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
+			urbq->urb;
+		endp->active = 0;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(urbq);
+	}
+	mutex_lock(&u132->scheduler_lock);
+	ring = endp->ring;
+	ring->in_use = 0;
+	u132_ring_cancel_work(u132, ring);
+	u132_ring_queue_work(u132, ring, 0);
+	mutex_unlock(&u132->scheduler_lock);
+	u132_endp_put_kref(u132, endp);
+	usb_hcd_giveback_urb(hcd, urb, status);
+}
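
The queue juggling above matches the per-endpoint layout declared earlier: urb_list is a ring of ENDP_QUEUE_SIZE slots indexed by ever-growing queue_next/queue_last counters folded through ENDP_QUEUE_MASK, and once the ring is full, extra URBs wait on urb_more until a giveback promotes one, which is the else branch above. Reading the head of that queue looks like this (hypothetical helper, not in the driver):

static inline struct urb *u132_endp_head_urb(struct u132_endp *endp)
{
	if (!endp->queue_size)		/* nothing queued */
		return NULL;
	return endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next];
}
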
+
+static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
+	struct urb *urb, int status)
+{
+	u132_endp_put_kref(u132, endp);
+}
+
+static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
+	struct urb *urb, int status)
+{
+	unsigned long irqs;
+	struct usb_hcd *hcd = u132_to_hcd(u132);
+	urb->error_count = 0;
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	endp->queue_next += 1;
+	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
+		endp->active = 0;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+	} else {
+		struct list_head *next = endp->urb_more.next;
+		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
+			urb_more);
+		list_del(next);
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
+			urbq->urb;
+		endp->active = 0;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(urbq);
+	}
+	usb_hcd_giveback_urb(hcd, urb, status);
+}
+
+static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
+	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
+	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
+	int toggle_bits, int error_count, int condition_code, int repeat_number,
+	 int halted, int skipped, int actual, int non_null))
+{
+	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
+		 urb, address, endp->usb_endp, toggle_bits, callback);
+}
+
+static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
+	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
+	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
+	int toggle_bits, int error_count, int condition_code, int repeat_number,
+	 int halted, int skipped, int actual, int non_null))
+{
+	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
+		 urb, address, endp->usb_endp, toggle_bits, callback);
+}
+
+static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
+	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
+	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
+	int toggle_bits, int error_count, int condition_code, int repeat_number,
+	 int halted, int skipped, int actual, int non_null))
+{
+	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
+		endp, urb, address, endp->usb_endp, toggle_bits, callback);
+}
+
+static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
+	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
+	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
+	int toggle_bits, int error_count, int condition_code, int repeat_number,
+	 int halted, int skipped, int actual, int non_null))
+{
+	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
+		endp, urb, address, endp->usb_endp, toggle_bits, callback);
+}
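
The four edset_* wrappers repeat the same twelve-argument callback type verbatim. A typedef would name it once; sketch only, since this driver and the ftdi-elan interface it calls both spell the type out at every site:

typedef void (*u132_edset_callback)(void *endp, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null);
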
+
+
+/*
+* must not LOCK sw_lock
+*
+*/
+static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	struct u132_udev *udev = &u132->udev[address];
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		struct u132_ring *ring = endp->ring;
+
+		memcpy(urb->transfer_buffer + urb->actual_length, buf, len);
+
+		urb->actual_length += len;
+		if ((condition_code == TD_CC_NOERROR) &&
+			(urb->transfer_buffer_length > urb->actual_length)) {
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			if (urb->actual_length > 0) {
+				int retval;
+				mutex_unlock(&u132->scheduler_lock);
+				retval = edset_single(u132, ring, endp, urb,
+					address, endp->toggle_bits,
+					u132_hcd_interrupt_recv);
+				if (retval != 0)
+					u132_hcd_giveback_urb(u132, endp, urb,
+						retval);
+			} else {
+				ring->in_use = 0;
+				endp->active = 0;
+				endp->jiffies = jiffies +
+					msecs_to_jiffies(urb->interval);
+				u132_ring_cancel_work(u132, ring);
+				u132_ring_queue_work(u132, ring, 0);
+				mutex_unlock(&u132->scheduler_lock);
+				u132_endp_put_kref(u132, endp);
+			}
+			return;
+		} else if ((condition_code == TD_DATAUNDERRUN) &&
+			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb, 0);
+			return;
+		} else {
+			if (condition_code == TD_CC_NOERROR) {
+				endp->toggle_bits = toggle_bits;
+				usb_settoggle(udev->usb_device, endp->usb_endp,
+					0, 1 & toggle_bits);
+			} else if (condition_code == TD_CC_STALL) {
+				endp->toggle_bits = 0x2;
+				usb_settoggle(udev->usb_device, endp->usb_endp,
+					0, 0);
+			} else {
+				endp->toggle_bits = 0x2;
+				usb_settoggle(udev->usb_device, endp->usb_endp,
+					0, 0);
+				dev_err(&u132->platform_dev->dev,
+					"urb=%p giving back INTERRUPT %s\n",
+					urb, cc_to_text[condition_code]);
+			}
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		}
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
+static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		struct u132_ring *ring = endp->ring;
+		urb->actual_length += len;
+		endp->toggle_bits = toggle_bits;
+		if (urb->transfer_buffer_length > urb->actual_length) {
+			int retval;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = edset_output(u132, ring, endp, urb, address,
+				endp->toggle_bits, u132_hcd_bulk_output_sent);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else {
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb, 0);
+			return;
+		}
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
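+/*
+ * Completion callback for a bulk IN chunk: copy the received bytes into
+ * the URB's transfer buffer, record the data toggle, and resubmit for
+ * more data or finish the URB according to the condition code (NOERROR,
+ * DATAUNDERRUN on a short packet, STALL, or a hard error).
+ */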
+static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	struct u132_udev *udev = &u132->udev[address];
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		struct u132_ring *ring = endp->ring;
+
+		memcpy(urb->transfer_buffer + urb->actual_length, buf, len);
+		urb->actual_length += len;
+		if ((condition_code == TD_CC_NOERROR) &&
+			(urb->transfer_buffer_length > urb->actual_length)) {
+			int retval;
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			mutex_unlock(&u132->scheduler_lock);
+			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
+				ring->number, endp, urb, address,
+				endp->usb_endp, endp->toggle_bits,
+				u132_hcd_bulk_input_recv);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else if (condition_code == TD_CC_NOERROR) {
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		} else if ((condition_code == TD_DATAUNDERRUN) &&
+			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb, 0);
+			return;
+		} else if (condition_code == TD_DATAUNDERRUN) {
+			endp->toggle_bits = toggle_bits;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
+				1 & toggle_bits);
+			dev_warn(&u132->platform_dev->dev,
+				"urb=%p(SHORT NOT OK) giving back BULK IN %s\n",
+				urb, cc_to_text[condition_code]);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb, 0);
+			return;
+		} else if (condition_code == TD_CC_STALL) {
+			endp->toggle_bits = 0x2;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		} else {
+			endp->toggle_bits = 0x2;
+			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
+			dev_err(&u132->platform_dev->dev,
+				"urb=%p giving back BULK IN code=%d %s\n", urb,
+				condition_code, cc_to_text[condition_code]);
+			mutex_unlock(&u132->scheduler_lock);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		}
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
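+/*
+ * The zero-length status stage of a control transfer has completed;
+ * give the URB back to the USB core.
+ */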
+static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
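+/*
+ * The data stage of a control IN transfer has completed: copy the
+ * payload into the URB and queue the zero-length OUT status stage, or
+ * finish the URB immediately on STALL or a hard error.
+ */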
+static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		struct u132_ring *ring = endp->ring;
+
+		memcpy(urb->transfer_buffer, buf, len);
+		urb->actual_length = len;
+		if ((condition_code == TD_CC_NOERROR) || ((condition_code ==
+			TD_DATAUNDERRUN) && ((urb->transfer_flags &
+			URB_SHORT_NOT_OK) == 0))) {
+			int retval;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
+				ring->number, endp, urb, address,
+				endp->usb_endp, 0x3,
+				u132_hcd_configure_empty_sent);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else if (condition_code == TD_CC_STALL) {
+			mutex_unlock(&u132->scheduler_lock);
+			dev_warn(&u132->platform_dev->dev,
+				"giving back SETUP INPUT STALL urb %p\n", urb);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		} else {
+			mutex_unlock(&u132->scheduler_lock);
+			dev_err(&u132->platform_dev->dev,
+				"giving back SETUP INPUT %s urb %p\n",
+				cc_to_text[condition_code], urb);
+			u132_hcd_giveback_urb(u132, endp, urb,
+				cc_to_error[condition_code]);
+			return;
+		}
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
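+/*
+ * The zero-length IN status stage of a control OUT transfer has been
+ * received; the transfer is complete, so give the URB back.
+ */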
+static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
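+/*
+ * The SETUP packet of a control transfer has been accepted: queue the
+ * IN data stage for control reads, or go straight to the zero-length
+ * IN status stage for control writes.
+ */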
+static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		if (usb_pipein(urb->pipe)) {
+			int retval;
+			struct u132_ring *ring = endp->ring;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
+				ring->number, endp, urb, address,
+				endp->usb_endp, 0,
+				u132_hcd_configure_input_recv);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else {
+			int retval;
+			struct u132_ring *ring = endp->ring;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
+				ring->number, endp, urb, address,
+				endp->usb_endp, 0,
+				u132_hcd_configure_empty_recv);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		}
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
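+/*
+ * The status stage of SET_ADDRESS has completed: the device now answers
+ * on its new address, so clear the default address slot and repoint the
+ * endpoint at the assigned address before giving the URB back.
+ */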
+static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
+	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	struct u132_udev *udev = &u132->udev[address];
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		u132->addr[0].address = 0;
+		endp->usb_addr = udev->usb_addr;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
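+/*
+ * The SET_ADDRESS SETUP packet went out on the default address; queue
+ * the zero-length IN status stage to complete the request.
+ */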
+static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
+	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		int retval;
+		struct u132_ring *ring = endp->ring;
+		mutex_unlock(&u132->scheduler_lock);
+		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
+			ring->number, endp, urb, 0, endp->usb_endp, 0,
+			u132_hcd_enumeration_empty_recv);
+		if (retval != 0)
+			u132_hcd_giveback_urb(u132, endp, urb, retval);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
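+/*
+ * The status stage of a control transfer on the default address has
+ * completed; give the URB back.
+ */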
+static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
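+/*
+ * The data stage of a control IN on the default address has completed:
+ * copy the payload and queue the zero-length status stage.
+ */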
+static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		int retval;
+		struct u132_ring *ring = endp->ring;
+
+		memcpy(urb->transfer_buffer, buf, len);
+		urb->actual_length = len;
+		mutex_unlock(&u132->scheduler_lock);
+		retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
+			ring->number, endp, urb, address, endp->usb_endp, 0x3,
+			u132_hcd_initial_empty_sent);
+		if (retval != 0)
+			u132_hcd_giveback_urb(u132, endp, urb, retval);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
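+/*
+ * The SETUP packet of a control transfer on the default address has
+ * been accepted; queue the IN data stage.
+ */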
+static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
+	int len, int toggle_bits, int error_count, int condition_code,
+	int repeat_number, int halted, int skipped, int actual, int non_null)
+{
+	struct u132_endp *endp = data;
+	struct u132 *u132 = endp->u132;
+	u8 address = u132->addr[endp->usb_addr].address;
+	mutex_lock(&u132->scheduler_lock);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (endp->dequeueing) {
+		endp->dequeueing = 0;
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
+		return;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
+		return;
+	} else if (!urb->unlinked) {
+		int retval;
+		struct u132_ring *ring = endp->ring;
+		mutex_unlock(&u132->scheduler_lock);
+		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
+			ring->number, endp, urb, address, endp->usb_endp, 0,
+			u132_hcd_initial_input_recv);
+		if (retval != 0)
+			u132_hcd_giveback_urb(u132, endp, urb, retval);
+		return;
+	} else {
+		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
+				"unlinked=%d\n", urb, urb->unlinked);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_hcd_giveback_urb(u132, endp, urb, 0);
+		return;
+	}
+}
+
+/*
+ * This work function is only executed from the work queue.
+ */
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
+{
+	struct u132_ring *ring =
+		container_of(work, struct u132_ring, scheduler.work);
+	struct u132 *u132 = ring->u132;
+	mutex_lock(&u132->scheduler_lock);
+	if (ring->in_use) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_ring_put_kref(u132, ring);
+		return;
+	} else if (ring->curr_endp) {
+		struct u132_endp *endp, *last_endp = ring->curr_endp;
+		unsigned long wakeup = 0;
+		list_for_each_entry(endp, &last_endp->endp_ring, endp_ring) {
+			if (endp->queue_next == endp->queue_last)
+				continue;
+			if ((endp->delayed == 0)
+				|| time_after_eq(jiffies, endp->jiffies)) {
+				ring->curr_endp = endp;
+				u132_endp_cancel_work(u132, last_endp);
+				u132_endp_queue_work(u132, last_endp, 0);
+				mutex_unlock(&u132->scheduler_lock);
+				u132_ring_put_kref(u132, ring);
+				return;
+			} else {
+				unsigned long delta = endp->jiffies - jiffies;
+
+				if (delta > wakeup)
+					wakeup = delta;
+			}
+		}
+		if (last_endp->queue_next != last_endp->queue_last) {
+			if ((last_endp->delayed == 0) || time_after_eq(jiffies,
+				last_endp->jiffies)) {
+				u132_endp_cancel_work(u132, last_endp);
+				u132_endp_queue_work(u132, last_endp, 0);
+				mutex_unlock(&u132->scheduler_lock);
+				u132_ring_put_kref(u132, ring);
+				return;
+			} else {
+				unsigned long delta = last_endp->jiffies -
+					jiffies;
+
+				if (delta > wakeup)
+					wakeup = delta;
+			}
+		}
+		if (wakeup > 0) {
+			u132_ring_requeue_work(u132, ring, wakeup);
+			mutex_unlock(&u132->scheduler_lock);
+			return;
+		} else {
+			mutex_unlock(&u132->scheduler_lock);
+			u132_ring_put_kref(u132, ring);
+			return;
+		}
+	} else {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_ring_put_kref(u132, ring);
+		return;
+	}
+}
+
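+/*
+ * Per-endpoint work function: if the endpoint's ring is idle and URBs
+ * are queued, claim the ring and start the next transfer, dispatching
+ * by pipe type (interrupt, control in its three addressing states, or
+ * bulk in/out).
+ */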
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
+{
+	struct u132_ring *ring;
+	struct u132_endp *endp =
+		container_of(work, struct u132_endp, scheduler.work);
+	struct u132 *u132 = endp->u132;
+	mutex_lock(&u132->scheduler_lock);
+	ring = endp->ring;
+	if (endp->edset_flush) {
+		endp->edset_flush = 0;
+		if (endp->dequeueing)
+			usb_ftdi_elan_edset_flush(u132->platform_dev,
+				ring->number, endp);
+		mutex_unlock(&u132->scheduler_lock);
+		u132_endp_put_kref(u132, endp);
+		return;
+	} else if (endp->active) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_endp_put_kref(u132, endp);
+		return;
+	} else if (ring->in_use) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_endp_put_kref(u132, endp);
+		return;
+	} else if (endp->queue_next == endp->queue_last) {
+		mutex_unlock(&u132->scheduler_lock);
+		u132_endp_put_kref(u132, endp);
+		return;
+	} else if (endp->pipetype == PIPE_INTERRUPT) {
+		u8 address = u132->addr[endp->usb_addr].address;
+		if (ring->in_use) {
+			mutex_unlock(&u132->scheduler_lock);
+			u132_endp_put_kref(u132, endp);
+			return;
+		} else {
+			int retval;
+			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
+				endp->queue_next];
+			endp->active = 1;
+			ring->curr_endp = endp;
+			ring->in_use = 1;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = edset_single(u132, ring, endp, urb, address,
+				endp->toggle_bits, u132_hcd_interrupt_recv);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		}
+	} else if (endp->pipetype == PIPE_CONTROL) {
+		u8 address = u132->addr[endp->usb_addr].address;
+		if (ring->in_use) {
+			mutex_unlock(&u132->scheduler_lock);
+			u132_endp_put_kref(u132, endp);
+			return;
+		} else if (address == 0) {
+			int retval;
+			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
+				endp->queue_next];
+			endp->active = 1;
+			ring->curr_endp = endp;
+			ring->in_use = 1;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = edset_setup(u132, ring, endp, urb, address,
+				0x2, u132_hcd_initial_setup_sent);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else if (endp->usb_addr == 0) {
+			int retval;
+			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
+				endp->queue_next];
+			endp->active = 1;
+			ring->curr_endp = endp;
+			ring->in_use = 1;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
+				u132_hcd_enumeration_address_sent);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		} else {
+			int retval;
+			struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
+				endp->queue_next];
+			address = u132->addr[endp->usb_addr].address;
+			endp->active = 1;
+			ring->curr_endp = endp;
+			ring->in_use = 1;
+			mutex_unlock(&u132->scheduler_lock);
+			retval = edset_setup(u132, ring, endp, urb, address,
+				0x2, u132_hcd_configure_setup_sent);
+			if (retval != 0)
+				u132_hcd_giveback_urb(u132, endp, urb, retval);
+			return;
+		}
+	} else {
+		if (endp->input) {
+			u8 address = u132->addr[endp->usb_addr].address;
+			if (ring->in_use) {
+				mutex_unlock(&u132->scheduler_lock);
+				u132_endp_put_kref(u132, endp);
+				return;
+			} else {
+				int retval;
+				struct urb *urb = endp->urb_list[
+					ENDP_QUEUE_MASK & endp->queue_next];
+				endp->active = 1;
+				ring->curr_endp = endp;
+				ring->in_use = 1;
+				mutex_unlock(&u132->scheduler_lock);
+				retval = edset_input(u132, ring, endp, urb,
+					address, endp->toggle_bits,
+					u132_hcd_bulk_input_recv);
+				if (retval != 0)
+					u132_hcd_giveback_urb(u132, endp, urb,
+						retval);
+				return;
+			}
+		} else {	/* output pipe */
+			u8 address = u132->addr[endp->usb_addr].address;
+			if (ring->in_use) {
+				mutex_unlock(&u132->scheduler_lock);
+				u132_endp_put_kref(u132, endp);
+				return;
+			} else {
+				int retval;
+				struct urb *urb = endp->urb_list[
+					ENDP_QUEUE_MASK & endp->queue_next];
+				endp->active = 1;
+				ring->curr_endp = endp;
+				ring->in_use = 1;
+				mutex_unlock(&u132->scheduler_lock);
+				retval = edset_output(u132, ring, endp, urb,
+					address, endp->toggle_bits,
+					u132_hcd_bulk_output_sent);
+				if (retval != 0)
+					u132_hcd_giveback_urb(u132, endp, urb,
+						retval);
+				return;
+			}
+		}
+	}
+}
+#ifdef CONFIG_PM
+
+static void port_power(struct u132 *u132, int pn, int is_on)
+{
+	u132->port[pn].power = is_on;
+}
+
+#endif
+
+static void u132_power(struct u132 *u132, int is_on)
+{
+	/* hub is inactive unless the port is powered */
+	struct usb_hcd *hcd = u132_to_hcd(u132);
+	if (is_on) {
+		if (u132->power)
+			return;
+		u132->power = 1;
+	} else {
+		u132->power = 0;
+		hcd->state = HC_STATE_HALT;
+	}
+}
+
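+/*
+ * Reprogram the frame interval, toggling the FIT bit as the OHCI
+ * specification requires, and set periodicstart to 90% of the frame
+ * interval.
+ */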
+static int u132_periodic_reinit(struct u132 *u132)
+{
+	int retval;
+	u32 fi = u132->hc_fminterval & 0x03fff;
+	u32 fit;
+	u32 fminterval;
+	retval = u132_read_pcimem(u132, fminterval, &fminterval);
+	if (retval)
+		return retval;
+	fit = fminterval & FIT;
+	retval = u132_write_pcimem(u132, fminterval,
+		(fit ^ FIT) | u132->hc_fminterval);
+	if (retval)
+		return retval;
+	return u132_write_pcimem(u132, periodicstart,
+	       ((9 * fi) / 10) & 0x3fff);
+}
+
+static char *hcfs2string(int state)
+{
+	switch (state) {
+	case OHCI_USB_RESET:
+		return "reset";
+	case OHCI_USB_RESUME:
+		return "resume";
+	case OHCI_USB_OPER:
+		return "operational";
+	case OHCI_USB_SUSPEND:
+		return "suspend";
+	}
+	return "?";
+}
+
+static int u132_init(struct u132 *u132)
+{
+	int retval;
+	u32 control;
+	u132_disable(u132);
+	u132->next_statechange = jiffies;
+	retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, control, &control);
+	if (retval)
+		return retval;
+	if (u132->num_ports == 0) {
+		u32 rh_a = -1;
+		retval = u132_read_pcimem(u132, roothub.a, &rh_a);
+		if (retval)
+			return retval;
+		u132->num_ports = rh_a & RH_A_NDP;
+		retval = read_roothub_info(u132);
+		if (retval)
+			return retval;
+	}
+	if (u132->num_ports > MAX_U132_PORTS)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+/*
+ * Start an OHCI controller: reset the USB bus and the controller, set
+ * the bus operational, and enable interrupts.
+ */
+static int u132_run(struct u132 *u132)
+{
+	int retval;
+	u32 control;
+	u32 status;
+	u32 fminterval;
+	u32 periodicstart;
+	u32 cmdstatus;
+	u32 roothub_a;
+	int mask = OHCI_INTR_INIT;
+	int first = u132->hc_fminterval == 0;
+	int sleep_time = 0;
+	int reset_timeout = 30;	/* ... allow extra time */
+	u132_disable(u132);
+	if (first) {
+		u32 temp;
+		retval = u132_read_pcimem(u132, fminterval, &temp);
+		if (retval)
+			return retval;
+		u132->hc_fminterval = temp & 0x3fff;
+		u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16;
+	}
+	retval = u132_read_pcimem(u132, control, &u132->hc_control);
+	if (retval)
+		return retval;
+	dev_info(&u132->platform_dev->dev, "resetting from state '%s', control "
+		"= %08X\n", hcfs2string(u132->hc_control & OHCI_CTRL_HCFS),
+		u132->hc_control);
+	switch (u132->hc_control & OHCI_CTRL_HCFS) {
+	case OHCI_USB_OPER:
+		sleep_time = 0;
+		break;
+	case OHCI_USB_SUSPEND:
+	case OHCI_USB_RESUME:
+		u132->hc_control &= OHCI_CTRL_RWC;
+		u132->hc_control |= OHCI_USB_RESUME;
+		sleep_time = 10;
+		break;
+	default:
+		u132->hc_control &= OHCI_CTRL_RWC;
+		u132->hc_control |= OHCI_USB_RESET;
+		sleep_time = 50;
+		break;
+	}
+	retval = u132_write_pcimem(u132, control, u132->hc_control);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, control, &control);
+	if (retval)
+		return retval;
+	msleep(sleep_time);
+	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
+	if (retval)
+		return retval;
+	if (!(roothub_a & RH_A_NPS)) {
+		int temp;	/* power down each port */
+		for (temp = 0; temp < u132->num_ports; temp++) {
+			retval = u132_write_pcimem(u132,
+				roothub.portstatus[temp], RH_PS_LSDA);
+			if (retval)
+				return retval;
+		}
+	}
+	retval = u132_read_pcimem(u132, control, &control);
+	if (retval)
+		return retval;
+retry:
+	retval = u132_read_pcimem(u132, cmdstatus, &status);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR);
+	if (retval)
+		return retval;
+extra:	{
+		retval = u132_read_pcimem(u132, cmdstatus, &status);
+		if (retval)
+			return retval;
+		if (0 != (status & OHCI_HCR)) {
+			if (--reset_timeout == 0) {
+				dev_err(&u132->platform_dev->dev,
+					"USB HC reset timed out!\n");
+				return -ENODEV;
+			} else {
+				msleep(5);
+				goto extra;
+			}
+		}
+	}
+	if (u132->flags & OHCI_QUIRK_INITRESET) {
+		retval = u132_write_pcimem(u132, control, u132->hc_control);
+		if (retval)
+			return retval;
+		retval = u132_read_pcimem(u132, control, &control);
+		if (retval)
+			return retval;
+	}
+	retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, hcca, 0x00000000);
+	if (retval)
+		return retval;
+	retval = u132_periodic_reinit(u132);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, fminterval, &fminterval);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, periodicstart, &periodicstart);
+	if (retval)
+		return retval;
+	if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) {
+		if (!(u132->flags & OHCI_QUIRK_INITRESET)) {
+			u132->flags |= OHCI_QUIRK_INITRESET;
+			goto retry;
+		} else
+			dev_err(&u132->platform_dev->dev,
+				"init err(%08x %04x)\n", fminterval,
+				periodicstart);
+	}			/* start controller operations */
+	u132->hc_control &= OHCI_CTRL_RWC;
+	u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER;
+	retval = u132_write_pcimem(u132, control, u132->hc_control);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, control, &control);
+	if (retval)
+		return retval;
+	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
+	retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, intrstatus, mask);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, intrdisable,
+		OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO |
+		OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH |
+		OHCI_INTR_SO);
+	if (retval)
+		return retval;	/* handle root hub init quirks ... */
+	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
+	if (retval)
+		return retval;
+	roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
+	if (u132->flags & OHCI_QUIRK_SUPERIO) {
+		roothub_a |= RH_A_NOCP;
+		roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
+		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
+		if (retval)
+			return retval;
+	} else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
+		roothub_a |= RH_A_NPS;
+		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
+		if (retval)
+			return retval;
+	}
+	retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
+	if (retval)
+		return retval;
+	retval = u132_write_pcimem(u132, roothub.b,
+		(roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
+	if (retval)
+		return retval;
+	retval = u132_read_pcimem(u132, control, &control);
+	if (retval)
+		return retval;
+	mdelay((roothub_a >> 23) & 0x1fe);
+	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
+	return 0;
+}
+
+static void u132_hcd_stop(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"u132 device %p(hcd=%p) has been removed %d\n", u132,
+			hcd, u132->going);
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev,
+			"device hcd=%p is being removed\n", hcd);
+	} else {
+		mutex_lock(&u132->sw_lock);
+		msleep(100);
+		u132_power(u132, 0);
+		mutex_unlock(&u132->sw_lock);
+	}
+}
+
+static int u132_hcd_start(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else if (hcd->self.controller) {
+		int retval;
+		struct platform_device *pdev =
+			to_platform_device(hcd->self.controller);
+		u16 vendor = ((struct u132_platform_data *)
+			dev_get_platdata(&pdev->dev))->vendor;
+		u16 device = ((struct u132_platform_data *)
+			dev_get_platdata(&pdev->dev))->device;
+		mutex_lock(&u132->sw_lock);
+		msleep(10);
+		if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
+			u132->flags = OHCI_QUIRK_AMD756;
+		} else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) {
+			dev_err(&u132->platform_dev->dev,
+				"WARNING: OPTi workarounds unavailable\n");
+		} else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8)
+			u132->flags |= OHCI_QUIRK_ZFMICRO;
+		retval = u132_run(u132);
+		if (retval) {
+			u132_disable(u132);
+			u132->going = 1;
+		}
+		msleep(100);
+		mutex_unlock(&u132->sw_lock);
+		return retval;
+	} else {
+		dev_err(&u132->platform_dev->dev, "platform_device missing\n");
+		return -ENODEV;
+	}
+}
+
+static int u132_hcd_reset(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else {
+		int retval;
+		mutex_lock(&u132->sw_lock);
+		retval = u132_init(u132);
+		if (retval) {
+			u132_disable(u132);
+			u132->going = 1;
+		}
+		mutex_unlock(&u132->sw_lock);
+		return retval;
+	}
+}
+
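+/*
+ * First interrupt URB for this endpoint: allocate a struct u132_endp,
+ * link it onto the first transfer ring, and queue the URB with the
+ * polling interval as its initial delay.
+ */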
+static int create_endpoint_and_queue_int(struct u132 *u132,
+	struct u132_udev *udev, struct urb *urb,
+	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
+	gfp_t mem_flags)
+{
+	struct u132_ring *ring;
+	unsigned long irqs;
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
+	if (!endp)
+		return -ENOMEM;
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
+	INIT_LIST_HEAD(&endp->urb_more);
+	ring = endp->ring = &u132->ring[0];
+	if (ring->curr_endp) {
+		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
+	} else {
+		INIT_LIST_HEAD(&endp->endp_ring);
+		ring->curr_endp = endp;
+	}
+	ring->length += 1;
+	endp->dequeueing = 0;
+	endp->edset_flush = 0;
+	endp->active = 0;
+	endp->delayed = 0;
+	endp->endp_number = endp_number;
+	endp->u132 = u132;
+	endp->hep = urb->ep;
+	endp->pipetype = usb_pipetype(urb->pipe);
+	u132_endp_init_kref(u132, endp);
+	if (usb_pipein(urb->pipe)) {
+		endp->toggle_bits = 0x2;
+		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
+		endp->input = 1;
+		endp->output = 0;
+		udev->endp_number_in[usb_endp] = endp_number;
+		u132_udev_get_kref(u132, udev);
+	} else {
+		endp->toggle_bits = 0x2;
+		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
+		endp->input = 0;
+		endp->output = 1;
+		udev->endp_number_out[usb_endp] = endp_number;
+		u132_udev_get_kref(u132, udev);
+	}
+	urb->hcpriv = u132;
+	endp->delayed = 1;
+	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
+	endp->udev_number = address;
+	endp->usb_addr = usb_addr;
+	endp->usb_endp = usb_endp;
+	endp->queue_size = 1;
+	endp->queue_last = 0;
+	endp->queue_next = 0;
+	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+	u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
+	return 0;
+}
+
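+/*
+ * Queue another interrupt URB on an existing endpoint; once the fixed
+ * urb_list fills up, further URBs overflow onto the kmalloc'd urb_more
+ * chain.
+ */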
+static int queue_int_on_old_endpoint(struct u132 *u132,
+	struct u132_udev *udev, struct urb *urb,
+	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
+	u8 usb_endp, u8 address)
+{
+	urb->hcpriv = u132;
+	endp->delayed = 1;
+	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
+	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+	} else {
+		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
+			GFP_ATOMIC);
+		if (urbq == NULL) {
+			endp->queue_size -= 1;
+			return -ENOMEM;
+		} else {
+			list_add_tail(&urbq->urb_more, &endp->urb_more);
+			urbq->urb = urb;
+		}
+	}
+	return 0;
+}
+
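+/*
+ * First bulk URB for this endpoint: allocate a struct u132_endp and
+ * place it on the bulk IN or bulk OUT transfer ring according to the
+ * pipe direction before queueing the URB.
+ */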
+static int create_endpoint_and_queue_bulk(struct u132 *u132,
+	struct u132_udev *udev, struct urb *urb,
+	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
+	gfp_t mem_flags)
+{
+	int ring_number;
+	struct u132_ring *ring;
+	unsigned long irqs;
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
+	if (!endp)
+		return -ENOMEM;
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
+	INIT_LIST_HEAD(&endp->urb_more);
+	endp->dequeueing = 0;
+	endp->edset_flush = 0;
+	endp->active = 0;
+	endp->delayed = 0;
+	endp->endp_number = endp_number;
+	endp->u132 = u132;
+	endp->hep = urb->ep;
+	endp->pipetype = usb_pipetype(urb->pipe);
+	u132_endp_init_kref(u132, endp);
+	if (usb_pipein(urb->pipe)) {
+		endp->toggle_bits = 0x2;
+		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
+		ring_number = 3;
+		endp->input = 1;
+		endp->output = 0;
+		udev->endp_number_in[usb_endp] = endp_number;
+		u132_udev_get_kref(u132, udev);
+	} else {
+		endp->toggle_bits = 0x2;
+		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
+		ring_number = 2;
+		endp->input = 0;
+		endp->output = 1;
+		udev->endp_number_out[usb_endp] = endp_number;
+		u132_udev_get_kref(u132, udev);
+	}
+	ring = endp->ring = &u132->ring[ring_number - 1];
+	if (ring->curr_endp) {
+		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
+	} else {
+		INIT_LIST_HEAD(&endp->endp_ring);
+		ring->curr_endp = endp;
+	}
+	ring->length += 1;
+	urb->hcpriv = u132;
+	endp->udev_number = address;
+	endp->usb_addr = usb_addr;
+	endp->usb_endp = usb_endp;
+	endp->queue_size = 1;
+	endp->queue_last = 0;
+	endp->queue_next = 0;
+	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+	u132_endp_queue_work(u132, endp, 0);
+	return 0;
+}
+
+static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
+	struct urb *urb,
+	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
+	u8 usb_endp, u8 address)
+{
+	urb->hcpriv = u132;
+	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+	} else {
+		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
+			GFP_ATOMIC);
+		if (urbq == NULL) {
+			endp->queue_size -= 1;
+			return -ENOMEM;
+		} else {
+			list_add_tail(&urbq->urb_more, &endp->urb_more);
+			urbq->urb = urb;
+		}
+	}
+	return 0;
+}
+
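+/*
+ * First control URB for a device: allocate the shared control endpoint
+ * on the first transfer ring; usb_addr == 0 is the not-yet-addressed
+ * enumeration case.
+ */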
+static int create_endpoint_and_queue_control(struct u132 *u132,
+	struct urb *urb,
+	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
+	gfp_t mem_flags)
+{
+	struct u132_ring *ring;
+	unsigned long irqs;
+	int rc;
+	u8 endp_number;
+	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
+	if (!endp)
+		return -ENOMEM;
+
+	spin_lock_init(&endp->queue_lock.slock);
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		kfree(endp);
+		return rc;
+	}
+
+	endp_number = ++u132->num_endpoints;
+	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
+	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
+	INIT_LIST_HEAD(&endp->urb_more);
+	ring = endp->ring = &u132->ring[0];
+	if (ring->curr_endp) {
+		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
+	} else {
+		INIT_LIST_HEAD(&endp->endp_ring);
+		ring->curr_endp = endp;
+	}
+	ring->length += 1;
+	endp->dequeueing = 0;
+	endp->edset_flush = 0;
+	endp->active = 0;
+	endp->delayed = 0;
+	endp->endp_number = endp_number;
+	endp->u132 = u132;
+	endp->hep = urb->ep;
+	u132_endp_init_kref(u132, endp);
+	u132_endp_get_kref(u132, endp);
+	if (usb_addr == 0) {
+		u8 address = u132->addr[usb_addr].address;
+		struct u132_udev *udev = &u132->udev[address];
+		endp->udev_number = address;
+		endp->usb_addr = usb_addr;
+		endp->usb_endp = usb_endp;
+		endp->input = 1;
+		endp->output = 1;
+		endp->pipetype = usb_pipetype(urb->pipe);
+		u132_udev_init_kref(u132, udev);
+		u132_udev_get_kref(u132, udev);
+		udev->endp_number_in[usb_endp] = endp_number;
+		udev->endp_number_out[usb_endp] = endp_number;
+		urb->hcpriv = u132;
+		endp->queue_size = 1;
+		endp->queue_last = 0;
+		endp->queue_next = 0;
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		u132_endp_queue_work(u132, endp, 0);
+		return 0;
+	} else {		/*(usb_addr > 0) */
+		u8 address = u132->addr[usb_addr].address;
+		struct u132_udev *udev = &u132->udev[address];
+		endp->udev_number = address;
+		endp->usb_addr = usb_addr;
+		endp->usb_endp = usb_endp;
+		endp->input = 1;
+		endp->output = 1;
+		endp->pipetype = usb_pipetype(urb->pipe);
+		u132_udev_get_kref(u132, udev);
+		udev->enumeration = 2;
+		udev->endp_number_in[usb_endp] = endp_number;
+		udev->endp_number_out[usb_endp] = endp_number;
+		urb->hcpriv = u132;
+		endp->queue_size = 1;
+		endp->queue_last = 0;
+		endp->queue_next = 0;
+		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		u132_endp_queue_work(u132, endp, 0);
+		return 0;
+	}
+}
+
+static int queue_control_on_old_endpoint(struct u132 *u132,
+	struct urb *urb,
+	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
+	u8 usb_endp)
+{
+	if (usb_addr == 0) {
+		if (usb_pipein(urb->pipe)) {
+			urb->hcpriv = u132;
+			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
+				endp->urb_list[ENDP_QUEUE_MASK &
+					endp->queue_last++] = urb;
+			} else {
+				struct u132_urbq *urbq =
+					kmalloc(sizeof(struct u132_urbq),
+					GFP_ATOMIC);
+				if (urbq == NULL) {
+					endp->queue_size -= 1;
+					return -ENOMEM;
+				} else {
+					list_add_tail(&urbq->urb_more,
+						&endp->urb_more);
+					urbq->urb = urb;
+				}
+			}
+			return 0;
+		} else {	/* usb_pipeout(urb->pipe) */
+			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
+			int I = MAX_U132_UDEVS;
+			int i = 0;
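+			/*
+			 * Claim the first free udev slot and substitute its
+			 * index as the device address in the outgoing
+			 * SET_ADDRESS request (byte 2 of the SETUP packet).
+			 */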
+			while (--I > 0) {
+				struct u132_udev *udev = &u132->udev[++i];
+				if (udev->usb_device) {
+					continue;
+				} else {
+					udev->enumeration = 1;
+					u132->addr[0].address = i;
+					endp->udev_number = i;
+					udev->udev_number = i;
+					udev->usb_addr = usb_dev->devnum;
+					u132_udev_init_kref(u132, udev);
+					udev->endp_number_in[usb_endp] =
+						endp->endp_number;
+					u132_udev_get_kref(u132, udev);
+					udev->endp_number_out[usb_endp] =
+						endp->endp_number;
+					udev->usb_device = usb_dev;
+					((u8 *) (urb->setup_packet))[2] =
+						addr->address = i;
+					u132_udev_get_kref(u132, udev);
+					break;
+				}
+			}
+			if (I == 0) {
+				dev_err(&u132->platform_dev->dev,
+					"ran out of device space\n");
+				return -EINVAL;
+			}
+			urb->hcpriv = u132;
+			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
+				endp->urb_list[ENDP_QUEUE_MASK &
+					endp->queue_last++] = urb;
+			} else {
+				struct u132_urbq *urbq =
+					kmalloc(sizeof(struct u132_urbq),
+					GFP_ATOMIC);
+				if (urbq == NULL) {
+					endp->queue_size -= 1;
+					return -ENOMEM;
+				} else {
+					list_add_tail(&urbq->urb_more,
+						&endp->urb_more);
+					urbq->urb = urb;
+				}
+			}
+			return 0;
+		}
+	} else {		/*(usb_addr > 0) */
+		u8 address = u132->addr[usb_addr].address;
+		struct u132_udev *udev = &u132->udev[address];
+		urb->hcpriv = u132;
+		if (udev->enumeration != 2)
+			udev->enumeration = 2;
+		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
+			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
+				urb;
+		} else {
+			struct u132_urbq *urbq =
+				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
+			if (urbq == NULL) {
+				endp->queue_size -= 1;
+				return -ENOMEM;
+			} else {
+				list_add_tail(&urbq->urb_more, &endp->urb_more);
+				urbq->urb = urb;
+			}
+		}
+		return 0;
+	}
+}
+
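+/*
+ * hc_driver enqueue entry point: after a sanity check on the allocation
+ * context, route the URB to the interrupt, bulk, or control queueing
+ * path; isochronous transfers are not supported by this hardware.
+ */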
+static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+		gfp_t mem_flags)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (irqs_disabled()) {
+		if (gfpflags_allow_blocking(mem_flags)) {
+			printk(KERN_ERR "invalid context for function that might sleep\n");
+			return -EINVAL;
+		}
+	}
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed "
+				"urb=%p\n", urb);
+		return -ESHUTDOWN;
+	} else {
+		u8 usb_addr = usb_pipedevice(urb->pipe);
+		u8 usb_endp = usb_pipeendpoint(urb->pipe);
+		struct usb_device *usb_dev = urb->dev;
+		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+			u8 address = u132->addr[usb_addr].address;
+			struct u132_udev *udev = &u132->udev[address];
+			struct u132_endp *endp = urb->ep->hcpriv;
+			urb->actual_length = 0;
+			if (endp) {
+				unsigned long irqs;
+				int retval;
+				spin_lock_irqsave(&endp->queue_lock.slock,
+					irqs);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_int_on_old_endpoint(
+							u132, udev, urb,
+							usb_dev, endp,
+							usb_addr, usb_endp,
+							address);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+	hcd, urb);
+				}
+				spin_unlock_irqrestore(&endp->queue_lock.slock,
+					irqs);
+				if (retval) {
+					return retval;
+				} else {
+					u132_endp_queue_work(u132, endp,
+						msecs_to_jiffies(urb->interval))
+						;
+					return 0;
+				}
+			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
+				return -EINVAL;
+			} else {	/*(endp == NULL) */
+				return create_endpoint_and_queue_int(u132, udev,
+						urb, usb_dev, usb_addr,
+						usb_endp, address, mem_flags);
+			}
+		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+			dev_err(&u132->platform_dev->dev,
+				"the hardware does not support PIPE_ISOCHRONOUS\n");
+			return -EINVAL;
+		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
+			u8 address = u132->addr[usb_addr].address;
+			struct u132_udev *udev = &u132->udev[address];
+			struct u132_endp *endp = urb->ep->hcpriv;
+			urb->actual_length = 0;
+			if (endp) {
+				unsigned long irqs;
+				int retval;
+				spin_lock_irqsave(&endp->queue_lock.slock,
+					irqs);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_bulk_on_old_endpoint(
+							u132, udev, urb,
+							usb_dev, endp,
+							usb_addr, usb_endp,
+							address);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+	hcd, urb);
+				}
+				spin_unlock_irqrestore(&endp->queue_lock.slock,
+					irqs);
+				if (retval) {
+					return retval;
+				} else {
+					u132_endp_queue_work(u132, endp, 0);
+					return 0;
+				}
+			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
+				return -EINVAL;
+			} else
+				return create_endpoint_and_queue_bulk(u132,
+					udev, urb, usb_dev, usb_addr,
+					usb_endp, address, mem_flags);
+		} else {
+			struct u132_endp *endp = urb->ep->hcpriv;
+			u16 urb_size = 8;
+			u8 *b = urb->setup_packet;
+			int i = 0;
+			char data[30 * 3 + 4];
+			char *d = data;
+			int m = (sizeof(data) - 1) / 3;
+			int l = 0;
+			data[0] = 0;
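+			/*
+			 * Render the SETUP packet as hex into a local buffer;
+			 * the buffer is only built here, not consumed in this
+			 * path.
+			 */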
+			while (urb_size-- > 0) {
+				if (i < m) {
+					int w = sprintf(d, " %02X", *b++);
+
+					d += w;
+					l += w;
+					i++;
+				} else if (i++ == m)
+					d += sprintf(d, " ..");
+			}
+			if (endp) {
+				unsigned long irqs;
+				int retval;
+				spin_lock_irqsave(&endp->queue_lock.slock,
+					irqs);
+				retval = usb_hcd_link_urb_to_ep(hcd, urb);
+				if (retval == 0) {
+					retval = queue_control_on_old_endpoint(
+							u132, urb, usb_dev,
+							endp, usb_addr,
+							usb_endp);
+					if (retval)
+						usb_hcd_unlink_urb_from_ep(
+								hcd, urb);
+				}
+				spin_unlock_irqrestore(&endp->queue_lock.slock,
+					irqs);
+				if (retval) {
+					return retval;
+				} else {
+					u132_endp_queue_work(u132, endp, 0);
+					return 0;
+				}
+			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
+				return -EINVAL;
+			} else
+				return create_endpoint_and_queue_control(u132,
+					urb, usb_dev, usb_addr, usb_endp,
+					mem_flags);
+		}
+	}
+}
+
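+/*
+ * Search the endpoint's overflow chain for the URB being unlinked and,
+ * if found, remove it and give it back to the USB core.
+ */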
+static int dequeue_from_overflow_chain(struct u132 *u132,
+	struct u132_endp *endp, struct urb *urb)
+{
+	struct u132_urbq *urbq;
+
+	list_for_each_entry(urbq, &endp->urb_more, urb_more) {
+		if (urbq->urb == urb) {
+			struct usb_hcd *hcd = u132_to_hcd(u132);
+
+			list_del(&urbq->urb_more);
+			endp->queue_size -= 1;
+			urb->error_count = 0;
+			usb_hcd_giveback_urb(hcd, urb, 0);
+			return 0;
+		}
+	}
+	dev_err(&u132->platform_dev->dev,
+		"urb=%p not found in endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X\n",
+		urb, endp->endp_number, endp, endp->ring->number,
+		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
+		endp->usb_endp, endp->usb_addr, endp->queue_size,
+		endp->queue_next, endp->queue_last);
+	return -EINVAL;
+}
+
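+/*
+ * Unlink a URB from an endpoint's queue: an active head URB is flushed
+ * asynchronously via the endpoint scheduler, while a queued URB is
+ * removed by compacting the circular urb_list and refilling it from the
+ * overflow chain.
+ */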
+static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
+		struct urb *urb, int status)
+{
+	unsigned long irqs;
+	int rc;
+
+	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
+	if (rc) {
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		return rc;
+	}
+	if (endp->queue_size == 0) {
+		dev_err(&u132->platform_dev->dev,
+			"urb=%p not found in endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n",
+			urb,
+			endp->endp_number, endp, endp->ring->number,
+			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
+			endp->usb_endp, endp->usb_addr);
+		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+		return -EINVAL;
+	}
+	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
+		if (endp->active) {
+			endp->dequeueing = 1;
+			endp->edset_flush = 1;
+			u132_endp_queue_work(u132, endp, 0);
+			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+			return 0;
+		} else {
+			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+			u132_hcd_abandon_urb(u132, endp, urb, status);
+			return 0;
+		}
+	} else {
+		u16 queue_list = 0;
+		u16 queue_size = endp->queue_size;
+		u16 queue_scan = endp->queue_next;
+		struct urb **urb_slot = NULL;
+		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
+			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
+				++queue_scan]) {
+				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
+					queue_scan];
+				break;
+			}
+		}
+		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
+			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
+				++queue_scan];
+			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
+				queue_scan];
+		}
+		if (urb_slot) {
+			struct usb_hcd *hcd = u132_to_hcd(u132);
+
+			usb_hcd_unlink_urb_from_ep(hcd, urb);
+			endp->queue_size -= 1;
+			if (list_empty(&endp->urb_more)) {
+				spin_unlock_irqrestore(&endp->queue_lock.slock,
+					irqs);
+			} else {
+				struct list_head *next = endp->urb_more.next;
+				struct u132_urbq *urbq = list_entry(next,
+					struct u132_urbq, urb_more);
+				list_del(next);
+				*urb_slot = urbq->urb;
+				spin_unlock_irqrestore(&endp->queue_lock.slock,
+					irqs);
+				kfree(urbq);
+			}
+			urb->error_count = 0;
+			usb_hcd_giveback_urb(hcd, urb, status);
+			return 0;
+		} else if (list_empty(&endp->urb_more)) {
+			dev_err(&u132->platform_dev->dev,
+				"urb=%p not found in endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X\n",
+				urb,
+				endp->endp_number, endp, endp->ring->number,
+				endp->input ? 'I' : ' ',
+				endp->output ? 'O' : ' ', endp->usb_endp,
+				endp->usb_addr, endp->queue_size,
+				endp->queue_next, endp->queue_last);
+			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+			return -EINVAL;
+		} else {
+			int retval;
+
+			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
+			retval = dequeue_from_overflow_chain(u132, endp,
+				urb);
+			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+			return retval;
+		}
+	}
+}
+
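+/*
+ * hc_driver dequeue entry point: map the URB back to its endpoint via
+ * the per-device endp_number_in/out tables, then hand off to
+ * u132_endp_urb_dequeue().
+ */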
+static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 2) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		return -ENODEV;
+	} else {
+		u8 usb_addr = usb_pipedevice(urb->pipe);
+		u8 usb_endp = usb_pipeendpoint(urb->pipe);
+		u8 address = u132->addr[usb_addr].address;
+		struct u132_udev *udev = &u132->udev[address];
+		if (usb_pipein(urb->pipe)) {
+			u8 endp_number = udev->endp_number_in[usb_endp];
+			struct u132_endp *endp = u132->endp[endp_number - 1];
+			return u132_endp_urb_dequeue(u132, endp, urb, status);
+		} else {
+			u8 endp_number = udev->endp_number_out[usb_endp];
+			struct u132_endp *endp = u132->endp[endp_number - 1];
+			return u132_endp_urb_dequeue(u132, endp, urb, status);
+		}
+	}
+}
+
+static void u132_endpoint_disable(struct usb_hcd *hcd,
+	struct usb_host_endpoint *hep)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 2) {
+		dev_err(&u132->platform_dev->dev,
+			"u132 device %p(hcd=%p hep=%p) has been removed %d\n",
+			u132, hcd, hep,
+			u132->going);
+	} else {
+		struct u132_endp *endp = hep->hcpriv;
+		if (endp)
+			u132_endp_put_kref(u132, endp);
+	}
+}
+
+static int u132_get_frame(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
+			, u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else {
+		int frame = 0;
+		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
+		mdelay(100);
+		return frame;
+	}
+}
+
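+/*
+ * Build the root hub descriptor from the OHCI roothub.a and roothub.b
+ * registers, translating the power-switching and overcurrent bits into
+ * wHubCharacteristics flags.
+ */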
+static int u132_roothub_descriptor(struct u132 *u132,
+	struct usb_hub_descriptor *desc)
+{
+	int retval;
+	u16 temp;
+	u32 rh_a = -1;
+	u32 rh_b = -1;
+	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
+	if (retval)
+		return retval;
+	desc->bDescriptorType = USB_DT_HUB;
+	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
+	desc->bHubContrCurrent = 0;
+	desc->bNbrPorts = u132->num_ports;
+	temp = 1 + (u132->num_ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+	temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
+	if (rh_a & RH_A_NPS)
+		temp |= HUB_CHAR_NO_LPSM;
+	if (rh_a & RH_A_PSM)
+		temp |= HUB_CHAR_INDV_PORT_LPSM;
+	if (rh_a & RH_A_NOCP)
+		temp |= HUB_CHAR_NO_OCPM;
+	else if (rh_a & RH_A_OCPM)
+		temp |= HUB_CHAR_INDV_PORT_OCPM;
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
+	if (retval)
+		return retval;
+	memset(desc->u.hs.DeviceRemovable, 0xff,
+			sizeof(desc->u.hs.DeviceRemovable));
+	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
+	if (u132->num_ports > 7) {
+		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
+		desc->u.hs.DeviceRemovable[2] = 0xff;
+	} else
+		desc->u.hs.DeviceRemovable[1] = 0xff;
+	return 0;
+}
+
+static int u132_roothub_status(struct u132 *u132, __le32 *desc)
+{
+	u32 rh_status = -1;
+	int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
+	*desc = cpu_to_le32(rh_status);
+	return ret_status;
+}
+
+static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
+{
+	if (wIndex == 0 || wIndex > u132->num_ports) {
+		return -EINVAL;
+	} else {
+		int port = wIndex - 1;
+		u32 rh_portstatus = -1;
+		int ret_portstatus = u132_read_pcimem(u132,
+			roothub.portstatus[port], &rh_portstatus);
+		*desc = cpu_to_le32(rh_portstatus);
+		if (*(u16 *) (desc + 2)) {
+			dev_info(&u132->platform_dev->dev,
+				"Port %d Status Change = %08X\n", port, *desc);
+		}
+		return ret_portstatus;
+	}
+}
+
+/* this timer value might be vendor-specific ... */
+#define PORT_RESET_HW_MSEC 10
+#define PORT_RESET_MSEC 10
+/* wrap-aware logic morphed from <linux/jiffies.h> */
+#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
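+
+/*
+ * Worked example of the wrap-aware test: with now = 0xfff8 and
+ * reset_done = now + 10 = 0x0002, tick_before(0xfff8, 0x0002) computes
+ * (s16)(0xfff8 - 0x0002) = (s16)0xfff6 = -10 < 0, i.e. "before", even
+ * though the 16-bit frame counter has wrapped past zero.
+ */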
+static int u132_roothub_portreset(struct u132 *u132, int port_index)
+{
+	int retval;
+	u32 fmnumber;
+	u16 now;
+	u16 reset_done;
+	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
+	if (retval)
+		return retval;
+	now = fmnumber;
+	reset_done = now + PORT_RESET_MSEC;
+	do {
+		u32 portstat;
+		do {
+			retval = u132_read_pcimem(u132,
+				roothub.portstatus[port_index], &portstat);
+			if (retval)
+				return retval;
+			if (!(RH_PS_PRS & portstat))
+				break;
+		} while (tick_before(now, reset_done));
+		if (RH_PS_PRS & portstat)
+			return -ENODEV;
+		if (RH_PS_CCS & portstat) {
+			if (RH_PS_PRSC & portstat) {
+				retval = u132_write_pcimem(u132,
+					roothub.portstatus[port_index],
+					RH_PS_PRSC);
+				if (retval)
+					return retval;
+			}
+		} else
+			break;	/* start the next reset,
+				sleep till it's probably done */
+		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
+			 RH_PS_PRS);
+		if (retval)
+			return retval;
+		msleep(PORT_RESET_HW_MSEC);
+		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
+		if (retval)
+			return retval;
+		now = fmnumber;
+	} while (tick_before(now, reset_done));
+	return 0;
+}
+
+static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
+	u16 wIndex)
+{
+	if (wIndex == 0 || wIndex > u132->num_ports) {
+		return -EINVAL;
+	} else {
+		int port_index = wIndex - 1;
+		struct u132_port *port = &u132->port[port_index];
+		port->Status &= ~(1 << wValue);
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			return u132_write_pcimem(u132,
+			       roothub.portstatus[port_index], RH_PS_PSS);
+		case USB_PORT_FEAT_POWER:
+			return u132_write_pcimem(u132,
+			       roothub.portstatus[port_index], RH_PS_PPS);
+		case USB_PORT_FEAT_RESET:
+			return u132_roothub_portreset(u132, port_index);
+		default:
+			return -EPIPE;
+		}
+	}
+}
+
+static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
+	u16 wIndex)
+{
+	if (wIndex == 0 || wIndex > u132->num_ports) {
+		return -EINVAL;
+	} else {
+		int port_index = wIndex - 1;
+		u32 temp;
+		struct u132_port *port = &u132->port[port_index];
+		port->Status &= ~(1 << wValue);
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			temp = RH_PS_CCS;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			temp = RH_PS_PESC;
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			temp = RH_PS_POCI;
+			if ((u132->hc_control & OHCI_CTRL_HCFS)
+				!= OHCI_USB_OPER) {
+				dev_err(&u132->platform_dev->dev,
+					"TODO resume_root_hub\n");
+			}
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			temp = RH_PS_PSSC;
+			break;
+		case USB_PORT_FEAT_POWER:
+			temp = RH_PS_LSDA;
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			temp = RH_PS_CSC;
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			temp = RH_PS_OCIC;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			temp = RH_PS_PRSC;
+			break;
+		default:
+			return -EPIPE;
+		}
+		return u132_write_pcimem(u132, roothub.portstatus[port_index],
+		       temp);
+	}
+}
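+
+/*
+ * The mappings above follow OHCI's write-to-clear convention for
+ * HcRhPortStatus: for example, clearing USB_PORT_FEAT_POWER is done by
+ * writing RH_PS_LSDA (ClearPortPower) and clearing USB_PORT_FEAT_ENABLE
+ * by writing RH_PS_CCS (ClearPortEnable), rather than by clearing bits
+ * in place.
+ */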
+
+/* the virtual root hub timer IRQ checks for hub status */
+static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device hcd=%p has been removed %d\n", hcd, u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev,
+			"device hcd=%p is being removed\n", hcd);
+		return -ESHUTDOWN;
+	} else {
+		int i, changed = 0, length = 1;
+		if (u132->flags & OHCI_QUIRK_AMD756) {
+			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
+				dev_err(&u132->platform_dev->dev,
+					"bogus NDP, rereads as NDP=%d\n",
+					u132->hc_roothub_a & RH_A_NDP);
+				goto done;
+			}
+		}
+		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
+			buf[0] = changed = 1;
+		else
+			buf[0] = 0;
+		if (u132->num_ports > 7) {
+			buf[1] = 0;
+			length++;
+		}
+		for (i = 0; i < u132->num_ports; i++) {
+			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
+				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
+				RH_PS_PRSC)) {
+				changed = 1;
+				if (i < 7)
+					buf[0] |= 1 << (i + 1);
+				else
+					buf[1] |= 1 << (i - 7);
+				continue;
+			}
+			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
+				continue;
+
+			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
+				continue;
+		}
+done:
+		return changed ? length : 0;
+	}
+}
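+
+/*
+ * The bitmap filled in above is the standard hub status-change layout:
+ * bit 0 of buf[0] reports a hub-level change and bit (n + 1) reports a
+ * change on port n.  For example, a connect-status change on the first
+ * port (i == 0) sets buf[0] to 0x02 and the function returns length 1.
+ */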
+
+static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+	u16 wIndex, char *buf, u16 wLength)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else {
+		int retval = 0;
+		mutex_lock(&u132->sw_lock);
+		switch (typeReq) {
+		case ClearHubFeature:
+			switch (wValue) {
+			case C_HUB_OVER_CURRENT:
+			case C_HUB_LOCAL_POWER:
+				break;
+			default:
+				goto stall;
+			}
+			break;
+		case SetHubFeature:
+			switch (wValue) {
+			case C_HUB_OVER_CURRENT:
+			case C_HUB_LOCAL_POWER:
+				break;
+			default:
+				goto stall;
+			}
+			break;
+		case ClearPortFeature:{
+				retval = u132_roothub_clearportfeature(u132,
+					wValue, wIndex);
+				if (retval)
+					goto error;
+				break;
+			}
+		case GetHubDescriptor:{
+				retval = u132_roothub_descriptor(u132,
+					(struct usb_hub_descriptor *)buf);
+				if (retval)
+					goto error;
+				break;
+			}
+		case GetHubStatus:{
+				retval = u132_roothub_status(u132,
+					(__le32 *) buf);
+				if (retval)
+					goto error;
+				break;
+			}
+		case GetPortStatus:{
+				retval = u132_roothub_portstatus(u132,
+					(__le32 *) buf, wIndex);
+				if (retval)
+					goto error;
+				break;
+			}
+		case SetPortFeature:{
+				retval = u132_roothub_setportfeature(u132,
+					wValue, wIndex);
+				if (retval)
+					goto error;
+				break;
+			}
+		default:
+			goto stall;
+		error:
+			u132_disable(u132);
+			u132->going = 1;
+			break;
+		stall:
+			retval = -EPIPE;
+			break;
+		}
+		mutex_unlock(&u132->sw_lock);
+		return retval;
+	}
+}
+
+static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else
+		return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int u132_bus_suspend(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else
+		return 0;
+}
+
+static int u132_bus_resume(struct usb_hcd *hcd)
+{
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else
+		return 0;
+}
+
+#else
+#define u132_bus_suspend NULL
+#define u132_bus_resume NULL
+#endif
+static const struct hc_driver u132_hc_driver = {
+	.description = hcd_name,
+	.hcd_priv_size = sizeof(struct u132),
+	.irq = NULL,
+	.flags = HCD_USB11 | HCD_MEMORY,
+	.reset = u132_hcd_reset,
+	.start = u132_hcd_start,
+	.stop = u132_hcd_stop,
+	.urb_enqueue = u132_urb_enqueue,
+	.urb_dequeue = u132_urb_dequeue,
+	.endpoint_disable = u132_endpoint_disable,
+	.get_frame_number = u132_get_frame,
+	.hub_status_data = u132_hub_status_data,
+	.hub_control = u132_hub_control,
+	.bus_suspend = u132_bus_suspend,
+	.bus_resume = u132_bus_resume,
+	.start_port_reset = u132_start_port_reset,
+};
+
+/*
+ * This function may be called by the USB core whilst the
+ * "usb_all_devices_rwsem" is held for writing, thus this module must not
+ * call usb_remove_hcd() synchronously - but instead should immediately
+ * stop activity to the device and asynchronously call usb_remove_hcd()
+ */
+static int u132_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	if (hcd) {
+		struct u132 *u132 = hcd_to_u132(hcd);
+		if (u132->going++ > 1) {
+			dev_err(&u132->platform_dev->dev,
+				"already being removed\n");
+			return -ENODEV;
+		} else {
+			int rings = MAX_U132_RINGS;
+			int endps = MAX_U132_ENDPS;
+			dev_err(&u132->platform_dev->dev,
+				"removing device u132.%d\n", u132->sequence_num);
+			msleep(100);
+			mutex_lock(&u132->sw_lock);
+			u132_monitor_cancel_work(u132);
+			while (rings-- > 0) {
+				struct u132_ring *ring = &u132->ring[rings];
+				u132_ring_cancel_work(u132, ring);
+			}
+			while (endps-- > 0) {
+				struct u132_endp *endp = u132->endp[endps];
+				if (endp)
+					u132_endp_cancel_work(u132, endp);
+			}
+			u132->going += 1;
+			printk(KERN_INFO "removing device u132.%d\n",
+				u132->sequence_num);
+			mutex_unlock(&u132->sw_lock);
+			usb_remove_hcd(hcd);
+			u132_u132_put_kref(u132);
+			return 0;
+		}
+	} else
+		return 0;
+}
+
+static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
+{
+	int rings = MAX_U132_RINGS;
+	int ports = MAX_U132_PORTS;
+	int addrs = MAX_U132_ADDRS;
+	int udevs = MAX_U132_UDEVS;
+	int endps = MAX_U132_ENDPS;
+	u132->board = dev_get_platdata(&pdev->dev);
+	u132->platform_dev = pdev;
+	u132->power = 0;
+	u132->reset = 0;
+	mutex_init(&u132->sw_lock);
+	mutex_init(&u132->scheduler_lock);
+	while (rings-- > 0) {
+		struct u132_ring *ring = &u132->ring[rings];
+		ring->u132 = u132;
+		ring->number = rings + 1;
+		ring->length = 0;
+		ring->curr_endp = NULL;
+		INIT_DELAYED_WORK(&ring->scheduler,
+				  u132_hcd_ring_work_scheduler);
+	}
+	mutex_lock(&u132->sw_lock);
+	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
+	while (ports-- > 0) {
+		struct u132_port *port = &u132->port[ports];
+		port->u132 = u132;
+		port->reset = 0;
+		port->enable = 0;
+		port->power = 0;
+		port->Status = 0;
+	}
+	while (addrs-- > 0) {
+		struct u132_addr *addr = &u132->addr[addrs];
+		addr->address = 0;
+	}
+	while (udevs-- > 0) {
+		struct u132_udev *udev = &u132->udev[udevs];
+		int i = ARRAY_SIZE(udev->endp_number_in);
+		int o = ARRAY_SIZE(udev->endp_number_out);
+		udev->usb_device = NULL;
+		udev->udev_number = 0;
+		udev->usb_addr = 0;
+		udev->portnumber = 0;
+		while (i-- > 0)
+			udev->endp_number_in[i] = 0;
+
+		while (o-- > 0)
+			udev->endp_number_out[o] = 0;
+
+	}
+	while (endps-- > 0)
+		u132->endp[endps] = NULL;
+
+	mutex_unlock(&u132->sw_lock);
+}
+
+static int u132_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	int retval;
+	u32 control;
+	u32 rh_a = -1;
+
+	msleep(100);
+	if (u132_exiting > 0)
+		return -ENODEV;
+
+	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
+	if (retval)
+		return retval;
+	retval = ftdi_read_pcimem(pdev, control, &control);
+	if (retval)
+		return retval;
+	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
+	if (retval)
+		return retval;
+	if (pdev->dev.dma_mask)
+		return -EINVAL;
+
+	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
+			);
+		ftdi_elan_gone_away(pdev);
+		return -ENOMEM;
+	} else {
+		struct u132 *u132 = hcd_to_u132(hcd);
+		retval = 0;
+		hcd->rsrc_start = 0;
+		mutex_lock(&u132_module_lock);
+		list_add_tail(&u132->u132_list, &u132_static_list);
+		u132->sequence_num = ++u132_instances;
+		mutex_unlock(&u132_module_lock);
+		u132_u132_init_kref(u132);
+		u132_initialise(u132, pdev);
+		hcd->product_desc = "ELAN U132 Host Controller";
+		retval = usb_add_hcd(hcd, 0, 0);
+		if (retval != 0) {
+			dev_err(&u132->platform_dev->dev, "init error %d\n",
+				retval);
+			u132_u132_put_kref(u132);
+			return retval;
+		} else {
+			device_wakeup_enable(hcd->self.controller);
+			u132_monitor_queue_work(u132, 100);
+			return 0;
+		}
+	}
+}
+
+#ifdef CONFIG_PM
+/*
+ * for this device there's no useful distinction between the controller
+ * and its root hub.
+ */
+static int u132_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else {
+		int retval = 0, ports;
+
+		switch (state.event) {
+		case PM_EVENT_FREEZE:
+			retval = u132_bus_suspend(hcd);
+			break;
+		case PM_EVENT_SUSPEND:
+		case PM_EVENT_HIBERNATE:
+			ports = MAX_U132_PORTS;
+			while (ports-- > 0) {
+				port_power(u132, ports, 0);
+			}
+			break;
+		}
+		return retval;
+	}
+}
+
+static int u132_resume(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct u132 *u132 = hcd_to_u132(hcd);
+	if (u132->going > 1) {
+		dev_err(&u132->platform_dev->dev,
+			"device has been removed %d\n", u132->going);
+		return -ENODEV;
+	} else if (u132->going > 0) {
+		dev_err(&u132->platform_dev->dev, "device is being removed\n");
+		return -ESHUTDOWN;
+	} else {
+		int retval = 0;
+		if (!u132->port[0].power) {
+			int ports = MAX_U132_PORTS;
+			while (ports-- > 0) {
+				port_power(u132, ports, 1);
+			}
+			retval = 0;
+		} else {
+			retval = u132_bus_resume(hcd);
+		}
+		return retval;
+	}
+}
+
+#else
+#define u132_suspend NULL
+#define u132_resume NULL
+#endif
+/*
+ * this driver is loaded explicitly by ftdi_u132
+ *
+ * the platform_driver struct is static because it is per type of module
+ */
+static struct platform_driver u132_platform_driver = {
+	.probe = u132_probe,
+	.remove = u132_remove,
+	.suspend = u132_suspend,
+	.resume = u132_resume,
+	.driver = {
+		   .name = hcd_name,
+		   },
+};
+static int __init u132_hcd_init(void)
+{
+	int retval;
+	INIT_LIST_HEAD(&u132_static_list);
+	u132_instances = 0;
+	u132_exiting = 0;
+	mutex_init(&u132_module_lock);
+	if (usb_disabled())
+		return -ENODEV;
+	printk(KERN_INFO "driver %s\n", hcd_name);
+	workqueue = create_singlethread_workqueue("u132");
+	if (!workqueue)
+		return -ENOMEM;
+	retval = platform_driver_register(&u132_platform_driver);
+	if (retval)
+		destroy_workqueue(workqueue);
+	return retval;
+}
+
+module_init(u132_hcd_init);
+static void __exit u132_hcd_exit(void)
+{
+	struct u132 *u132;
+	struct u132 *temp;
+	mutex_lock(&u132_module_lock);
+	u132_exiting += 1;
+	mutex_unlock(&u132_module_lock);
+	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
+		platform_device_unregister(u132->platform_dev);
+	}
+	platform_driver_unregister(&u132_platform_driver);
+	printk(KERN_INFO "u132-hcd driver deregistered\n");
+	wait_event(u132_hcd_wait, u132_instances == 0);
+	flush_workqueue(workqueue);
+	destroy_workqueue(workqueue);
+}
+
+module_exit(u132_hcd_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:u132_hcd");
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
new file mode 100644
index 0000000..c4e67c4
--- /dev/null
+++ b/drivers/usb/host/uhci-debug.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UHCI-specific debugging code. Invaluable when something
+ * goes wrong, but don't get in my face.
+ *
+ * Kernel visible pointers are surrounded in []s and bus
+ * visible pointers are surrounded in ()s
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2001 Johannes Erdfelt
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <asm/io.h>
+
+#include "uhci-hcd.h"
+
+#define EXTRA_SPACE	1024
+
+static struct dentry *uhci_debugfs_root;
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+
+/* Handle REALLY large printks so we don't overflow buffers */
+static void lprintk(char *buf)
+{
+	char *p;
+
+	/* Just write one line at a time */
+	while (buf) {
+		p = strchr(buf, '\n');
+		if (p)
+			*p = 0;
+		printk(KERN_DEBUG "%s\n", buf);
+		buf = p;
+		if (buf)
+			buf++;
+	}
+}
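+
+/*
+ * For example, lprintk("first\nsecond") emits two KERN_DEBUG lines,
+ * "first" and "second", instead of one oversized printk.
+ */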
+
+static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf,
+			int len, int space)
+{
+	char *out = buf;
+	char *spid;
+	u32 status, token;
+
+	status = td_status(uhci, td);
+	out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td,
+		hc32_to_cpu(uhci, td->link));
+	out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
+		((status >> 27) & 3),
+		(status & TD_CTRL_SPD) ?      "SPD " : "",
+		(status & TD_CTRL_LS) ?       "LS " : "",
+		(status & TD_CTRL_IOC) ?      "IOC " : "",
+		(status & TD_CTRL_ACTIVE) ?   "Active " : "",
+		(status & TD_CTRL_STALLED) ?  "Stalled " : "",
+		(status & TD_CTRL_DBUFERR) ?  "DataBufErr " : "",
+		(status & TD_CTRL_BABBLE) ?   "Babble " : "",
+		(status & TD_CTRL_NAK) ?      "NAK " : "",
+		(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
+		(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
+		status & 0x7ff);
+	if (out - buf > len)
+		goto done;
+
+	token = td_token(uhci, td);
+	switch (uhci_packetid(token)) {
+	case USB_PID_SETUP:
+		spid = "SETUP";
+		break;
+	case USB_PID_OUT:
+		spid = "OUT";
+		break;
+	case USB_PID_IN:
+		spid = "IN";
+		break;
+	default:
+		spid = "?";
+		break;
+	}
+
+	out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
+		token >> 21,
+		((token >> 19) & 1),
+		(token >> 15) & 15,
+		(token >> 8) & 127,
+		(token & 0xff),
+		spid);
+	out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer));
+
+done:
+	if (out - buf > len)
+		out += sprintf(out, " ...\n");
+	return out - buf;
+}
+
+static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp,
+			char *buf, int len, int space)
+{
+	char *out = buf;
+	struct uhci_td *td;
+	int i, nactive, ninactive;
+	char *ptype;
+
+	out += sprintf(out, "urb_priv [%p] ", urbp);
+	out += sprintf(out, "urb [%p] ", urbp->urb);
+	out += sprintf(out, "qh [%p] ", urbp->qh);
+	out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
+	out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
+			(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
+	if (out - buf > len)
+		goto done;
+
+	switch (usb_pipetype(urbp->urb->pipe)) {
+	case PIPE_ISOCHRONOUS: ptype = "ISO"; break;
+	case PIPE_INTERRUPT: ptype = "INT"; break;
+	case PIPE_BULK: ptype = "BLK"; break;
+	default:
+	case PIPE_CONTROL: ptype = "CTL"; break;
+	}
+
+	out += sprintf(out, "%s%s", ptype, (urbp->fsbr ? " FSBR" : ""));
+	out += sprintf(out, " Actlen=%d%s", urbp->urb->actual_length,
+			(urbp->qh->type == USB_ENDPOINT_XFER_CONTROL ?
+				"-8" : ""));
+
+	if (urbp->urb->unlinked)
+		out += sprintf(out, " Unlinked=%d", urbp->urb->unlinked);
+	out += sprintf(out, "\n");
+
+	if (out - buf > len)
+		goto done;
+
+	i = nactive = ninactive = 0;
+	list_for_each_entry(td, &urbp->td_list, list) {
+		if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC &&
+				(++i <= 10 || debug > 2)) {
+			out += sprintf(out, "%*s%d: ", space + 2, "", i);
+			out += uhci_show_td(uhci, td, out,
+					len - (out - buf), 0);
+			if (out - buf > len)
+				goto tail;
+		} else {
+			if (td_status(uhci, td) & TD_CTRL_ACTIVE)
+				++nactive;
+			else
+				++ninactive;
+		}
+	}
+	if (nactive + ninactive > 0)
+		out += sprintf(out,
+				"%*s[skipped %d inactive and %d active TDs]\n",
+				space, "", ninactive, nactive);
+done:
+	if (out - buf > len)
+		out += sprintf(out, " ...\n");
+tail:
+	return out - buf;
+}
+
+static int uhci_show_qh(struct uhci_hcd *uhci,
+		struct uhci_qh *qh, char *buf, int len, int space)
+{
+	char *out = buf;
+	int i, nurbs;
+	__hc32 element = qh_element(qh);
+	char *qtype;
+
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_ISOC: qtype = "ISO"; break;
+	case USB_ENDPOINT_XFER_INT: qtype = "INT"; break;
+	case USB_ENDPOINT_XFER_BULK: qtype = "BLK"; break;
+	case USB_ENDPOINT_XFER_CONTROL: qtype = "CTL"; break;
+	default: qtype = "Skel" ; break;
+	}
+
+	out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n",
+			space, "", qh, qtype,
+			hc32_to_cpu(uhci, qh->link),
+			hc32_to_cpu(uhci, element));
+	if (qh->type == USB_ENDPOINT_XFER_ISOC)
+		out += sprintf(out,
+				"%*s    period %d phase %d load %d us, frame %x desc [%p]\n",
+				space, "", qh->period, qh->phase, qh->load,
+				qh->iso_frame, qh->iso_packet_desc);
+	else if (qh->type == USB_ENDPOINT_XFER_INT)
+		out += sprintf(out, "%*s    period %d phase %d load %d us\n",
+				space, "", qh->period, qh->phase, qh->load);
+	if (out - buf > len)
+		goto done;
+
+	if (element & UHCI_PTR_QH(uhci))
+		out += sprintf(out, "%*s  Element points to QH (bug?)\n", space, "");
+
+	if (element & UHCI_PTR_DEPTH(uhci))
+		out += sprintf(out, "%*s  Depth traverse\n", space, "");
+
+	if (element & cpu_to_hc32(uhci, 8))
+		out += sprintf(out, "%*s  Bit 3 set (bug?)\n", space, "");
+
+	if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci))))
+		out += sprintf(out, "%*s  Element is NULL (bug?)\n", space, "");
+
+	if (out - buf > len)
+		goto done;
+
+	if (list_empty(&qh->queue)) {
+		out += sprintf(out, "%*s  queue is empty\n", space, "");
+		if (qh == uhci->skel_async_qh) {
+			out += uhci_show_td(uhci, uhci->term_td, out,
+					len - (out - buf), 0);
+			if (out - buf > len)
+				goto tail;
+		}
+	} else {
+		struct urb_priv *urbp = list_entry(qh->queue.next,
+				struct urb_priv, node);
+		struct uhci_td *td = list_entry(urbp->td_list.next,
+				struct uhci_td, list);
+
+		if (element != LINK_TO_TD(uhci, td))
+			out += sprintf(out, "%*s Element != First TD\n",
+					space, "");
+		i = nurbs = 0;
+		list_for_each_entry(urbp, &qh->queue, node) {
+			if (++i <= 10) {
+				out += uhci_show_urbp(uhci, urbp, out,
+						len - (out - buf), space + 2);
+				if (out - buf > len)
+					goto tail;
+			}
+			else
+				++nurbs;
+		}
+		if (nurbs > 0)
+			out += sprintf(out, "%*s Skipped %d URBs\n",
+					space, "", nurbs);
+	}
+
+	if (out - buf > len)
+		goto done;
+
+	if (qh->dummy_td) {
+		out += sprintf(out, "%*s  Dummy TD\n", space, "");
+		out += uhci_show_td(uhci, qh->dummy_td, out,
+				len - (out - buf), 0);
+		if (out - buf > len)
+			goto tail;
+	}
+
+done:
+	if (out - buf > len)
+		out += sprintf(out, " ...\n");
+tail:
+	return out - buf;
+}
+
+static int uhci_show_sc(int port, unsigned short status, char *buf)
+{
+	return sprintf(buf, "  stat%d     =     %04x  %s%s%s%s%s%s%s%s%s%s\n",
+		port,
+		status,
+		(status & USBPORTSC_SUSP) ?	" Suspend" : "",
+		(status & USBPORTSC_OCC) ?	" OverCurrentChange" : "",
+		(status & USBPORTSC_OC) ?	" OverCurrent" : "",
+		(status & USBPORTSC_PR) ?	" Reset" : "",
+		(status & USBPORTSC_LSDA) ?	" LowSpeed" : "",
+		(status & USBPORTSC_RD) ?	" ResumeDetect" : "",
+		(status & USBPORTSC_PEC) ?	" EnableChange" : "",
+		(status & USBPORTSC_PE) ?	" Enabled" : "",
+		(status & USBPORTSC_CSC) ?	" ConnectChange" : "",
+		(status & USBPORTSC_CCS) ?	" Connected" : "");
+}
+
+static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf)
+{
+	char *rh_state;
+
+	switch (uhci->rh_state) {
+	case UHCI_RH_RESET:
+		rh_state = "reset";		break;
+	case UHCI_RH_SUSPENDED:
+		rh_state = "suspended";		break;
+	case UHCI_RH_AUTO_STOPPED:
+		rh_state = "auto-stopped";	break;
+	case UHCI_RH_RESUMING:
+		rh_state = "resuming";		break;
+	case UHCI_RH_SUSPENDING:
+		rh_state = "suspending";	break;
+	case UHCI_RH_RUNNING:
+		rh_state = "running";		break;
+	case UHCI_RH_RUNNING_NODEVS:
+		rh_state = "running, no devs";	break;
+	default:
+		rh_state = "?";			break;
+	}
+	return sprintf(buf, "Root-hub state: %s   FSBR: %d\n",
+			rh_state, uhci->fsbr_is_on);
+}
+
+static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
+{
+	char *out = buf;
+	unsigned short usbcmd, usbstat, usbint, usbfrnum;
+	unsigned int flbaseadd;
+	unsigned char sof;
+	unsigned short portsc1, portsc2;
+
+	usbcmd    = uhci_readw(uhci, USBCMD);
+	usbstat   = uhci_readw(uhci, USBSTS);
+	usbint    = uhci_readw(uhci, USBINTR);
+	usbfrnum  = uhci_readw(uhci, USBFRNUM);
+	flbaseadd = uhci_readl(uhci, USBFLBASEADD);
+	sof       = uhci_readb(uhci, USBSOF);
+	portsc1   = uhci_readw(uhci, USBPORTSC1);
+	portsc2   = uhci_readw(uhci, USBPORTSC2);
+
+	out += sprintf(out, "  usbcmd    =     %04x   %s%s%s%s%s%s%s%s\n",
+		usbcmd,
+		(usbcmd & USBCMD_MAXP) ?    "Maxp64 " : "Maxp32 ",
+		(usbcmd & USBCMD_CF) ?      "CF " : "",
+		(usbcmd & USBCMD_SWDBG) ?   "SWDBG " : "",
+		(usbcmd & USBCMD_FGR) ?     "FGR " : "",
+		(usbcmd & USBCMD_EGSM) ?    "EGSM " : "",
+		(usbcmd & USBCMD_GRESET) ?  "GRESET " : "",
+		(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
+		(usbcmd & USBCMD_RS) ?      "RS " : "");
+	if (out - buf > len)
+		goto done;
+
+	out += sprintf(out, "  usbstat   =     %04x   %s%s%s%s%s%s\n",
+		usbstat,
+		(usbstat & USBSTS_HCH) ?    "HCHalted " : "",
+		(usbstat & USBSTS_HCPE) ?   "HostControllerProcessError " : "",
+		(usbstat & USBSTS_HSE) ?    "HostSystemError " : "",
+		(usbstat & USBSTS_RD) ?     "ResumeDetect " : "",
+		(usbstat & USBSTS_ERROR) ?  "USBError " : "",
+		(usbstat & USBSTS_USBINT) ? "USBINT " : "");
+	if (out - buf > len)
+		goto done;
+
+	out += sprintf(out, "  usbint    =     %04x\n", usbint);
+	out += sprintf(out, "  usbfrnum  =   (%d)%03x\n", (usbfrnum >> 10) & 1,
+		0xfff & (4*(unsigned int)usbfrnum));
+	out += sprintf(out, "  flbaseadd = %08x\n", flbaseadd);
+	out += sprintf(out, "  sof       =       %02x\n", sof);
+	if (out - buf > len)
+		goto done;
+
+	out += uhci_show_sc(1, portsc1, out);
+	if (out - buf > len)
+		goto done;
+
+	out += uhci_show_sc(2, portsc2, out);
+	if (out - buf > len)
+		goto done;
+
+	out += sprintf(out,
+			"Most recent frame: %x (%d)   Last ISO frame: %x (%d)\n",
+			uhci->frame_number, uhci->frame_number & 1023,
+			uhci->last_iso_frame, uhci->last_iso_frame & 1023);
+
+done:
+	if (out - buf > len)
+		out += sprintf(out, " ...\n");
+	return out - buf;
+}
+
+static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
+{
+	char *out = buf;
+	int i, j;
+	struct uhci_qh *qh;
+	struct uhci_td *td;
+	struct list_head *tmp, *head;
+	int nframes, nerrs;
+	__hc32 link;
+	__hc32 fsbr_link;
+
+	static const char * const qh_names[] = {
+		"unlink", "iso", "int128", "int64", "int32", "int16",
+		"int8", "int4", "int2", "async", "term"
+	};
+
+	out += uhci_show_root_hub_state(uhci, out);
+	if (out - buf > len)
+		goto done;
+	out += sprintf(out, "HC status\n");
+	out += uhci_show_status(uhci, out, len - (out - buf));
+	if (out - buf > len)
+		goto tail;
+
+	out += sprintf(out, "Periodic load table\n");
+	for (i = 0; i < MAX_PHASE; ++i) {
+		out += sprintf(out, "\t%d", uhci->load[i]);
+		if (i % 8 == 7)
+			*out++ = '\n';
+	}
+	out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
+			uhci->total_load,
+			uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
+			uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
+	if (debug <= 1)
+		goto tail;
+
+	out += sprintf(out, "Frame List\n");
+	nframes = 10;
+	nerrs = 0;
+	for (i = 0; i < UHCI_NUMFRAMES; ++i) {
+		__hc32 qh_dma;
+
+		if (out - buf > len)
+			goto done;
+		j = 0;
+		td = uhci->frame_cpu[i];
+		link = uhci->frame[i];
+		if (!td)
+			goto check_link;
+
+		if (nframes > 0) {
+			out += sprintf(out, "- Frame %d -> (%08x)\n",
+					i, hc32_to_cpu(uhci, link));
+			j = 1;
+		}
+
+		head = &td->fl_list;
+		tmp = head;
+		do {
+			td = list_entry(tmp, struct uhci_td, fl_list);
+			tmp = tmp->next;
+			if (link != LINK_TO_TD(uhci, td)) {
+				if (nframes > 0) {
+					out += sprintf(out,
+						"    link does not match list entry!\n");
+					if (out - buf > len)
+						goto done;
+				} else
+					++nerrs;
+			}
+			if (nframes > 0) {
+				out += uhci_show_td(uhci, td, out,
+						len - (out - buf), 4);
+				if (out - buf > len)
+					goto tail;
+			}
+			link = td->link;
+		} while (tmp != head);
+
+check_link:
+		qh_dma = uhci_frame_skel_link(uhci, i);
+		if (link != qh_dma) {
+			if (nframes > 0) {
+				if (!j) {
+					out += sprintf(out,
+						"- Frame %d -> (%08x)\n",
+						i, hc32_to_cpu(uhci, link));
+					j = 1;
+				}
+				out += sprintf(out,
+					"   link does not match QH (%08x)!\n",
+					hc32_to_cpu(uhci, qh_dma));
+				if (out - buf > len)
+					goto done;
+			} else
+				++nerrs;
+		}
+		nframes -= j;
+	}
+	if (nerrs > 0)
+		out += sprintf(out, "Skipped %d bad links\n", nerrs);
+
+	out += sprintf(out, "Skeleton QHs\n");
+
+	if (out - buf > len)
+		goto done;
+
+	fsbr_link = 0;
+	for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
+		int cnt = 0;
+
+		qh = uhci->skelqh[i];
+		out += sprintf(out, "- skel_%s_qh\n", qh_names[i]);
+		out += uhci_show_qh(uhci, qh, out, len - (out - buf), 4);
+		if (out - buf > len)
+			goto tail;
+
+		/* Last QH is the Terminating QH, it's different */
+		if (i == SKEL_TERM) {
+			if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td)) {
+				out += sprintf(out,
+					"    skel_term_qh element is not set to term_td!\n");
+				if (out - buf > len)
+					goto done;
+			}
+			link = fsbr_link;
+			if (!link)
+				link = LINK_TO_QH(uhci, uhci->skel_term_qh);
+			goto check_qh_link;
+		}
+
+		head = &qh->node;
+		tmp = head->next;
+
+		while (tmp != head) {
+			qh = list_entry(tmp, struct uhci_qh, node);
+			tmp = tmp->next;
+			if (++cnt <= 10) {
+				out += uhci_show_qh(uhci, qh, out,
+						len - (out - buf), 4);
+				if (out - buf > len)
+					goto tail;
+			}
+			if (!fsbr_link && qh->skel >= SKEL_FSBR)
+				fsbr_link = LINK_TO_QH(uhci, qh);
+		}
+		if ((cnt -= 10) > 0)
+			out += sprintf(out, "    Skipped %d QHs\n", cnt);
+
+		link = UHCI_PTR_TERM(uhci);
+		if (i <= SKEL_ISO)
+			;
+		else if (i < SKEL_ASYNC)
+			link = LINK_TO_QH(uhci, uhci->skel_async_qh);
+		else if (!uhci->fsbr_is_on)
+			;
+		else
+			link = LINK_TO_QH(uhci, uhci->skel_term_qh);
+check_qh_link:
+		if (qh->link != link)
+			out += sprintf(out,
+				"    last QH not linked to next skeleton!\n");
+
+		if (out - buf > len)
+			goto done;
+	}
+
+done:
+	if (out - buf > len)
+		out += sprintf(out, " ...\n");
+tail:
+	return out - buf;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MAX_OUTPUT	(64 * 1024)
+
+struct uhci_debug {
+	int size;
+	char *data;
+};
+
+static int uhci_debug_open(struct inode *inode, struct file *file)
+{
+	struct uhci_hcd *uhci = inode->i_private;
+	struct uhci_debug *up;
+	unsigned long flags;
+
+	up = kmalloc(sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return -ENOMEM;
+
+	up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
+	if (!up->data) {
+		kfree(up);
+		return -ENOMEM;
+	}
+
+	up->size = 0;
+	spin_lock_irqsave(&uhci->lock, flags);
+	if (uhci->is_initialized)
+		up->size = uhci_sprint_schedule(uhci, up->data,
+					MAX_OUTPUT - EXTRA_SPACE);
+	spin_unlock_irqrestore(&uhci->lock, flags);
+
+	file->private_data = up;
+
+	return 0;
+}
+
+static loff_t uhci_debug_lseek(struct file *file, loff_t off, int whence)
+{
+	struct uhci_debug *up = file->private_data;
+	return no_seek_end_llseek_size(file, off, whence, up->size);
+}
+
+static ssize_t uhci_debug_read(struct file *file, char __user *buf,
+				size_t nbytes, loff_t *ppos)
+{
+	struct uhci_debug *up = file->private_data;
+	return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
+}
+
+static int uhci_debug_release(struct inode *inode, struct file *file)
+{
+	struct uhci_debug *up = file->private_data;
+
+	kfree(up->data);
+	kfree(up);
+
+	return 0;
+}
+
+static const struct file_operations uhci_debug_operations = {
+	.owner =	THIS_MODULE,
+	.open =		uhci_debug_open,
+	.llseek =	uhci_debug_lseek,
+	.read =		uhci_debug_read,
+	.release =	uhci_debug_release,
+};
+#define UHCI_DEBUG_OPS
+
+#endif	/* CONFIG_DEBUG_FS */
+
+#else	/* CONFIG_DYNAMIC_DEBUG */
+
+static inline void lprintk(char *buf)
+{}
+
+static inline int uhci_show_qh(struct uhci_hcd *uhci,
+		struct uhci_qh *qh, char *buf, int len, int space)
+{
+	return 0;
+}
+
+static inline int uhci_sprint_schedule(struct uhci_hcd *uhci,
+		char *buf, int len)
+{
+	return 0;
+}
+
+#endif
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
new file mode 100644
index 0000000..2103b1e
--- /dev/null
+++ b/drivers/usb/host/uhci-grlib.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC
+ *
+ * Copyright (c) 2011 Jan Andersson <jan@gaisler.com>
+ *
+ * This file is based on UHCI PCI HCD:
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include <linux/device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+static int uhci_grlib_init(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	/*
+	 * Probe to determine the endianness of the controller.
+	 * We know that bit 7 of the PORTSC1 register is always set
+	 * and bit 15 is always clear.  If uhci_readw() yields a value
+	 * with bit 7 (0x80) turned on then the current little-endian
+	 * setting is correct.  Otherwise we assume the value was
+	 * byte-swapped; hence the register interface and presumably
+	 * also the descriptors are big-endian.
+	 */
+	if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) {
+		uhci->big_endian_mmio = 1;
+		uhci->big_endian_desc = 1;
+	}
+
+	uhci->rh_numports = uhci_count_ports(hcd);
+
+	/* Set up pointers to generic functions */
+	uhci->reset_hc = uhci_generic_reset_hc;
+	uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
+	/* No special actions need to be taken for the functions below */
+	uhci->configure_hc = NULL;
+	uhci->resume_detect_interrupts_are_broken = NULL;
+	uhci->global_suspend_mode_is_broken = NULL;
+
+	/* Reset if the controller isn't already safely quiescent. */
+	check_and_reset_hc(uhci);
+	return 0;
+}
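+
+/*
+ * Illustration of the probe above: a big-endian controller whose
+ * PORTSC1 register holds 0x0080 appears through a little-endian
+ * uhci_readw() as 0x8000, so bit 7 reads as clear and both
+ * big_endian flags are set.
+ */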
+
+static const struct hc_driver uhci_grlib_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"GRLIB GRUSBHC UHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct uhci_hcd),
+
+	/* Generic hardware linkage */
+	.irq =			uhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB11,
+
+	/* Basic lifecycle operations */
+	.reset =		uhci_grlib_init,
+	.start =		uhci_start,
+#ifdef CONFIG_PM
+	.pci_suspend =		NULL,
+	.pci_resume =		NULL,
+	.bus_suspend =		uhci_rh_suspend,
+	.bus_resume =		uhci_rh_resume,
+#endif
+	.stop =			uhci_stop,
+
+	.urb_enqueue =		uhci_urb_enqueue,
+	.urb_dequeue =		uhci_urb_dequeue,
+
+	.endpoint_disable =	uhci_hcd_endpoint_disable,
+	.get_frame_number =	uhci_hcd_get_frame_number,
+
+	.hub_status_data =	uhci_hub_status_data,
+	.hub_control =		uhci_hub_control,
+};
+
+static int uhci_hcd_grlib_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct usb_hcd *hcd;
+	struct uhci_hcd	*uhci = NULL;
+	struct resource res;
+	int irq;
+	int rv;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n");
+
+	rv = of_address_to_resource(dn, 0, &res);
+	if (rv)
+		return rv;
+
+	/* usb_create_hcd requires dma_mask != NULL */
+	op->dev.dma_mask = &op->dev.coherent_dma_mask;
+	hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev,
+			"GRUSBHC UHCI USB");
+	if (!hcd)
+		return -ENOMEM;
+
+	hcd->rsrc_start = res.start;
+	hcd->rsrc_len = resource_size(&res);
+
+	irq = irq_of_parse_and_map(dn, 0);
+	if (irq == NO_IRQ) {
+		printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
+		rv = -EBUSY;
+		goto err_usb;
+	}
+
+	hcd->regs = devm_ioremap_resource(&op->dev, &res);
+	if (IS_ERR(hcd->regs)) {
+		rv = PTR_ERR(hcd->regs);
+		goto err_irq;
+	}
+
+	uhci = hcd_to_uhci(hcd);
+
+	uhci->regs = hcd->regs;
+
+	rv = usb_add_hcd(hcd, irq, 0);
+	if (rv)
+		goto err_irq;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_irq:
+	irq_dispose_mapping(irq);
+err_usb:
+	usb_put_hcd(hcd);
+
+	return rv;
+}
+
+static int uhci_hcd_grlib_remove(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");
+
+	usb_remove_hcd(hcd);
+
+	irq_dispose_mapping(hcd->irq);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more.  This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel.  Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_hcd_grlib_shutdown(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+static const struct of_device_id uhci_hcd_grlib_of_match[] = {
+	{ .name = "GAISLER_UHCI", },
+	{ .name = "01_027", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);
+
+static struct platform_driver uhci_grlib_driver = {
+	.probe		= uhci_hcd_grlib_probe,
+	.remove		= uhci_hcd_grlib_remove,
+	.shutdown	= uhci_hcd_grlib_shutdown,
+	.driver = {
+		.name = "grlib-uhci",
+		.of_match_table = uhci_hcd_grlib_of_match,
+	},
+};
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
new file mode 100644
index 0000000..6218bfe
--- /dev/null
+++ b/drivers/usb/host/uhci-hcd.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Host Controller Interface driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ *
+ * Intel documents this fairly well, and as far as I know there
+ * are no royalties or anything like that, but even so there are
+ * people who decided that they want to do the same thing in a
+ * completely different way.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/pm.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/bitops.h>
+#include <linux/dmi.h>
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "uhci-hcd.h"
+
+/*
+ * Version Information
+ */
+#define DRIVER_AUTHOR							\
+	"Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, "		\
+	"Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, "	\
+	"Roman Weissgaerber, Alan Stern"
+#define DRIVER_DESC "USB Universal Host Controller Interface driver"
+
+/* for flakey hardware, ignore overcurrent indicators */
+static bool ignore_oc;
+module_param(ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
+
+/*
+ * debug = 0, no debugging messages
+ * debug = 1, dump failed URBs except for stalls
+ * debug = 2, dump all failed URBs (including stalls)
+ *            show all queues in /sys/kernel/debug/uhci/[pci_addr]
+ * debug = 3, show all TDs in URBs when dumping
+ */
+#ifdef CONFIG_DYNAMIC_DEBUG
+
+static int debug = 1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level");
+static char *errbuf;
+
+#else
+
+#define debug 0
+#define errbuf NULL
+
+#endif
+
+#define ERRBUF_LEN    (32 * 1024)
+
+static struct kmem_cache *uhci_up_cachep;	/* urb_priv */
+
+static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
+static void wakeup_rh(struct uhci_hcd *uhci);
+static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
+
+/*
+ * Calculate the link pointer DMA value for the first Skeleton QH in a frame.
+ */
+static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
+{
+	int skelnum;
+
+	/*
+	 * The interrupt queues will be interleaved as evenly as possible.
+	 * There's not much to be done about period-1 interrupts; they have
+	 * to occur in every frame.  But we can schedule period-2 interrupts
+	 * in odd-numbered frames, period-4 interrupts in frames congruent
+	 * to 2 (mod 4), and so on.  This way each frame only has two
+	 * interrupt QHs, which will help spread out bandwidth utilization.
+	 *
+	 * ffs (Find First bit Set) does exactly what we need:
+	 * 1,3,5,...  => ffs = 0 => use period-2 QH = skelqh[8],
+	 * 2,6,10,... => ffs = 1 => use period-4 QH = skelqh[7], etc.
+	 * ffs >= 7 => not on any high-period queue, so use
+	 *	period-1 QH = skelqh[9].
+	 * Add in UHCI_NUMFRAMES to ensure at least one bit is set.
+	 */
+	skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
+	if (skelnum <= 1)
+		skelnum = 9;
+	return LINK_TO_QH(uhci, uhci->skelqh[skelnum]);
+}
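+
+/*
+ * Sample frames for the math above: frame 1 gives __ffs(1 | 1024) = 0,
+ * so skelnum = 8 (the period-2 QH); frame 6 gives __ffs(6 | 1024) = 1,
+ * so skelnum = 7 (period-4); frame 0 gives __ffs(1024) = 10, so
+ * skelnum = -2, which is clamped to 9, the period-1 QH present in
+ * every frame.
+ */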
+
+#include "uhci-debug.c"
+#include "uhci-q.c"
+#include "uhci-hub.c"
+
+/*
+ * Finish up a host controller reset and update the recorded state.
+ */
+static void finish_reset(struct uhci_hcd *uhci)
+{
+	int port;
+
+	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
+	 * bits in the port status and control registers.
+	 * We have to clear them by hand.
+	 */
+	for (port = 0; port < uhci->rh_numports; ++port)
+		uhci_writew(uhci, 0, USBPORTSC1 + (port * 2));
+
+	uhci->port_c_suspend = uhci->resuming_ports = 0;
+	uhci->rh_state = UHCI_RH_RESET;
+	uhci->is_stopped = UHCI_IS_STOPPED;
+	clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+}
+
+/*
+ * Last rites for a defunct/nonfunctional controller
+ * or one we don't want to use any more.
+ */
+static void uhci_hc_died(struct uhci_hcd *uhci)
+{
+	uhci_get_current_frame_number(uhci);
+	uhci->reset_hc(uhci);
+	finish_reset(uhci);
+	uhci->dead = 1;
+
+	/* The current frame may already be partway finished */
+	++uhci->frame_number;
+}
+
+/*
+ * Initialize a controller that was newly discovered or has lost power
+ * or otherwise been reset while it was suspended.  In none of these cases
+ * can we be sure of its previous state.
+ */
+static void check_and_reset_hc(struct uhci_hcd *uhci)
+{
+	if (uhci->check_and_reset_hc(uhci))
+		finish_reset(uhci);
+}
+
+#if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC)
+/*
+ * The two functions below are generic reset functions that are used on systems
+ * that do not have keyboard and mouse legacy support. We assume that we are
+ * running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined.
+ */
+
+/*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+ */
+static void uhci_generic_reset_hc(struct uhci_hcd *uhci)
+{
+	/* Reset the HC - this will force us to get a
+	 * new notification of any already connected
+	 * ports due to the virtual disconnect that it
+	 * implies.
+	 */
+	uhci_writew(uhci, USBCMD_HCRESET, USBCMD);
+	mb();
+	udelay(5);
+	if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET)
+		dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n");
+
+	/* Just to be safe, disable interrupt requests and
+	 * make sure the controller is stopped.
+	 */
+	uhci_writew(uhci, 0, USBINTR);
+	uhci_writew(uhci, 0, USBCMD);
+}
+
+/*
+ * Initialize a controller that was newly discovered or has just been
+ * resumed.  In either case we can't be sure of its previous state.
+ *
+ * Returns: 1 if the controller was reset, 0 otherwise.
+ */
+static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci)
+{
+	unsigned int cmd, intr;
+
+	/*
+	 * When restarting a suspended controller, we expect all the
+	 * settings to be the same as we left them:
+	 *
+	 *	Controller is stopped and configured with EGSM set;
+	 *	No interrupts enabled except possibly Resume Detect.
+	 *
+	 * If any of these conditions are violated we do a complete reset.
+	 */
+
+	cmd = uhci_readw(uhci, USBCMD);
+	if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) {
+		dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n",
+				__func__, cmd);
+		goto reset_needed;
+	}
+
+	intr = uhci_readw(uhci, USBINTR);
+	if (intr & (~USBINTR_RESUME)) {
+		dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n",
+				__func__, intr);
+		goto reset_needed;
+	}
+	return 0;
+
+reset_needed:
+	dev_dbg(uhci_dev(uhci), "Performing full reset\n");
+	uhci_generic_reset_hc(uhci);
+	return 1;
+}
+#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
+
+/*
+ * Store the basic register settings needed by the controller.
+ */
+static void configure_hc(struct uhci_hcd *uhci)
+{
+	/* Set the frame length to the default: 1 ms exactly */
+	uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF);
+
+	/* Store the frame list base address */
+	uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD);
+
+	/* Set the current frame number */
+	uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER,
+			USBFRNUM);
+
+	/* perform any arch/bus specific configuration */
+	if (uhci->configure_hc)
+		uhci->configure_hc(uhci);
+}
+
+static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
+{
+	/*
+	 * If we have to ignore overcurrent events then almost by definition
+	 * we can't depend on resume-detect interrupts.
+	 *
+	 * Those interrupts also don't seem to work on ASpeed SoCs.
+	 */
+	if (ignore_oc || uhci_is_aspeed(uhci))
+		return 1;
+
+	return uhci->resume_detect_interrupts_are_broken ?
+		uhci->resume_detect_interrupts_are_broken(uhci) : 0;
+}
+
+static int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
+{
+	return uhci->global_suspend_mode_is_broken ?
+		uhci->global_suspend_mode_is_broken(uhci) : 0;
+}
+
+static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+	int auto_stop;
+	int int_enable, egsm_enable, wakeup_enable;
+	struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub;
+
+	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
+	dev_dbg(&rhdev->dev, "%s%s\n", __func__,
+			(auto_stop ? " (auto-stop)" : ""));
+
+	/* Start off by assuming Resume-Detect interrupts and EGSM work
+	 * and that remote wakeups should be enabled.
+	 */
+	egsm_enable = USBCMD_EGSM;
+	int_enable = USBINTR_RESUME;
+	wakeup_enable = 1;
+
+	/*
+	 * In auto-stop mode, we must be able to detect new connections.
+	 * The user can force us to poll by disabling remote wakeup;
+	 * otherwise we will use the EGSM/RD mechanism.
+	 */
+	if (auto_stop) {
+		if (!device_may_wakeup(&rhdev->dev))
+			egsm_enable = int_enable = 0;
+	}
+
+#ifdef CONFIG_PM
+	/*
+	 * In bus-suspend mode, we use the wakeup setting specified
+	 * for the root hub.
+	 */
+	else {
+		if (!rhdev->do_remote_wakeup)
+			wakeup_enable = 0;
+	}
+#endif
+
+	/*
+	 * UHCI doesn't distinguish between wakeup requests from downstream
+	 * devices and local connect/disconnect events.  There's no way to
+	 * enable one without the other; both are controlled by EGSM.  Thus
+	 * if wakeups are disallowed then EGSM must be turned off -- in which
+	 * case remote wakeup requests from downstream during system sleep
+	 * will be lost.
+	 *
+	 * In addition, if EGSM is broken then we can't use it.  Likewise,
+	 * if Resume-Detect interrupts are broken then we can't use them.
+	 *
+	 * Finally, neither EGSM nor RD is useful by itself.  Without EGSM,
+	 * the RD status bit will never get set.  Without RD, the controller
+	 * won't generate interrupts to tell the system about wakeup events.
+	 */
+	if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
+			resume_detect_interrupts_are_broken(uhci))
+		egsm_enable = int_enable = 0;
+
+	uhci->RD_enable = !!int_enable;
+	uhci_writew(uhci, int_enable, USBINTR);
+	uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD);
+	mb();
+	udelay(5);
+
+	/* If we're auto-stopping then no devices have been attached
+	 * for a while, so there shouldn't be any active URBs and the
+	 * controller should stop after a few microseconds.  Otherwise
+	 * we will give the controller one frame to stop.
+	 */
+	if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) {
+		uhci->rh_state = UHCI_RH_SUSPENDING;
+		spin_unlock_irq(&uhci->lock);
+		msleep(1);
+		spin_lock_irq(&uhci->lock);
+		if (uhci->dead)
+			return;
+	}
+	if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH))
+		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");
+
+	uhci_get_current_frame_number(uhci);
+
+	uhci->rh_state = new_state;
+	uhci->is_stopped = UHCI_IS_STOPPED;
+
+	/*
+	 * If remote wakeup is enabled but either EGSM or RD interrupts
+	 * doesn't work, then we won't get an interrupt when a wakeup event
+	 * occurs.  Thus the suspended root hub needs to be polled.
+	 */
+	if (wakeup_enable && (!int_enable || !egsm_enable))
+		set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+	else
+		clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+
+	uhci_scan_schedule(uhci);
+	uhci_fsbr_off(uhci);
+}
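+
+/*
+ * Net effect of the logic above: with remote wakeup allowed and both
+ * EGSM and Resume-Detect usable, the controller signals wakeup events
+ * itself; if either mechanism is broken, or this is an auto-stop with
+ * remote wakeup disabled, both are turned off and the suspended root
+ * hub is polled instead; and in a bus suspend with remote wakeup
+ * disallowed, everything stays off and nothing is polled.
+ */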
+
+static void start_rh(struct uhci_hcd *uhci)
+{
+	uhci->is_stopped = 0;
+
+	/*
+	 * Clear stale status bits on Aspeed as we get a stale HCH
+	 * which causes problems later on
+	 */
+	if (uhci_is_aspeed(uhci))
+		uhci_writew(uhci, uhci_readw(uhci, USBSTS), USBSTS);
+
+	/* Mark it configured and running with a 64-byte max packet.
+	 * All interrupts are enabled, even though RESUME won't do anything.
+	 */
+	uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD);
+	uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME |
+		USBINTR_IOC | USBINTR_SP, USBINTR);
+	mb();
+	uhci->rh_state = UHCI_RH_RUNNING;
+	set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
+}
+
+static void wakeup_rh(struct uhci_hcd *uhci)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+	dev_dbg(&uhci_to_hcd(uhci)->self.root_hub->dev,
+			"%s%s\n", __func__,
+			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
+				" (auto-start)" : "");
+
+	/* If we are auto-stopped then no devices are attached so there's
+	 * no need for wakeup signals.  Otherwise we send Global Resume
+	 * for 20 ms.
+	 */
+	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
+		unsigned egsm;
+
+		/* Keep EGSM on if it was set before */
+		egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM;
+		uhci->rh_state = UHCI_RH_RESUMING;
+		uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD);
+		spin_unlock_irq(&uhci->lock);
+		msleep(20);
+		spin_lock_irq(&uhci->lock);
+		if (uhci->dead)
+			return;
+
+		/* End Global Resume and wait for EOP to be sent */
+		uhci_writew(uhci, USBCMD_CF, USBCMD);
+		mb();
+		udelay(4);
+		if (uhci_readw(uhci, USBCMD) & USBCMD_FGR)
+			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
+	}
+
+	start_rh(uhci);
+
+	/* Restart root hub polling */
+	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
+}
+
+static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned short status;
+
+	/*
+	 * Read the interrupt status, and write it back to clear the
+	 * interrupt cause.  Contrary to the UHCI specification, the
+	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
+	 */
+	status = uhci_readw(uhci, USBSTS);
+	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
+		return IRQ_NONE;
+	uhci_writew(uhci, status, USBSTS);		/* Clear it */
+
+	spin_lock(&uhci->lock);
+	if (unlikely(!uhci->is_initialized))	/* not yet configured */
+		goto done;
+
+	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
+		if (status & USBSTS_HSE)
+			dev_err(uhci_dev(uhci),
+				"host system error, PCI problems?\n");
+		if (status & USBSTS_HCPE)
+			dev_err(uhci_dev(uhci),
+				"host controller process error, something bad happened!\n");
+		if (status & USBSTS_HCH) {
+			if (uhci->rh_state >= UHCI_RH_RUNNING) {
+				dev_err(uhci_dev(uhci),
+					"host controller halted, very bad!\n");
+				if (debug > 1 && errbuf) {
+					/* Print the schedule for debugging */
+					uhci_sprint_schedule(uhci, errbuf,
+						ERRBUF_LEN - EXTRA_SPACE);
+					lprintk(errbuf);
+				}
+				uhci_hc_died(uhci);
+				usb_hc_died(hcd);
+
+				/* Force a callback in case there are
+				 * pending unlinks */
+				mod_timer(&hcd->rh_timer, jiffies);
+			}
+		}
+	}
+
+	if (status & USBSTS_RD) {
+		spin_unlock(&uhci->lock);
+		usb_hcd_poll_rh_status(hcd);
+	} else {
+		uhci_scan_schedule(uhci);
+ done:
+		spin_unlock(&uhci->lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Store the current frame number in uhci->frame_number if the controller
+ * is running.  Expand from 11 bits (of which we use only 10) to a
+ * full-sized integer.
+ *
+ * Like many other parts of the driver, this code relies on being polled
+ * more than once per second as long as the controller is running.
+ */
+static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
+{
+	if (!uhci->is_stopped) {
+		unsigned delta;
+
+		delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) &
+				(UHCI_NUMFRAMES - 1);
+		uhci->frame_number += delta;
+	}
+}
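+
+/*
+ * Example of the expansion above: if the stored frame_number is 2040
+ * and the hardware register reads 8, then delta = (8 - 2040) & 1023 =
+ * 16, so frame_number becomes 2056, whose low 10 bits are again 8.
+ * This stays correct as long as fewer than 1024 frames (about one
+ * second) elapse between polls.
+ */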
+
+/*
+ * De-allocate all resources
+ */
+static void release_uhci(struct uhci_hcd *uhci)
+{
+	int i;
+
+	spin_lock_irq(&uhci->lock);
+	uhci->is_initialized = 0;
+	spin_unlock_irq(&uhci->lock);
+
+	debugfs_remove(uhci->dentry);
+
+	for (i = 0; i < UHCI_NUM_SKELQH; i++)
+		uhci_free_qh(uhci, uhci->skelqh[i]);
+
+	uhci_free_td(uhci, uhci->term_td);
+
+	dma_pool_destroy(uhci->qh_pool);
+
+	dma_pool_destroy(uhci->td_pool);
+
+	kfree(uhci->frame_cpu);
+
+	dma_free_coherent(uhci_dev(uhci),
+			UHCI_NUMFRAMES * sizeof(*uhci->frame),
+			uhci->frame, uhci->frame_dma_handle);
+}
+
+/*
+ * Allocate a frame list, and then setup the skeleton
+ *
+ * The hardware doesn't really know any difference
+ * in the queues, but the order does matter for the
+ * protocols higher up.  The order in which the queues
+ * are encountered by the hardware is:
+ *
+ *  - All isochronous events are handled before any
+ *    of the queues. We don't do that here, because
+ *    we'll create the actual TD entries on demand.
+ *  - The first queue is the high-period interrupt queue.
+ *  - The second queue is the period-1 interrupt and async
+ *    (low-speed control, full-speed control, then bulk) queue.
+ *  - The third queue is the terminating bandwidth reclamation queue,
+ *    which contains no members, loops back to itself, and is present
+ *    only when FSBR is on and there are no full-speed control or bulk QHs.
+ */
+static int uhci_start(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	int retval = -EBUSY;
+	int i;
+	struct dentry __maybe_unused *dentry;
+
+	hcd->uses_new_polling = 1;
+	/* Accept arbitrarily long scatter-gather lists */
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
+
+	spin_lock_init(&uhci->lock);
+	timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
+	INIT_LIST_HEAD(&uhci->idle_qh_list);
+	init_waitqueue_head(&uhci->waitqh);
+
+#ifdef UHCI_DEBUG_OPS
+	uhci->dentry = debugfs_create_file(hcd->self.bus_name,
+					   S_IFREG|S_IRUGO|S_IWUSR,
+					   uhci_debugfs_root, uhci,
+					   &uhci_debug_operations);
+#endif
+
+	uhci->frame = dma_zalloc_coherent(uhci_dev(uhci),
+			UHCI_NUMFRAMES * sizeof(*uhci->frame),
+			&uhci->frame_dma_handle, GFP_KERNEL);
+	if (!uhci->frame) {
+		dev_err(uhci_dev(uhci),
+			"unable to allocate consistent memory for frame list\n");
+		goto err_alloc_frame;
+	}
+
+	uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
+			GFP_KERNEL);
+	if (!uhci->frame_cpu)
+		goto err_alloc_frame_cpu;
+
+	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
+			sizeof(struct uhci_td), 16, 0);
+	if (!uhci->td_pool) {
+		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
+		goto err_create_td_pool;
+	}
+
+	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
+			sizeof(struct uhci_qh), 16, 0);
+	if (!uhci->qh_pool) {
+		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
+		goto err_create_qh_pool;
+	}
+
+	uhci->term_td = uhci_alloc_td(uhci);
+	if (!uhci->term_td) {
+		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
+		goto err_alloc_term_td;
+	}
+
+	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
+		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
+		if (!uhci->skelqh[i]) {
+			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
+			goto err_alloc_skelqh;
+		}
+	}
+
+	/*
+	 * 8 Interrupt queues; link all higher int queues to int1 = async
+	 */
+	for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
+		uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh);
+	uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci);
+	uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
+
+	/* This dummy TD is to work around a bug in Intel PIIX controllers */
+	uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) |
+			(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
+	uhci->term_td->link = UHCI_PTR_TERM(uhci);
+	uhci->skel_async_qh->element = uhci->skel_term_qh->element =
+		LINK_TO_TD(uhci, uhci->term_td);
+
+	/*
+	 * Fill the frame list: make all entries point to the proper
+	 * interrupt queue.
+	 */
+	for (i = 0; i < UHCI_NUMFRAMES; i++) {
+
+		/* Only place we don't use the frame list routines */
+		uhci->frame[i] = uhci_frame_skel_link(uhci, i);
+	}
+
+	/*
+	 * Some architectures require a full mb() to enforce completion of
+	 * the memory writes above before the I/O transfers in configure_hc().
+	 */
+	mb();
+
+	spin_lock_irq(&uhci->lock);
+	configure_hc(uhci);
+	uhci->is_initialized = 1;
+	start_rh(uhci);
+	spin_unlock_irq(&uhci->lock);
+	return 0;
+
+/*
+ * error exits:
+ */
+err_alloc_skelqh:
+	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
+		if (uhci->skelqh[i])
+			uhci_free_qh(uhci, uhci->skelqh[i]);
+	}
+
+	uhci_free_td(uhci, uhci->term_td);
+
+err_alloc_term_td:
+	dma_pool_destroy(uhci->qh_pool);
+
+err_create_qh_pool:
+	dma_pool_destroy(uhci->td_pool);
+
+err_create_td_pool:
+	kfree(uhci->frame_cpu);
+
+err_alloc_frame_cpu:
+	dma_free_coherent(uhci_dev(uhci),
+			UHCI_NUMFRAMES * sizeof(*uhci->frame),
+			uhci->frame, uhci->frame_dma_handle);
+
+err_alloc_frame:
+	debugfs_remove(uhci->dentry);
+
+	return retval;
+}
+
+static void uhci_stop(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	spin_lock_irq(&uhci->lock);
+	if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead)
+		uhci_hc_died(uhci);
+	uhci_scan_schedule(uhci);
+	spin_unlock_irq(&uhci->lock);
+	synchronize_irq(hcd->irq);
+
+	del_timer_sync(&uhci->fsbr_timer);
+	release_uhci(uhci);
+}
+
+#ifdef CONFIG_PM
+static int uhci_rh_suspend(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	int rc = 0;
+
+	spin_lock_irq(&uhci->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		rc = -ESHUTDOWN;
+	else if (uhci->dead)
+		;		/* Dead controllers tell no tales */
+
+	/* Once the controller is stopped, port resumes that are already
+	 * in progress won't complete.  Hence if remote wakeup is enabled
+	 * for the root hub and any ports are in the middle of a resume or
+	 * remote wakeup, we must fail the suspend.
+	 */
+	else if (hcd->self.root_hub->do_remote_wakeup &&
+			uhci->resuming_ports) {
+		dev_dbg(uhci_dev(uhci),
+			"suspend failed because a port is resuming\n");
+		rc = -EBUSY;
+	} else
+		suspend_rh(uhci, UHCI_RH_SUSPENDED);
+	spin_unlock_irq(&uhci->lock);
+	return rc;
+}
+
+static int uhci_rh_resume(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	int rc = 0;
+
+	spin_lock_irq(&uhci->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd))
+		rc = -ESHUTDOWN;
+	else if (!uhci->dead)
+		wakeup_rh(uhci);
+	spin_unlock_irq(&uhci->lock);
+	return rc;
+}
+
+#endif
+
+/* Wait until a particular device/endpoint's QH is idle, and free it */
+static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
+		struct usb_host_endpoint *hep)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	struct uhci_qh *qh;
+
+	spin_lock_irq(&uhci->lock);
+	qh = (struct uhci_qh *) hep->hcpriv;
+	if (qh == NULL)
+		goto done;
+
+	while (qh->state != QH_STATE_IDLE) {
+		++uhci->num_waiting;
+		spin_unlock_irq(&uhci->lock);
+		wait_event_interruptible(uhci->waitqh,
+				qh->state == QH_STATE_IDLE);
+		spin_lock_irq(&uhci->lock);
+		--uhci->num_waiting;
+	}
+
+	uhci_free_qh(uhci, qh);
+done:
+	spin_unlock_irq(&uhci->lock);
+}
+
+static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned frame_number;
+	unsigned delta;
+
+	/* Minimize latency by avoiding the spinlock */
+	frame_number = uhci->frame_number;
+	barrier();
+	delta = (uhci_readw(uhci, USBFRNUM) - frame_number) &
+			(UHCI_NUMFRAMES - 1);
+	return frame_number + delta;
+}
+
+/* Determines number of ports on controller */
+static int uhci_count_ports(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned io_size = (unsigned) hcd->rsrc_len;
+	int port;
+
+	/* The UHCI spec says devices must have 2 ports, and goes on to say
+	 * they may have more but gives no way to determine how many there
+	 * are.  However according to the UHCI spec, Bit 7 of the port
+	 * status and control register is always set to 1.  So we try to
+	 * use this to our advantage.  Another common failure mode when
+	 * a nonexistent register is addressed is to return all ones, so
+	 * we test for that also.
+	 */
+	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
+		unsigned int portstatus;
+
+		portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2));
+		if (!(portstatus & 0x0080) || portstatus == 0xffff)
+			break;
+	}
+	if (debug)
+		dev_info(uhci_dev(uhci), "detected %d ports\n", port);
+
+	/* Anything greater than 7 is weird so we'll ignore it. */
+	if (port > UHCI_RH_MAXCHILD) {
+		dev_info(uhci_dev(uhci),
+			"port count misdetected? forcing to 2 ports\n");
+		port = 2;
+	}
+
+	return port;
+}
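+
+/*
+ * Worked example (illustrative, assumed hardware): a typical PCI UHCI
+ * part decodes a 0x20-byte I/O window, so the loop above scans
+ * (0x20 - 0x10) / 2 = 8 candidate registers at offsets 0x10, 0x12, ...
+ * On a two-port controller the third read usually returns a value with
+ * bit 7 clear (or all ones), ending the scan with port == 2.
+ */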
+
+static const char hcd_name[] = "uhci_hcd";
+
+#ifdef CONFIG_USB_PCI
+#include "uhci-pci.c"
+#define	PCI_DRIVER		uhci_pci_driver
+#endif
+
+#ifdef CONFIG_SPARC_LEON
+#include "uhci-grlib.c"
+#define PLATFORM_DRIVER		uhci_grlib_driver
+#endif
+
+#ifdef CONFIG_USB_UHCI_PLATFORM
+#include "uhci-platform.c"
+#define PLATFORM_DRIVER		uhci_platform_driver
+#endif
+
+#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
+#error "missing bus glue for uhci-hcd"
+#endif
+
+static int __init uhci_hcd_init(void)
+{
+	int retval = -ENOMEM;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
+			ignore_oc ? ", overcurrent ignored" : "");
+	set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+	errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+	if (!errbuf)
+		goto errbuf_failed;
+	uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
+#endif
+
+	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
+		sizeof(struct urb_priv), 0, 0, NULL);
+	if (!uhci_up_cachep)
+		goto up_failed;
+
+#ifdef PLATFORM_DRIVER
+	retval = platform_driver_register(&PLATFORM_DRIVER);
+	if (retval < 0)
+		goto clean0;
+#endif
+
+#ifdef PCI_DRIVER
+	retval = pci_register_driver(&PCI_DRIVER);
+	if (retval < 0)
+		goto clean1;
+#endif
+
+	return 0;
+
+#ifdef PCI_DRIVER
+clean1:
+#endif
+#ifdef PLATFORM_DRIVER
+	platform_driver_unregister(&PLATFORM_DRIVER);
+clean0:
+#endif
+	kmem_cache_destroy(uhci_up_cachep);
+
+up_failed:
+#ifdef CONFIG_DYNAMIC_DEBUG
+	debugfs_remove(uhci_debugfs_root);
+
+	kfree(errbuf);
+
+errbuf_failed:
+#endif
+
+	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+	return retval;
+}
+
+static void __exit uhci_hcd_cleanup(void)
+{
+#ifdef PLATFORM_DRIVER
+	platform_driver_unregister(&PLATFORM_DRIVER);
+#endif
+#ifdef PCI_DRIVER
+	pci_unregister_driver(&PCI_DRIVER);
+#endif
+	kmem_cache_destroy(uhci_up_cachep);
+	debugfs_remove(uhci_debugfs_root);
+#ifdef CONFIG_DYNAMIC_DEBUG
+	kfree(errbuf);
+#endif
+	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
+}
+
+module_init(uhci_hcd_init);
+module_exit(uhci_hcd_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
new file mode 100644
index 0000000..7f9f33c
--- /dev/null
+++ b/drivers/usb/host/uhci-hcd.h
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_UHCI_HCD_H
+#define __LINUX_UHCI_HCD_H
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/clk.h>
+
+#define usb_packetid(pipe)	(usb_pipein(pipe) ? USB_PID_IN : USB_PID_OUT)
+#define PIPE_DEVEP_MASK		0x0007ff00
+
+
+/*
+ * Universal Host Controller Interface data structures and defines
+ */
+
+/* Command register */
+#define USBCMD		0
+#define   USBCMD_RS		0x0001	/* Run/Stop */
+#define   USBCMD_HCRESET	0x0002	/* Host reset */
+#define   USBCMD_GRESET		0x0004	/* Global reset */
+#define   USBCMD_EGSM		0x0008	/* Global Suspend Mode */
+#define   USBCMD_FGR		0x0010	/* Force Global Resume */
+#define   USBCMD_SWDBG		0x0020	/* SW Debug mode */
+#define   USBCMD_CF		0x0040	/* Config Flag (sw only) */
+#define   USBCMD_MAXP		0x0080	/* Max Packet (0 = 32, 1 = 64) */
+
+/* Status register */
+#define USBSTS		2
+#define   USBSTS_USBINT		0x0001	/* Interrupt due to IOC */
+#define   USBSTS_ERROR		0x0002	/* Interrupt due to error */
+#define   USBSTS_RD		0x0004	/* Resume Detect */
+#define   USBSTS_HSE		0x0008	/* Host System Error: PCI problems */
+#define   USBSTS_HCPE		0x0010	/* Host Controller Process Error:
+					 * the schedule is buggy */
+#define   USBSTS_HCH		0x0020	/* HC Halted */
+
+/* Interrupt enable register */
+#define USBINTR		4
+#define   USBINTR_TIMEOUT	0x0001	/* Timeout/CRC error enable */
+#define   USBINTR_RESUME	0x0002	/* Resume interrupt enable */
+#define   USBINTR_IOC		0x0004	/* Interrupt On Complete enable */
+#define   USBINTR_SP		0x0008	/* Short packet interrupt enable */
+
+#define USBFRNUM	6
+#define USBFLBASEADD	8
+#define USBSOF		12
+#define   USBSOF_DEFAULT	64	/* Frame length is exactly 1 ms */
+
+/* USB port status and control registers */
+#define USBPORTSC1	16
+#define USBPORTSC2	18
+#define USBPORTSC3	20
+#define USBPORTSC4	22
+#define   USBPORTSC_CCS		0x0001	/* Current Connect Status
+					 * ("device present") */
+#define   USBPORTSC_CSC		0x0002	/* Connect Status Change */
+#define   USBPORTSC_PE		0x0004	/* Port Enable */
+#define   USBPORTSC_PEC		0x0008	/* Port Enable Change */
+#define   USBPORTSC_DPLUS	0x0010	/* D+ high (line status) */
+#define   USBPORTSC_DMINUS	0x0020	/* D- high (line status) */
+#define   USBPORTSC_RD		0x0040	/* Resume Detect */
+#define   USBPORTSC_RES1	0x0080	/* reserved, always 1 */
+#define   USBPORTSC_LSDA	0x0100	/* Low Speed Device Attached */
+#define   USBPORTSC_PR		0x0200	/* Port Reset */
+/* OC and OCC from Intel 430TX and later (not UHCI 1.1d spec) */
+#define   USBPORTSC_OC		0x0400	/* Over Current condition */
+#define   USBPORTSC_OCC		0x0800	/* Over Current Change R/WC */
+#define   USBPORTSC_SUSP	0x1000	/* Suspend */
+#define   USBPORTSC_RES2	0x2000	/* reserved, write zeroes */
+#define   USBPORTSC_RES3	0x4000	/* reserved, write zeroes */
+#define   USBPORTSC_RES4	0x8000	/* reserved, write zeroes */
+
+/* PCI legacy support register */
+#define USBLEGSUP		0xc0
+#define   USBLEGSUP_DEFAULT	0x2000	/* only PIRQ enable set */
+#define   USBLEGSUP_RWC		0x8f00	/* the R/WC bits */
+#define   USBLEGSUP_RO		0x5040	/* R/O and reserved bits */
+
+/* PCI Intel-specific resume-enable register */
+#define USBRES_INTEL		0xc4
+#define   USBPORT1EN		0x01
+#define   USBPORT2EN		0x02
+
+#define UHCI_PTR_BITS(uhci)	cpu_to_hc32((uhci), 0x000F)
+#define UHCI_PTR_TERM(uhci)	cpu_to_hc32((uhci), 0x0001)
+#define UHCI_PTR_QH(uhci)	cpu_to_hc32((uhci), 0x0002)
+#define UHCI_PTR_DEPTH(uhci)	cpu_to_hc32((uhci), 0x0004)
+#define UHCI_PTR_BREADTH(uhci)	cpu_to_hc32((uhci), 0x0000)
+
+#define UHCI_NUMFRAMES		1024	/* in the frame list [array] */
+#define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
+#define CAN_SCHEDULE_FRAMES	1000	/* how far in the future frames
+					 * can be scheduled */
+#define MAX_PHASE		32	/* Periodic scheduling length */
+
+/* When no queues need Full-Speed Bandwidth Reclamation,
+ * delay this long before turning FSBR off */
+#define FSBR_OFF_DELAY		msecs_to_jiffies(10)
+
+/* If a queue hasn't advanced after this much time, assume it is stuck */
+#define QH_WAIT_TIMEOUT		msecs_to_jiffies(200)
+
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given UHCI_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+#else
+#define __hc32	__le32
+#define __hc16	__le16
+#endif
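+
+/*
+ * For illustration: with CONFIG_USB_UHCI_BIG_ENDIAN_DESC the __bitwise
+ * typedefs let sparse reject a raw assignment such as
+ *	qh->link = (u32) qh->dma_handle;	(missing byte swap)
+ * whereas qh->link = LINK_TO_QH(uhci, qh) converts explicitly through
+ * cpu_to_hc32() and is correct on either descriptor layout.
+ */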
+
+/*
+ *	Queue Headers
+ */
+
+/*
+ * One role of a QH is to hold a queue of TDs for some endpoint.  One QH goes
+ * with each endpoint, and qh->element (updated by the HC) is either:
+ *   - the next unprocessed TD in the endpoint's queue, or
+ *   - UHCI_PTR_TERM (when there's no more traffic for this endpoint).
+ *
+ * The other role of a QH is to serve as a "skeleton" framelist entry, so we
+ * can easily splice a QH for some endpoint into the schedule at the right
+ * place.  Then qh->element is UHCI_PTR_TERM.
+ *
+ * In the schedule, qh->link maintains a list of QHs seen by the HC:
+ *     skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ...
+ *
+ * qh->node is the software equivalent of qh->link.  The differences
+ * are that the software list is doubly-linked and QHs in the UNLINKING
+ * state are on the software list but not the hardware schedule.
+ *
+ * For bookkeeping purposes we maintain QHs even for Isochronous endpoints,
+ * but they never get added to the hardware schedule.
+ */
+#define QH_STATE_IDLE		1	/* QH is not being used */
+#define QH_STATE_UNLINKING	2	/* QH has been removed from the
+					 * schedule but the hardware may
+					 * still be using it */
+#define QH_STATE_ACTIVE		3	/* QH is on the schedule */
+
+struct uhci_qh {
+	/* Hardware fields */
+	__hc32 link;			/* Next QH in the schedule */
+	__hc32 element;			/* Queue element (TD) pointer */
+
+	/* Software fields */
+	dma_addr_t dma_handle;
+
+	struct list_head node;		/* Node in the list of QHs */
+	struct usb_host_endpoint *hep;	/* Endpoint information */
+	struct usb_device *udev;
+	struct list_head queue;		/* Queue of urbps for this QH */
+	struct uhci_td *dummy_td;	/* Dummy TD to end the queue */
+	struct uhci_td *post_td;	/* Last TD completed */
+
+	struct usb_iso_packet_descriptor *iso_packet_desc;
+					/* Next urb->iso_frame_desc entry */
+	unsigned long advance_jiffies;	/* Time of last queue advance */
+	unsigned int unlink_frame;	/* When the QH was unlinked */
+	unsigned int period;		/* For Interrupt and Isochronous QHs */
+	short phase;			/* Between 0 and period-1 */
+	short load;			/* Periodic time requirement, in us */
+	unsigned int iso_frame;		/* Frame # for iso_packet_desc */
+
+	int state;			/* QH_STATE_xxx; see above */
+	int type;			/* Queue type (control, bulk, etc) */
+	int skel;			/* Skeleton queue number */
+
+	unsigned int initial_toggle:1;	/* Endpoint's current toggle value */
+	unsigned int needs_fixup:1;	/* Must fix the TD toggle values */
+	unsigned int is_stopped:1;	/* Queue was stopped by error/unlink */
+	unsigned int wait_expired:1;	/* QH_WAIT_TIMEOUT has expired */
+	unsigned int bandwidth_reserved:1;	/* Periodic bandwidth has
+						 * been allocated */
+} __attribute__((aligned(16)));
+
+/*
+ * We need a special accessor for the element pointer because it is
+ * subject to asynchronous updates by the controller.
+ */
+#define qh_element(qh)		READ_ONCE((qh)->element)
+
+#define LINK_TO_QH(uhci, qh)	(UHCI_PTR_QH((uhci)) | \
+				cpu_to_hc32((uhci), (qh)->dma_handle))
+
+
+/*
+ *	Transfer Descriptors
+ */
+
+/*
+ * for TD <status>:
+ */
+#define TD_CTRL_SPD		(1 << 29)	/* Short Packet Detect */
+#define TD_CTRL_C_ERR_MASK	(3 << 27)	/* Error Counter bits */
+#define TD_CTRL_C_ERR_SHIFT	27
+#define TD_CTRL_LS		(1 << 26)	/* Low Speed Device */
+#define TD_CTRL_IOS		(1 << 25)	/* Isochronous Select */
+#define TD_CTRL_IOC		(1 << 24)	/* Interrupt on Complete */
+#define TD_CTRL_ACTIVE		(1 << 23)	/* TD Active */
+#define TD_CTRL_STALLED		(1 << 22)	/* TD Stalled */
+#define TD_CTRL_DBUFERR		(1 << 21)	/* Data Buffer Error */
+#define TD_CTRL_BABBLE		(1 << 20)	/* Babble Detected */
+#define TD_CTRL_NAK		(1 << 19)	/* NAK Received */
+#define TD_CTRL_CRCTIMEO	(1 << 18)	/* CRC/Time Out Error */
+#define TD_CTRL_BITSTUFF	(1 << 17)	/* Bit Stuff Error */
+#define TD_CTRL_ACTLEN_MASK	0x7FF	/* actual length, encoded as n - 1 */
+
+#define uhci_maxerr(err)		((err) << TD_CTRL_C_ERR_SHIFT)
+#define uhci_status_bits(ctrl_sts)	((ctrl_sts) & 0xF60000)
+#define uhci_actual_length(ctrl_sts)	(((ctrl_sts) + 1) & \
+			TD_CTRL_ACTLEN_MASK)	/* 1-based */
+
+/*
+ * for TD <info>: (a.k.a. Token)
+ */
+#define td_token(uhci, td)	hc32_to_cpu((uhci), (td)->token)
+#define TD_TOKEN_DEVADDR_SHIFT	8
+#define TD_TOKEN_TOGGLE_SHIFT	19
+#define TD_TOKEN_TOGGLE		(1 << 19)
+#define TD_TOKEN_EXPLEN_SHIFT	21
+#define TD_TOKEN_EXPLEN_MASK	0x7FF	/* expected length, encoded as n-1 */
+#define TD_TOKEN_PID_MASK	0xFF
+
+#define uhci_explen(len)	((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \
+					TD_TOKEN_EXPLEN_SHIFT)
+
+#define uhci_expected_length(token) ((((token) >> TD_TOKEN_EXPLEN_SHIFT) + \
+					1) & TD_TOKEN_EXPLEN_MASK)
+#define uhci_toggle(token)	(((token) >> TD_TOKEN_TOGGLE_SHIFT) & 1)
+#define uhci_endpoint(token)	(((token) >> 15) & 0xf)
+#define uhci_devaddr(token)	(((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7f)
+#define uhci_devep(token)	(((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7ff)
+#define uhci_packetid(token)	((token) & TD_TOKEN_PID_MASK)
+#define uhci_packetout(token)	(uhci_packetid(token) != USB_PID_IN)
+#define uhci_packetin(token)	(uhci_packetid(token) == USB_PID_IN)
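+
+/*
+ * Worked example (illustrative): an IN token for device 3, endpoint 1,
+ * data toggle 1, expecting 64 bytes would be built as
+ *	uhci_explen(64) | TD_TOKEN_TOGGLE | (1 << 15) |
+ *		(3 << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN
+ * and uhci_expected_length(), uhci_endpoint(), uhci_devaddr() and
+ * uhci_packetid() recover 64, 1, 3 and USB_PID_IN from it.
+ */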
+
+/*
+ * The documentation says "4 words for hardware, 4 words for software".
+ *
+ * That's silly, the hardware doesn't care. The hardware only cares that
+ * the hardware words are 16-byte aligned, and we can have any amount of
+ * sw space after the TD entry.
+ *
+ * td->link points to either another TD (not necessarily for the same urb or
+ * even the same endpoint), or nothing (PTR_TERM), or a QH.
+ */
+struct uhci_td {
+	/* Hardware fields */
+	__hc32 link;
+	__hc32 status;
+	__hc32 token;
+	__hc32 buffer;
+
+	/* Software fields */
+	dma_addr_t dma_handle;
+
+	struct list_head list;
+
+	int frame;			/* for iso: what frame? */
+	struct list_head fl_list;
+} __attribute__((aligned(16)));
+
+/*
+ * We need a special accessor for the control/status word because it is
+ * subject to asynchronous updates by the controller.
+ */
+#define td_status(uhci, td)		hc32_to_cpu((uhci), \
+						READ_ONCE((td)->status))
+
+#define LINK_TO_TD(uhci, td)		(cpu_to_hc32((uhci), (td)->dma_handle))
+
+
+/*
+ *	Skeleton Queue Headers
+ */
+
+/*
+ * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
+ * automatic queuing. To make it easy to insert entries into the schedule,
+ * we have a skeleton of QHs for each predefined Interrupt latency.
+ * Asynchronous QHs (low-speed control, full-speed control, and bulk)
+ * go onto the period-1 interrupt list, since they all get accessed on
+ * every frame.
+ *
+ * When we want to add a new QH, we add it to the list starting from the
+ * appropriate skeleton QH.  For instance, the schedule can look like this:
+ *
+ * skel int128 QH
+ * dev 1 interrupt QH
+ * dev 5 interrupt QH
+ * skel int64 QH
+ * skel int32 QH
+ * ...
+ * skel int1 + async QH
+ * dev 5 low-speed control QH
+ * dev 1 bulk QH
+ * dev 2 bulk QH
+ *
+ * There is a special terminating QH used to keep full-speed bandwidth
+ * reclamation active when no full-speed control or bulk QHs are linked
+ * into the schedule.  It has an inactive TD (to work around a PIIX bug,
+ * see the Intel errata) and it points back to itself.
+ *
+ * There's a special skeleton QH for Isochronous QHs which never appears
+ * on the schedule.  Isochronous TDs go on the schedule before the
+ * skeleton QHs.  The hardware accesses them directly rather than
+ * through their QH, which is used only for bookkeeping purposes.
+ * While the UHCI spec doesn't forbid the use of QHs for Isochronous,
+ * it doesn't use them either.  And the spec says that queues never
+ * advance on an error completion status, which makes them totally
+ * unsuitable for Isochronous transfers.
+ *
+ * There's also a special skeleton QH used for QHs which are in the process
+ * of unlinking and so may still be in use by the hardware.  It too never
+ * appears on the schedule.
+ */
+
+#define UHCI_NUM_SKELQH		11
+#define SKEL_UNLINK		0
+#define skel_unlink_qh		skelqh[SKEL_UNLINK]
+#define SKEL_ISO		1
+#define skel_iso_qh		skelqh[SKEL_ISO]
+	/* int128, int64, ..., int1 = 2, 3, ..., 9 */
+#define SKEL_INDEX(exponent)	(9 - (exponent))
+#define SKEL_ASYNC		9
+#define skel_async_qh		skelqh[SKEL_ASYNC]
+#define SKEL_TERM		10
+#define skel_term_qh		skelqh[SKEL_TERM]
+
+/* The following entries refer to sublists of skel_async_qh */
+#define SKEL_LS_CONTROL		20
+#define SKEL_FS_CONTROL		21
+#define SKEL_FSBR		SKEL_FS_CONTROL
+#define SKEL_BULK		22
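+
+/*
+ * For illustration: an interrupt endpoint with period 16 (exponent 4,
+ * since 1 << 4 == 16) maps to SKEL_INDEX(4) = 5, the int16 skeleton
+ * QH; period 1 gives SKEL_INDEX(0) = 9 = SKEL_ASYNC, matching the
+ * shared period-1/async queue described above.
+ */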
+
+/*
+ *	The UHCI controller and root hub
+ */
+
+/*
+ * States for the root hub:
+ *
+ * To prevent "bouncing" in the presence of electrical noise,
+ * when there are no devices attached we delay for 1 second in the
+ * RUNNING_NODEVS state before switching to the AUTO_STOPPED state.
+ *
+ * (Note that the AUTO_STOPPED state won't be necessary once the hub
+ * driver learns to autosuspend.)
+ */
+enum uhci_rh_state {
+	/* In the following states the HC must be halted.
+	 * These two must come first. */
+	UHCI_RH_RESET,
+	UHCI_RH_SUSPENDED,
+
+	UHCI_RH_AUTO_STOPPED,
+	UHCI_RH_RESUMING,
+
+	/* In this state the HC changes from running to halted,
+	 * so it can legally appear either way. */
+	UHCI_RH_SUSPENDING,
+
+	/* In the following states it's an error if the HC is halted.
+	 * These two must come last. */
+	UHCI_RH_RUNNING,		/* The normal state */
+	UHCI_RH_RUNNING_NODEVS,		/* Running with no devices attached */
+};
+
+/*
+ * The full UHCI controller information:
+ */
+struct uhci_hcd {
+
+	/* debugfs */
+	struct dentry *dentry;
+
+	/* Grabbed from PCI */
+	unsigned long io_addr;
+
+	/* Used when registers are memory mapped */
+	void __iomem *regs;
+
+	struct dma_pool *qh_pool;
+	struct dma_pool *td_pool;
+
+	struct uhci_td *term_td;	/* Terminating TD, see UHCI bug */
+	struct uhci_qh *skelqh[UHCI_NUM_SKELQH];	/* Skeleton QHs */
+	struct uhci_qh *next_qh;	/* Next QH to scan */
+
+	spinlock_t lock;
+
+	dma_addr_t frame_dma_handle;	/* Hardware frame list */
+	__hc32 *frame;
+	void **frame_cpu;		/* CPU's frame list */
+
+	enum uhci_rh_state rh_state;
+	unsigned long auto_stop_time;		/* When to AUTO_STOP */
+
+	unsigned int frame_number;		/* As of last check */
+	unsigned int is_stopped;
+#define UHCI_IS_STOPPED		9999		/* Larger than a frame # */
+	unsigned int last_iso_frame;		/* Frame of last scan */
+	unsigned int cur_iso_frame;		/* Frame for current scan */
+
+	unsigned int scan_in_progress:1;	/* Schedule scan is running */
+	unsigned int need_rescan:1;		/* Redo the schedule scan */
+	unsigned int dead:1;			/* Controller has died */
+	unsigned int RD_enable:1;		/* Suspended root hub with
+						   Resume-Detect interrupts
+						   enabled */
+	unsigned int is_initialized:1;		/* Data structure is usable */
+	unsigned int fsbr_is_on:1;		/* FSBR is turned on */
+	unsigned int fsbr_is_wanted:1;		/* Does any URB want FSBR? */
+	unsigned int fsbr_expiring:1;		/* FSBR is timing out */
+
+	struct timer_list fsbr_timer;		/* For turning off FSBR */
+
+	/* Silicon quirks */
+	unsigned int oc_low:1;			/* OverCurrent bit active low */
+	unsigned int wait_for_hp:1;		/* Wait for HP port reset */
+	unsigned int big_endian_mmio:1;		/* Big endian registers */
+	unsigned int big_endian_desc:1;		/* Big endian descriptors */
+	unsigned int is_aspeed:1;		/* Aspeed impl. workarounds */
+
+	/* Support for port suspend/resume/reset */
+	unsigned long port_c_suspend;		/* Bit-arrays of ports */
+	unsigned long resuming_ports;
+	unsigned long ports_timeout;		/* Time to stop signalling */
+
+	struct list_head idle_qh_list;		/* Where the idle QHs live */
+
+	int rh_numports;			/* Number of root-hub ports */
+
+	wait_queue_head_t waitqh;		/* endpoint_disable waiters */
+	int num_waiting;			/* Number of waiters */
+
+	int total_load;				/* Sum of array values */
+	short load[MAX_PHASE];			/* Periodic allocations */
+
+	struct clk *clk;			/* (optional) clock source */
+
+	/* Reset host controller */
+	void	(*reset_hc) (struct uhci_hcd *uhci);
+	int	(*check_and_reset_hc) (struct uhci_hcd *uhci);
+	/* configure_hc should perform arch specific settings, if needed */
+	void	(*configure_hc) (struct uhci_hcd *uhci);
+	/* Check for broken resume detect interrupts */
+	int	(*resume_detect_interrupts_are_broken) (struct uhci_hcd *uhci);
+	/* Check for broken global suspend */
+	int	(*global_suspend_mode_is_broken) (struct uhci_hcd *uhci);
+};
+
+/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
+static inline struct uhci_hcd *hcd_to_uhci(struct usb_hcd *hcd)
+{
+	return (struct uhci_hcd *) (hcd->hcd_priv);
+}
+static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
+{
+	return container_of((void *) uhci, struct usb_hcd, hcd_priv);
+}
+
+#define uhci_dev(u)	(uhci_to_hcd(u)->self.controller)
+
+/* Utility macro for comparing frame numbers */
+#define uhci_frame_before_eq(f1, f2)	(0 <= (int) ((f2) - (f1)))
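+
+/*
+ * Worked example (illustrative): the signed-difference trick handles
+ * 32-bit wraparound, e.g. uhci_frame_before_eq(0xfffffff0, 5) computes
+ * (int) (5 - 0xfffffff0) = 21 >= 0, so a frame just before the
+ * rollover still counts as before-or-equal to one just after it.
+ */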
+
+
+/*
+ *	Private per-URB data
+ */
+struct urb_priv {
+	struct list_head node;		/* Node in the QH's urbp list */
+
+	struct urb *urb;
+
+	struct uhci_qh *qh;		/* QH for this URB */
+	struct list_head td_list;
+
+	unsigned fsbr:1;		/* URB wants FSBR */
+};
+
+
+/* Some special IDs */
+
+#define PCI_VENDOR_ID_GENESYS		0x17a0
+#define PCI_DEVICE_ID_GL880S_UHCI	0x8083
+
+/* Aspeed SoC needs some quirks */
+static inline bool uhci_is_aspeed(const struct uhci_hcd *uhci)
+{
+	return IS_ENABLED(CONFIG_USB_UHCI_ASPEED) && uhci->is_aspeed;
+}
+
+/*
+ * Functions used to access controller registers. The UHCI spec says that host
+ * controller I/O registers are mapped into PCI I/O space. For non-PCI hosts
+ * we use memory mapped registers.
+ */
+
+#ifndef CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC
+/* Support PCI only */
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+	return inl(uhci->io_addr + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+	outl(val, uhci->io_addr + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+	return inw(uhci->io_addr + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+	outw(val, uhci->io_addr + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+	return inb(uhci->io_addr + reg);
+}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+	outb(val, uhci->io_addr + reg);
+}
+
+#else
+/* Support non-PCI host controllers */
+#ifdef CONFIG_USB_PCI
+/* Support PCI and non-PCI host controllers */
+#define uhci_has_pci_registers(u)	((u)->io_addr != 0)
+#else
+/* Support non-PCI host controllers only */
+#define uhci_has_pci_registers(u)	0
+#endif
+
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+/* Support (non-PCI) big endian host controllers */
+#define uhci_big_endian_mmio(u)		((u)->big_endian_mmio)
+#else
+#define uhci_big_endian_mmio(u)		0
+#endif
+
+static inline int uhci_aspeed_reg(unsigned int reg)
+{
+	switch (reg) {
+	case USBCMD:
+		return 0x00;
+	case USBSTS:
+		return 0x04;
+	case USBINTR:
+		return 0x08;
+	case USBFRNUM:
+		return 0x80;
+	case USBFLBASEADD:
+		return 0x0c;
+	case USBSOF:
+		return 0x84;
+	case USBPORTSC1:
+		return 0x88;
+	case USBPORTSC2:
+		return 0x8c;
+	case USBPORTSC3:
+		return 0x90;
+	case USBPORTSC4:
+		return 0x94;
+	default:
+		pr_warn("UHCI: Unsupported register 0x%02x on Aspeed\n", reg);
+		/* Return an unimplemented register */
+		return 0x10;
+	}
+}
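+
+/*
+ * For illustration: on Aspeed the registers sit on 32-bit boundaries,
+ * so uhci_readw(uhci, USBPORTSC1) below becomes a readl() of
+ * uhci->regs + 0x88, truncated to 16 bits by the return type.
+ */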
+
+static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inl(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readl_be(uhci->regs + reg);
+#endif
+	else
+		return readl(uhci->regs + reg);
+}
+
+static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outl(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writel_be(val, uhci->regs + reg);
+#endif
+	else
+		writel(val, uhci->regs + reg);
+}
+
+static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inw(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readw_be(uhci->regs + reg);
+#endif
+	else
+		return readw(uhci->regs + reg);
+}
+
+static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outw(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writew_be(val, uhci->regs + reg);
+#endif
+	else
+		writew(val, uhci->regs + reg);
+}
+
+static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		return inb(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		return readb_be(uhci->regs + reg);
+#endif
+	else
+		return readb(uhci->regs + reg);
+}
+
+static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
+{
+	if (uhci_has_pci_registers(uhci))
+		outb(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
+	else if (uhci_big_endian_mmio(uhci))
+		writeb_be(val, uhci->regs + reg);
+#endif
+	else
+		writeb(val, uhci->regs + reg);
+}
+#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */
+
+/*
+ * The GRLIB GRUSBHC controller can use big endian format for its descriptors.
+ *
+ * UHCI controllers accessed through PCI work normally (little-endian
+ * everywhere), so we don't bother supporting a BE-only mode.
+ */
+#ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC
+#define uhci_big_endian_desc(u)		((u)->big_endian_desc)
+
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+	return uhci_big_endian_desc(uhci)
+		? (__force __hc32)cpu_to_be32(x)
+		: (__force __hc32)cpu_to_le32(x);
+}
+
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+	return uhci_big_endian_desc(uhci)
+		? be32_to_cpu((__force __be32)x)
+		: le32_to_cpu((__force __le32)x);
+}
+
+#else
+/* cpu to uhci */
+static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x)
+{
+	return cpu_to_le32(x);
+}
+
+/* uhci to cpu */
+static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x)
+{
+	return le32_to_cpu(x);
+}
+#endif
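+
+/*
+ * Usage sketch (illustrative): every descriptor field goes through
+ * these helpers, e.g.
+ *	td->buffer = cpu_to_hc32(uhci, urb->transfer_dma);
+ *	len = uhci_actual_length(td_status(uhci, td));
+ * so one copy of the driver logic serves little-endian PCI parts and
+ * the big-endian GRLIB GRUSBHC without scattered byte swaps.
+ */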
+
+#endif
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
new file mode 100644
index 0000000..47106dd
--- /dev/null
+++ b/drivers/usb/host/uhci-hub.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Host Controller Interface driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
+ */
+
+static const __u8 root_hub_hub_des[] =
+{
+	0x09,			/*  __u8  bLength; */
+	USB_DT_HUB,		/*  __u8  bDescriptorType; Hub-descriptor */
+	0x02,			/*  __u8  bNbrPorts; */
+	HUB_CHAR_NO_LPSM |	/* __u16  wHubCharacteristics; */
+		HUB_CHAR_INDV_PORT_OCPM, /* (per-port OC, no power switching) */
+	0x00,
+	0x01,			/*  __u8  bPwrOn2pwrGood; 2ms */
+	0x00,			/*  __u8  bHubContrCurrent; 0 mA */
+	0x00,			/*  __u8  DeviceRemovable; *** 7 ports max */
+	0xff			/*  __u8  PortPwrCtrlMask; *** 7 ports max */
+};
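+
+/*
+ * Note (illustrative): only bNbrPorts is patched at runtime (see
+ * GetHubDescriptor in uhci_hub_control below); the two trailing bitmap
+ * bytes already cover the 7-port maximum defined next, so the
+ * descriptor never needs resizing.
+ */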
+
+#define	UHCI_RH_MAXCHILD	7
+
+/* must write as zeroes */
+#define WZ_BITS		(USBPORTSC_RES2 | USBPORTSC_RES3 | USBPORTSC_RES4)
+
+/* status change bits:  nonzero writes will clear */
+#define RWC_BITS	(USBPORTSC_OCC | USBPORTSC_PEC | USBPORTSC_CSC)
+
+/* suspend/resume bits: port suspended or port resuming */
+#define SUSPEND_BITS	(USBPORTSC_SUSP | USBPORTSC_RD)
+
+/* A port that either is connected or has a changed-bit set will prevent
+ * us from AUTO_STOPPING.
+ */
+static int any_ports_active(struct uhci_hcd *uhci)
+{
+	int port;
+
+	for (port = 0; port < uhci->rh_numports; ++port) {
+		if ((uhci_readw(uhci, USBPORTSC1 + port * 2) &
+				(USBPORTSC_CCS | RWC_BITS)) ||
+				test_bit(port, &uhci->port_c_suspend))
+			return 1;
+	}
+	return 0;
+}
+
+static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf)
+{
+	int port;
+	int mask = RWC_BITS;
+
+	/* Some boards (both VIA and Intel apparently) report bogus
+	 * overcurrent indications, causing massive log spam unless
+	 * we completely ignore them.  This doesn't seem to be a problem
+	 * with the chipset so much as with the way it is connected on
+	 * the motherboard; if the overcurrent input is left to float
+	 * then it may constantly register false positives. */
+	if (ignore_oc)
+		mask &= ~USBPORTSC_OCC;
+
+	*buf = 0;
+	for (port = 0; port < uhci->rh_numports; ++port) {
+		if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & mask) ||
+				test_bit(port, &uhci->port_c_suspend))
+			*buf |= (1 << (port + 1));
+	}
+	return !!*buf;
+}
+
+#define CLR_RH_PORTSTAT(x) \
+	status = uhci_readw(uhci, port_addr);	\
+	status &= ~(RWC_BITS|WZ_BITS); \
+	status &= ~(x); \
+	status |= RWC_BITS & (x); \
+	uhci_writew(uhci, status, port_addr)
+
+#define SET_RH_PORTSTAT(x) \
+	status = uhci_readw(uhci, port_addr);	\
+	status |= (x); \
+	status &= ~(RWC_BITS|WZ_BITS); \
+	uhci_writew(uhci, status, port_addr)
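+
+/*
+ * For illustration: the masking above encodes write-one-to-clear
+ * semantics.  CLR_RH_PORTSTAT(USBPORTSC_CSC) must write CSC as 1 to
+ * clear it, while SET_RH_PORTSTAT(USBPORTSC_PE) writes all RWC bits
+ * as 0 so no unrelated status change is acknowledged by accident.
+ */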
+
+/* UHCI controllers don't automatically stop resume signalling after 20 msec,
+ * so we have to poll and check timeouts in order to take care of it.
+ */
+static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
+		unsigned long port_addr)
+{
+	int status;
+	int i;
+
+	if (uhci_readw(uhci, port_addr) & SUSPEND_BITS) {
+		CLR_RH_PORTSTAT(SUSPEND_BITS);
+		if (test_bit(port, &uhci->resuming_ports))
+			set_bit(port, &uhci->port_c_suspend);
+
+		/* The controller won't actually turn off the RD bit until
+		 * it has had a chance to send a low-speed EOP sequence,
+		 * which is supposed to take 3 bit times (= 2 microseconds).
+		 * Experiments show that some controllers take longer, so
+		 * we'll poll for completion. */
+		for (i = 0; i < 10; ++i) {
+			if (!(uhci_readw(uhci, port_addr) & SUSPEND_BITS))
+				break;
+			udelay(1);
+		}
+	}
+	clear_bit(port, &uhci->resuming_ports);
+	usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
+}
+
+/* Wait for the UHCI controller in HP's iLO2 server management chip.
+ * It can take up to 250 us to finish a reset and set the CSC bit.
+ */
+static void wait_for_HP(struct uhci_hcd *uhci, unsigned long port_addr)
+{
+	int i;
+
+	for (i = 10; i < 250; i += 10) {
+		if (uhci_readw(uhci, port_addr) & USBPORTSC_CSC)
+			return;
+		udelay(10);
+	}
+	/* Log a warning? */
+}
+
+static void uhci_check_ports(struct uhci_hcd *uhci)
+{
+	unsigned int port;
+	unsigned long port_addr;
+	int status;
+
+	for (port = 0; port < uhci->rh_numports; ++port) {
+		port_addr = USBPORTSC1 + 2 * port;
+		status = uhci_readw(uhci, port_addr);
+		if (unlikely(status & USBPORTSC_PR)) {
+			if (time_after_eq(jiffies, uhci->ports_timeout)) {
+				CLR_RH_PORTSTAT(USBPORTSC_PR);
+				udelay(10);
+
+				/* HP's server management chip requires
+				 * a longer delay. */
+				if (uhci->wait_for_hp)
+					wait_for_HP(uhci, port_addr);
+
+				/* If the port was enabled before, turning
+				 * reset on caused a port enable change.
+				 * Turning reset off causes a port connect
+				 * status change.  Clear these changes. */
+				CLR_RH_PORTSTAT(USBPORTSC_CSC | USBPORTSC_PEC);
+				SET_RH_PORTSTAT(USBPORTSC_PE);
+			}
+		}
+		if (unlikely(status & USBPORTSC_RD)) {
+			if (!test_bit(port, &uhci->resuming_ports)) {
+
+				/* Port received a wakeup request */
+				set_bit(port, &uhci->resuming_ports);
+				uhci->ports_timeout = jiffies +
+					msecs_to_jiffies(USB_RESUME_TIMEOUT);
+				usb_hcd_start_port_resume(
+						&uhci_to_hcd(uhci)->self, port);
+
+				/* Make sure we see the port again
+				 * after the resuming period is over. */
+				mod_timer(&uhci_to_hcd(uhci)->rh_timer,
+						uhci->ports_timeout);
+			} else if (time_after_eq(jiffies,
+						uhci->ports_timeout)) {
+				uhci_finish_suspend(uhci, port, port_addr);
+			}
+		}
+	}
+}
+
+static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned long flags;
+	int status = 0;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+
+	uhci_scan_schedule(uhci);
+	if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
+		goto done;
+	uhci_check_ports(uhci);
+
+	status = get_hub_status_data(uhci, buf);
+
+	switch (uhci->rh_state) {
+	case UHCI_RH_SUSPENDED:
+		/* if port change, ask to be resumed */
+		if (status || uhci->resuming_ports) {
+			status = 1;
+			usb_hcd_resume_root_hub(hcd);
+		}
+		break;
+
+	case UHCI_RH_AUTO_STOPPED:
+		/* if port change, auto start */
+		if (status)
+			wakeup_rh(uhci);
+		break;
+
+	case UHCI_RH_RUNNING:
+		/* are any devices attached? */
+		if (!any_ports_active(uhci)) {
+			uhci->rh_state = UHCI_RH_RUNNING_NODEVS;
+			uhci->auto_stop_time = jiffies + HZ;
+		}
+		break;
+
+	case UHCI_RH_RUNNING_NODEVS:
+		/* auto-stop if nothing connected for 1 second */
+		if (any_ports_active(uhci))
+			uhci->rh_state = UHCI_RH_RUNNING;
+		else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+				!uhci->wait_for_hp)
+			suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
+		break;
+
+	default:
+		break;
+	}
+
+done:
+	spin_unlock_irqrestore(&uhci->lock, flags);
+	return status;
+}
+
+/* size of returned buffer is part of USB spec */
+static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			u16 wIndex, char *buf, u16 wLength)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	int status, lstatus, retval = 0;
+	unsigned int port = wIndex - 1;
+	unsigned long port_addr = USBPORTSC1 + 2 * port;
+	u16 wPortChange, wPortStatus;
+	unsigned long flags;
+
+	if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
+		return -ETIMEDOUT;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+	switch (typeReq) {
+
+	case GetHubStatus:
+		*(__le32 *)buf = cpu_to_le32(0);
+		retval = 4; /* hub power */
+		break;
+	case GetPortStatus:
+		if (port >= uhci->rh_numports)
+			goto err;
+
+		uhci_check_ports(uhci);
+		status = uhci_readw(uhci, port_addr);
+
+		/* Intel controllers report the OverCurrent bit active on.
+		 * VIA controllers report it active off, so we'll adjust the
+		 * bit value.  (It's not standardized in the UHCI spec.)
+		 */
+		if (uhci->oc_low)
+			status ^= USBPORTSC_OC;
+
+		/* UHCI doesn't support C_RESET (always false) */
+		wPortChange = lstatus = 0;
+		if (status & USBPORTSC_CSC)
+			wPortChange |= USB_PORT_STAT_C_CONNECTION;
+		if (status & USBPORTSC_PEC)
+			wPortChange |= USB_PORT_STAT_C_ENABLE;
+		if ((status & USBPORTSC_OCC) && !ignore_oc)
+			wPortChange |= USB_PORT_STAT_C_OVERCURRENT;
+
+		if (test_bit(port, &uhci->port_c_suspend)) {
+			wPortChange |= USB_PORT_STAT_C_SUSPEND;
+			lstatus |= 1;
+		}
+		if (test_bit(port, &uhci->resuming_ports))
+			lstatus |= 4;
+
+		/* UHCI has no power switching (always on) */
+		wPortStatus = USB_PORT_STAT_POWER;
+		if (status & USBPORTSC_CCS)
+			wPortStatus |= USB_PORT_STAT_CONNECTION;
+		if (status & USBPORTSC_PE) {
+			wPortStatus |= USB_PORT_STAT_ENABLE;
+			if (status & SUSPEND_BITS)
+				wPortStatus |= USB_PORT_STAT_SUSPEND;
+		}
+		if (status & USBPORTSC_OC)
+			wPortStatus |= USB_PORT_STAT_OVERCURRENT;
+		if (status & USBPORTSC_PR)
+			wPortStatus |= USB_PORT_STAT_RESET;
+		if (status & USBPORTSC_LSDA)
+			wPortStatus |= USB_PORT_STAT_LOW_SPEED;
+
+		if (wPortChange)
+			dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
+					wIndex, status, lstatus);
+
+		*(__le16 *)buf = cpu_to_le16(wPortStatus);
+		*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
+		retval = 4;
+		break;
+	case SetHubFeature:		/* We don't implement these */
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_OVER_CURRENT:
+		case C_HUB_LOCAL_POWER:
+			break;
+		default:
+			goto err;
+		}
+		break;
+	case SetPortFeature:
+		if (port >= uhci->rh_numports)
+			goto err;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			SET_RH_PORTSTAT(USBPORTSC_SUSP);
+			break;
+		case USB_PORT_FEAT_RESET:
+			SET_RH_PORTSTAT(USBPORTSC_PR);
+
+			/* Reset terminates Resume signalling */
+			uhci_finish_suspend(uhci, port, port_addr);
+
+			/* USB v2.0 7.1.7.5 */
+			uhci->ports_timeout = jiffies +
+				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			break;
+		case USB_PORT_FEAT_POWER:
+			/* UHCI has no power switching */
+			break;
+		default:
+			goto err;
+		}
+		break;
+	case ClearPortFeature:
+		if (port >= uhci->rh_numports)
+			goto err;
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			CLR_RH_PORTSTAT(USBPORTSC_PE);
+
+			/* Disable terminates Resume signalling */
+			uhci_finish_suspend(uhci, port, port_addr);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			CLR_RH_PORTSTAT(USBPORTSC_PEC);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {
+
+				/* Make certain the port isn't suspended */
+				uhci_finish_suspend(uhci, port, port_addr);
+			} else if (!test_and_set_bit(port,
+						&uhci->resuming_ports)) {
+				SET_RH_PORTSTAT(USBPORTSC_RD);
+
+				/* The controller won't allow RD to be set
+				 * if the port is disabled.  When this happens
+				 * just skip the Resume signalling.
+				 */
+				if (!(uhci_readw(uhci, port_addr) &
+						USBPORTSC_RD))
+					uhci_finish_suspend(uhci, port,
+							port_addr);
+				else
+					/* USB v2.0 7.1.7.7 */
+					uhci->ports_timeout = jiffies +
+						msecs_to_jiffies(20);
+			}
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			clear_bit(port, &uhci->port_c_suspend);
+			break;
+		case USB_PORT_FEAT_POWER:
+			/* UHCI has no power switching */
+			goto err;
+		case USB_PORT_FEAT_C_CONNECTION:
+			CLR_RH_PORTSTAT(USBPORTSC_CSC);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			CLR_RH_PORTSTAT(USBPORTSC_OCC);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* this driver won't report these */
+			break;
+		default:
+			goto err;
+		}
+		break;
+	case GetHubDescriptor:
+		retval = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
+		memcpy(buf, root_hub_hub_des, retval);
+		if (retval > 2)
+			buf[2] = uhci->rh_numports;
+		break;
+	default:
+err:
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&uhci->lock, flags);
+
+	return retval;
+}
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
new file mode 100644
index 0000000..0dd9442
--- /dev/null
+++ b/drivers/usb/host/uhci-pci.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UHCI HCD (Host Controller Driver) PCI Bus Glue.
+ *
+ * Extracted from uhci-hcd.c:
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include "pci-quirks.h"
+
+/*
+ * Make sure the controller is completely inactive, unable to
+ * generate interrupts or do DMA.
+ */
+static void uhci_pci_reset_hc(struct uhci_hcd *uhci)
+{
+	uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
+}
+
+/*
+ * Initialize a controller that was newly discovered or has just been
+ * resumed.  In either case we can't be sure of its previous state.
+ *
+ * Returns: 1 if the controller was reset, 0 otherwise.
+ */
+static int uhci_pci_check_and_reset_hc(struct uhci_hcd *uhci)
+{
+	return uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)),
+				uhci->io_addr);
+}
+
+/*
+ * Store the basic register settings needed by the controller.
+ * This function is called at the end of configure_hc in uhci-hcd.c.
+ */
+static void uhci_pci_configure_hc(struct uhci_hcd *uhci)
+{
+	struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+
+	/* Enable PIRQ */
+	pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT);
+
+	/* Disable platform-specific non-PME# wakeup */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+		pci_write_config_byte(pdev, USBRES_INTEL, 0);
+}
+
+static int uhci_pci_resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
+{
+	int port;
+
+	switch (to_pci_dev(uhci_dev(uhci))->vendor) {
+	default:
+		break;
+
+	case PCI_VENDOR_ID_GENESYS:
+		/* Genesys Logic's GL880S controllers don't generate
+		 * resume-detect interrupts.
+		 */
+		return 1;
+
+	case PCI_VENDOR_ID_INTEL:
+		/* Some of Intel's USB controllers have a bug that causes
+		 * resume-detect interrupts if any port has an over-current
+		 * condition.  To make matters worse, some motherboards
+		 * hardwire unused USB ports' over-current inputs active!
+		 * To prevent problems, we will not enable resume-detect
+		 * interrupts if any ports are OC.
+		 */
+		for (port = 0; port < uhci->rh_numports; ++port) {
+			if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+					USBPORTSC_OC)
+				return 1;
+		}
+		break;
+	}
+	return 0;
+}
+
+static int uhci_pci_global_suspend_mode_is_broken(struct uhci_hcd *uhci)
+{
+	int port;
+	const char *sys_info;
+	static const char bad_Asus_board[] = "A7V8X";
+
+	/* One of Asus's motherboards has a bug which causes it to
+	 * wake up immediately from suspend-to-RAM if any of the ports
+	 * are connected.  In such cases we will not set EGSM.
+	 */
+	sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+	if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
+		for (port = 0; port < uhci->rh_numports; ++port) {
+			if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
+					USBPORTSC_CCS)
+				return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int uhci_pci_init(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	uhci->io_addr = (unsigned long) hcd->rsrc_start;
+
+	uhci->rh_numports = uhci_count_ports(hcd);
+
+	/* Intel controllers report the OverCurrent bit active on.
+	 * VIA controllers report it active off, so we'll adjust the
+	 * bit value.  (It's not standardized in the UHCI spec.)
+	 */
+	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+		uhci->oc_low = 1;
+
+	/* HP's server management chip requires a longer port reset delay. */
+	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
+		uhci->wait_for_hp = 1;
+
+	/* Intel controllers use non-PME wakeup signalling */
+	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+		device_set_wakeup_capable(uhci_dev(uhci), true);
+
+	/* Set up pointers to PCI-specific functions */
+	uhci->reset_hc = uhci_pci_reset_hc;
+	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
+	uhci->configure_hc = uhci_pci_configure_hc;
+	uhci->resume_detect_interrupts_are_broken =
+		uhci_pci_resume_detect_interrupts_are_broken;
+	uhci->global_suspend_mode_is_broken =
+		uhci_pci_global_suspend_mode_is_broken;
+
+	/* Kick BIOS off this hardware and reset if the controller
+	 * isn't already safely quiescent.
+	 */
+	check_and_reset_hc(uhci);
+	return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more.  This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel.  Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_shutdown(struct pci_dev *pdev)
+{
+	struct usb_hcd *hcd = pci_get_drvdata(pdev);
+
+	uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+#ifdef CONFIG_PM
+
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated);
+
+static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
+	int rc = 0;
+
+	dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+	spin_lock_irq(&uhci->lock);
+	if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
+		goto done_okay;		/* Already suspended or dead */
+
+	/* All PCI host controllers are required to disable IRQ generation
+	 * at the source, so we must turn off PIRQ.
+	 */
+	pci_write_config_word(pdev, USBLEGSUP, 0);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+	/* Enable platform-specific non-PME# wakeup */
+	if (do_wakeup) {
+		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+			pci_write_config_byte(pdev, USBRES_INTEL,
+					USBPORT1EN | USBPORT2EN);
+	}
+
+done_okay:
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	spin_unlock_irq(&uhci->lock);
+
+	synchronize_irq(hcd->irq);
+
+	/* Check for race with a wakeup request */
+	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
+		uhci_pci_resume(hcd, false);
+		rc = -EBUSY;
+	}
+	return rc;
+}
+
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	dev_dbg(uhci_dev(uhci), "%s\n", __func__);
+
+	/* Since we aren't in D3 any more, it's safe to set this flag
+	 * even if the controller was dead.
+	 */
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	spin_lock_irq(&uhci->lock);
+
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated) {
+		uhci->reset_hc(uhci);
+		finish_reset(uhci);
+	}
+
+	/* The firmware may have changed the controller settings during
+	 * a system wakeup.  Check it and reconfigure to avoid problems.
+	 */
+	else {
+		check_and_reset_hc(uhci);
+	}
+	configure_hc(uhci);
+
+	/* Tell the core if the controller had to be reset */
+	if (uhci->rh_state == UHCI_RH_RESET)
+		usb_root_hub_lost_power(hcd->self.root_hub);
+
+	spin_unlock_irq(&uhci->lock);
+
+	/* If interrupts don't work and remote wakeup is enabled then
+	 * the suspended root hub needs to be polled.
+	 */
+	if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup)
+		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+	/* Does the root hub have a port wakeup pending? */
+	usb_hcd_poll_rh_status(hcd);
+	return 0;
+}
+
+#endif
+
+static const struct hc_driver uhci_driver = {
+	.description =		hcd_name,
+	.product_desc =		"UHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct uhci_hcd),
+
+	/* Generic hardware linkage */
+	.irq =			uhci_irq,
+	.flags =		HCD_USB11,
+
+	/* Basic lifecycle operations */
+	.reset =		uhci_pci_init,
+	.start =		uhci_start,
+#ifdef CONFIG_PM
+	.pci_suspend =		uhci_pci_suspend,
+	.pci_resume =		uhci_pci_resume,
+	.bus_suspend =		uhci_rh_suspend,
+	.bus_resume =		uhci_rh_resume,
+#endif
+	.stop =			uhci_stop,
+
+	.urb_enqueue =		uhci_urb_enqueue,
+	.urb_dequeue =		uhci_urb_dequeue,
+
+	.endpoint_disable =	uhci_hcd_endpoint_disable,
+	.get_frame_number =	uhci_hcd_get_frame_number,
+
+	.hub_status_data =	uhci_hub_status_data,
+	.hub_control =		uhci_hub_control,
+};
+
+static const struct pci_device_id uhci_pci_ids[] = { {
+	/* handle any USB UHCI controller */
+	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
+	.driver_data =	(unsigned long) &uhci_driver,
+	}, { /* end: all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+
+static struct pci_driver uhci_pci_driver = {
+	.name =		(char *)hcd_name,
+	.id_table =	uhci_pci_ids,
+
+	.probe =	usb_hcd_pci_probe,
+	.remove =	usb_hcd_pci_remove,
+	.shutdown =	uhci_shutdown,
+
+#ifdef CONFIG_PM
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
+};
+
+MODULE_SOFTDEP("pre: ehci_pci");
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
new file mode 100644
index 0000000..89700e2
--- /dev/null
+++ b/drivers/usb/host/uhci-platform.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic UHCI HCD (Host Controller Driver) for Platform Devices
+ *
+ * Copyright (c) 2011 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This file is based on uhci-grlib.c
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+static int uhci_platform_init(struct usb_hcd *hcd)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	/* Probe number of ports if not already provided by DT */
+	if (!uhci->rh_numports)
+		uhci->rh_numports = uhci_count_ports(hcd);
+
+	/* Set up pointers to generic functions */
+	uhci->reset_hc = uhci_generic_reset_hc;
+	uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
+
+	/* No special actions need to be taken for the functions below */
+	uhci->configure_hc = NULL;
+	uhci->resume_detect_interrupts_are_broken = NULL;
+	uhci->global_suspend_mode_is_broken = NULL;
+
+	/* Reset if the controller isn't already safely quiescent. */
+	check_and_reset_hc(uhci);
+	return 0;
+}
+
+static const struct hc_driver uhci_platform_hc_driver = {
+	.description =		hcd_name,
+	.product_desc =		"Generic UHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct uhci_hcd),
+
+	/* Generic hardware linkage */
+	.irq =			uhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB11,
+
+	/* Basic lifecycle operations */
+	.reset =		uhci_platform_init,
+	.start =		uhci_start,
+#ifdef CONFIG_PM
+	.pci_suspend =		NULL,
+	.pci_resume =		NULL,
+	.bus_suspend =		uhci_rh_suspend,
+	.bus_resume =		uhci_rh_resume,
+#endif
+	.stop =			uhci_stop,
+
+	.urb_enqueue =		uhci_urb_enqueue,
+	.urb_dequeue =		uhci_urb_dequeue,
+
+	.endpoint_disable =	uhci_hcd_endpoint_disable,
+	.get_frame_number =	uhci_hcd_get_frame_number,
+
+	.hub_status_data =	uhci_hub_status_data,
+	.hub_control =		uhci_hub_control,
+};
+
+static int uhci_hcd_platform_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct usb_hcd *hcd;
+	struct uhci_hcd	*uhci;
+	struct resource *res;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * Right now device-tree probed devices don't get dma_mask set.
+	 * Since shared usb code relies on it, set it here for now.
+	 * Once we have dma capability bindings this can go away.
+	 */
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
+			pdev->name);
+	if (!hcd)
+		return -ENOMEM;
+
+	uhci = hcd_to_uhci(hcd);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto err_rmr;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	uhci->regs = hcd->regs;
+
+	/* Grab some things from the device-tree */
+	if (np) {
+		u32 num_ports;
+
+		if (of_property_read_u32(np, "#ports", &num_ports) == 0) {
+			uhci->rh_numports = num_ports;
+			dev_info(&pdev->dev,
+				"Detected %d ports from device-tree\n",
+				num_ports);
+		}
+		if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
+		    of_device_is_compatible(np, "aspeed,ast2500-uhci")) {
+			uhci->is_aspeed = 1;
+			dev_info(&pdev->dev,
+				 "Enabled Aspeed implementation workarounds\n");
+		}
+	}
+
+	/* Get and enable clock if any specified */
+	uhci->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(uhci->clk)) {
+		ret = PTR_ERR(uhci->clk);
+		goto err_rmr;
+	}
+	ret = clk_prepare_enable(uhci->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: couldn't enable clock (%d)\n", ret);
+		goto err_rmr;
+	}
+
+	ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+	if (ret)
+		goto err_clk;
+
+	device_wakeup_enable(hcd->self.controller);
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(uhci->clk);
+err_rmr:
+	usb_put_hcd(hcd);
+
+	return ret;
+}
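+
+/*
+ * For illustration, a device-tree node this driver could bind against
+ * (editor's sketch: the unit address, reg, interrupt and clock cells are
+ * made up; the compatibles and "#ports" follow the probe code above):
+ *
+ *	usb@1e6b0000 {
+ *		compatible = "aspeed,ast2500-uhci", "generic-uhci";
+ *		reg = <0x1e6b0000 0x100>;
+ *		interrupts = <14>;
+ *		#ports = <2>;
+ *		clocks = <&syscon 3>;
+ *	};
+ */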
+
+static int uhci_hcd_platform_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+
+	clk_disable_unprepare(uhci->clk);
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+/* Make sure the controller is quiescent and that we're not using it
+ * any more.  This is mainly for the benefit of programs which, like kexec,
+ * expect the hardware to be idle: not doing DMA or generating IRQs.
+ *
+ * This routine may be called in a damaged or failing kernel.  Hence we
+ * do not acquire the spinlock before shutting down the controller.
+ */
+static void uhci_hcd_platform_shutdown(struct platform_device *op)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(op);
+
+	uhci_hc_died(hcd_to_uhci(hcd));
+}
+
+static const struct of_device_id platform_uhci_ids[] = {
+	{ .compatible = "generic-uhci", },
+	{ .compatible = "platform-uhci", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, platform_uhci_ids);
+
+static struct platform_driver uhci_platform_driver = {
+	.probe		= uhci_hcd_platform_probe,
+	.remove		= uhci_hcd_platform_remove,
+	.shutdown	= uhci_hcd_platform_shutdown,
+	.driver = {
+		.name = "platform-uhci",
+		.of_match_table = platform_uhci_ids,
+	},
+};
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
new file mode 100644
index 0000000..35fcb82
--- /dev/null
+++ b/drivers/usb/host/uhci-q.c
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Host Controller Interface driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999 Randy Dunlap
+ * (C) Copyright 1999 Georg Acher, acher@in.tum.de
+ * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
+ * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
+ * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
+ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
+ *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
+ * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
+ * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
+ */
+
+
+/*
+ * Technically, updating td->status here is a race, but it's not really a
+ * problem. The worst that can happen is that we set the IOC bit again
+ * generating a spurious interrupt. We could fix this by creating another
+ * QH and leaving the IOC bit always set, but then we would have to play
+ * games with the FSBR code to make sure we get the correct order in all
+ * the cases. I don't think it's worth the effort.
+ */
+static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
+{
+	if (uhci->is_stopped)
+		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
+	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
+}
+
+static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
+{
+	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
+}
+
+
+/*
+ * Full-Speed Bandwidth Reclamation (FSBR).
+ * We turn on FSBR whenever a queue that wants it is advancing,
+ * and leave it on for a short time thereafter.
+ */
+static void uhci_fsbr_on(struct uhci_hcd *uhci)
+{
+	struct uhci_qh *lqh;
+
+	/* The terminating skeleton QH always points back to the first
+	 * FSBR QH.  Make the last async QH point to the terminating
+	 * skeleton QH. */
+	uhci->fsbr_is_on = 1;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
+}
+
+static void uhci_fsbr_off(struct uhci_hcd *uhci)
+{
+	struct uhci_qh *lqh;
+
+	/* Remove the link from the last async QH to the terminating
+	 * skeleton QH. */
+	uhci->fsbr_is_on = 0;
+	lqh = list_entry(uhci->skel_async_qh->node.prev,
+			struct uhci_qh, node);
+	lqh->link = UHCI_PTR_TERM(uhci);
+}
+
+static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
+{
+	struct urb_priv *urbp = urb->hcpriv;
+
+	urbp->fsbr = 1;
+}
+
+static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
+{
+	if (urbp->fsbr) {
+		uhci->fsbr_is_wanted = 1;
+		if (!uhci->fsbr_is_on)
+			uhci_fsbr_on(uhci);
+		else if (uhci->fsbr_expiring) {
+			uhci->fsbr_expiring = 0;
+			del_timer(&uhci->fsbr_timer);
+		}
+	}
+}
+
+static void uhci_fsbr_timeout(struct timer_list *t)
+{
+	struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+	if (uhci->fsbr_expiring) {
+		uhci->fsbr_expiring = 0;
+		uhci_fsbr_off(uhci);
+	}
+	spin_unlock_irqrestore(&uhci->lock, flags);
+}
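+
+/*
+ * For reference: this timer is armed from uhci_scan_schedule() below with
+ * mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY) once FSBR is on
+ * but no longer wanted, and is presumably initialized during HC setup
+ * (uhci-hcd.c) along the lines of:
+ *
+ *	timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
+ */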
+
+
+static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
+{
+	dma_addr_t dma_handle;
+	struct uhci_td *td;
+
+	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
+	if (!td)
+		return NULL;
+
+	td->dma_handle = dma_handle;
+	td->frame = -1;
+
+	INIT_LIST_HEAD(&td->list);
+	INIT_LIST_HEAD(&td->fl_list);
+
+	return td;
+}
+
+static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
+{
+	if (!list_empty(&td->list))
+		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
+	if (!list_empty(&td->fl_list))
+		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+
+	dma_pool_free(uhci->td_pool, td, td->dma_handle);
+}
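+
+/*
+ * The td_pool (and qh_pool) these helpers draw from are DMA pools created
+ * during HC setup in uhci-hcd.c; a minimal sketch, assuming the 16-byte
+ * alignment that UHCI requires because link pointers use the low 4 bits
+ * as flags:
+ *
+ *	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
+ *			sizeof(struct uhci_td), 16, 0);
+ */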
+
+static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
+		u32 status, u32 token, u32 buffer)
+{
+	td->status = cpu_to_hc32(uhci, status);
+	td->token = cpu_to_hc32(uhci, token);
+	td->buffer = cpu_to_hc32(uhci, buffer);
+}
+
+static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
+{
+	list_add_tail(&td->list, &urbp->td_list);
+}
+
+static void uhci_remove_td_from_urbp(struct uhci_td *td)
+{
+	list_del_init(&td->list);
+}
+
+/*
+ * We insert Isochronous URBs directly into the frame list at the beginning
+ */
+static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
+		struct uhci_td *td, unsigned framenum)
+{
+	framenum &= (UHCI_NUMFRAMES - 1);
+
+	td->frame = framenum;
+
+	/* Is there a TD already mapped there? */
+	if (uhci->frame_cpu[framenum]) {
+		struct uhci_td *ftd, *ltd;
+
+		ftd = uhci->frame_cpu[framenum];
+		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
+
+		list_add_tail(&td->fl_list, &ftd->fl_list);
+
+		td->link = ltd->link;
+		wmb();
+		ltd->link = LINK_TO_TD(uhci, td);
+	} else {
+		td->link = uhci->frame[framenum];
+		wmb();
+		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
+		uhci->frame_cpu[framenum] = td;
+	}
+}
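+
+/*
+ * Resulting frame-list layout, sketched for a frame that already held two
+ * iso TDs when a third was inserted (new TDs are appended to fl_list but
+ * linked in front of whatever the frame pointed to beyond them):
+ *
+ *	frame[n] --> TD1 --> TD2 --> TD3 --> rest of the schedule
+ *	                             (new)   (interrupt QH skeletons, ...)
+ */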
+
+static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
+		struct uhci_td *td)
+{
+	/* If it's not inserted, don't remove it */
+	if (td->frame == -1) {
+		WARN_ON(!list_empty(&td->fl_list));
+		return;
+	}
+
+	if (uhci->frame_cpu[td->frame] == td) {
+		if (list_empty(&td->fl_list)) {
+			uhci->frame[td->frame] = td->link;
+			uhci->frame_cpu[td->frame] = NULL;
+		} else {
+			struct uhci_td *ntd;
+
+			ntd = list_entry(td->fl_list.next,
+					 struct uhci_td,
+					 fl_list);
+			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
+			uhci->frame_cpu[td->frame] = ntd;
+		}
+	} else {
+		struct uhci_td *ptd;
+
+		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
+		ptd->link = td->link;
+	}
+
+	list_del_init(&td->fl_list);
+	td->frame = -1;
+}
+
+static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
+		unsigned int framenum)
+{
+	struct uhci_td *ftd, *ltd;
+
+	framenum &= (UHCI_NUMFRAMES - 1);
+
+	ftd = uhci->frame_cpu[framenum];
+	if (ftd) {
+		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
+		uhci->frame[framenum] = ltd->link;
+		uhci->frame_cpu[framenum] = NULL;
+
+		while (!list_empty(&ftd->fl_list))
+			list_del_init(ftd->fl_list.prev);
+	}
+}
+
+/*
+ * Remove all the TDs for an Isochronous URB from the frame list
+ */
+static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
+{
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+	struct uhci_td *td;
+
+	list_for_each_entry(td, &urbp->td_list, list)
+		uhci_remove_td_from_frame_list(uhci, td);
+}
+
+static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
+		struct usb_device *udev, struct usb_host_endpoint *hep)
+{
+	dma_addr_t dma_handle;
+	struct uhci_qh *qh;
+
+	qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
+	if (!qh)
+		return NULL;
+
+	qh->dma_handle = dma_handle;
+
+	qh->element = UHCI_PTR_TERM(uhci);
+	qh->link = UHCI_PTR_TERM(uhci);
+
+	INIT_LIST_HEAD(&qh->queue);
+	INIT_LIST_HEAD(&qh->node);
+
+	if (udev) {		/* Normal QH */
+		qh->type = usb_endpoint_type(&hep->desc);
+		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
+			qh->dummy_td = uhci_alloc_td(uhci);
+			if (!qh->dummy_td) {
+				dma_pool_free(uhci->qh_pool, qh, dma_handle);
+				return NULL;
+			}
+		}
+		qh->state = QH_STATE_IDLE;
+		qh->hep = hep;
+		qh->udev = udev;
+		hep->hcpriv = qh;
+
+		if (qh->type == USB_ENDPOINT_XFER_INT ||
+				qh->type == USB_ENDPOINT_XFER_ISOC)
+			qh->load = usb_calc_bus_time(udev->speed,
+					usb_endpoint_dir_in(&hep->desc),
+					qh->type == USB_ENDPOINT_XFER_ISOC,
+					usb_endpoint_maxp(&hep->desc))
+				/ 1000 + 1;
+
+	} else {		/* Skeleton QH */
+		qh->state = QH_STATE_ACTIVE;
+		qh->type = -1;
+	}
+	return qh;
+}
+
+static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
+	if (!list_empty(&qh->queue))
+		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+
+	list_del(&qh->node);
+	if (qh->udev) {
+		qh->hep->hcpriv = NULL;
+		if (qh->dummy_td)
+			uhci_free_td(uhci, qh->dummy_td);
+	}
+	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
+}
+
+/*
+ * When a queue is stopped and a dequeued URB is given back, adjust
+ * the previous TD link (if the URB isn't first on the queue) or
+ * save its toggle value (if it is first and is currently executing).
+ *
+ * Returns 0 if the URB should not yet be given back, 1 otherwise.
+ */
+static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
+		struct urb *urb)
+{
+	struct urb_priv *urbp = urb->hcpriv;
+	struct uhci_td *td;
+	int ret = 1;
+
+	/* Isochronous pipes don't use toggles and their TD link pointers
+	 * get adjusted during uhci_urb_dequeue().  But since their queues
+	 * cannot truly be stopped, we have to watch out for dequeues
+	 * occurring after the nominal unlink frame. */
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+		ret = (uhci->frame_number + uhci->is_stopped !=
+				qh->unlink_frame);
+		goto done;
+	}
+
+	/* If the URB isn't first on its queue, adjust the link pointer
+	 * of the last TD in the previous URB.  The toggle doesn't need
+	 * to be saved since this URB can't be executing yet. */
+	if (qh->queue.next != &urbp->node) {
+		struct urb_priv *purbp;
+		struct uhci_td *ptd;
+
+		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
+		WARN_ON(list_empty(&purbp->td_list));
+		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
+				list);
+		td = list_entry(urbp->td_list.prev, struct uhci_td,
+				list);
+		ptd->link = td->link;
+		goto done;
+	}
+
+	/* If the QH element pointer is UHCI_PTR_TERM then the currently
+	 * executing URB has already been unlinked, so this one isn't it. */
+	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
+		goto done;
+	qh->element = UHCI_PTR_TERM(uhci);
+
+	/* Control pipes don't have to worry about toggles */
+	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
+		goto done;
+
+	/* Save the next toggle value */
+	WARN_ON(list_empty(&urbp->td_list));
+	td = list_entry(urbp->td_list.next, struct uhci_td, list);
+	qh->needs_fixup = 1;
+	qh->initial_toggle = uhci_toggle(td_token(uhci, td));
+
+done:
+	return ret;
+}
+
+/*
+ * Fix up the data toggles for URBs in a queue, when one of them
+ * terminates early (short transfer, error, or dequeued).
+ */
+static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
+			int skip_first)
+{
+	struct urb_priv *urbp = NULL;
+	struct uhci_td *td;
+	unsigned int toggle = qh->initial_toggle;
+	unsigned int pipe;
+
+	/* Fixups for a short transfer start with the second URB in the
+	 * queue (the short URB is the first). */
+	if (skip_first)
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+
+	/* When starting with the first URB, if the QH element pointer is
+	 * still valid then we know the URB's toggles are okay. */
+	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
+		toggle = 2;
+
+	/* Fix up the toggle for the URBs in the queue.  Normally this
+	 * loop won't run more than once: When an error or short transfer
+	 * occurs, the queue usually gets emptied. */
+	urbp = list_prepare_entry(urbp, &qh->queue, node);
+	list_for_each_entry_continue(urbp, &qh->queue, node) {
+
+		/* If the first TD has the right toggle value, we don't
+		 * need to change any toggles in this URB */
+		td = list_entry(urbp->td_list.next, struct uhci_td, list);
+		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
+			td = list_entry(urbp->td_list.prev, struct uhci_td,
+					list);
+			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
+
+		/* Otherwise all the toggles in the URB have to be switched */
+		} else {
+			list_for_each_entry(td, &urbp->td_list, list) {
+				td->token ^= cpu_to_hc32(uhci,
+							TD_TOKEN_TOGGLE);
+				toggle ^= 1;
+			}
+		}
+	}
+
+	wmb();
+	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
+	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
+			usb_pipeout(pipe), toggle);
+	qh->needs_fixup = 0;
+}
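+
+/*
+ * Worked example (hypothetical numbers): a bulk queue holds URBs A and B,
+ * A built with toggles 0,1,0 and B with 1,0.  If A terminates early after
+ * its first TD, the next toggle on the wire must be 1; B's first TD
+ * already carries toggle 1, so the loop above leaves B alone.  Had A
+ * stopped after two TDs (next toggle 0), every TD in B would get
+ * TD_TOKEN_TOGGLE flipped, and either way the endpoint's saved toggle is
+ * updated to follow B's last TD.
+ */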
+
+/*
+ * Link an Isochronous QH into its skeleton's list
+ */
+static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
+
+	/* Isochronous QHs aren't linked by the hardware */
+}
+
+/*
+ * Link a high-period interrupt QH into the schedule at the end of its
+ * skeleton's list
+ */
+static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
+
+	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	qh->link = pqh->link;
+	wmb();
+	pqh->link = LINK_TO_QH(uhci, qh);
+}
+
+/*
+ * Link a period-1 interrupt or async QH into the schedule at the
+ * correct spot in the async skeleton's list, and update the FSBR link
+ */
+static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
+	__hc32 link_to_new_qh;
+
+	/* Find the predecessor QH for our new one and insert it in the list.
+	 * The list of QHs is expected to be short, so linear search won't
+	 * take too long. */
+	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
+		if (pqh->skel <= qh->skel)
+			break;
+	}
+	list_add(&qh->node, &pqh->node);
+
+	/* Link it into the schedule */
+	qh->link = pqh->link;
+	wmb();
+	link_to_new_qh = LINK_TO_QH(uhci, qh);
+	pqh->link = link_to_new_qh;
+
+	/* If this is now the first FSBR QH, link the terminating skeleton
+	 * QH to it. */
+	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+		uhci->skel_term_qh->link = link_to_new_qh;
+}
+
+/*
+ * Put a QH on the schedule in both hardware and software
+ */
+static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	WARN_ON(list_empty(&qh->queue));
+
+	/* Set the element pointer if it isn't set already.
+	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
+	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
+		struct urb_priv *urbp = list_entry(qh->queue.next,
+				struct urb_priv, node);
+		struct uhci_td *td = list_entry(urbp->td_list.next,
+				struct uhci_td, list);
+
+		qh->element = LINK_TO_TD(uhci, td);
+	}
+
+	/* Treat the queue as if it has just advanced */
+	qh->wait_expired = 0;
+	qh->advance_jiffies = jiffies;
+
+	if (qh->state == QH_STATE_ACTIVE)
+		return;
+	qh->state = QH_STATE_ACTIVE;
+
+	/* Move the QH from its old list to the correct spot in the appropriate
+	 * skeleton's list */
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
+	list_del(&qh->node);
+
+	if (qh->skel == SKEL_ISO)
+		link_iso(uhci, qh);
+	else if (qh->skel < SKEL_ASYNC)
+		link_interrupt(uhci, qh);
+	else
+		link_async(uhci, qh);
+}
+
+/*
+ * Unlink a high-period interrupt QH from the schedule
+ */
+static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	pqh->link = qh->link;
+	mb();
+}
+
+/*
+ * Unlink a period-1 interrupt or async QH from the schedule
+ */
+static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct uhci_qh *pqh;
+	__hc32 link_to_next_qh = qh->link;
+
+	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
+	pqh->link = link_to_next_qh;
+
+	/* If this was the old first FSBR QH, link the terminating skeleton
+	 * QH to the next (new first FSBR) QH. */
+	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
+		uhci->skel_term_qh->link = link_to_next_qh;
+	mb();
+}
+
+/*
+ * Take a QH off the hardware schedule
+ */
+static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	if (qh->state == QH_STATE_UNLINKING)
+		return;
+	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
+	qh->state = QH_STATE_UNLINKING;
+
+	/* Unlink the QH from the schedule and record when we did it */
+	if (qh->skel == SKEL_ISO)
+		;
+	else if (qh->skel < SKEL_ASYNC)
+		unlink_interrupt(uhci, qh);
+	else
+		unlink_async(uhci, qh);
+
+	uhci_get_current_frame_number(uhci);
+	qh->unlink_frame = uhci->frame_number;
+
+	/* Force an interrupt so we know when the QH is fully unlinked */
+	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
+		uhci_set_next_interrupt(uhci);
+
+	/* Move the QH from its old list to the end of the unlinking list */
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
+	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
+}
+
+/*
+ * When we and the controller are through with a QH, it becomes IDLE.
+ * This happens when a QH has been off the schedule (on the unlinking
+ * list) for more than one frame, or when an error occurs while adding
+ * the first URB onto a new QH.
+ */
+static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	WARN_ON(qh->state == QH_STATE_ACTIVE);
+
+	if (qh == uhci->next_qh)
+		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
+				node);
+	list_move(&qh->node, &uhci->idle_qh_list);
+	qh->state = QH_STATE_IDLE;
+
+	/* Now that the QH is idle, its post_td isn't being used */
+	if (qh->post_td) {
+		uhci_free_td(uhci, qh->post_td);
+		qh->post_td = NULL;
+	}
+
+	/* If anyone is waiting for a QH to become idle, wake them up */
+	if (uhci->num_waiting)
+		wake_up_all(&uhci->waitqh);
+}
+
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+	int highest_load = uhci->load[phase];
+
+	for (phase += period; phase < MAX_PHASE; phase += period)
+		highest_load = max_t(int, highest_load, uhci->load[phase]);
+	return highest_load;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int minimax_load;
+
+	/* Find the optimal phase (unless it is already set) and get
+	 * its load value. */
+	if (qh->phase >= 0)
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+	else {
+		int phase, load;
+		int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+		qh->phase = 0;
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+		for (phase = 1; phase < max_phase; ++phase) {
+			load = uhci_highest_load(uhci, phase, qh->period);
+			if (load < minimax_load) {
+				minimax_load = load;
+				qh->phase = phase;
+			}
+		}
+	}
+
+	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+	if (minimax_load + qh->load > 900) {
+		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+				"period %d, phase %d, %d + %d us\n",
+				qh->period, qh->phase, minimax_load, qh->load);
+		return -ENOSPC;
+	}
+	return 0;
+}
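+
+/*
+ * Worked example (hypothetical loads): suppose qh->period = 4, so phases
+ * 0..3 are the candidates and each is judged by its worst-case load over
+ * frames phase, phase+4, phase+8, ...  With worst-case loads of 500, 200,
+ * 650 and 100 us, phase 3 wins; a new qh->load of 700 us would then pass
+ * the 900 us test on phases 1 and 3 but fail on phases 0 and 2.
+ */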
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] += load;
+		uhci->total_load += load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 1;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"reserve", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] -= load;
+		uhci->total_load -= load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 0;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"release", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
+static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
+		struct urb *urb)
+{
+	struct urb_priv *urbp;
+
+	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
+	if (!urbp)
+		return NULL;
+
+	urbp->urb = urb;
+	urb->hcpriv = urbp;
+
+	INIT_LIST_HEAD(&urbp->node);
+	INIT_LIST_HEAD(&urbp->td_list);
+
+	return urbp;
+}
+
+static void uhci_free_urb_priv(struct uhci_hcd *uhci,
+		struct urb_priv *urbp)
+{
+	struct uhci_td *td, *tmp;
+
+	if (!list_empty(&urbp->node))
+		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
+				urbp->urb);
+
+	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+		uhci_remove_td_from_urbp(td);
+		uhci_free_td(uhci, td);
+	}
+
+	kmem_cache_free(uhci_up_cachep, urbp);
+}
+
+/*
+ * Map status to standard result codes
+ *
+ * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
+ * uhci_status_bits(td_status(uhci, td)).
+ * Note: <status> does not include the TD_CTRL_NAK bit.
+ * <dir_out> is True for output TDs and False for input TDs.
+ */
+static int uhci_map_status(int status, int dir_out)
+{
+	if (!status)
+		return 0;
+	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
+		return -EPROTO;
+	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
+		if (dir_out)
+			return -EPROTO;
+		else
+			return -EILSEQ;
+	}
+	if (status & TD_CTRL_BABBLE)			/* Babble */
+		return -EOVERFLOW;
+	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
+		return -ENOSR;
+	if (status & TD_CTRL_STALLED)			/* Stalled */
+		return -EPIPE;
+	return 0;
+}
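+
+/*
+ * Example: a TD that the device answered with STALL completes with
+ * TD_CTRL_STALLED set, so uhci_status_bits() is nonzero and the URB
+ * finishes with -EPIPE, which usbcore expects the class driver to clear
+ * via usb_clear_halt().
+ */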
+
+/*
+ * Control transfers
+ */
+static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
+{
+	struct uhci_td *td;
+	unsigned long destination, status;
+	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
+	int len = urb->transfer_buffer_length;
+	dma_addr_t data = urb->transfer_dma;
+	__hc32 *plink;
+	struct urb_priv *urbp = urb->hcpriv;
+	int skel;
+
+	/* The "pipe" thing contains the destination in bits 8--18 */
+	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
+
+	/* 3 errors, dummy TD remains inactive */
+	status = uhci_maxerr(3);
+	if (urb->dev->speed == USB_SPEED_LOW)
+		status |= TD_CTRL_LS;
+
+	/*
+	 * Build the TD for the control request setup packet
+	 */
+	td = qh->dummy_td;
+	uhci_add_td_to_urbp(td, urbp);
+	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
+			urb->setup_dma);
+	plink = &td->link;
+	status |= TD_CTRL_ACTIVE;
+
+	/*
+	 * If direction is "send", change the packet ID from SETUP (0x2D)
+	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
+	 * set Short Packet Detect (SPD) for all data packets.
+	 *
+	 * 0-length transfers always get treated as "send".
+	 */
+	if (usb_pipeout(urb->pipe) || len == 0)
+		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
+	else {
+		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
+		status |= TD_CTRL_SPD;
+	}
+
+	/*
+	 * Build the DATA TDs
+	 */
+	while (len > 0) {
+		int pktsze = maxsze;
+
+		if (len <= pktsze) {		/* The last data packet */
+			pktsze = len;
+			status &= ~TD_CTRL_SPD;
+		}
+
+		td = uhci_alloc_td(uhci);
+		if (!td)
+			goto nomem;
+		*plink = LINK_TO_TD(uhci, td);
+
+		/* Alternate Data0/1 (start with Data1) */
+		destination ^= TD_TOKEN_TOGGLE;
+
+		uhci_add_td_to_urbp(td, urbp);
+		uhci_fill_td(uhci, td, status,
+			destination | uhci_explen(pktsze), data);
+		plink = &td->link;
+
+		data += pktsze;
+		len -= pktsze;
+	}
+
+	/*
+	 * Build the final TD for control status
+	 */
+	td = uhci_alloc_td(uhci);
+	if (!td)
+		goto nomem;
+	*plink = LINK_TO_TD(uhci, td);
+
+	/* Change direction for the status transaction */
+	destination ^= (USB_PID_IN ^ USB_PID_OUT);
+	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */
+
+	uhci_add_td_to_urbp(td, urbp);
+	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
+			destination | uhci_explen(0), 0);
+	plink = &td->link;
+
+	/*
+	 * Build the new dummy TD and activate the old one
+	 */
+	td = uhci_alloc_td(uhci);
+	if (!td)
+		goto nomem;
+	*plink = LINK_TO_TD(uhci, td);
+
+	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
+	wmb();
+	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
+	qh->dummy_td = td;
+
+	/* Low-speed transfers get a different queue, and won't hog the bus.
+	 * Also, some devices enumerate better without FSBR; the easiest way
+	 * to do that is to put URBs on the low-speed queue while the device
+	 * isn't in the CONFIGURED state. */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->state != USB_STATE_CONFIGURED)
+		skel = SKEL_LS_CONTROL;
+	else {
+		skel = SKEL_FS_CONTROL;
+		uhci_add_fsbr(uhci, urb);
+	}
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = skel;
+	return 0;
+
+nomem:
+	/* Remove the dummy TD from the td_list so it doesn't get freed */
+	uhci_remove_td_from_urbp(qh->dummy_td);
+	return -ENOMEM;
+}
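+
+/*
+ * The chain built above for, say, an 8-byte IN control transfer with a
+ * 64-byte maxpacket looks like:
+ *
+ *	SETUP (DATA0) --> IN 8 bytes (DATA1) --> STATUS OUT (DATA1, IOC)
+ *	              --> new dummy TD (inactive)
+ *
+ * Only once the whole chain is in place is the old dummy TD (now the
+ * SETUP TD) flipped to ACTIVE, so the controller never sees a
+ * half-built queue.
+ */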
+
+/*
+ * Common submit for bulk and interrupt
+ */
+static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
+{
+	struct uhci_td *td;
+	unsigned long destination, status;
+	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
+	int len = urb->transfer_buffer_length;
+	int this_sg_len;
+	dma_addr_t data;
+	__hc32 *plink;
+	struct urb_priv *urbp = urb->hcpriv;
+	unsigned int toggle;
+	struct scatterlist  *sg;
+	int i;
+
+	if (len < 0)
+		return -EINVAL;
+
+	/* The "pipe" thing contains the destination in bits 8--18 */
+	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
+	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+			 usb_pipeout(urb->pipe));
+
+	/* 3 errors, dummy TD remains inactive */
+	status = uhci_maxerr(3);
+	if (urb->dev->speed == USB_SPEED_LOW)
+		status |= TD_CTRL_LS;
+	if (usb_pipein(urb->pipe))
+		status |= TD_CTRL_SPD;
+
+	i = urb->num_mapped_sgs;
+	if (len > 0 && i > 0) {
+		sg = urb->sg;
+		data = sg_dma_address(sg);
+
+		/* urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), len);
+	} else {
+		sg = NULL;
+		data = urb->transfer_dma;
+		this_sg_len = len;
+	}
+	/*
+	 * Build the DATA TDs
+	 */
+	plink = NULL;
+	td = qh->dummy_td;
+	for (;;) {	/* Allow zero length packets */
+		int pktsze = maxsze;
+
+		if (len <= pktsze) {		/* The last packet */
+			pktsze = len;
+			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+				status &= ~TD_CTRL_SPD;
+		}
+
+		if (plink) {
+			td = uhci_alloc_td(uhci);
+			if (!td)
+				goto nomem;
+			*plink = LINK_TO_TD(uhci, td);
+		}
+		uhci_add_td_to_urbp(td, urbp);
+		uhci_fill_td(uhci, td, status,
+				destination | uhci_explen(pktsze) |
+					(toggle << TD_TOKEN_TOGGLE_SHIFT),
+				data);
+		plink = &td->link;
+		status |= TD_CTRL_ACTIVE;
+
+		toggle ^= 1;
+		data += pktsze;
+		this_sg_len -= pktsze;
+		len -= maxsze;
+		if (this_sg_len <= 0) {
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+			data = sg_dma_address(sg);
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+		}
+	}
+
+	/*
+	 * URB_ZERO_PACKET means adding a 0-length packet when the direction
+	 * is OUT and transfer_length was an exact multiple of maxsze, hence
+	 * (len = transfer_length - N * maxsze) == 0.  However, if
+	 * transfer_length == 0, the zero packet was already prepared above.
+	 */
+	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
+			usb_pipeout(urb->pipe) && len == 0 &&
+			urb->transfer_buffer_length > 0) {
+		td = uhci_alloc_td(uhci);
+		if (!td)
+			goto nomem;
+		*plink = LINK_TO_TD(uhci, td);
+
+		uhci_add_td_to_urbp(td, urbp);
+		uhci_fill_td(uhci, td, status,
+				destination | uhci_explen(0) |
+					(toggle << TD_TOKEN_TOGGLE_SHIFT),
+				data);
+		plink = &td->link;
+
+		toggle ^= 1;
+	}
+
+	/* Set the interrupt-on-completion flag on the last packet.
+	 * A more-or-less typical 4 KB URB (= size of one memory page)
+	 * will require about 3 ms to transfer; that's a little on the
+	 * fast side but not enough to justify delaying an interrupt
+	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
+	 * flag setting. */
+	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
+
+	/*
+	 * Build the new dummy TD and activate the old one
+	 */
+	td = uhci_alloc_td(uhci);
+	if (!td)
+		goto nomem;
+	*plink = LINK_TO_TD(uhci, td);
+
+	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
+	wmb();
+	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
+	qh->dummy_td = td;
+
+	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+			usb_pipeout(urb->pipe), toggle);
+	return 0;
+
+nomem:
+	/* Remove the dummy TD from the td_list so it doesn't get freed */
+	uhci_remove_td_from_urbp(qh->dummy_td);
+	return -ENOMEM;
+}
+
+static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
+{
+	int ret;
+
+	/* Can't have low-speed bulk transfers */
+	if (urb->dev->speed == USB_SPEED_LOW)
+		return -EINVAL;
+
+	if (qh->state != QH_STATE_ACTIVE)
+		qh->skel = SKEL_BULK;
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0)
+		uhci_add_fsbr(uhci, urb);
+	return ret;
+}
+
+static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
+{
+	int ret;
+
+	/* USB 1.1 interrupt transfers only involve one packet per interval.
+	 * Drivers can submit URBs of any length, but longer ones will need
+	 * multiple intervals to complete.
+	 */
+
+	if (!qh->bandwidth_reserved) {
+		int exponent;
+
+		/* Figure out which power-of-two queue to use */
+		for (exponent = 7; exponent >= 0; --exponent) {
+			if ((1 << exponent) <= urb->interval)
+				break;
+		}
+		if (exponent < 0)
+			return -EINVAL;
+
+		/* If the slot is full, try a lower period */
+		do {
+			qh->period = 1 << exponent;
+			qh->skel = SKEL_INDEX(exponent);
+
+			/* For now, interrupt phase is fixed by the layout
+			 * of the QH lists.
+			 */
+			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+			ret = uhci_check_bandwidth(uhci, qh);
+		} while (ret != 0 && --exponent >= 0);
+		if (ret)
+			return ret;
+	} else if (qh->period > urb->interval)
+		return -EINVAL;		/* Can't decrease the period */
+
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0) {
+		urb->interval = qh->period;
+		if (!qh->bandwidth_reserved)
+			uhci_reserve_bandwidth(uhci, qh);
+	}
+	return ret;
+}
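+
+/*
+ * Example: an URB with urb->interval == 10 rounds down to the period-8
+ * skeleton (exponent 3).  If every phase at period 8 is full, the loop
+ * retries at periods 4, 2 and 1 before giving up, and on success
+ * urb->interval is rewritten to the period actually granted.
+ */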
+
+/*
+ * Fix up the data structures following a short transfer
+ */
+static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
+		struct uhci_qh *qh, struct urb_priv *urbp)
+{
+	struct uhci_td *td;
+	struct list_head *tmp;
+	int ret;
+
+	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
+	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+		/* When a control transfer is short, we have to restart
+		 * the queue at the status stage transaction, which is
+		 * the last TD. */
+		WARN_ON(list_empty(&urbp->td_list));
+		qh->element = LINK_TO_TD(uhci, td);
+		tmp = td->list.prev;
+		ret = -EINPROGRESS;
+
+	} else {
+
+		/* When a bulk/interrupt transfer is short, we have to
+		 * fix up the toggles of the following URBs on the queue
+		 * before restarting the queue at the next URB. */
+		qh->initial_toggle =
+			uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
+		uhci_fixup_toggles(uhci, qh, 1);
+
+		if (list_empty(&urbp->td_list))
+			td = qh->post_td;
+		qh->element = td->link;
+		tmp = urbp->td_list.prev;
+		ret = 0;
+	}
+
+	/* Remove all the TDs we skipped over, from tmp back to the start */
+	while (tmp != &urbp->td_list) {
+		td = list_entry(tmp, struct uhci_td, list);
+		tmp = tmp->prev;
+
+		uhci_remove_td_from_urbp(td);
+		uhci_free_td(uhci, td);
+	}
+	return ret;
+}
+
+/*
+ * Common result for control, bulk, and interrupt
+ */
+static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
+{
+	struct urb_priv *urbp = urb->hcpriv;
+	struct uhci_qh *qh = urbp->qh;
+	struct uhci_td *td, *tmp;
+	unsigned status;
+	int ret = 0;
+
+	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+		unsigned int ctrlstat;
+		int len;
+
+		ctrlstat = td_status(uhci, td);
+		status = uhci_status_bits(ctrlstat);
+		if (status & TD_CTRL_ACTIVE)
+			return -EINPROGRESS;
+
+		len = uhci_actual_length(ctrlstat);
+		urb->actual_length += len;
+
+		if (status) {
+			ret = uhci_map_status(status,
+					uhci_packetout(td_token(uhci, td)));
+			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
+				/* Some debugging code */
+				dev_dbg(&urb->dev->dev,
+						"%s: failed with status %x\n",
+						__func__, status);
+
+				if (debug > 1 && errbuf) {
+					/* Print the chain for debugging */
+					uhci_show_qh(uhci, urbp->qh, errbuf,
+						ERRBUF_LEN - EXTRA_SPACE, 0);
+					lprintk(errbuf);
+				}
+			}
+
+		/* Did we receive a short packet? */
+		} else if (len < uhci_expected_length(td_token(uhci, td))) {
+
+			/* For control transfers, go to the status TD if
+			 * this isn't already the last data TD */
+			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+				if (td->list.next != urbp->td_list.prev)
+					ret = 1;
+			}
+
+			/* For bulk and interrupt, this may be an error */
+			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
+				ret = -EREMOTEIO;
+
+			/* Fixup needed only if this isn't the URB's last TD */
+			else if (&td->list != urbp->td_list.prev)
+				ret = 1;
+		}
+
+		uhci_remove_td_from_urbp(td);
+		if (qh->post_td)
+			uhci_free_td(uhci, qh->post_td);
+		qh->post_td = td;
+
+		if (ret != 0)
+			goto err;
+	}
+	return ret;
+
+err:
+	if (ret < 0) {
+		/* Note that the queue has stopped and save
+		 * the next toggle value */
+		qh->element = UHCI_PTR_TERM(uhci);
+		qh->is_stopped = 1;
+		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
+		qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
+				(ret == -EREMOTEIO);
+
+	} else		/* Short packet received */
+		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
+	return ret;
+}
+
+/*
+ * Isochronous transfers
+ */
+static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
+{
+	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
+	int i;
+	unsigned frame, next;
+	unsigned long destination, status;
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+
+	/* Values must not be too big (could overflow below) */
+	if (urb->interval >= UHCI_NUMFRAMES ||
+			urb->number_of_packets >= UHCI_NUMFRAMES)
+		return -EFBIG;
+
+	uhci_get_current_frame_number(uhci);
+
+	/* Check the period and figure out the starting frame number */
+	if (!qh->bandwidth_reserved) {
+		qh->period = urb->interval;
+		qh->phase = -1;		/* Find the best phase */
+		i = uhci_check_bandwidth(uhci, qh);
+		if (i)
+			return i;
+
+		/* Allow a little time to allocate the TDs */
+		next = uhci->frame_number + 10;
+		frame = qh->phase;
+
+		/* Round up to the first available slot */
+		frame += (next - frame + qh->period - 1) & -qh->period;
+
+	} else if (qh->period != urb->interval) {
+		return -EINVAL;		/* Can't change the period */
+
+	} else {
+		next = uhci->frame_number + 1;
+
+		/* Find the next unused frame */
+		if (list_empty(&qh->queue)) {
+			frame = qh->iso_frame;
+		} else {
+			struct urb *lurb;
+
+			lurb = list_entry(qh->queue.prev,
+					struct urb_priv, node)->urb;
+			frame = lurb->start_frame +
+					lurb->number_of_packets *
+					lurb->interval;
+		}
+
+		/* Fell behind? */
+		if (!uhci_frame_before_eq(next, frame)) {
+
+			/* USB_ISO_ASAP: Round up to the first available slot */
+			if (urb->transfer_flags & URB_ISO_ASAP)
+				frame += (next - frame + qh->period - 1) &
+						-qh->period;
+
+			/*
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
+			 */
+			else if (!uhci_frame_before_eq(next,
+					frame + (urb->number_of_packets - 1) *
+						qh->period))
+				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+						urb, frame,
+						(urb->number_of_packets - 1) *
+							qh->period,
+						next);
+		}
+	}
+
+	/* Make sure we won't have to go too far into the future */
+	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
+			frame + urb->number_of_packets * urb->interval))
+		return -EFBIG;
+	urb->start_frame = frame;
+
+	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
+	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
+
+	for (i = 0; i < urb->number_of_packets; i++) {
+		td = uhci_alloc_td(uhci);
+		if (!td)
+			return -ENOMEM;
+
+		uhci_add_td_to_urbp(td, urbp);
+		uhci_fill_td(uhci, td, status, destination |
+				uhci_explen(urb->iso_frame_desc[i].length),
+				urb->transfer_dma +
+					urb->iso_frame_desc[i].offset);
+	}
+
+	/* Set the interrupt-on-completion flag on the last packet. */
+	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
+
+	/* Add the TDs to the frame list */
+	frame = urb->start_frame;
+	list_for_each_entry(td, &urbp->td_list, list) {
+		uhci_insert_td_in_frame_list(uhci, td, frame);
+		frame += qh->period;
+	}
+
+	if (list_empty(&qh->queue)) {
+		qh->iso_packet_desc = &urb->iso_frame_desc[0];
+		qh->iso_frame = urb->start_frame;
+	}
+
+	qh->skel = SKEL_ISO;
+	if (!qh->bandwidth_reserved)
+		uhci_reserve_bandwidth(uhci, qh);
+	return 0;
+}
+
+static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
+{
+	struct uhci_td *td, *tmp;
+	struct urb_priv *urbp = urb->hcpriv;
+	struct uhci_qh *qh = urbp->qh;
+
+	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
+		unsigned int ctrlstat;
+		int status;
+		int actlength;
+
+		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
+			return -EINPROGRESS;
+
+		uhci_remove_tds_from_frame(uhci, qh->iso_frame);
+
+		ctrlstat = td_status(uhci, td);
+		if (ctrlstat & TD_CTRL_ACTIVE) {
+			status = -EXDEV;	/* TD was added too late? */
+		} else {
+			status = uhci_map_status(uhci_status_bits(ctrlstat),
+					usb_pipeout(urb->pipe));
+			actlength = uhci_actual_length(ctrlstat);
+
+			urb->actual_length += actlength;
+			qh->iso_packet_desc->actual_length = actlength;
+			qh->iso_packet_desc->status = status;
+		}
+		if (status)
+			urb->error_count++;
+
+		uhci_remove_td_from_urbp(td);
+		uhci_free_td(uhci, td);
+		qh->iso_frame += qh->period;
+		++qh->iso_packet_desc;
+	}
+	return 0;
+}
+
+static int uhci_urb_enqueue(struct usb_hcd *hcd,
+		struct urb *urb, gfp_t mem_flags)
+{
+	int ret;
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned long flags;
+	struct urb_priv *urbp;
+	struct uhci_qh *qh;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+
+	ret = usb_hcd_link_urb_to_ep(hcd, urb);
+	if (ret)
+		goto done_not_linked;
+
+	ret = -ENOMEM;
+	urbp = uhci_alloc_urb_priv(uhci, urb);
+	if (!urbp)
+		goto done;
+
+	if (urb->ep->hcpriv)
+		qh = urb->ep->hcpriv;
+	else {
+		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
+		if (!qh)
+			goto err_no_qh;
+	}
+	urbp->qh = qh;
+
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ret = uhci_submit_control(uhci, urb, qh);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ret = uhci_submit_bulk(uhci, urb, qh);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		ret = uhci_submit_interrupt(uhci, urb, qh);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		urb->error_count = 0;
+		ret = uhci_submit_isochronous(uhci, urb, qh);
+		break;
+	}
+	if (ret != 0)
+		goto err_submit_failed;
+
+	/* Add this URB to the QH */
+	list_add_tail(&urbp->node, &qh->queue);
+
+	/* If the new URB is the first and only one on this QH then either
+	 * the QH is new and idle or else it's unlinked and waiting to
+	 * become idle, so we can activate it right away.  But only if the
+	 * queue isn't stopped. */
+	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
+		uhci_activate_qh(uhci, qh);
+		uhci_urbp_wants_fsbr(uhci, urbp);
+	}
+	goto done;
+
+err_submit_failed:
+	if (qh->state == QH_STATE_IDLE)
+		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
+err_no_qh:
+	uhci_free_urb_priv(uhci, urbp);
+done:
+	if (ret)
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+done_not_linked:
+	spin_unlock_irqrestore(&uhci->lock, flags);
+	return ret;
+}
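+
+/*
+ * For context, a minimal sketch (editor's example, hypothetical names) of
+ * the submitter's side: a class driver fills an interrupt URB and usbcore
+ * hands it to this routine through hc_driver.urb_enqueue:
+ *
+ *	usb_fill_int_urb(urb, udev,
+ *			usb_rcvintpipe(udev, ep->bEndpointAddress),
+ *			buf, len, my_complete, ctx, ep->bInterval);
+ *	retval = usb_submit_urb(urb, GFP_KERNEL);
+ */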
+
+static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	unsigned long flags;
+	struct uhci_qh *qh;
+	int rc;
+
+	spin_lock_irqsave(&uhci->lock, flags);
+	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (rc)
+		goto done;
+
+	qh = ((struct urb_priv *) urb->hcpriv)->qh;
+
+	/* Remove Isochronous TDs from the frame list ASAP */
+	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
+		uhci_unlink_isochronous_tds(uhci, urb);
+		mb();
+
+		/* If the URB has already started, update the QH unlink time */
+		uhci_get_current_frame_number(uhci);
+		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
+			qh->unlink_frame = uhci->frame_number;
+	}
+
+	uhci_unlink_qh(uhci, qh);
+
+done:
+	spin_unlock_irqrestore(&uhci->lock, flags);
+	return rc;
+}
+
+/*
+ * Finish unlinking an URB and give it back
+ */
+static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
+		struct urb *urb, int status)
+__releases(uhci->lock)
+__acquires(uhci->lock)
+{
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+
+	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+		/* Subtract off the length of the SETUP packet from
+		 * urb->actual_length.
+		 */
+		urb->actual_length -= min_t(u32, 8, urb->actual_length);
+	}
+
+	/* When giving back the first URB in an Isochronous queue,
+	 * reinitialize the QH's iso-related members for the next URB. */
+	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
+			urbp->node.prev == &qh->queue &&
+			urbp->node.next != &qh->queue) {
+		struct urb *nurb = list_entry(urbp->node.next,
+				struct urb_priv, node)->urb;
+
+		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
+		qh->iso_frame = nurb->start_frame;
+	}
+
+	/* Take the URB off the QH's queue.  If the queue is now empty,
+	 * this is a perfect time for a toggle fixup. */
+	list_del_init(&urbp->node);
+	if (list_empty(&qh->queue) && qh->needs_fixup) {
+		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+				usb_pipeout(urb->pipe), qh->initial_toggle);
+		qh->needs_fixup = 0;
+	}
+
+	uhci_free_urb_priv(uhci, urbp);
+	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
+
+	spin_unlock(&uhci->lock);
+	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
+	spin_lock(&uhci->lock);
+
+	/* If the queue is now empty, we can unlink the QH and give up its
+	 * reserved bandwidth. */
+	if (list_empty(&qh->queue)) {
+		uhci_unlink_qh(uhci, qh);
+		if (qh->bandwidth_reserved)
+			uhci_release_bandwidth(uhci, qh);
+	}
+}
+
+/*
+ * Scan the URBs in a QH's queue
+ */
+#define QH_FINISHED_UNLINKING(qh)			\
+		(qh->state == QH_STATE_UNLINKING &&	\
+		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
+
+static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct urb_priv *urbp;
+	struct urb *urb;
+	int status;
+
+	while (!list_empty(&qh->queue)) {
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		urb = urbp->urb;
+
+		if (qh->type == USB_ENDPOINT_XFER_ISOC)
+			status = uhci_result_isochronous(uhci, urb);
+		else
+			status = uhci_result_common(uhci, urb);
+		if (status == -EINPROGRESS)
+			break;
+
+		/* Dequeued but completed URBs can't be given back unless
+		 * the QH is stopped or has finished unlinking. */
+		if (urb->unlinked) {
+			if (QH_FINISHED_UNLINKING(qh))
+				qh->is_stopped = 1;
+			else if (!qh->is_stopped)
+				return;
+		}
+
+		uhci_giveback_urb(uhci, qh, urb, status);
+		if (status < 0)
+			break;
+	}
+
+	/* If the QH is neither stopped nor finished unlinking (normal case),
+	 * our work here is done. */
+	if (QH_FINISHED_UNLINKING(qh))
+		qh->is_stopped = 1;
+	else if (!qh->is_stopped)
+		return;
+
+	/* Otherwise give back each of the dequeued URBs */
+restart:
+	list_for_each_entry(urbp, &qh->queue, node) {
+		urb = urbp->urb;
+		if (urb->unlinked) {
+
+			/* Fix up the TD links and save the toggles for
+			 * non-Isochronous queues.  For Isochronous queues,
+			 * test for too-recent dequeues. */
+			if (!uhci_cleanup_queue(uhci, qh, urb)) {
+				qh->is_stopped = 0;
+				return;
+			}
+			uhci_giveback_urb(uhci, qh, urb, 0);
+			goto restart;
+		}
+	}
+	qh->is_stopped = 0;
+
+	/* There are no more dequeued URBs.  If there are still URBs on the
+	 * queue, the QH can now be re-activated. */
+	if (!list_empty(&qh->queue)) {
+		if (qh->needs_fixup)
+			uhci_fixup_toggles(uhci, qh, 0);
+
+		/* If the first URB on the queue wants FSBR but its time
+		 * limit has expired, set the next TD to interrupt on
+		 * completion before reactivating the QH. */
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		if (urbp->fsbr && qh->wait_expired) {
+			struct uhci_td *td = list_entry(urbp->td_list.next,
+					struct uhci_td, list);
+
+			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
+		}
+
+		uhci_activate_qh(uhci, qh);
+	}
+
+	/* The queue is empty.  The QH can become idle if it is fully
+	 * unlinked. */
+	else if (QH_FINISHED_UNLINKING(qh))
+		uhci_make_qh_idle(uhci, qh);
+}
+
+/*
+ * Check for queues that have made some forward progress.
+ * Returns 0 if the queue is not Isochronous, is ACTIVE, and
+ * has not advanced since last examined; 1 otherwise.
+ *
+ * Early Intel controllers have a bug which causes qh->element sometimes
+ * not to advance when a TD completes successfully.  The queue remains
+ * stuck on the inactive completed TD.  We detect such cases and advance
+ * the element pointer by hand.
+ */
+static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	struct urb_priv *urbp = NULL;
+	struct uhci_td *td;
+	int ret = 1;
+	unsigned status;
+
+	if (qh->type == USB_ENDPOINT_XFER_ISOC)
+		goto done;
+
+	/* Treat an UNLINKING queue as though it hasn't advanced.
+	 * This is okay because reactivation will treat it as though
+	 * it has advanced, and if it is going to become IDLE then
+	 * this doesn't matter anyway.  Furthermore it's possible
+	 * for an UNLINKING queue not to have any URBs at all, or
+	 * for its first URB not to have any TDs (if it was dequeued
+	 * just as it completed).  So it's not easy in any case to
+	 * test whether such queues have advanced. */
+	if (qh->state != QH_STATE_ACTIVE) {
+		urbp = NULL;
+		status = 0;
+
+	} else {
+		urbp = list_entry(qh->queue.next, struct urb_priv, node);
+		td = list_entry(urbp->td_list.next, struct uhci_td, list);
+		status = td_status(uhci, td);
+		if (!(status & TD_CTRL_ACTIVE)) {
+
+			/* We're okay, the queue has advanced */
+			qh->wait_expired = 0;
+			qh->advance_jiffies = jiffies;
+			goto done;
+		}
+		ret = uhci->is_stopped;
+	}
+
+	/* The queue hasn't advanced; check for timeout */
+	if (qh->wait_expired)
+		goto done;
+
+	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
+
+		/* Detect the Intel bug and work around it */
+		if (qh->post_td && qh_element(qh) ==
+			LINK_TO_TD(uhci, qh->post_td)) {
+			qh->element = qh->post_td->link;
+			qh->advance_jiffies = jiffies;
+			ret = 1;
+			goto done;
+		}
+
+		qh->wait_expired = 1;
+
+		/* If the current URB wants FSBR, unlink it temporarily
+		 * so that we can safely set the next TD to interrupt on
+		 * completion.  That way we'll know as soon as the queue
+		 * starts moving again. */
+		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
+			uhci_unlink_qh(uhci, qh);
+
+	} else {
+		/* Unmoving but not-yet-expired queues keep FSBR alive */
+		if (urbp)
+			uhci_urbp_wants_fsbr(uhci, urbp);
+	}
+
+done:
+	return ret;
+}
+
+/*
+ * Process events in the schedule, but only in one thread at a time
+ */
+static void uhci_scan_schedule(struct uhci_hcd *uhci)
+{
+	int i;
+	struct uhci_qh *qh;
+
+	/* Don't allow re-entrant calls */
+	if (uhci->scan_in_progress) {
+		uhci->need_rescan = 1;
+		return;
+	}
+	uhci->scan_in_progress = 1;
+rescan:
+	uhci->need_rescan = 0;
+	uhci->fsbr_is_wanted = 0;
+
+	uhci_clear_next_interrupt(uhci);
+	uhci_get_current_frame_number(uhci);
+	uhci->cur_iso_frame = uhci->frame_number;
+
+	/* Go through all the QH queues and process the URBs in each one */
+	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
+		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
+				struct uhci_qh, node);
+		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
+			uhci->next_qh = list_entry(qh->node.next,
+					struct uhci_qh, node);
+
+			if (uhci_advance_check(uhci, qh)) {
+				uhci_scan_qh(uhci, qh);
+				if (qh->state == QH_STATE_ACTIVE) {
+					uhci_urbp_wants_fsbr(uhci,
+						list_entry(qh->queue.next,
+							struct urb_priv, node));
+				}
+			}
+		}
+	}
+
+	uhci->last_iso_frame = uhci->cur_iso_frame;
+	if (uhci->need_rescan)
+		goto rescan;
+	uhci->scan_in_progress = 0;
+
+	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
+			!uhci->fsbr_expiring) {
+		uhci->fsbr_expiring = 1;
+		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
+	}
+
+	if (list_empty(&uhci->skel_unlink_qh->node))
+		uhci_clear_next_interrupt(uhci);
+	else
+		uhci_set_next_interrupt(uhci);
+}
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
new file mode 100644
index 0000000..26df013
--- /dev/null
+++ b/drivers/usb/host/whci/Kbuild
@@ -0,0 +1,12 @@
+obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
+
+whci-hcd-y := \
+	asl.o	\
+	debug.o \
+	hcd.o	\
+	hw.o	\
+	init.o	\
+	int.o	\
+	pzl.o	\
+	qset.o	\
+	wusb.o
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
new file mode 100644
index 0000000..276fb34
--- /dev/null
+++ b/drivers/usb/host/whci/asl.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) asynchronous schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qset **next, struct whc_qset **prev)
+{
+	struct list_head *n, *p;
+
+	BUG_ON(list_empty(&whc->async_list));
+
+	n = qset->list_node.next;
+	if (n == &whc->async_list)
+		n = n->next;
+	p = qset->list_node.prev;
+	if (p == &whc->async_list)
+		p = p->prev;
+
+	*next = container_of(n, struct whc_qset, list_node);
+	*prev = container_of(p, struct whc_qset, list_node);
+}
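+
+/*
+ * The software async_list is an ordinary circular list with a head node,
+ * while the hardware ASL links qsets directly to each other, so the head
+ * must be skipped when computing a qset's hardware neighbours:
+ *
+ *	software: head <-> qsetA <-> qsetB <-> qsetC <-> head
+ *	hardware:          qsetA --> qsetB --> qsetC --> qsetA
+ */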
+
+static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
+{
+	list_move(&qset->list_node, &whc->async_list);
+	qset->in_sw_list = true;
+}
+
+static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_qset *next, *prev;
+
+	qset_clear(whc, qset);
+
+	/* Link into ASL. */
+	qset_get_next_prev(whc, qset, &next, &prev);
+	whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
+	whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
+	qset->in_hw_list = true;
+}
+
+static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_qset *prev, *next;
+
+	qset_get_next_prev(whc, qset, &next, &prev);
+
+	list_move(&qset->list_node, &whc->async_removed_list);
+	qset->in_sw_list = false;
+
+	/*
+	 * No more qsets in the ASL?  The caller must stop the ASL as
+	 * it's no longer valid.
+	 */
+	if (list_empty(&whc->async_list))
+		return;
+
+	/* Remove from ASL. */
+	whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
+	qset->in_hw_list = false;
+}
+
+/**
+ * process_qset - process any recently inactivated or halted qTDs in a
+ * qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns WHC_UPDATE_* flags describing how the ASL changed; the caller
+ * (scan_async_work()) translates these into the extra WUSBCMD bits for
+ * the ASL sync command (e.g., WUSBCMD_ASYNC_QSET_RM when a qset was
+ * removed).
+ */
+static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
+{
+	enum whc_update update = 0;
+	uint32_t status = 0;
+
+	while (qset->ntds) {
+		struct whc_qtd *td;
+
+		td = &qset->qtd[qset->td_start];
+		status = le32_to_cpu(td->status);
+
+		/*
+		 * Nothing to do with a still active qTD.
+		 */
+		if (status & QTD_STS_ACTIVE)
+			break;
+
+		if (status & QTD_STS_HALTED) {
+			/* Ug, an error. */
+			process_halted_qtd(whc, qset, td);
+			/* A halted qTD always triggers an update
+			   because the qset was either removed or
+			   reactivated. */
+			update |= WHC_UPDATE_UPDATED;
+			goto done;
+		}
+
+		/* Mmm, a completed qTD. */
+		process_inactive_qtd(whc, qset, td);
+	}
+
+	if (!qset->remove)
+		update |= qset_add_qtds(whc, qset);
+
+done:
+	/*
+	 * Remove this qset from the ASL if requested, but only if it has
+	 * no qTDs.
+	 */
+	if (qset->remove && qset->ntds == 0) {
+		asl_qset_remove(whc, qset);
+		update |= WHC_UPDATE_REMOVED;
+	}
+	return update;
+}
+
+void asl_start(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+
+	le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
+
+	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
+		      1000, "start ASL");
+}
+
+void asl_stop(struct whc *whc)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_ASYNC_SCHED, 0,
+		      1000, "stop ASL");
+}
+
+/**
+ * asl_update - request an ASL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void asl_update(struct whc *whc, uint32_t wusbcmd)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	long t;
+
+	mutex_lock(&wusbhc->mutex);
+	if (wusbhc->active) {
+		whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+		t = wait_event_timeout(
+			whc->async_list_wq,
+			(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
+			msecs_to_jiffies(1000));
+		if (t == 0)
+			whc_hw_error(whc, "ASL update timeout");
+	}
+	mutex_unlock(&wusbhc->mutex);
+}
+
+/**
+ * scan_async_work - scan the ASL for qsets to process.
+ *
+ * Process each qset in the ASL in turn and then signal the WHC that
+ * the ASL has been updated.
+ *
+ * Then start, stop or update the asynchronous schedule as required.
+ */
+void scan_async_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, async_work);
+	struct whc_qset *qset, *t;
+	enum whc_update update = 0;
+
+	spin_lock_irq(&whc->lock);
+
+	/*
+	 * Traverse the software list backwards so new qsets can be
+	 * safely inserted into the ASL without making it non-circular.
+	 */
+	list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
+		if (!qset->in_hw_list) {
+			asl_qset_insert(whc, qset);
+			update |= WHC_UPDATE_ADDED;
+		}
+
+		update |= process_qset(whc, qset);
+	}
+
+	spin_unlock_irq(&whc->lock);
+
+	if (update) {
+		uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
+		if (update & WHC_UPDATE_REMOVED)
+			wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
+		asl_update(whc, wusbcmd);
+	}
+
+	/*
+	 * Now that the ASL is updated, complete the removal of any
+	 * removed qsets.
+	 *
+	 * If the qset was to be reset, do so and reinsert it into the
+	 * ASL if it has pending transfers.
+	 */
+	spin_lock_irq(&whc->lock);
+
+	list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
+		qset_remove_complete(whc, qset);
+		if (qset->reset) {
+			qset_reset(whc, qset);
+			if (!list_empty(&qset->stds)) {
+				asl_qset_insert_begin(whc, qset);
+				queue_work(whc->workqueue, &whc->async_work);
+			}
+		}
+	}
+
+	spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the ASL.
+ */
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	int err;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+	if (err < 0) {
+		spin_unlock_irqrestore(&whc->lock, flags);
+		return err;
+	}
+
+	qset = get_qset(whc, urb, GFP_ATOMIC);
+	if (qset == NULL)
+		err = -ENOMEM;
+	else
+		err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+	if (!err) {
+		if (!qset->in_sw_list && !qset->remove)
+			asl_qset_insert_begin(whc, qset);
+	} else
+		usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	if (!err)
+		queue_work(whc->workqueue, &whc->async_work);
+
+	return err;
+}
+
+/**
+ * asl_urb_dequeue - remove an URB (qset) from the async list.
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the
+ * software queue; otherwise the qset must be removed from the ASL
+ * so the qTDs can be removed.
+ */
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+	struct whc_urb *wurb = urb->hcpriv;
+	struct whc_qset *qset = wurb->qset;
+	struct whc_std *std, *t;
+	bool has_qtd = false;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+	if (ret < 0)
+		goto out;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb) {
+			if (std->qtd)
+				has_qtd = true;
+			qset_free_std(whc, std);
+		} else
+			std->qtd = NULL; /* so this std is re-added when the qset is */
+	}
+
+	if (has_qtd) {
+		asl_qset_remove(whc, qset);
+		wurb->status = status;
+		wurb->is_async = true;
+		queue_work(whc->workqueue, &wurb->dequeue_work);
+	} else
+		qset_remove_urb(whc, qset, urb, status);
+out:
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	return ret;
+}
+
+/**
+ * asl_qset_delete - delete a qset from the ASL
+ */
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	qset->remove = 1;
+	queue_work(whc->workqueue, &whc->async_work);
+	qset_delete(whc, qset);
+}
+
+/**
+ * asl_init - initialize the asynchronous schedule list
+ *
+ * A dummy qset with no qTDs is added to the ASL to simplify removing
+ * qsets (no need to stop the ASL when the last qset is removed).
+ */
+int asl_init(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	qset = qset_alloc(whc, GFP_KERNEL);
+	if (qset == NULL)
+		return -ENOMEM;
+
+	asl_qset_insert_begin(whc, qset);
+	asl_qset_insert(whc, qset);
+
+	return 0;
+}
+
+/**
+ * asl_clean_up - free ASL resources
+ *
+ * The ASL is stopped and empty except for the dummy qset.
+ */
+void asl_clean_up(struct whc *whc)
+{
+	struct whc_qset *qset;
+
+	if (!list_empty(&whc->async_list)) {
+		qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+		list_del(&qset->list_node);
+		qset_free(whc, qset);
+	}
+}
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
new file mode 100644
index 0000000..8ddfe3f
--- /dev/null
+++ b/drivers/usb/host/whci/debug.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) debug.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/export.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_dbg {
+	struct dentry *di_f;
+	struct dentry *asl_f;
+	struct dentry *pzl_f;
+};
+
+static void qset_print(struct seq_file *s, struct whc_qset *qset)
+{
+	static const char *qh_type[] = {
+		"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
+	struct whc_std *std;
+	struct urb *urb = NULL;
+	int i;
+
+	seq_printf(s, "qset %08x", (u32)qset->qset_dma);
+	if (&qset->list_node == qset->whc->async_list.prev) {
+		seq_printf(s, " (dummy)\n");
+	} else {
+		seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
+			   qset->qh.info1 & 0x0f,
+			   (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
+			   qh_type[(qset->qh.info1 >> 5) & 0x7],
+			   (qset->qh.info1 >> 16) & 0xffff);
+	}
+	seq_printf(s, "  -> %08x\n", (u32)qset->qh.link);
+	seq_printf(s, "  info: %08x %08x %08x\n",
+		   qset->qh.info1, qset->qh.info2,  qset->qh.info3);
+	seq_printf(s, "  sts: %04x errs: %d curwin: %08x\n",
+		   qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
+	seq_printf(s, "  TD: sts: %08x opts: %08x\n",
+		   qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+		seq_printf(s, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+			i == qset->td_start ? 'S' : ' ',
+			i == qset->td_end ? 'E' : ' ',
+			i, qset->qtd[i].status, qset->qtd[i].options,
+			(u32)qset->qtd[i].page_list_ptr);
+	}
+	seq_printf(s, "  ntds: %d\n", qset->ntds);
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (urb != std->urb) {
+			urb = std->urb;
+			seq_printf(s, "  urb %p transferred: %d bytes\n", urb,
+				urb->actual_length);
+		}
+		if (std->qtd)
+			seq_printf(s, "    sTD[%td]: %zu bytes @ %08x\n",
+				std->qtd - &qset->qtd[0],
+				std->len, std->num_pointers ?
+				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+		else
+			seq_printf(s, "    sTD[-]: %zu bytes @ %08x\n",
+				std->len, std->num_pointers ?
+				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+	}
+}
+
+static int di_show(struct seq_file *s, void *p)
+{
+	struct whc *whc = s->private;
+	int d;
+
+	for (d = 0; d < whc->n_devices; d++) {
+		struct di_buf_entry *di = &whc->di_buf[d];
+
+		seq_printf(s, "DI[%d]\n", d);
+		seq_printf(s, "  availability: %*pb\n",
+			   UWB_NUM_MAS, (unsigned long *)di->availability_info);
+		seq_printf(s, "  %c%c key idx: %d dev addr: %d\n",
+			   (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+			   (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+			   (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+			   (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+	}
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(di);
+
+static int asl_show(struct seq_file *s, void *p)
+{
+	struct whc *whc = s->private;
+	struct whc_qset *qset;
+
+	list_for_each_entry(qset, &whc->async_list, list_node) {
+		qset_print(s, qset);
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(asl);
+
+static int pzl_show(struct seq_file *s, void *p)
+{
+	struct whc *whc = s->private;
+	struct whc_qset *qset;
+	int period;
+
+	for (period = 0; period < 5; period++) {
+		seq_printf(s, "Period %d\n", period);
+		list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+			qset_print(s, qset);
+		}
+	}
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pzl);
+
+void whc_dbg_init(struct whc *whc)
+{
+	if (whc->wusbhc.pal.debugfs_dir == NULL)
+		return;
+
+	whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
+	if (whc->dbg == NULL)
+		return;
+
+	whc->dbg->di_f = debugfs_create_file("di", 0444,
+					      whc->wusbhc.pal.debugfs_dir, whc,
+					      &di_fops);
+	whc->dbg->asl_f = debugfs_create_file("asl", 0444,
+					      whc->wusbhc.pal.debugfs_dir, whc,
+					      &asl_fops);
+	whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
+					      whc->wusbhc.pal.debugfs_dir, whc,
+					      &pzl_fops);
+}
+
+void whc_dbg_clean_up(struct whc *whc)
+{
+	if (whc->dbg) {
+		debugfs_remove(whc->dbg->pzl_f);
+		debugfs_remove(whc->dbg->asl_f);
+		debugfs_remove(whc->dbg->di_f);
+		kfree(whc->dbg);
+	}
+}
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
new file mode 100644
index 0000000..8af9dcf
--- /dev/null
+++ b/drivers/usb/host/whci/hcd.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) driver.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * One time initialization.
+ *
+ * Nothing to do here.
+ */
+static int whc_reset(struct usb_hcd *usb_hcd)
+{
+	return 0;
+}
+
+/*
+ * Start the wireless host controller.
+ *
+ * Start device notification.
+ *
+ * Put hc into run state, set DNTS parameters.
+ */
+static int whc_start(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u8 bcid;
+	int ret;
+
+	mutex_lock(&wusbhc->mutex);
+
+	le_writel(WUSBINTR_GEN_CMD_DONE
+		  | WUSBINTR_HOST_ERR
+		  | WUSBINTR_ASYNC_SCHED_SYNCED
+		  | WUSBINTR_DNTS_INT
+		  | WUSBINTR_ERR_INT
+		  | WUSBINTR_INT,
+		  whc->base + WUSBINTR);
+
+	/* set cluster ID */
+	bcid = wusb_cluster_id_get();
+	ret = whc_set_cluster_id(whc, bcid);
+	if (ret < 0)
+		goto out;
+	wusbhc->cluster_id = bcid;
+
+	/* start HC */
+	whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
+
+	usb_hcd->uses_new_polling = 1;
+	set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
+	usb_hcd->state = HC_STATE_RUNNING;
+
+out:
+	mutex_unlock(&wusbhc->mutex);
+	return ret;
+}
+
+
+/*
+ * Stop the wireless host controller.
+ *
+ * Stop device notification.
+ *
+ * Wait for pending transfers to stop? Put hc into stop state?
+ */
+static void whc_stop(struct usb_hcd *usb_hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	mutex_lock(&wusbhc->mutex);
+
+	/* stop HC */
+	le_writel(0, whc->base + WUSBINTR);
+	whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
+		      100, "HC to halt");
+
+	wusb_cluster_id_put(wusbhc->cluster_id);
+
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static int whc_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	/* Frame numbers are not applicable to WUSB. */
+	return -ENOSYS;
+}
+
+
+/*
+ * Queue an URB to the ASL or PZL
+ */
+static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+			   gfp_t mem_flags)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_INTERRUPT:
+		ret = pzl_urb_enqueue(whc, urb, mem_flags);
+		break;
+	case PIPE_ISOCHRONOUS:
+		dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
+		ret = -ENOTSUPP;
+		break;
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		ret = asl_urb_enqueue(whc, urb, mem_flags);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Remove a queued URB from the ASL or PZL.
+ */
+static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_INTERRUPT:
+		ret = pzl_urb_dequeue(whc, urb, status);
+		break;
+	case PIPE_ISOCHRONOUS:
+		ret = -ENOTSUPP;
+		break;
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		ret = asl_urb_dequeue(whc, urb, status);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Wait for all URBs queued to the endpoint to complete, then delete the
+ * qset.
+ */
+static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
+				 struct usb_host_endpoint *ep)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	struct whc_qset *qset;
+
+	qset = ep->hcpriv;
+	if (qset) {
+		ep->hcpriv = NULL;
+		if (usb_endpoint_xfer_bulk(&ep->desc)
+		    || usb_endpoint_xfer_control(&ep->desc))
+			asl_qset_delete(whc, qset);
+		else
+			pzl_qset_delete(whc, qset);
+	}
+}
+
+static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
+			       struct usb_host_endpoint *ep)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	struct whc_qset *qset;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	qset = ep->hcpriv;
+	if (qset) {
+		qset->remove = 1;
+		qset->reset = 1;
+
+		if (usb_endpoint_xfer_bulk(&ep->desc)
+		    || usb_endpoint_xfer_control(&ep->desc))
+			queue_work(whc->workqueue, &whc->async_work);
+		else
+			queue_work(whc->workqueue, &whc->periodic_work);
+	}
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+
+static const struct hc_driver whc_hc_driver = {
+	.description = "whci-hcd",
+	.product_desc = "Wireless host controller",
+	.hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
+	.irq = whc_int_handler,
+	.flags = HCD_USB2,
+
+	.reset = whc_reset,
+	.start = whc_start,
+	.stop = whc_stop,
+	.get_frame_number = whc_get_frame_number,
+	.urb_enqueue = whc_urb_enqueue,
+	.urb_dequeue = whc_urb_dequeue,
+	.endpoint_disable = whc_endpoint_disable,
+	.endpoint_reset = whc_endpoint_reset,
+
+	.hub_status_data = wusbhc_rh_status_data,
+	.hub_control = wusbhc_rh_control,
+	.start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int whc_probe(struct umc_dev *umc)
+{
+	int ret;
+	struct usb_hcd *usb_hcd;
+	struct wusbhc *wusbhc;
+	struct whc *whc;
+	struct device *dev = &umc->dev;
+
+	usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
+	if (usb_hcd == NULL) {
+		dev_err(dev, "unable to create hcd\n");
+		return -ENOMEM;
+	}
+
+	usb_hcd->wireless = 1;
+	usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
+
+	wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	whc = wusbhc_to_whc(wusbhc);
+	whc->umc = umc;
+
+	ret = whc_init(whc);
+	if (ret)
+		goto error_whc_init;
+
+	wusbhc->dev = dev;
+	wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
+	if (!wusbhc->uwb_rc) {
+		ret = -ENODEV;
+		dev_err(dev, "cannot get radio controller\n");
+		goto error_uwb_rc;
+	}
+
+	if (whc->n_devices > USB_MAXCHILDREN) {
+		dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
+			 whc->n_devices);
+		wusbhc->ports_max = USB_MAXCHILDREN;
+	} else
+		wusbhc->ports_max = whc->n_devices;
+	wusbhc->mmcies_max      = whc->n_mmc_ies;
+	wusbhc->start           = whc_wusbhc_start;
+	wusbhc->stop            = whc_wusbhc_stop;
+	wusbhc->mmcie_add       = whc_mmcie_add;
+	wusbhc->mmcie_rm        = whc_mmcie_rm;
+	wusbhc->dev_info_set    = whc_dev_info_set;
+	wusbhc->bwa_set         = whc_bwa_set;
+	wusbhc->set_num_dnts    = whc_set_num_dnts;
+	wusbhc->set_ptk         = whc_set_ptk;
+	wusbhc->set_gtk         = whc_set_gtk;
+
+	ret = wusbhc_create(wusbhc);
+	if (ret)
+		goto error_wusbhc_create;
+
+	ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
+	if (ret) {
+		dev_err(dev, "cannot add HCD: %d\n", ret);
+		goto error_usb_add_hcd;
+	}
+	device_wakeup_enable(usb_hcd->self.controller);
+
+	ret = wusbhc_b_create(wusbhc);
+	if (ret) {
+		dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
+		goto error_wusbhc_b_create;
+	}
+
+	whc_dbg_init(whc);
+
+	return 0;
+
+error_wusbhc_b_create:
+	usb_remove_hcd(usb_hcd);
+error_usb_add_hcd:
+	wusbhc_destroy(wusbhc);
+error_wusbhc_create:
+	uwb_rc_put(wusbhc->uwb_rc);
+error_uwb_rc:
+	whc_clean_up(whc);
+error_whc_init:
+	usb_put_hcd(usb_hcd);
+	return ret;
+}
+
+
+static void whc_remove(struct umc_dev *umc)
+{
+	struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	if (usb_hcd) {
+		whc_dbg_clean_up(whc);
+		wusbhc_b_destroy(wusbhc);
+		usb_remove_hcd(usb_hcd);
+		wusbhc_destroy(wusbhc);
+		uwb_rc_put(wusbhc->uwb_rc);
+		whc_clean_up(whc);
+		usb_put_hcd(usb_hcd);
+	}
+}
+
+static struct umc_driver whci_hc_driver = {
+	.name =		"whci-hcd",
+	.cap_id =       UMC_CAP_ID_WHCI_WUSB_HC,
+	.probe =	whc_probe,
+	.remove =	whc_remove,
+};
+
+static int __init whci_hc_driver_init(void)
+{
+	return umc_driver_register(&whci_hc_driver);
+}
+module_init(whci_hc_driver_init);
+
+static void __exit whci_hc_driver_exit(void)
+{
+	umc_driver_unregister(&whci_hc_driver);
+}
+module_exit(whci_hc_driver_exit);
+
+/* PCI device IDs that we handle (so the module gets auto-loaded) */
+static struct pci_device_id __used whci_hcd_id_table[] = {
+	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+	{ /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
+
+MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
new file mode 100644
index 0000000..22b3b7f
--- /dev/null
+++ b/drivers/usb/host/whci/hw.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) hardware access helpers.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
+{
+	unsigned long flags;
+	u32 cmd;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	cmd = le_readl(whc->base + WUSBCMD);
+	cmd = (cmd & ~mask) | val;
+	le_writel(cmd, whc->base + WUSBCMD);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
+ * @whc:    the WHCI HC
+ * @cmd:    command to start.
+ * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
+ * @addr:   pointer to any data for the command (may be NULL).
+ * @len:    length of the data (if any).
+ */
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
+{
+	unsigned long flags;
+	dma_addr_t dma_addr;
+	long t;
+	int ret = 0;
+
+	mutex_lock(&whc->mutex);
+
+	/* Wait for previous command to complete. */
+	t = wait_event_timeout(whc->cmd_wq,
+			       (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
+			       msecs_to_jiffies(WHC_GENCMD_TIMEOUT_MS));
+	if (t == 0) {
+		dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
+			le_readl(whc->base + WUSBGENCMDSTS),
+			le_readl(whc->base + WUSBGENCMDPARAMS));
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	if (addr) {
+		memcpy(whc->gen_cmd_buf, addr, len);
+		dma_addr = whc->gen_cmd_buf_dma;
+	} else
+		dma_addr = 0;
+
+	/* Poke registers to start cmd. */
+	spin_lock_irqsave(&whc->lock, flags);
+
+	le_writel(params, whc->base + WUSBGENCMDPARAMS);
+	le_writeq(dma_addr, whc->base + WUSBGENADDR);
+
+	le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
+		  whc->base + WUSBGENCMDSTS);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+out:
+	mutex_unlock(&whc->mutex);
+
+	return ret;
+}
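+
+/*
+ * The WUSBGENCMDSTS_IOC bit requests an interrupt when the command
+ * completes; the interrupt handler wakes cmd_wq (see
+ * whc_int_handler()), which is what the wait at the top of
+ * whc_do_gencmd() relies on.  Illustrative call (a sketch -- the
+ * command name here is an assumption; see whci-hc.h for the actual
+ * encodings):
+ *
+ *	ret = whc_do_gencmd(whc, WUSBGENCMDSTS_SET_NUM_DNTS,
+ *			    params, NULL, 0);
+ */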
+
+/**
+ * whc_hw_error - recover from a hardware error
+ * @whc:    the WHCI HC that broke.
+ * @reason: a description of the failure.
+ *
+ * Recover from broken hardware with a full reset.
+ */
+void whc_hw_error(struct whc *whc, const char *reason)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+
+	dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
+	wusbhc_reset_all(wusbhc);
+}
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
new file mode 100644
index 0000000..8241697
--- /dev/null
+++ b/drivers/usb/host/whci/init.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) initialization.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * Reset the host controller.
+ */
+static void whc_hw_reset(struct whc *whc)
+{
+	le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
+		      100, "reset");
+}
+
+static void whc_hw_init_di_buf(struct whc *whc)
+{
+	int d;
+
+	/* Disable all entries in the Device Information buffer. */
+	for (d = 0; d < whc->n_devices; d++)
+		whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
+
+	le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
+}
+
+static void whc_hw_init_dn_buf(struct whc *whc)
+{
+	/* Clear the Device Notification buffer to ensure the V (valid)
+	 * bits are clear.  */
+	memset(whc->dn_buf, 0, 4096);
+
+	le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
+}
+
+int whc_init(struct whc *whc)
+{
+	u32 whcsparams;
+	int ret, i;
+	resource_size_t start, len;
+
+	spin_lock_init(&whc->lock);
+	mutex_init(&whc->mutex);
+	init_waitqueue_head(&whc->cmd_wq);
+	init_waitqueue_head(&whc->async_list_wq);
+	init_waitqueue_head(&whc->periodic_list_wq);
+	whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
+	if (whc->workqueue == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	INIT_WORK(&whc->dn_work, whc_dn_work);
+
+	INIT_WORK(&whc->async_work, scan_async_work);
+	INIT_LIST_HEAD(&whc->async_list);
+	INIT_LIST_HEAD(&whc->async_removed_list);
+
+	INIT_WORK(&whc->periodic_work, scan_periodic_work);
+	for (i = 0; i < 5; i++)
+		INIT_LIST_HEAD(&whc->periodic_list[i]);
+	INIT_LIST_HEAD(&whc->periodic_removed_list);
+
+	/* Map HC registers. */
+	start = whc->umc->resource.start;
+	len   = resource_size(&whc->umc->resource);
+	if (!request_mem_region(start, len, "whci-hc")) {
+		dev_err(&whc->umc->dev, "can't request HC region\n");
+		ret = -EBUSY;
+		goto error;
+	}
+	whc->base_phys = start;
+	whc->base = ioremap(start, len);
+	if (!whc->base) {
+		dev_err(&whc->umc->dev, "can't map HC registers\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	whc_hw_reset(whc);
+
+	/* Read maximum number of devices, keys and MMC IEs. */
+	whcsparams = le_readl(whc->base + WHCSPARAMS);
+	whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
+	whc->n_keys    = WHCSPARAMS_TO_N_KEYS(whcsparams);
+	whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
+
+	dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
+		whc->n_devices, whc->n_keys, whc->n_mmc_ies);
+
+	whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
+					 sizeof(struct whc_qset), 64, 0);
+	if (whc->qset_pool == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = asl_init(whc);
+	if (ret < 0)
+		goto error;
+	ret = pzl_init(whc);
+	if (ret < 0)
+		goto error;
+
+	/* Allocate and initialize a buffer for generic commands, the
+	   Device Information buffer, and the Device Notification
+	   buffer. */
+
+	whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+					      &whc->gen_cmd_buf_dma, GFP_KERNEL);
+	if (whc->gen_cmd_buf == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
+					 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+					 &whc->dn_buf_dma, GFP_KERNEL);
+	if (!whc->dn_buf) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	whc_hw_init_dn_buf(whc);
+
+	whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
+					 sizeof(struct di_buf_entry) * whc->n_devices,
+					 &whc->di_buf_dma, GFP_KERNEL);
+	if (!whc->di_buf) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	whc_hw_init_di_buf(whc);
+
+	return 0;
+
+error:
+	whc_clean_up(whc);
+	return ret;
+}
+
+void whc_clean_up(struct whc *whc)
+{
+	resource_size_t len;
+
+	if (whc->di_buf)
+		dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
+				  whc->di_buf, whc->di_buf_dma);
+	if (whc->dn_buf)
+		dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+				  whc->dn_buf, whc->dn_buf_dma);
+	if (whc->gen_cmd_buf)
+		dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+				  whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
+
+	pzl_clean_up(whc);
+	asl_clean_up(whc);
+
+	dma_pool_destroy(whc->qset_pool);
+
+	len   = resource_size(&whc->umc->resource);
+	if (whc->base)
+		iounmap(whc->base);
+	if (whc->base_phys)
+		release_mem_region(whc->base_phys, len);
+
+	if (whc->workqueue)
+		destroy_workqueue(whc->workqueue);
+}
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
new file mode 100644
index 0000000..7e4ad1b
--- /dev/null
+++ b/drivers/usb/host/whci/int.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) interrupt handling.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static void transfer_done(struct whc *whc)
+{
+	queue_work(whc->workqueue, &whc->async_work);
+	queue_work(whc->workqueue, &whc->periodic_work);
+}
+
+irqreturn_t whc_int_handler(struct usb_hcd *hcd)
+{
+	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 sts;
+
+	sts = le_readl(whc->base + WUSBSTS);
+	if (!(sts & WUSBSTS_INT_MASK))
+		return IRQ_NONE;
+	le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
+
+	if (sts & WUSBSTS_GEN_CMD_DONE)
+		wake_up(&whc->cmd_wq);
+
+	if (sts & WUSBSTS_HOST_ERR)
+		dev_err(&whc->umc->dev, "FIXME: host system error\n");
+
+	if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
+		wake_up(&whc->async_list_wq);
+
+	if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
+		wake_up(&whc->periodic_list_wq);
+
+	if (sts & WUSBSTS_DNTS_INT)
+		queue_work(whc->workqueue, &whc->dn_work);
+
+	/*
+	 * A transfer completed (see [WHCI] section 4.7.1.2 for when
+	 * this occurs).
+	 */
+	if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
+		transfer_done(whc);
+
+	return IRQ_HANDLED;
+}
+
+static int process_dn_buf(struct whc *whc)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	struct dn_buf_entry *dn;
+	int processed = 0;
+
+	for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
+		if (dn->status & WHC_DN_STATUS_VALID) {
+			wusbhc_handle_dn(wusbhc, dn->src_addr,
+					 (struct wusb_dn_hdr *)dn->dn_data,
+					 dn->msg_size);
+			dn->status &= ~WHC_DN_STATUS_VALID;
+			processed++;
+		}
+	}
+	return processed;
+}
+
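+/**
+ * whc_dn_work - process the device notification (DN) buffer
+ * @work: the WHC's dn_work
+ *
+ * Rescan the buffer until a pass finds no valid entries, as new
+ * notifications may arrive while earlier ones are being processed.
+ */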
+void whc_dn_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, dn_work);
+	int processed;
+
+	do {
+		processed = process_dn_buf(whc);
+	} while (processed);
+}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
new file mode 100644
index 0000000..ef52aeb
--- /dev/null
+++ b/drivers/usb/host/whci/pzl.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) periodic schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
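+/*
+ * The hardware PZL is an array of 16 link pointers.  A qset with
+ * period p is linked from 2^(3 - p) of the entries (8 for period 0,
+ * 4 for period 1, 2 for period 2) and periods 3 and 4 from one
+ * entry each, so lower-period qsets are scanned correspondingly
+ * more often.
+ */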
+static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
+{
+	switch (period) {
+	case 0:
+		whc_qset_set_link_ptr(&whc->pz_list[0], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[2], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[4], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[6], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[8], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[10], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[12], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[14], addr);
+		break;
+	case 1:
+		whc_qset_set_link_ptr(&whc->pz_list[1], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[5], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[9], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[13], addr);
+		break;
+	case 2:
+		whc_qset_set_link_ptr(&whc->pz_list[3], addr);
+		whc_qset_set_link_ptr(&whc->pz_list[11], addr);
+		break;
+	case 3:
+		whc_qset_set_link_ptr(&whc->pz_list[7], addr);
+		break;
+	case 4:
+		whc_qset_set_link_ptr(&whc->pz_list[15], addr);
+		break;
+	}
+}
+
+/*
+ * Return the 'period' to use for this qset.  The minimum interval for
+ * the endpoint is used so that, whatever URBs are submitted, the
+ * device is polled often enough.
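+ *
+ * For illustration: a bInterval of 6 or less maps to period 0 (the
+ * most frequently scanned list), 8 maps to period 2, and 10 or more
+ * maps to period 4, matching the five periodic lists and the PZL
+ * layout set up by update_pzl_pointers().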
+ */
+static int qset_get_period(struct whc *whc, struct whc_qset *qset)
+{
+	uint8_t bInterval = qset->ep->desc.bInterval;
+
+	if (bInterval < 6)
+		bInterval = 6;
+	if (bInterval > 10)
+		bInterval = 10;
+	return bInterval - 6;
+}
+
+static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
+{
+	int period;
+
+	period = qset_get_period(whc, qset);
+
+	qset_clear(whc, qset);
+	list_move(&qset->list_node, &whc->periodic_list[period]);
+	qset->in_sw_list = true;
+}
+
+static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+	list_move(&qset->list_node, &whc->periodic_removed_list);
+	qset->in_hw_list = false;
+	qset->in_sw_list = false;
+}
+
+/**
+ * pzl_process_qset - process any recently inactivated or halted qTDs
+ * in a qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required.
+ */
+static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
+{
+	enum whc_update update = 0;
+	uint32_t status = 0;
+
+	while (qset->ntds) {
+		struct whc_qtd *td;
+
+		td = &qset->qtd[qset->td_start];
+		status = le32_to_cpu(td->status);
+
+		/*
+		 * Nothing to do with a still active qTD.
+		 */
+		if (status & QTD_STS_ACTIVE)
+			break;
+
+		if (status & QTD_STS_HALTED) {
+			/* Ug, an error. */
+			process_halted_qtd(whc, qset, td);
+			/* A halted qTD always triggers an update
+			   because the qset was either removed or
+			   reactivated. */
+			update |= WHC_UPDATE_UPDATED;
+			goto done;
+		}
+
+		/* Mmm, a completed qTD. */
+		process_inactive_qtd(whc, qset, td);
+	}
+
+	if (!qset->remove)
+		update |= qset_add_qtds(whc, qset);
+
+done:
+	/*
+	 * If there are no qTDs in this qset, remove it from the PZL.
+	 */
+	if (qset->remove && qset->ntds == 0) {
+		pzl_qset_remove(whc, qset);
+		update |= WHC_UPDATE_REMOVED;
+	}
+
+	return update;
+}
+
+/**
+ * pzl_start - start the periodic schedule
+ * @whc: the WHCI host controller
+ *
+ * The PZL must be valid (e.g., all entries in the list should have
+ * the T bit set).
+ */
+void pzl_start(struct whc *whc)
+{
+	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
+		      1000, "start PZL");
+}
+
+/**
+ * pzl_stop - stop the periodic schedule
+ * @whc: the WHCI host controller
+ */
+void pzl_stop(struct whc *whc)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
+	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+		      WUSBSTS_PERIODIC_SCHED, 0,
+		      1000, "stop PZL");
+}
+
+/**
+ * pzl_update - request a PZL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the PZL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void pzl_update(struct whc *whc, uint32_t wusbcmd)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	long t;
+
+	mutex_lock(&wusbhc->mutex);
+	if (wusbhc->active) {
+		whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+		t = wait_event_timeout(
+			whc->periodic_list_wq,
+			(le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
+			msecs_to_jiffies(1000));
+		if (t == 0)
+			whc_hw_error(whc, "PZL update timeout");
+	}
+	mutex_unlock(&wusbhc->mutex);
+}
+
+static void update_pzl_hw_view(struct whc *whc)
+{
+	struct whc_qset *qset, *t;
+	int period;
+	u64 tmp_qh = 0;
+
+	for (period = 0; period < 5; period++) {
+		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+			whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
+			tmp_qh = qset->qset_dma;
+			qset->in_hw_list = true;
+		}
+		update_pzl_pointers(whc, period, tmp_qh);
+	}
+}
+
+/**
+ * scan_periodic_work - scan the PZL for qsets to process.
+ *
+ * Process each qset in the PZL in turn and then signal the WHC that
+ * the PZL has been updated.
+ *
+ * Then start, stop or update the periodic schedule as required.
+ */
+void scan_periodic_work(struct work_struct *work)
+{
+	struct whc *whc = container_of(work, struct whc, periodic_work);
+	struct whc_qset *qset, *t;
+	enum whc_update update = 0;
+	int period;
+
+	spin_lock_irq(&whc->lock);
+
+	for (period = 4; period >= 0; period--) {
+		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+			if (!qset->in_hw_list)
+				update |= WHC_UPDATE_ADDED;
+			update |= pzl_process_qset(whc, qset);
+		}
+	}
+
+	if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
+		update_pzl_hw_view(whc);
+
+	spin_unlock_irq(&whc->lock);
+
+	if (update) {
+		uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
+		if (update & WHC_UPDATE_REMOVED)
+			wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
+		pzl_update(whc, wusbcmd);
+	}
+
+	/*
+	 * Now that the PZL is updated, complete the removal of any
+	 * removed qsets.
+	 *
+	 * If the qset was to be reset, do so and reinsert it into the
+	 * PZL if it has pending transfers.
+	 */
+	spin_lock_irq(&whc->lock);
+
+	list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
+		qset_remove_complete(whc, qset);
+		if (qset->reset) {
+			qset_reset(whc, qset);
+			if (!list_empty(&qset->stds)) {
+				qset_insert_in_sw_list(whc, qset);
+				queue_work(whc->workqueue, &whc->periodic_work);
+			}
+		}
+	}
+
+	spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the PZL.
+ */
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	int err;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+	if (err < 0) {
+		spin_unlock_irqrestore(&whc->lock, flags);
+		return err;
+	}
+
+	qset = get_qset(whc, urb, GFP_ATOMIC);
+	if (qset == NULL)
+		err = -ENOMEM;
+	else
+		err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+	if (!err) {
+		if (!qset->in_sw_list && !qset->remove)
+			qset_insert_in_sw_list(whc, qset);
+	} else
+		usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	if (!err)
+		queue_work(whc->workqueue, &whc->periodic_work);
+
+	return err;
+}
+
+/**
+ * pzl_urb_dequeue - remove an URB (qset) from the periodic list
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the
+ * software queue; otherwise the qset must be removed so the qTDs
+ * can be safely removed.
+ */
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+	struct whc_urb *wurb = urb->hcpriv;
+	struct whc_qset *qset = wurb->qset;
+	struct whc_std *std, *t;
+	bool has_qtd = false;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&whc->lock, flags);
+
+	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+	if (ret < 0)
+		goto out;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb) {
+			if (std->qtd)
+				has_qtd = true;
+			qset_free_std(whc, std);
+		} else
+			std->qtd = NULL; /* so this std is re-added when the qset is */
+	}
+
+	if (has_qtd) {
+		pzl_qset_remove(whc, qset);
+		update_pzl_hw_view(whc);
+		wurb->status = status;
+		wurb->is_async = false;
+		queue_work(whc->workqueue, &wurb->dequeue_work);
+	} else
+		qset_remove_urb(whc, qset, urb, status);
+out:
+	spin_unlock_irqrestore(&whc->lock, flags);
+
+	return ret;
+}
+
+/**
+ * pzl_qset_delete - delete a qset from the PZL
+ */
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	qset->remove = 1;
+	queue_work(whc->workqueue, &whc->periodic_work);
+	qset_delete(whc, qset);
+}
+
+/**
+ * pzl_init - initialize the periodic zone list
+ * @whc: the WHCI host controller
+ */
+int pzl_init(struct whc *whc)
+{
+	int i;
+
+	whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
+					  &whc->pz_list_dma, GFP_KERNEL);
+	if (whc->pz_list == NULL)
+		return -ENOMEM;
+
+	/* Set T bit on all elements in PZL. */
+	for (i = 0; i < 16; i++)
+		whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+
+	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+	return 0;
+}
+
+/**
+ * pzl_clean_up - free PZL resources
+ * @whc: the WHCI host controller
+ *
+ * The PZL is stopped and empty.
+ */
+void pzl_clean_up(struct whc *whc)
+{
+	if (whc->pz_list)
+		dma_free_coherent(&whc->umc->dev,  sizeof(u64) * 16, whc->pz_list,
+				  whc->pz_list_dma);
+}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
new file mode 100644
index 0000000..925166a
--- /dev/null
+++ b/drivers/usb/host/whci/qset.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) qset management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+	dma_addr_t dma;
+
+	qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
+	if (qset == NULL)
+		return NULL;
+
+	qset->qset_dma = dma;
+	qset->whc = whc;
+
+	INIT_LIST_HEAD(&qset->list_node);
+	INIT_LIST_HEAD(&qset->stds);
+
+	return qset;
+}
+
+/**
+ * qset_fill_qh - fill the static endpoint state in a qset's QHead
+ * @qset: the qset whose QH needs initializing with static endpoint
+ *        state
+ * @urb:  an urb for a transfer to this endpoint
+ */
+static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
+{
+	struct usb_device *usb_dev = urb->dev;
+	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+	struct usb_wireless_ep_comp_descriptor *epcd;
+	bool is_out;
+	uint8_t phy_rate;
+
+	is_out = usb_pipeout(urb->pipe);
+
+	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+
+	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+	if (epcd) {
+		qset->max_seq = epcd->bMaxSequence;
+		qset->max_burst = epcd->bMaxBurst;
+	} else {
+		qset->max_seq = 2;
+		qset->max_burst = 1;
+	}
+
+	/*
+	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
+	 * the maximum supported by the device for other endpoints
+	 * (unless limited by the user).
+	 */
+	if (usb_pipecontrol(urb->pipe))
+		phy_rate = UWB_PHY_RATE_53;
+	else {
+		uint16_t phy_rates;
+
+		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
+		phy_rate = fls(phy_rates) - 1;
+		if (phy_rate > whc->wusbhc.phy_rate)
+			phy_rate = whc->wusbhc.phy_rate;
+	}
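+	/*
+	 * For example, a device reporting wPHYRates == 0x001f supports
+	 * rate indices 0 to 4; fls(0x1f) - 1 == 4 selects the highest,
+	 * which is then capped at the host's configured
+	 * wusbhc.phy_rate.
+	 */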
+
+	qset->qh.info1 = cpu_to_le32(
+		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
+		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
+		| usb_pipe_to_qh_type(urb->pipe)
+		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
+		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
+		);
+	qset->qh.info2 = cpu_to_le32(
+		QH_INFO2_BURST(qset->max_burst)
+		| QH_INFO2_DBP(0)
+		| QH_INFO2_MAX_COUNT(3)
+		| QH_INFO2_MAX_RETRY(3)
+		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
+		);
+	/* FIXME: where can we obtain these Tx parameters from?  Why
+	 * doesn't the chip know what Tx power to use? It knows the Rx
+	 * strength and can presumably guess the Tx power required
+	 * from that? */
+	qset->qh.info3 = cpu_to_le32(
+		QH_INFO3_TX_RATE(phy_rate)
+		| QH_INFO3_TX_PWR(0) /* 0 == max power */
+		);
+
+	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * qset_clear - clear fields in a qset so it may be reinserted into a
+ * schedule.
+ *
+ * The sequence number and current window are not cleared (see
+ * qset_reset()).
+ */
+void qset_clear(struct whc *whc, struct whc_qset *qset)
+{
+	qset->td_start = qset->td_end = qset->ntds = 0;
+
+	qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
+	qset->qh.err_count = 0;
+	qset->qh.scratch[0] = 0;
+	qset->qh.scratch[1] = 0;
+	qset->qh.scratch[2] = 0;
+
+	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
+
+	init_completion(&qset->remove_complete);
+}
+
+/**
+ * qset_reset - reset endpoint state in a qset.
+ *
+ * Clears the sequence number and current window.  This qset must not
+ * be in the ASL or PZL.
+ */
+void qset_reset(struct whc *whc, struct whc_qset *qset)
+{
+	qset->reset = 0;
+
+	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
+	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * get_qset - get the qset for an async endpoint
+ *
+ * A new qset is created if one does not already exist.
+ */
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
+				 gfp_t mem_flags)
+{
+	struct whc_qset *qset;
+
+	qset = urb->ep->hcpriv;
+	if (qset == NULL) {
+		qset = qset_alloc(whc, mem_flags);
+		if (qset == NULL)
+			return NULL;
+
+		qset->ep = urb->ep;
+		urb->ep->hcpriv = qset;
+		qset_fill_qh(whc, qset, urb);
+	}
+	return qset;
+}
+
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
+{
+	qset->remove = 0;
+	list_del_init(&qset->list_node);
+	complete(&qset->remove_complete);
+}
+
+/**
+ * qset_add_qtds - add qTDs for an URB to a qset
+ *
+ * Returns WHC_UPDATE_UPDATED if the list (ASL/PZL) must be updated
+ * because (for a WHCI 0.95 controller) an activated qTD was pointed
+ * to by iCur.
+ */
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
+{
+	struct whc_std *std;
+	enum whc_update update = 0;
+
+	list_for_each_entry(std, &qset->stds, list_node) {
+		struct whc_qtd *qtd;
+		uint32_t status;
+
+		if (qset->ntds >= WHCI_QSET_TD_MAX
+		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
+			break;
+
+		if (std->qtd)
+			continue; /* already has a qTD */
+
+		qtd = std->qtd = &qset->qtd[qset->td_end];
+
+		/* Fill in setup bytes for control transfers. */
+		if (usb_pipecontrol(std->urb->pipe))
+			memcpy(qtd->setup, std->urb->setup_packet, 8);
+
+		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
+
+		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
+			status |= QTD_STS_LAST_PKT;
+
+		/*
+		 * For an IN transfer the iAlt field should be set so
+		 * the h/w will automatically advance to the next
+		 * transfer. However, if there are 8 or more TDs
+		 * remaining in this transfer then iAlt cannot be set
+		 * as it could point to somewhere in this transfer.
+		 */
+		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
+			int ialt;
+			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
+			status |= QTD_STS_IALT(ialt);
+		} else if (usb_pipein(std->urb->pipe))
+			qset->pause_after_urb = std->urb;
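+		/*
+		 * Worked example: with td_end == 6 and ntds_remaining == 3
+		 * (WHCI_QSET_TD_MAX being 8), the transfer's qTDs occupy
+		 * slots 6, 7 and 0, and iAlt == (6 + 3) % 8 == 1, the slot
+		 * just past the transfer's last qTD.
+		 */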
+
+		if (std->num_pointers)
+			qtd->options = cpu_to_le32(QTD_OPT_IOC);
+		else
+			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
+		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
+
+		qtd->status = cpu_to_le32(status);
+
+		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
+			update = WHC_UPDATE_UPDATED;
+
+		if (++qset->td_end >= WHCI_QSET_TD_MAX)
+			qset->td_end = 0;
+		qset->ntds++;
+	}
+
+	return update;
+}
+
+/**
+ * qset_remove_qtd - remove the first qTD from a qset.
+ *
+ * The qTD might be still active (if it's part of a IN URB that
+ * resulted in a short read) so ensure it's deactivated.
+ */
+static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
+{
+	qset->qtd[qset->td_start].status = 0;
+
+	if (++qset->td_start >= WHCI_QSET_TD_MAX)
+		qset->td_start = 0;
+	qset->ntds--;
+}
+
+static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
+{
+	struct scatterlist *sg;
+	void *bounce;
+	size_t remaining, offset;
+
+	bounce = std->bounce_buf;
+	remaining = std->len;
+
+	sg = std->bounce_sg;
+	offset = std->bounce_offset;
+
+	while (remaining) {
+		size_t len;
+
+		len = min(sg->length - offset, remaining);
+		memcpy(sg_virt(sg) + offset, bounce, len);
+
+		bounce += len;
+		remaining -= len;
+
+		offset += len;
+		if (offset >= sg->length) {
+			sg = sg_next(sg);
+			offset = 0;
+		}
+	}
+
+}
+
+/**
+ * qset_free_std - remove an sTD and free it.
+ * @whc: the WHCI host controller
+ * @std: the sTD to remove and free.
+ */
+void qset_free_std(struct whc *whc, struct whc_std *std)
+{
+	list_del(&std->list_node);
+	if (std->bounce_buf) {
+		bool is_out = usb_pipeout(std->urb->pipe);
+		dma_addr_t dma_addr;
+
+		if (std->num_pointers)
+			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
+		else
+			dma_addr = std->dma_addr;
+
+		dma_unmap_single(whc->wusbhc.dev, dma_addr,
+				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (!is_out)
+			qset_copy_bounce_to_sg(whc, std);
+		kfree(std->bounce_buf);
+	}
+	if (std->pl_virt) {
+		if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
+			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+					 std->num_pointers * sizeof(struct whc_page_list_entry),
+					 DMA_TO_DEVICE);
+		kfree(std->pl_virt);
+		std->pl_virt = NULL;
+	}
+	kfree(std);
+}
+
+/**
+ * qset_remove_qtds - remove an URB's qTDs (and sTDs).
+ */
+static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
+			     struct urb *urb)
+{
+	struct whc_std *std, *t;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb != urb)
+			break;
+		if (std->qtd != NULL)
+			qset_remove_qtd(whc, qset);
+		qset_free_std(whc, std);
+	}
+}
+
+/**
+ * qset_free_stds - free any remaining sTDs for an URB.
+ */
+static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
+{
+	struct whc_std *std, *t;
+
+	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+		if (std->urb == urb)
+			qset_free_std(qset->whc, std);
+	}
+}
+
+static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
+{
+	dma_addr_t dma_addr = std->dma_addr;
+	dma_addr_t sp, ep;
+	size_t pl_len;
+	int p;
+
+	/* Short buffers don't need a page list. */
+	if (std->len <= WHCI_PAGE_SIZE) {
+		std->num_pointers = 0;
+		return 0;
+	}
+
+	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+	ep = dma_addr + std->len;
+	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+
+	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+	std->pl_virt = kmalloc(pl_len, mem_flags);
+	if (std->pl_virt == NULL)
+		return -ENOMEM;
+	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
+		kfree(std->pl_virt);
+		return -EFAULT;
+	}
+
+	for (p = 0; p < std->num_pointers; p++) {
+		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+	}
+
+	return 0;
+}
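+
+/*
+ * Worked example (assuming a 4 KiB WHCI_PAGE_SIZE): a 6 KiB buffer
+ * at DMA address 0x12340800 gives sp == 0x12340000 and
+ * ep == 0x12342000, so num_pointers == 2.  The first page list entry
+ * keeps the intra-page offset (0x12340800); the second is rounded up
+ * to the next page boundary (0x12341000).
+ */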
+
+/**
+ * urb_dequeue_work - execute the ASL/PZL update and give the urb back to the system.
+ */
+static void urb_dequeue_work(struct work_struct *work)
+{
+	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
+	struct whc_qset *qset = wurb->qset;
+	struct whc *whc = qset->whc;
+	unsigned long flags;
+
+	if (wurb->is_async)
+		asl_update(whc, WUSBCMD_ASYNC_UPDATED
+			   | WUSBCMD_ASYNC_SYNCED_DB
+			   | WUSBCMD_ASYNC_QSET_RM);
+	else
+		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
+			   | WUSBCMD_PERIODIC_SYNCED_DB
+			   | WUSBCMD_PERIODIC_QSET_RM);
+
+	spin_lock_irqsave(&whc->lock, flags);
+	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
+	spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
+				    struct urb *urb, gfp_t mem_flags)
+{
+	struct whc_std *std;
+
+	std = kzalloc(sizeof(struct whc_std), mem_flags);
+	if (std == NULL)
+		return NULL;
+
+	std->urb = urb;
+	std->qtd = NULL;
+
+	INIT_LIST_HEAD(&std->list_node);
+	list_add_tail(&std->list_node, &qset->stds);
+
+	return std;
+}
+
+static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+			   gfp_t mem_flags)
+{
+	size_t remaining;
+	struct scatterlist *sg;
+	int i;
+	int ntds = 0;
+	struct whc_std *std = NULL;
+	struct whc_page_list_entry *new_pl_virt;
+	dma_addr_t prev_end = 0;
+	size_t pl_len;
+	int p = 0;
+
+	remaining = urb->transfer_buffer_length;
+
+	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+		dma_addr_t dma_addr;
+		size_t dma_remaining;
+		dma_addr_t sp, ep;
+		int num_pointers;
+
+		if (remaining == 0) {
+			break;
+		}
+
+		dma_addr = sg_dma_address(sg);
+		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
+
+		while (dma_remaining) {
+			size_t dma_len;
+
+			/*
+			 * We can use the previous std (if it exists) provided that:
+			 * - the previous one ended on a page boundary.
+			 * - the current one begins on a page boundary.
+			 * - the previous one isn't full.
+			 *
+			 * If a new std is needed but the previous one
+			 * was not a whole number of packets then this
+			 * sg list cannot be mapped onto multiple
+			 * qTDs.  Return an error and let the caller
+			 * sort it out.
+			 */
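+			/*
+			 * For example, an sg element ending exactly on
+			 * a page boundary followed by one starting on a
+			 * page boundary extends the current sTD by
+			 * appending page pointers; an element starting
+			 * mid-page forces a new sTD, and if the current
+			 * sTD's length is not then a whole number of
+			 * packets the -EINVAL below sends the caller to
+			 * the linearize fallback.
+			 */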
+			if (!std
+			    || (prev_end & (WHCI_PAGE_SIZE-1))
+			    || (dma_addr & (WHCI_PAGE_SIZE-1))
+			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
+				if (std && std->len % qset->max_packet != 0)
+					return -EINVAL;
+				std = qset_new_std(whc, qset, urb, mem_flags);
+				if (std == NULL) {
+					return -ENOMEM;
+				}
+				ntds++;
+				p = 0;
+			}
+
+			dma_len = dma_remaining;
+
+			/*
+			 * If the remainder of this element doesn't
+			 * fit in a single qTD, limit the qTD to a
+			 * whole number of packets.  This allows the
+			 * remainder to go into the next qTD.
+			 */
+			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
+				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
+					* qset->max_packet - std->len;
+			}
+
+			std->len += dma_len;
+			std->ntds_remaining = -1; /* filled in later */
+
+			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+			ep = dma_addr + dma_len;
+			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+			std->num_pointers += num_pointers;
+
+			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+
+			new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+			if (new_pl_virt == NULL) {
+				kfree(std->pl_virt);
+				std->pl_virt = NULL;
+				return -ENOMEM;
+			}
+			std->pl_virt = new_pl_virt;
+
+			for (; p < std->num_pointers; p++) {
+				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+			}
+
+			prev_end = dma_addr = ep;
+			dma_remaining -= dma_len;
+			remaining -= dma_len;
+		}
+	}
+
+	/* Now the number of sTDs is known, go back and fill in
+	   std->ntds_remaining. */
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (std->ntds_remaining == -1) {
+			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
+						       pl_len, DMA_TO_DEVICE);
+			if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
+				return -EFAULT;
+			std->ntds_remaining = ntds--;
+		}
+	}
+	return 0;
+}
+
+/**
+ * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
+ *
+ * If the URB contains an sg list whose elements cannot be directly
+ * mapped to qTDs then the data must be transferred via bounce
+ * buffers.
+ */
+static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+				     struct urb *urb, gfp_t mem_flags)
+{
+	bool is_out = usb_pipeout(urb->pipe);
+	size_t max_std_len;
+	size_t remaining;
+	int ntds = 0;
+	struct whc_std *std = NULL;
+	void *bounce = NULL;
+	struct scatterlist *sg;
+	int i;
+
+	/* limit maximum bounce buffer to 16 * 3.5 KiB = 56 KiB */
+	max_std_len = qset->max_burst * qset->max_packet;
+
+	remaining = urb->transfer_buffer_length;
+
+	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+		size_t len;
+		size_t sg_remaining;
+		void *orig;
+
+		if (remaining == 0) {
+			break;
+		}
+
+		sg_remaining = min_t(size_t, remaining, sg->length);
+		orig = sg_virt(sg);
+
+		while (sg_remaining) {
+			if (!std || std->len == max_std_len) {
+				std = qset_new_std(whc, qset, urb, mem_flags);
+				if (std == NULL)
+					return -ENOMEM;
+				std->bounce_buf = kmalloc(max_std_len, mem_flags);
+				if (std->bounce_buf == NULL)
+					return -ENOMEM;
+				std->bounce_sg = sg;
+				std->bounce_offset = orig - sg_virt(sg);
+				bounce = std->bounce_buf;
+				ntds++;
+			}
+
+			len = min(sg_remaining, max_std_len - std->len);
+
+			if (is_out)
+				memcpy(bounce, orig, len);
+
+			std->len += len;
+			std->ntds_remaining = -1; /* filled in later */
+
+			bounce += len;
+			orig += len;
+			sg_remaining -= len;
+			remaining -= len;
+		}
+	}
+
+	/*
+	 * For each of the new sTDs, map the bounce buffers, create
+	 * page lists (if necessary), and fill in std->ntds_remaining.
+	 */
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (std->ntds_remaining != -1)
+			continue;
+
+		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
+					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
+			return -EFAULT;
+
+		if (qset_fill_page_list(whc, std, mem_flags) < 0)
+			return -ENOMEM;
+
+		std->ntds_remaining = ntds--;
+	}
+
+	return 0;
+}
+
+/**
+ * qset_add_urb - add an urb to the qset's queue.
+ *
+ * The URB is chopped into sTDs, one for each qTD that will be required.
+ * At least one qTD (and sTD) is required even if the transfer has no
+ * data (e.g., for some control transfers).
+ */
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+	gfp_t mem_flags)
+{
+	struct whc_urb *wurb;
+	int remaining = urb->transfer_buffer_length;
+	u64 transfer_dma = urb->transfer_dma;
+	int ntds_remaining;
+	int ret;
+
+	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
+	if (wurb == NULL)
+		goto err_no_mem;
+	urb->hcpriv = wurb;
+	wurb->qset = qset;
+	wurb->urb = urb;
+	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+
+	if (urb->num_sgs) {
+		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
+		if (ret == -EINVAL) {
+			qset_free_stds(qset, urb);
+			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
+		}
+		if (ret < 0)
+			goto err_no_mem;
+		return 0;
+	}
+
+	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+	if (ntds_remaining == 0)
+		ntds_remaining = 1;
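+	/*
+	 * E.g., a zero-length control transfer still gets one sTD, and a
+	 * transfer of 2.5 * QTD_MAX_XFER_SIZE bytes is split into three
+	 * sTDs, the first two of QTD_MAX_XFER_SIZE bytes each.
+	 */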
+
+	while (ntds_remaining) {
+		struct whc_std *std;
+		size_t std_len;
+
+		std_len = remaining;
+		if (std_len > QTD_MAX_XFER_SIZE)
+			std_len = QTD_MAX_XFER_SIZE;
+
+		std = qset_new_std(whc, qset, urb, mem_flags);
+		if (std == NULL)
+			goto err_no_mem;
+
+		std->dma_addr = transfer_dma;
+		std->len = std_len;
+		std->ntds_remaining = ntds_remaining;
+
+		if (qset_fill_page_list(whc, std, mem_flags) < 0)
+			goto err_no_mem;
+
+		ntds_remaining--;
+		remaining -= std_len;
+		transfer_dma += std_len;
+	}
+
+	return 0;
+
+err_no_mem:
+	qset_free_stds(qset, urb);
+	return -ENOMEM;
+}
+
+/**
+ * qset_remove_urb - remove an URB from the urb queue.
+ *
+ * The URB is returned to the USB subsystem.
+ */
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+			    struct urb *urb, int status)
+{
+	struct wusbhc *wusbhc = &whc->wusbhc;
+	struct whc_urb *wurb = urb->hcpriv;
+
+	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
+	/* Drop the lock as urb->complete() may enqueue another urb. */
+	spin_unlock(&whc->lock);
+	wusbhc_giveback_urb(wusbhc, urb, status);
+	spin_lock(&whc->lock);
+
+	kfree(wurb);
+}
+
+/**
+ * get_urb_status_from_qtd - get the completed urb status from qTD status
+ * @urb:    completed urb
+ * @status: qTD status
+ */
+static int get_urb_status_from_qtd(struct urb *urb, u32 status)
+{
+	if (status & QTD_STS_HALTED) {
+		if (status & QTD_STS_DBE)
+			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
+		else if (status & QTD_STS_BABBLE)
+			return -EOVERFLOW;
+		else if (status & QTD_STS_RCE)
+			return -ETIME;
+		return -EPIPE;
+	}
+	if (usb_pipein(urb->pipe)
+	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
+	    && urb->actual_length < urb->transfer_buffer_length)
+		return -EREMOTEIO;
+	return 0;
+}
+
+/**
+ * process_inactive_qtd - process an inactive (but not halted) qTD.
+ *
+ * Update the urb with the bytes transferred by the qTD.  If the urb
+ * is completely transferred, or (for an IN transfer only) the LPF
+ * (last packet flag) is set, then the transfer is complete and the
+ * urb is returned to the system.
+ */
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+				 struct whc_qtd *qtd)
+{
+	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+	struct urb *urb = std->urb;
+	uint32_t status;
+	bool complete;
+
+	status = le32_to_cpu(qtd->status);
+
+	urb->actual_length += std->len - QTD_STS_TO_LEN(status);
+
+	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
+		complete = true;
+	else
+		complete = whc_std_last(std);
+
+	qset_remove_qtd(whc, qset);
+	qset_free_std(whc, std);
+
+	/*
+	 * If all transfers for this URB are complete, return it to
+	 * the USB subsystem.
+	 */
+	if (complete) {
+		qset_remove_qtds(whc, qset, urb);
+		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
+
+		/*
+		 * If iAlt isn't valid then the hardware didn't
+		 * advance iCur. Adjust the start and end pointers to
+		 * match iCur.
+		 */
+		if (!(status & QTD_STS_IALT_VALID))
+			qset->td_start = qset->td_end
+				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
+		qset->pause_after_urb = NULL;
+	}
+}
+
+/**
+ * process_halted_qtd - process a qset with a halted qtd
+ *
+ * Remove all the qTDs for the failed URB and return the failed URB to
+ * the USB subsystem.  Then remove all other qTDs so the qset can be
+ * removed.
+ *
+ * FIXME: this is the point where rate adaptation can be done.  If a
+ * transfer failed because it exceeded the maximum number of retries
+ * then it could be reactivated with a slower rate without having to
+ * remove the qset.
+ */
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qtd *qtd)
+{
+	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+	struct urb *urb = std->urb;
+	int urb_status;
+
+	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
+
+	qset_remove_qtds(whc, qset, urb);
+	qset_remove_urb(whc, qset, urb, urb_status);
+
+	list_for_each_entry(std, &qset->stds, list_node) {
+		if (qset->ntds == 0)
+			break;
+		qset_remove_qtd(whc, qset);
+		std->qtd = NULL;
+	}
+
+	qset->remove = 1;
+}
+
+void qset_free(struct whc *whc, struct whc_qset *qset)
+{
+	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
+}
+
+/**
+ * qset_delete - wait for a qset to be unused, then free it.
+ */
+void qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+	wait_for_completion(&qset->remove_complete);
+	qset_free(whc, qset);
+}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
new file mode 100644
index 0000000..1394769
--- /dev/null
+++ b/drivers/usb/host/whci/whcd.h
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) private header.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#ifndef __WHCD_H
+#define __WHCD_H
+
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+#include <linux/workqueue.h>
+
+#include "whci-hc.h"
+
+/* Generic command timeout. */
+#define WHC_GENCMD_TIMEOUT_MS 100
+
+struct whc_dbg;
+
+struct whc {
+	struct wusbhc wusbhc;
+	struct umc_dev *umc;
+
+	resource_size_t base_phys;
+	void __iomem *base;
+	int irq;
+
+	u8 n_devices;
+	u8 n_keys;
+	u8 n_mmc_ies;
+
+	u64 *pz_list;
+	struct dn_buf_entry *dn_buf;
+	struct di_buf_entry *di_buf;
+	dma_addr_t pz_list_dma;
+	dma_addr_t dn_buf_dma;
+	dma_addr_t di_buf_dma;
+
+	spinlock_t   lock;
+	struct mutex mutex;
+
+	void *            gen_cmd_buf;
+	dma_addr_t        gen_cmd_buf_dma;
+	wait_queue_head_t cmd_wq;
+
+	struct workqueue_struct *workqueue;
+	struct work_struct       dn_work;
+
+	struct dma_pool *qset_pool;
+
+	struct list_head async_list;
+	struct list_head async_removed_list;
+	wait_queue_head_t async_list_wq;
+	struct work_struct async_work;
+
+	struct list_head periodic_list[5];
+	struct list_head periodic_removed_list;
+	wait_queue_head_t periodic_list_wq;
+	struct work_struct periodic_work;
+
+	struct whc_dbg *dbg;
+};
+
+#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
+
+/**
+ * struct whc_std - a software TD.
+ * @urb: the URB this sTD is for.
+ * @offset: start of the URB's data for this TD.
+ * @len: the length of data in the associated TD.
+ * @ntds_remaining: number of TDs (starting from this one) in this transfer.
+ *
+ * @bounce_buf: a bounce buffer if the std was from an urb with a sg
+ * list that could not be mapped to qTDs directly.
+ * @bounce_sg: the first scatterlist element bounce_buf is for.
+ * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
+ *
+ * Queued URBs may require more TDs than are available in a qset so we
+ * use a list of these "software TDs" (sTDs) to hold per-TD data.
+ */
+struct whc_std {
+	struct urb *urb;
+	size_t len;
+	int    ntds_remaining;
+	struct whc_qtd *qtd;
+
+	struct list_head list_node;
+	int num_pointers;
+	dma_addr_t dma_addr;
+	struct whc_page_list_entry *pl_virt;
+
+	void *bounce_buf;
+	struct scatterlist *bounce_sg;
+	unsigned bounce_offset;
+};
+
+/**
+ * struct whc_urb - per URB host controller structure.
+ * @urb: the URB this struct is for.
+ * @qset: the qset associated with the URB.
+ * @dequeue_work: the work to remove the URB when dequeued.
+ * @is_async: whether the URB belongs to the async scheduler.
+ * @status: the status to be returned when calling wusbhc_giveback_urb.
+ */
+struct whc_urb {
+	struct urb *urb;
+	struct whc_qset *qset;
+	struct work_struct dequeue_work;
+	bool is_async;
+	int status;
+};
+
+/**
+ * whc_std_last - is this sTD the URB's last?
+ * @std: the sTD to check.
+ */
+static inline bool whc_std_last(struct whc_std *std)
+{
+	return std->ntds_remaining <= 1;
+}
+
+enum whc_update {
+	WHC_UPDATE_ADDED   = 0x01,
+	WHC_UPDATE_REMOVED = 0x02,
+	WHC_UPDATE_UPDATED = 0x04,
+};
+
+/* init.c */
+int whc_init(struct whc *whc);
+void whc_clean_up(struct whc *whc);
+
+/* hw.c */
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+void whc_hw_error(struct whc *whc, const char *reason);
+
+/* wusb.c */
+int whc_wusbhc_start(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+		  u8 handle, struct wuie_hdr *wuie);
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+		const void *ptk, size_t key_size);
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+		const void *gtk, size_t key_size);
+int whc_set_cluster_id(struct whc *whc, u8 bcid);
+
+/* int.c */
+irqreturn_t whc_int_handler(struct usb_hcd *hcd);
+void whc_dn_work(struct work_struct *work);
+
+/* asl.c */
+void asl_start(struct whc *whc);
+void asl_stop(struct whc *whc);
+int  asl_init(struct whc *whc);
+void asl_clean_up(struct whc *whc);
+int  asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int  asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_async_work(struct work_struct *work);
+
+/* pzl.c */
+int  pzl_init(struct whc *whc);
+void pzl_clean_up(struct whc *whc);
+void pzl_start(struct whc *whc);
+void pzl_stop(struct whc *whc);
+int  pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int  pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_periodic_work(struct work_struct *work);
+
+/* qset.c */
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
+void qset_free(struct whc *whc, struct whc_qset *qset);
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+void qset_delete(struct whc *whc, struct whc_qset *qset);
+void qset_clear(struct whc *whc, struct whc_qset *qset);
+void qset_reset(struct whc *whc, struct whc_qset *qset);
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+		 gfp_t mem_flags);
+void qset_free_std(struct whc *whc, struct whc_std *std);
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+			    struct urb *urb, int status);
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+			       struct whc_qtd *qtd);
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+				 struct whc_qtd *qtd);
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
+void pzl_update(struct whc *whc, uint32_t wusbcmd);
+void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+/* debug.c */
+void whc_dbg_init(struct whc *whc);
+void whc_dbg_clean_up(struct whc *whc);
+
+#endif /* #ifndef __WHCD_H */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
new file mode 100644
index 0000000..5a86a57
--- /dev/null
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) data structures.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#ifndef _WHCI_WHCI_HC_H
+#define _WHCI_WHCI_HC_H
+
+#include <linux/list.h>
+
+/**
+ * WHCI_PAGE_SIZE - page size used by WHCI
+ *
+ * WHCI assumes that the host system uses pages of 4096 octets.
+ */
+#define WHCI_PAGE_SIZE 4096
+
+
+/**
+ * QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
+ * qtd.
+ *
+ * This is 2^20 - 1.
+ */
+#define QTD_MAX_XFER_SIZE 1048575
+
+
+/**
+ * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
+ *
+ * This describes the data for a bulk, control or interrupt transfer.
+ *
+ * [WHCI] section 3.2.4
+ */
+struct whc_qtd {
+	__le32 status; /*< remaining transfer len and transfer status */
+	__le32 options;
+	__le64 page_list_ptr; /*< physical pointer to data buffer page list*/
+	__u8   setup[8];      /*< setup data for control transfers */
+} __attribute__((packed));
+
+#define QTD_STS_ACTIVE     (1 << 31)  /* enable execution of transaction */
+#define QTD_STS_HALTED     (1 << 30)  /* transfer halted */
+#define QTD_STS_DBE        (1 << 29)  /* data buffer error */
+#define QTD_STS_BABBLE     (1 << 28)  /* babble detected */
+#define QTD_STS_RCE        (1 << 27)  /* retry count exceeded */
+#define QTD_STS_LAST_PKT   (1 << 26)  /* set Last Packet Flag in WUSB header */
+#define QTD_STS_INACTIVE   (1 << 25)  /* queue set is marked inactive */
+#define QTD_STS_IALT_VALID (1 << 23)                          /* iAlt field is valid */
+#define QTD_STS_IALT(i)    (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
+#define QTD_STS_LEN(l)     ((l) << 0) /* transfer length */
+#define QTD_STS_TO_LEN(s)  ((s) & 0x000fffff)
+
+#define QTD_OPT_IOC      (1 << 1) /* interrupt on complete */
+#define QTD_OPT_SMALL    (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
+ *
+ * This describes the data and other parameters for an isochronous
+ * transfer.
+ *
+ * [WHCI] section 3.2.5
+ */
+struct whc_itd {
+	__le16 presentation_time;    /*< presentation time for OUT transfers */
+	__u8   num_segments;         /*< number of data segments in segment list */
+	__u8   status;               /*< command execution status */
+	__le32 options;              /*< misc transfer options */
+	__le64 page_list_ptr;        /*< physical pointer to data buffer page list */
+	__le64 seg_list_ptr;         /*< physical pointer to segment list */
+} __attribute__((packed));
+
+#define ITD_STS_ACTIVE   (1 << 7) /* enable execution of transaction */
+#define ITD_STS_DBE      (1 << 5) /* data buffer error */
+#define ITD_STS_BABBLE   (1 << 4) /* babble detected */
+#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
+
+#define ITD_OPT_IOC      (1 << 1) /* interrupt on complete */
+#define ITD_OPT_SMALL    (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * Page list entry.
+ *
+ * A TD's page list must contain sufficient page list entries for the
+ * total data length in the TD.
+ *
+ * [WHCI] section 3.2.4.3
+ */
+struct whc_page_list_entry {
+	__le64 buf_ptr; /*< physical pointer to buffer */
+} __attribute__((packed));
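+
+/*
+ * Illustrative sketch, not part of the driver: whc_pl_entries() is a
+ * hypothetical helper showing how many page list entries a buffer of
+ * @len octets at DMA address @dma needs -- one entry per 4 KiB page
+ * the buffer touches (qset.c does the real sizing).
+ */
+static inline unsigned whc_pl_entries(dma_addr_t dma, size_t len)
+{
+	return DIV_ROUND_UP((dma & (WHCI_PAGE_SIZE - 1)) + len,
+			    WHCI_PAGE_SIZE);
+}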
+
+/**
+ * struct whc_seg_list_entry - Segment list entry.
+ *
+ * Describes a portion of the data buffer described in the containing
+ * qTD's page list.
+ *
+ * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
+ *           + qtd->seg_list_ptr[seg].offset;
+ *
+ * Segments can't cross page boundaries.
+ *
+ * [WHCI] section 3.2.5.5
+ */
+struct whc_seg_list_entry {
+	__le16 len;    /*< segment length */
+	__u8   idx;    /*< index into page list */
+	__u8   status; /*< segment status */
+	__le16 offset; /*< 12 bit offset into page */
+} __attribute__((packed));
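+
+/*
+ * Illustrative sketch, not part of the driver: whc_seg_dma_addr() is
+ * a hypothetical helper applying the formula above, given the
+ * CPU-visible page list @pl and one segment list entry @seg.
+ */
+static inline u64 whc_seg_dma_addr(const struct whc_page_list_entry *pl,
+				   const struct whc_seg_list_entry *seg)
+{
+	return le64_to_cpu(pl[seg->idx].buf_ptr)
+		+ (le16_to_cpu(seg->offset) & 0xfff); /* 12-bit offset */
+}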
+
+/**
+ * struct whc_qhead - endpoint and status information for a qset.
+ *
+ * [WHCI] section 3.2.6
+ */
+struct whc_qhead {
+	__le64 link; /*< next qset in list */
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le16 status;
+	__le16 err_count;  /*< transaction error count */
+	__le32 cur_window;
+	__le32 scratch[3]; /*< h/w scratch area */
+	union {
+		struct whc_qtd qtd;
+		struct whc_itd itd;
+	} overlay;
+} __attribute__((packed));
+
+#define QH_LINK_PTR_MASK (~0x03Full)
+#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
+#define QH_LINK_IQS      (1 << 4) /* isochronous queue set */
+#define QH_LINK_NTDS(n)  (((n) - 1) << 1) /* number of TDs in queue set */
+#define QH_LINK_T        (1 << 0) /* last queue set in periodic schedule list */
+
+#define QH_INFO1_EP(e)           ((e) << 0)  /* endpoint number */
+#define QH_INFO1_DIR_IN          (1 << 4)    /* IN transfer */
+#define QH_INFO1_DIR_OUT         (0 << 4)    /* OUT transfer */
+#define QH_INFO1_TR_TYPE_CTRL    (0x0 << 5)  /* control transfer */
+#define QH_INFO1_TR_TYPE_ISOC    (0x1 << 5)  /* isochronous transfer */
+#define QH_INFO1_TR_TYPE_BULK    (0x2 << 5)  /* bulk transfer */
+#define QH_INFO1_TR_TYPE_INT     (0x3 << 5)  /* interrupt */
+#define QH_INFO1_TR_TYPE_LP_INT  (0x7 << 5)  /* low power interrupt */
+#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8)  /* index into device info buffer */
+#define QH_INFO1_SET_INACTIVE    (1 << 15)   /* set inactive after transfer */
+#define QH_INFO1_MAX_PKT_LEN(l)  ((l) << 16) /* maximum packet length */
+
+#define QH_INFO2_BURST(b)        ((b) << 0)  /* maximum burst length */
+#define QH_INFO2_DBP(p)          ((p) << 5)  /* data burst policy (see [WUSB] table 5-7) */
+#define QH_INFO2_MAX_COUNT(c)    ((c) << 8)  /* max isoc/int pkts per zone */
+#define QH_INFO2_RQS             (1 << 15)   /* reactivate queue set */
+#define QH_INFO2_MAX_RETRY(r)    ((r) << 16) /* maximum transaction retries */
+#define QH_INFO2_MAX_SEQ(s)      ((s) << 20) /* maximum sequence number */
+#define QH_INFO3_MAX_DELAY(d)    ((d) << 0)  /* maximum stream delay in 125 us units (isoc only) */
+#define QH_INFO3_INTERVAL(i)     ((i) << 16) /* segment interval in 125 us units (isoc only) */
+
+#define QH_INFO3_TX_RATE(r)      ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
+#define QH_INFO3_TX_PWR(p)       ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
+
+#define QH_STATUS_FLOW_CTRL      (1 << 15)
+#define QH_STATUS_ICUR(i)        ((i) << 5)
+#define QH_STATUS_TO_ICUR(s)     (((s) >> 5) & 0x7)
+#define QH_STATUS_SEQ_MASK       0x1f
+
+/**
+ * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
+ *
+ * Returns the QH type field for a USB core pipe type.
+ */
+static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
+{
+	static const unsigned type[] = {
+		[PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
+		[PIPE_INTERRUPT]   = QH_INFO1_TR_TYPE_INT,
+		[PIPE_CONTROL]     = QH_INFO1_TR_TYPE_CTRL,
+		[PIPE_BULK]        = QH_INFO1_TR_TYPE_BULK,
+	};
+	return type[usb_pipetype(pipe)];
+}
+
+/**
+ * Maximum number of TDs in a qset.
+ */
+#define WHCI_QSET_TD_MAX 8
+
+/**
+ * struct whc_qset - WUSB data transfers to a specific endpoint
+ * @qh: the QHead of this qset
+ * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
+ * transfers)
+ * @itd: up to 8 iTDs (for qsets for isochronous transfers)
+ * @qset_dma: DMA address for this qset
+ * @whc: WHCI HC this qset is for
+ * @ep: endpoint
+ * @stds: list of sTDs queued to this qset
+ * @ntds: number of qTDs queued (not necessarily the same as nTDs
+ * field in the QH)
+ * @td_start: index of the first qTD in the list
+ * @td_end: index of next free qTD in the list (provided
+ *          ntds < WHCI_QSET_TD_MAX)
+ *
+ * Queue Sets (qsets) are added to the asynchronous schedule list
+ * (ASL) or the periodic zone list (PZL).
+ *
+ * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
+ * Each TD may refer to at most 1 MiB of data. If a single transfer
+ * has > 8 MiB of data, TDs can be reused as they are completed since
+ * the TD list is used as a circular buffer.  Similarly, several
+ * (smaller) transfers may be queued in a qset.
+ *
+ * WHCI controllers may cache portions of the qsets in the ASL and
+ * PZL, requiring the WHCD to inform the WHC that the lists have been
+ * updated (fields changed or qsets inserted or removed).  For safe
+ * insertion and removal of qsets from the lists the schedule must be
+ * stopped to avoid races in updating the QH link pointers.
+ *
+ * Since the HC is free to execute qsets in any order, all transfers
+ * to an endpoint should use the same qset to ensure transfers are
+ * executed in the order they're submitted.
+ *
+ * [WHCI] section 3.2.3
+ */
+struct whc_qset {
+	struct whc_qhead qh;
+	union {
+		struct whc_qtd qtd[WHCI_QSET_TD_MAX];
+		struct whc_itd itd[WHCI_QSET_TD_MAX];
+	};
+
+	/* private data for WHCD */
+	dma_addr_t qset_dma;
+	struct whc *whc;
+	struct usb_host_endpoint *ep;
+	struct list_head stds;
+	int ntds;
+	int td_start;
+	int td_end;
+	struct list_head list_node;
+	unsigned in_sw_list:1;
+	unsigned in_hw_list:1;
+	unsigned remove:1;
+	unsigned reset:1;
+	struct urb *pause_after_urb;
+	struct completion remove_complete;
+	uint16_t max_packet;
+	uint8_t max_burst;
+	uint8_t max_seq;
+};
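+
+/*
+ * Illustrative sketch, not part of the driver: whc_qset_next_td() is
+ * a hypothetical helper showing the circular advance of td_start and
+ * td_end described above; the real bookkeeping lives in qset.c.
+ */
+static inline int whc_qset_next_td(int td)
+{
+	return (td + 1) % WHCI_QSET_TD_MAX;
+}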
+
+static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
+{
+	if (target)
+		*ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
+	else
+		*ptr = QH_LINK_T;
+}
+
+/**
+ * struct di_buf_entry - Device Information (DI) buffer entry.
+ *
+ * There's one of these per connected device.
+ */
+struct di_buf_entry {
+	__le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
+	__le32 addr_sec_info;        /*< addressing and security info */
+	__le32 reserved[7];
+} __attribute__((packed));
+
+#define WHC_DI_SECURE           (1 << 31)
+#define WHC_DI_DISABLE          (1 << 30)
+#define WHC_DI_KEY_IDX(k)       ((k) << 8)
+#define WHC_DI_KEY_IDX_MASK     0x0000ff00
+#define WHC_DI_DEV_ADDR(a)      ((a) << 0)
+#define WHC_DI_DEV_ADDR_MASK    0x000000ff
+
+/**
+ * struct dn_buf_entry - Device Notification (DN) buffer entry.
+ *
+ * [WHCI] section 3.2.8
+ */
+struct dn_buf_entry {
+	__u8   msg_size;    /*< number of octets of valid DN data */
+	__u8   reserved1;
+	__u8   src_addr;    /*< source address */
+	__u8   status;      /*< buffer entry status */
+	__le32 tkid;        /*< TKID for source device, valid if secure bit is set */
+	__u8   dn_data[56]; /*< up to 56 octets of DN data */
+} __attribute__((packed));
+
+#define WHC_DN_STATUS_VALID  (1 << 7) /* buffer entry is valid */
+#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
+
+#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
+
+/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
+   data. [WHCI] section 2.4.7. */
+#define WHC_GEN_CMD_DATA_LEN 256
+
+/*
+ * HC registers.
+ *
+ * [WHCI] section 2.4
+ */
+
+#define WHCIVERSION          0x00
+
+#define WHCSPARAMS           0x04
+#  define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
+#  define WHCSPARAMS_TO_N_KEYS(p)    (((p) >> 8) & 0xff)
+#  define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
+
+#define WUSBCMD              0x08
+#  define WUSBCMD_BCID(b)            ((b) << 16)
+#  define WUSBCMD_BCID_MASK          (0xff << 16)
+#  define WUSBCMD_ASYNC_QSET_RM      (1 << 12)
+#  define WUSBCMD_PERIODIC_QSET_RM   (1 << 11)
+#  define WUSBCMD_WUSBSI(s)          ((s) << 8)
+#  define WUSBCMD_WUSBSI_MASK        (0x7 << 8)
+#  define WUSBCMD_ASYNC_SYNCED_DB    (1 << 7)
+#  define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
+#  define WUSBCMD_ASYNC_UPDATED      (1 << 5)
+#  define WUSBCMD_PERIODIC_UPDATED   (1 << 4)
+#  define WUSBCMD_ASYNC_EN           (1 << 3)
+#  define WUSBCMD_PERIODIC_EN        (1 << 2)
+#  define WUSBCMD_WHCRESET           (1 << 1)
+#  define WUSBCMD_RUN                (1 << 0)
+
+#define WUSBSTS              0x0c
+#  define WUSBSTS_ASYNC_SCHED             (1 << 15)
+#  define WUSBSTS_PERIODIC_SCHED          (1 << 14)
+#  define WUSBSTS_DNTS_SCHED              (1 << 13)
+#  define WUSBSTS_HCHALTED                (1 << 12)
+#  define WUSBSTS_GEN_CMD_DONE            (1 << 9)
+#  define WUSBSTS_CHAN_TIME_ROLLOVER      (1 << 8)
+#  define WUSBSTS_DNTS_OVERFLOW           (1 << 7)
+#  define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
+#  define WUSBSTS_HOST_ERR                (1 << 5)
+#  define WUSBSTS_ASYNC_SCHED_SYNCED      (1 << 4)
+#  define WUSBSTS_PERIODIC_SCHED_SYNCED   (1 << 3)
+#  define WUSBSTS_DNTS_INT                (1 << 2)
+#  define WUSBSTS_ERR_INT                 (1 << 1)
+#  define WUSBSTS_INT                     (1 << 0)
+#  define WUSBSTS_INT_MASK                0x3ff
+
+#define WUSBINTR             0x10
+#  define WUSBINTR_GEN_CMD_DONE             (1 << 9)
+#  define WUSBINTR_CHAN_TIME_ROLLOVER       (1 << 8)
+#  define WUSBINTR_DNTS_OVERFLOW            (1 << 7)
+#  define WUSBINTR_BPST_ADJUSTMENT_CHANGED  (1 << 6)
+#  define WUSBINTR_HOST_ERR                 (1 << 5)
+#  define WUSBINTR_ASYNC_SCHED_SYNCED       (1 << 4)
+#  define WUSBINTR_PERIODIC_SCHED_SYNCED    (1 << 3)
+#  define WUSBINTR_DNTS_INT                 (1 << 2)
+#  define WUSBINTR_ERR_INT                  (1 << 1)
+#  define WUSBINTR_INT                      (1 << 0)
+#  define WUSBINTR_ALL 0x3ff
+
+#define WUSBGENCMDSTS        0x14
+#  define WUSBGENCMDSTS_ACTIVE (1 << 31)
+#  define WUSBGENCMDSTS_ERROR  (1 << 24)
+#  define WUSBGENCMDSTS_IOC    (1 << 23)
+#  define WUSBGENCMDSTS_MMCIE_ADD 0x01
+#  define WUSBGENCMDSTS_MMCIE_RM  0x02
+#  define WUSBGENCMDSTS_SET_MAS   0x03
+#  define WUSBGENCMDSTS_CHAN_STOP 0x04
+#  define WUSBGENCMDSTS_RWP_EN    0x05
+
+#define WUSBGENCMDPARAMS     0x18
+#define WUSBGENADDR          0x20
+#define WUSBASYNCLISTADDR    0x28
+#define WUSBDNTSBUFADDR      0x30
+#define WUSBDEVICEINFOADDR   0x38
+
+#define WUSBSETSECKEYCMD     0x40
+#  define WUSBSETSECKEYCMD_SET    (1 << 31)
+#  define WUSBSETSECKEYCMD_ERASE  (1 << 30)
+#  define WUSBSETSECKEYCMD_GTK    (1 << 8)
+#  define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
+
+#define WUSBTKID             0x44
+#define WUSBSECKEY           0x48
+#define WUSBPERIODICLISTBASE 0x58
+#define WUSBMASINDEX         0x60
+
+#define WUSBDNTSCTRL         0x64
+#  define WUSBDNTSCTRL_ACTIVE      (1 << 31)
+#  define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
+#  define WUSBDNTSCTRL_SLOTS(s)    ((s) << 0)
+
+#define WUSBTIME             0x68
+#  define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
+
+#define WUSBBPST             0x6c
+#define WUSBDIBUPDATED       0x70
+
+#endif /* #ifndef _WHCI_WHCI_HC_H */
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
new file mode 100644
index 0000000..8a4d805
--- /dev/null
+++ b/drivers/usb/host/whci/wusb.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) WUSB operations.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+static int whc_update_di(struct whc *whc, int idx)
+{
+	int offset = idx / 32;
+	u32 bit = 1 << (idx % 32);
+
+	le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
+
+	return whci_wait_for(&whc->umc->dev,
+			     whc->base + WUSBDIBUPDATED + offset, bit, 0,
+			     100, "DI update");
+}
+
+/*
+ * The WHC starts transmitting MMCs whenever there is a valid GTK, so
+ * these functions need only start/stop the asynchronous and periodic
+ * schedules; stopping additionally sends a channel stop command.
+ */
+
+int whc_wusbhc_start(struct wusbhc *wusbhc)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	asl_start(whc);
+	pzl_start(whc);
+
+	return 0;
+}
+
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 stop_time, now_time;
+	int ret;
+
+	pzl_stop(whc);
+	asl_stop(whc);
+
+	now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
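+	/*
+	 * delay is in milliseconds and the channel time counter
+	 * appears to tick once per microsecond, so (delay * 8) << 7
+	 * == delay * 1024 is roughly delay ms of channel time,
+	 * wrapped to the 24-bit counter width.
+	 */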
+	stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
+	ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
+	if (ret == 0)
+		msleep(delay);
+}
+
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+		  u8 handle, struct wuie_hdr *wuie)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 params;
+
+	params = (interval << 24)
+		| (repeat_cnt << 16)
+		| (wuie->bLength << 8)
+		| handle;
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
+}
+
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 params;
+
+	params = handle;
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
+}
+
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+
+	if (stream_index >= 0)
+		whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
+
+	return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
+}
+
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int idx = wusb_dev->port_idx;
+	struct di_buf_entry *di = &whc->di_buf[idx];
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
+	di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
+	di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
+
+	ret = whc_update_di(whc, idx);
+
+	mutex_unlock(&whc->mutex);
+
+	return ret;
+}
+
+/*
+ * Set the number of Device Notification Time Slots (DNTS) and enable
+ * device notifications.
+ */
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	u32 dntsctrl;
+
+	dntsctrl = WUSBDNTSCTRL_ACTIVE
+		| WUSBDNTSCTRL_INTERVAL(interval)
+		| WUSBDNTSCTRL_SLOTS(slots);
+
+	le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
+
+	return 0;
+}
+
+static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
+		       const void *key, size_t key_size, bool is_gtk)
+{
+	uint32_t setkeycmd;
+	uint32_t seckey[4];
+	int i;
+	int ret;
+
+	/* The controller's key registers hold at most 16 octets. */
+	if (WARN_ON(key_size > sizeof(seckey)))
+		return -EINVAL;
+
+	memcpy(seckey, key, key_size);
+	setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
+	if (is_gtk)
+		setkeycmd |= WUSBSETSECKEYCMD_GTK;
+
+	le_writel(tkid, whc->base + WUSBTKID);
+	for (i = 0; i < 4; i++)
+		le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
+	le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
+
+	ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
+			    WUSBSETSECKEYCMD_SET, 0, 100, "set key");
+
+	return ret;
+}
+
+/**
+ * whc_set_ptk - set the PTK to use for a device.
+ *
+ * The index into the key table for this PTK is the same as the
+ * device's port index.
+ */
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+		const void *ptk, size_t key_size)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	struct di_buf_entry *di = &whc->di_buf[port_idx];
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	if (ptk) {
+		ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
+		if (ret)
+			goto out;
+
+		di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
+		di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
+	} else {
+		di->addr_sec_info &= ~WHC_DI_SECURE;
+	}
+
+	ret = whc_update_di(whc, port_idx);
+out:
+	mutex_unlock(&whc->mutex);
+	return ret;
+}
+
+/**
+ * whc_set_gtk - set the GTK for subsequent broadcast packets
+ *
+ * The GTK is stored in the last entry in the key table (the previous
+ * N_DEVICES entries are for the per-device PTKs).
+ */
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+		const void *gtk, size_t key_size)
+{
+	struct whc *whc = wusbhc_to_whc(wusbhc);
+	int ret;
+
+	mutex_lock(&whc->mutex);
+
+	ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
+
+	mutex_unlock(&whc->mutex);
+
+	return ret;
+}
+
+int whc_set_cluster_id(struct whc *whc, u8 bcid)
+{
+	whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
+	return 0;
+}
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 0000000..386abf2
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#include "xhci.h"
+
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	int state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+	return xhci_slot_state_string(state);
+}
+
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+			const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	xhci_dbg(xhci, "%pV\n", &vaf);
+	trace(&vaf);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(xhci_dbg_trace);
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
new file mode 100644
index 0000000..86cff5c
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-dbgcap.c - xHCI debug capability support
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/nls.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-dbgcap.h"
+
+static inline void *
+dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
+		       dma_addr_t *dma_handle, gfp_t flags)
+{
+	void		*vaddr;
+
+	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+				   size, dma_handle, flags);
+	if (vaddr)
+		memset(vaddr, 0, size);
+	return vaddr;
+}
+
+static inline void
+dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
+		      void *cpu_addr, dma_addr_t dma_handle)
+{
+	if (cpu_addr)
+		dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
+				  size, cpu_addr, dma_handle);
+}
+
+static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
+{
+	struct usb_string_descriptor	*s_desc;
+	u32				string_length;
+
+	/* Serial string: */
+	s_desc = (struct usb_string_descriptor *)strings->serial;
+	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
+			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+			DBC_MAX_STRING_LENGTH);
+
+	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+	string_length		= s_desc->bLength;
+	string_length		<<= 8;
+
+	/* Product string: */
+	s_desc = (struct usb_string_descriptor *)strings->product;
+	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
+			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+			DBC_MAX_STRING_LENGTH);
+
+	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+	string_length		+= s_desc->bLength;
+	string_length		<<= 8;
+
+	/* Manufacturer string: */
+	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
+	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
+			strlen(DBC_STRING_MANUFACTURER),
+			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
+			DBC_MAX_STRING_LENGTH);
+
+	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
+	s_desc->bDescriptorType	= USB_DT_STRING;
+	string_length		+= s_desc->bLength;
+	string_length		<<= 8;
+
+	/* String0: */
+	strings->string0[0]	= 4;
+	strings->string0[1]	= USB_DT_STRING;
+	strings->string0[2]	= 0x09;
+	strings->string0[3]	= 0x04;
+	string_length		+= 4;
+
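+	/*
+	 * string_length now packs the four descriptor lengths one per
+	 * byte: string0 in bits 7:0, manufacturer in 15:8, product in
+	 * 23:16 and serial in 31:24, ready for the info context's
+	 * length field.
+	 */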
+	return string_length;
+}
+
+static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
+{
+	struct xhci_dbc		*dbc;
+	struct dbc_info_context	*info;
+	struct xhci_ep_ctx	*ep_ctx;
+	u32			dev_info;
+	dma_addr_t		deq, dma;
+	unsigned int		max_burst;
+
+	dbc = xhci->dbc;
+	if (!dbc)
+		return;
+
+	/* Populate info Context: */
+	info			= (struct dbc_info_context *)dbc->ctx->bytes;
+	dma			= dbc->string_dma;
+	info->string0		= cpu_to_le64(dma);
+	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
+	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
+	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
+	info->length		= cpu_to_le32(string_length);
+
+	/* Populate bulk out endpoint context: */
+	ep_ctx			= dbc_bulkout_ctx(dbc);
+	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+	deq			= dbc_bulkout_enq(dbc);
+	ep_ctx->ep_info		= 0;
+	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);
+
+	/* Populate bulk in endpoint context: */
+	ep_ctx			= dbc_bulkin_ctx(dbc);
+	deq			= dbc_bulkin_enq(dbc);
+	ep_ctx->ep_info		= 0;
+	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);
+
+	/* Set DbC context and info registers: */
+	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);
+
+	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
+	writel(dev_info, &dbc->regs->devinfo1);
+
+	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
+	writel(dev_info, &dbc->regs->devinfo2);
+}
+
+static void xhci_dbc_giveback(struct dbc_request *req, int status)
+	__releases(&dbc->lock)
+	__acquires(&dbc->lock)
+{
+	struct dbc_ep		*dep = req->dep;
+	struct xhci_dbc		*dbc = dep->dbc;
+	struct xhci_hcd		*xhci = dbc->xhci;
+	struct device		*dev = xhci_to_hcd(dbc->xhci)->self.sysdev;
+
+	list_del_init(&req->list_pending);
+	req->trb_dma = 0;
+	req->trb = NULL;
+
+	if (req->status == -EINPROGRESS)
+		req->status = status;
+
+	trace_xhci_dbc_giveback_request(req);
+
+	dma_unmap_single(dev,
+			 req->dma,
+			 req->length,
+			 dbc_ep_dma_direction(dep));
+
+	/* Give back the transfer request: */
+	spin_unlock(&dbc->lock);
+	req->complete(xhci, req);
+	spin_lock(&dbc->lock);
+}
+
+static void xhci_dbc_flush_single_request(struct dbc_request *req)
+{
+	union xhci_trb	*trb = req->trb;
+
+	trb->generic.field[0]	= 0;
+	trb->generic.field[1]	= 0;
+	trb->generic.field[2]	= 0;
+	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
+	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+
+	xhci_dbc_giveback(req, -ESHUTDOWN);
+}
+
+static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
+{
+	struct dbc_request	*req, *tmp;
+
+	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
+		xhci_dbc_flush_single_request(req);
+}
+
+static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
+{
+	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
+	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
+}
+
+struct dbc_request *
+dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
+{
+	struct dbc_request	*req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->dep = dep;
+	INIT_LIST_HEAD(&req->list_pending);
+	INIT_LIST_HEAD(&req->list_pool);
+	req->direction = dep->direction;
+
+	trace_xhci_dbc_alloc_request(req);
+
+	return req;
+}
+
+void
+dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
+{
+	trace_xhci_dbc_free_request(req);
+
+	kfree(req);
+}
+
+static void
+xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
+		   u32 field2, u32 field3, u32 field4)
+{
+	union xhci_trb		*trb, *next;
+
+	trb = ring->enqueue;
+	trb->generic.field[0]	= cpu_to_le32(field1);
+	trb->generic.field[1]	= cpu_to_le32(field2);
+	trb->generic.field[2]	= cpu_to_le32(field3);
+	trb->generic.field[3]	= cpu_to_le32(field4);
+
+	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
+
+	ring->num_trbs_free--;
+	next = ++(ring->enqueue);
+	if (TRB_TYPE_LINK_LE32(next->link.control)) {
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+		ring->enqueue = ring->enq_seg->trbs;
+		ring->cycle_state ^= 1;
+	}
+}
+
+static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
+				  struct dbc_request *req)
+{
+	u64			addr;
+	union xhci_trb		*trb;
+	unsigned int		num_trbs;
+	struct xhci_dbc		*dbc = dep->dbc;
+	struct xhci_ring	*ring = dep->ring;
+	u32			length, control, cycle;
+
+	num_trbs = count_trbs(req->dma, req->length);
+	WARN_ON(num_trbs != 1);
+	if (ring->num_trbs_free < num_trbs)
+		return -EBUSY;
+
+	addr	= req->dma;
+	trb	= ring->enqueue;
+	cycle	= ring->cycle_state;
+	length	= TRB_LEN(req->length);
+	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;
+
+	if (cycle)
+		control &= ~TRB_CYCLE;
+	else
+		control |= TRB_CYCLE;
+
+	req->trb = ring->enqueue;
+	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+	xhci_dbc_queue_trb(ring,
+			   lower_32_bits(addr),
+			   upper_32_bits(addr),
+			   length, control);
+
+	/*
+	 * Add a barrier between writes of trb fields and flipping
+	 * the cycle bit:
+	 */
+	wmb();
+
+	if (cycle)
+		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
+	else
+		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
+
+	return 0;
+}
+
+static int
+dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
+{
+	int			ret;
+	struct device		*dev;
+	struct xhci_dbc		*dbc = dep->dbc;
+	struct xhci_hcd		*xhci = dbc->xhci;
+
+	dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	if (!req->length || !req->buf)
+		return -EINVAL;
+
+	req->actual		= 0;
+	req->status		= -EINPROGRESS;
+
+	req->dma = dma_map_single(dev,
+				  req->buf,
+				  req->length,
+				  dbc_ep_dma_direction(dep));
+	if (dma_mapping_error(dev, req->dma)) {
+		xhci_err(xhci, "failed to map buffer\n");
+		return -EFAULT;
+	}
+
+	ret = xhci_dbc_queue_bulk_tx(dep, req);
+	if (ret) {
+		xhci_err(xhci, "failed to queue trbs\n");
+		dma_unmap_single(dev,
+				 req->dma,
+				 req->length,
+				 dbc_ep_dma_direction(dep));
+		return -EFAULT;
+	}
+
+	list_add_tail(&req->list_pending, &dep->list_pending);
+
+	return 0;
+}
+
+int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
+		 gfp_t gfp_flags)
+{
+	unsigned long		flags;
+	struct xhci_dbc		*dbc = dep->dbc;
+	int			ret = -ESHUTDOWN;
+
+	spin_lock_irqsave(&dbc->lock, flags);
+	if (dbc->state == DS_CONFIGURED)
+		ret = dbc_ep_do_queue(dep, req);
+	spin_unlock_irqrestore(&dbc->lock, flags);
+
+	mod_delayed_work(system_wq, &dbc->event_work, 0);
+
+	trace_xhci_dbc_queue_request(req);
+
+	return ret;
+}
+
+static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
+{
+	struct dbc_ep		*dep;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
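+	/*
+	 * 'direction' doubles as the index into dbc->eps[]:
+	 * BULK_OUT is 0 and BULK_IN is 1.
+	 */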
+	dep			= &dbc->eps[direction];
+	dep->dbc		= dbc;
+	dep->direction		= direction;
+	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;
+
+	INIT_LIST_HEAD(&dep->list_pending);
+}
+
+static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
+{
+	xhci_dbc_do_eps_init(xhci, BULK_OUT);
+	xhci_dbc_do_eps_init(xhci, BULK_IN);
+}
+
+static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
+}
+
+static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int			ret;
+	dma_addr_t		deq;
+	u32			string_length;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	/* Allocate various rings for events and transfers: */
+	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
+	if (!dbc->ring_evt)
+		goto evt_fail;
+
+	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+	if (!dbc->ring_in)
+		goto in_fail;
+
+	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+	if (!dbc->ring_out)
+		goto out_fail;
+
+	/* Allocate and populate ERST: */
+	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
+	if (ret)
+		goto erst_fail;
+
+	/* Allocate context data structure: */
+	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+	if (!dbc->ctx)
+		goto ctx_fail;
+
+	/* Allocate the string table: */
+	dbc->string_size = sizeof(struct dbc_str_descs);
+	dbc->string = dbc_dma_alloc_coherent(xhci,
+					     dbc->string_size,
+					     &dbc->string_dma,
+					     flags);
+	if (!dbc->string)
+		goto string_fail;
+
+	/* Setup ERST register: */
+	writel(dbc->erst.erst_size, &dbc->regs->ersts);
+	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
+	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+				   dbc->ring_evt->dequeue);
+	xhci_write_64(xhci, deq, &dbc->regs->erdp);
+
+	/* Setup strings and contexts: */
+	string_length = xhci_dbc_populate_strings(dbc->string);
+	xhci_dbc_init_contexts(xhci, string_length);
+
+	mmiowb();
+
+	xhci_dbc_eps_init(xhci);
+	dbc->state = DS_INITIALIZED;
+
+	return 0;
+
+string_fail:
+	xhci_free_container_ctx(xhci, dbc->ctx);
+	dbc->ctx = NULL;
+ctx_fail:
+	xhci_free_erst(xhci, &dbc->erst);
+erst_fail:
+	xhci_ring_free(xhci, dbc->ring_out);
+	dbc->ring_out = NULL;
+out_fail:
+	xhci_ring_free(xhci, dbc->ring_in);
+	dbc->ring_in = NULL;
+in_fail:
+	xhci_ring_free(xhci, dbc->ring_evt);
+	dbc->ring_evt = NULL;
+evt_fail:
+	return -ENOMEM;
+}
+
+static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	if (!dbc)
+		return;
+
+	xhci_dbc_eps_exit(xhci);
+
+	if (dbc->string) {
+		dbc_dma_free_coherent(xhci,
+				      dbc->string_size,
+				      dbc->string, dbc->string_dma);
+		dbc->string = NULL;
+	}
+
+	xhci_free_container_ctx(xhci, dbc->ctx);
+	dbc->ctx = NULL;
+
+	xhci_free_erst(xhci, &dbc->erst);
+	xhci_ring_free(xhci, dbc->ring_out);
+	xhci_ring_free(xhci, dbc->ring_in);
+	xhci_ring_free(xhci, dbc->ring_evt);
+	dbc->ring_in = NULL;
+	dbc->ring_out = NULL;
+	dbc->ring_evt = NULL;
+}
+
+static int xhci_do_dbc_start(struct xhci_hcd *xhci)
+{
+	int			ret;
+	u32			ctrl;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	if (dbc->state != DS_DISABLED)
+		return -EINVAL;
+
+	writel(0, &dbc->regs->control);
+	ret = xhci_handshake(&dbc->regs->control,
+			     DBC_CTRL_DBC_ENABLE,
+			     0, 1000);
+	if (ret)
+		return ret;
+
+	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	ctrl = readl(&dbc->regs->control);
+	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
+	       &dbc->regs->control);
+	ret = xhci_handshake(&dbc->regs->control,
+			     DBC_CTRL_DBC_ENABLE,
+			     DBC_CTRL_DBC_ENABLE, 1000);
+	if (ret)
+		return ret;
+
+	dbc->state = DS_ENABLED;
+
+	return 0;
+}
+
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	if (dbc->state == DS_DISABLED)
+		return -1;
+
+	writel(0, &dbc->regs->control);
+	xhci_dbc_mem_cleanup(xhci);
+	dbc->state = DS_DISABLED;
+
+	return 0;
+}
+
+static int xhci_dbc_start(struct xhci_hcd *xhci)
+{
+	int			ret;
+	unsigned long		flags;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	WARN_ON(!dbc);
+
+	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
+
+	spin_lock_irqsave(&dbc->lock, flags);
+	ret = xhci_do_dbc_start(xhci);
+	spin_unlock_irqrestore(&dbc->lock, flags);
+
+	if (ret) {
+		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
+		return ret;
+	}
+
+	mod_delayed_work(system_wq, &dbc->event_work, 1);
+
+	return 0;
+}
+
+static void xhci_dbc_stop(struct xhci_hcd *xhci)
+{
+	int ret;
+	unsigned long		flags;
+	struct xhci_dbc		*dbc = xhci->dbc;
+	struct dbc_port		*port = &dbc->port;
+
+	WARN_ON(!dbc);
+
+	cancel_delayed_work_sync(&dbc->event_work);
+
+	if (port->registered)
+		xhci_dbc_tty_unregister_device(xhci);
+
+	spin_lock_irqsave(&dbc->lock, flags);
+	ret = xhci_do_dbc_stop(xhci);
+	spin_unlock_irqrestore(&dbc->lock, flags);
+
+	if (!ret)
+		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+}
+
+static void
+dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
+{
+	u32			portsc;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	portsc = readl(&dbc->regs->portsc);
+	if (portsc & DBC_PORTSC_CONN_CHANGE)
+		xhci_info(xhci, "DbC port connect change\n");
+
+	if (portsc & DBC_PORTSC_RESET_CHANGE)
+		xhci_info(xhci, "DbC port reset change\n");
+
+	if (portsc & DBC_PORTSC_LINK_CHANGE)
+		xhci_info(xhci, "DbC port link status change\n");
+
+	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
+		xhci_info(xhci, "DbC config error change\n");
+
+	/* The port reset change bit is cleared elsewhere: */
+	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
+}
+
+static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
+{
+	struct dbc_ep		*dep;
+	struct xhci_ring	*ring;
+	int			ep_id;
+	int			status;
+	u32			comp_code;
+	size_t			remain_length;
+	struct dbc_request	*req = NULL, *r;
+
+	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
+	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
+	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
+	dep		= (ep_id == EPID_OUT) ?
+				get_out_ep(xhci) : get_in_ep(xhci);
+	ring		= dep->ring;
+
+	switch (comp_code) {
+	case COMP_SUCCESS:
+		remain_length = 0;
+	/* FALLTHROUGH */
+	case COMP_SHORT_PACKET:
+		status = 0;
+		break;
+	case COMP_TRB_ERROR:
+	case COMP_BABBLE_DETECTED_ERROR:
+	case COMP_USB_TRANSACTION_ERROR:
+	case COMP_STALL_ERROR:
+		xhci_warn(xhci, "tx error %d detected\n", comp_code);
+		status = -comp_code;
+		break;
+	default:
+		xhci_err(xhci, "unknown tx error %d\n", comp_code);
+		status = -comp_code;
+		break;
+	}
+
+	/* Match the pending request: */
+	list_for_each_entry(r, &dep->list_pending, list_pending) {
+		if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
+			req = r;
+			break;
+		}
+	}
+
+	if (!req) {
+		xhci_warn(xhci, "no matched request\n");
+		return;
+	}
+
+	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+
+	ring->num_trbs_free++;
+	req->actual = req->length - remain_length;
+	xhci_dbc_giveback(req, status);
+}
+
+static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+{
+	dma_addr_t		deq;
+	struct dbc_ep		*dep;
+	union xhci_trb		*evt;
+	u32			ctrl, portsc;
+	struct xhci_hcd		*xhci = dbc->xhci;
+	bool			update_erdp = false;
+
+	/* DbC state machine: */
+	switch (dbc->state) {
+	case DS_DISABLED:
+	case DS_INITIALIZED:
+		return EVT_ERR;
+	case DS_ENABLED:
+		portsc = readl(&dbc->regs->portsc);
+		if (portsc & DBC_PORTSC_CONN_STATUS) {
+			dbc->state = DS_CONNECTED;
+			xhci_info(xhci, "DbC connected\n");
+		}
+
+		return EVT_DONE;
+	case DS_CONNECTED:
+		ctrl = readl(&dbc->regs->control);
+		if (ctrl & DBC_CTRL_DBC_RUN) {
+			dbc->state = DS_CONFIGURED;
+			xhci_info(xhci, "DbC configured\n");
+			portsc = readl(&dbc->regs->portsc);
+			writel(portsc, &dbc->regs->portsc);
+			return EVT_GSER;
+		}
+
+		return EVT_DONE;
+	case DS_CONFIGURED:
+		/* Handle cable unplug event: */
+		portsc = readl(&dbc->regs->portsc);
+		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
+		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
+			xhci_info(xhci, "DbC cable unplugged\n");
+			dbc->state = DS_ENABLED;
+			xhci_dbc_flush_requests(dbc);
+
+			return EVT_DISC;
+		}
+
+		/* Handle debug port reset event: */
+		if (portsc & DBC_PORTSC_RESET_CHANGE) {
+			xhci_info(xhci, "DbC port reset\n");
+			writel(portsc, &dbc->regs->portsc);
+			dbc->state = DS_ENABLED;
+			xhci_dbc_flush_requests(dbc);
+
+			return EVT_DISC;
+		}
+
+		/* Handle endpoint stall event: */
+		ctrl = readl(&dbc->regs->control);
+		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
+		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
+			xhci_info(xhci, "DbC Endpoint stall\n");
+			dbc->state = DS_STALLED;
+
+			if (ctrl & DBC_CTRL_HALT_IN_TR) {
+				dep = get_in_ep(xhci);
+				xhci_dbc_flush_endpoint_requests(dep);
+			}
+
+			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
+				dep = get_out_ep(xhci);
+				xhci_dbc_flush_endpoint_requests(dep);
+			}
+
+			return EVT_DONE;
+		}
+
+		/* Clear DbC run change bit: */
+		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
+			writel(ctrl, &dbc->regs->control);
+			ctrl = readl(&dbc->regs->control);
+		}
+
+		break;
+	case DS_STALLED:
+		ctrl = readl(&dbc->regs->control);
+		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
+		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
+		    (ctrl & DBC_CTRL_DBC_RUN)) {
+			dbc->state = DS_CONFIGURED;
+			break;
+		}
+
+		return EVT_DONE;
+	default:
+		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
+		break;
+	}
+
+	/* Handle the events in the event ring: */
+	evt = dbc->ring_evt->dequeue;
+	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
+			dbc->ring_evt->cycle_state) {
+		/*
+		 * Add a barrier between reading the cycle flag and any
+		 * reads of the event's flags/data below:
+		 */
+		rmb();
+
+		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
+
+		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
+		case TRB_TYPE(TRB_PORT_STATUS):
+			dbc_handle_port_status(xhci, evt);
+			break;
+		case TRB_TYPE(TRB_TRANSFER):
+			dbc_handle_xfer_event(xhci, evt);
+			break;
+		default:
+			break;
+		}
+
+		inc_deq(xhci, dbc->ring_evt);
+		evt = dbc->ring_evt->dequeue;
+		update_erdp = true;
+	}
+
+	/* Update event ring dequeue pointer: */
+	if (update_erdp) {
+		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+					   dbc->ring_evt->dequeue);
+		xhci_write_64(xhci, deq, &dbc->regs->erdp);
+	}
+
+	return EVT_DONE;
+}
+
+static void xhci_dbc_handle_events(struct work_struct *work)
+{
+	int			ret;
+	enum evtreturn		evtr;
+	struct xhci_dbc		*dbc;
+	unsigned long		flags;
+	struct xhci_hcd		*xhci;
+
+	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+	xhci = dbc->xhci;
+
+	spin_lock_irqsave(&dbc->lock, flags);
+	evtr = xhci_dbc_do_handle_events(dbc);
+	spin_unlock_irqrestore(&dbc->lock, flags);
+
+	switch (evtr) {
+	case EVT_GSER:
+		ret = xhci_dbc_tty_register_device(xhci);
+		if (ret) {
+			xhci_err(xhci, "failed to alloc tty device\n");
+			break;
+		}
+
+		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
+		break;
+	case EVT_DISC:
+		xhci_dbc_tty_unregister_device(xhci);
+		break;
+	case EVT_DONE:
+		break;
+	default:
+		xhci_info(xhci, "stop handling dbc events\n");
+		return;
+	}
+
+	mod_delayed_work(system_wq, &dbc->event_work, 1);
+}
+
+static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
+{
+	unsigned long		flags;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	kfree(xhci->dbc);
+	xhci->dbc = NULL;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
+static int xhci_do_dbc_init(struct xhci_hcd *xhci)
+{
+	u32			reg;
+	struct xhci_dbc		*dbc;
+	unsigned long		flags;
+	void __iomem		*base;
+	int			dbc_cap_offs;
+
+	base = &xhci->cap_regs->hc_capbase;
+	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
+	if (!dbc_cap_offs)
+		return -ENODEV;
+
+	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
+	if (!dbc)
+		return -ENOMEM;
+
+	dbc->regs = base + dbc_cap_offs;
+
+	/* If the DbC is already in use (e.g. by firmware), don't touch it. */
+	reg = readl(&dbc->regs->control);
+	if (reg & DBC_CTRL_DBC_ENABLE) {
+		kfree(dbc);
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (xhci->dbc) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		kfree(dbc);
+		return -EBUSY;
+	}
+	xhci->dbc = dbc;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	dbc->xhci = xhci;
+	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
+	spin_lock_init(&dbc->lock);
+
+	return 0;
+}
+
+static ssize_t dbc_show(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	const char		*p;
+	struct xhci_dbc		*dbc;
+	struct xhci_hcd		*xhci;
+
+	xhci = hcd_to_xhci(dev_get_drvdata(dev));
+	dbc = xhci->dbc;
+
+	switch (dbc->state) {
+	case DS_DISABLED:
+		p = "disabled";
+		break;
+	case DS_INITIALIZED:
+		p = "initialized";
+		break;
+	case DS_ENABLED:
+		p = "enabled";
+		break;
+	case DS_CONNECTED:
+		p = "connected";
+		break;
+	case DS_CONFIGURED:
+		p = "configured";
+		break;
+	case DS_STALLED:
+		p = "stalled";
+		break;
+	default:
+		p = "unknown";
+	}
+
+	return sprintf(buf, "%s\n", p);
+}
+
+static ssize_t dbc_store(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct xhci_hcd		*xhci;
+
+	xhci = hcd_to_xhci(dev_get_drvdata(dev));
+
+	if (!strncmp(buf, "enable", 6))
+		xhci_dbc_start(xhci);
+	else if (!strncmp(buf, "disable", 7))
+		xhci_dbc_stop(xhci);
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(dbc);
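+
+/*
+ * Usage sketch (the PCI device path below is an example, not a fixed
+ * name):
+ *
+ *   echo enable  >/sys/bus/pci/devices/0000:00:14.0/dbc
+ *   cat /sys/bus/pci/devices/0000:00:14.0/dbc    # e.g. "configured"
+ *   echo disable >/sys/bus/pci/devices/0000:00:14.0/dbc
+ */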
+
+int xhci_dbc_init(struct xhci_hcd *xhci)
+{
+	int			ret;
+	struct device		*dev = xhci_to_hcd(xhci)->self.controller;
+
+	ret = xhci_do_dbc_init(xhci);
+	if (ret)
+		goto init_err3;
+
+	ret = xhci_dbc_tty_register_driver(xhci);
+	if (ret)
+		goto init_err2;
+
+	ret = device_create_file(dev, &dev_attr_dbc);
+	if (ret)
+		goto init_err1;
+
+	return 0;
+
+init_err1:
+	xhci_dbc_tty_unregister_driver();
+init_err2:
+	xhci_do_dbc_exit(xhci);
+init_err3:
+	return ret;
+}
+
+void xhci_dbc_exit(struct xhci_hcd *xhci)
+{
+	struct device		*dev = xhci_to_hcd(xhci)->self.controller;
+
+	if (!xhci->dbc)
+		return;
+
+	device_remove_file(dev, &dev_attr_dbc);
+	xhci_dbc_tty_unregister_driver();
+	xhci_dbc_stop(xhci);
+	xhci_do_dbc_exit(xhci);
+}
+
+#ifdef CONFIG_PM
+int xhci_dbc_suspend(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	if (!dbc)
+		return 0;
+
+	if (dbc->state == DS_CONFIGURED)
+		dbc->resume_required = 1;
+
+	xhci_dbc_stop(xhci);
+
+	return 0;
+}
+
+int xhci_dbc_resume(struct xhci_hcd *xhci)
+{
+	int			ret = 0;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	if (!dbc)
+		return 0;
+
+	if (dbc->resume_required) {
+		dbc->resume_required = 0;
+		xhci_dbc_start(xhci);
+	}
+
+	return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
new file mode 100644
index 0000000..ce0c607
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xhci-dbgcap.h - xHCI debug capability support
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+#ifndef __LINUX_XHCI_DBGCAP_H
+#define __LINUX_XHCI_DBGCAP_H
+
+#include <linux/tty.h>
+#include <linux/kfifo.h>
+
+struct dbc_regs {
+	__le32	capability;
+	__le32	doorbell;
+	__le32	ersts;		/* Event Ring Segment Table Size */
+	__le32	__reserved_0;	/* 0c~0f reserved bits */
+	__le64	erstba;		/* Event Ring Segment Table Base Address */
+	__le64	erdp;		/* Event Ring Dequeue Pointer */
+	__le32	control;
+	__le32	status;
+	__le32	portsc;		/* Port status and control */
+	__le32	__reserved_1;	/* 28~2b reserved bits */
+	__le64	dccp;		/* Debug Capability Context Pointer */
+	__le32	devinfo1;	/* Device Descriptor Info Register 1 */
+	__le32	devinfo2;	/* Device Descriptor Info Register 2 */
+};
+
+struct dbc_info_context {
+	__le64	string0;
+	__le64	manufacturer;
+	__le64	product;
+	__le64	serial;
+	__le32	length;
+	__le32	__reserved_0[7];
+};
+
+#define DBC_CTRL_DBC_RUN		BIT(0)
+#define DBC_CTRL_PORT_ENABLE		BIT(1)
+#define DBC_CTRL_HALT_OUT_TR		BIT(2)
+#define DBC_CTRL_HALT_IN_TR		BIT(3)
+#define DBC_CTRL_DBC_RUN_CHANGE		BIT(4)
+#define DBC_CTRL_DBC_ENABLE		BIT(31)
+#define DBC_CTRL_MAXBURST(p)		(((p) >> 16) & 0xff)
+#define DBC_DOOR_BELL_TARGET(p)		(((p) & 0xff) << 8)
+
+#define DBC_MAX_PACKET			1024
+#define DBC_MAX_STRING_LENGTH		64
+#define DBC_STRING_MANUFACTURER		"Linux Foundation"
+#define DBC_STRING_PRODUCT		"Linux USB Debug Target"
+#define DBC_STRING_SERIAL		"0001"
+#define	DBC_CONTEXT_SIZE		64
+
+/*
+ * Port status:
+ */
+#define DBC_PORTSC_CONN_STATUS		BIT(0)
+#define DBC_PORTSC_PORT_ENABLED		BIT(1)
+#define DBC_PORTSC_CONN_CHANGE		BIT(17)
+#define DBC_PORTSC_RESET_CHANGE		BIT(21)
+#define DBC_PORTSC_LINK_CHANGE		BIT(22)
+#define DBC_PORTSC_CONFIG_CHANGE	BIT(23)
+
+struct dbc_str_descs {
+	char	string0[DBC_MAX_STRING_LENGTH];
+	char	manufacturer[DBC_MAX_STRING_LENGTH];
+	char	product[DBC_MAX_STRING_LENGTH];
+	char	serial[DBC_MAX_STRING_LENGTH];
+};
+
+#define DBC_PROTOCOL			1	/* GNU Remote Debug Command */
+#define DBC_VENDOR_ID			0x1d6b	/* Linux Foundation 0x1d6b */
+#define DBC_PRODUCT_ID			0x0010	/* device 0010 */
+#define DBC_DEVICE_REV			0x0010	/* 0.10 */
+
+enum dbc_state {
+	DS_DISABLED = 0,
+	DS_INITIALIZED,
+	DS_ENABLED,
+	DS_CONNECTED,
+	DS_CONFIGURED,
+	DS_STALLED,
+};
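+
+/*
+ * Typical state flow (see xhci_dbc_do_handle_events()):
+ *
+ *   DISABLED -> INITIALIZED -> ENABLED -> CONNECTED -> CONFIGURED
+ *
+ * CONFIGURED drops back to ENABLED on cable unplug or port reset,
+ * and moves to STALLED while an endpoint is halted, returning to
+ * CONFIGURED once the halt clears and the run bit is set again.
+ */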
+
+struct dbc_request {
+	void				*buf;
+	unsigned int			length;
+	dma_addr_t			dma;
+	void				(*complete)(struct xhci_hcd *xhci,
+						    struct dbc_request *req);
+	struct list_head		list_pool;
+	int				status;
+	unsigned int			actual;
+
+	struct dbc_ep			*dep;
+	struct list_head		list_pending;
+	dma_addr_t			trb_dma;
+	union xhci_trb			*trb;
+	unsigned			direction:1;
+};
+
+struct dbc_ep {
+	struct xhci_dbc			*dbc;
+	struct list_head		list_pending;
+	struct xhci_ring		*ring;
+	unsigned			direction:1;
+};
+
+#define DBC_QUEUE_SIZE			16
+#define DBC_WRITE_BUF_SIZE		8192
+
+/*
+ * Private structure for DbC hardware state:
+ */
+struct dbc_port {
+	struct tty_port			port;
+	spinlock_t			port_lock;	/* port access */
+
+	struct list_head		read_pool;
+	struct list_head		read_queue;
+	unsigned int			n_read;
+	struct tasklet_struct		push;
+
+	struct list_head		write_pool;
+	struct kfifo			write_fifo;
+
+	bool				registered;
+	struct dbc_ep			*in;
+	struct dbc_ep			*out;
+};
+
+struct xhci_dbc {
+	spinlock_t			lock;		/* device access */
+	struct xhci_hcd			*xhci;
+	struct dbc_regs __iomem		*regs;
+	struct xhci_ring		*ring_evt;
+	struct xhci_ring		*ring_in;
+	struct xhci_ring		*ring_out;
+	struct xhci_erst		erst;
+	struct xhci_container_ctx	*ctx;
+
+	struct dbc_str_descs		*string;
+	dma_addr_t			string_dma;
+	size_t				string_size;
+
+	enum dbc_state			state;
+	struct delayed_work		event_work;
+	unsigned			resume_required:1;
+	struct dbc_ep			eps[2];
+
+	struct dbc_port			port;
+};
+
+#define dbc_bulkout_ctx(d)		\
+	((struct xhci_ep_ctx *)((d)->ctx->bytes + DBC_CONTEXT_SIZE))
+#define dbc_bulkin_ctx(d)		\
+	((struct xhci_ep_ctx *)((d)->ctx->bytes + DBC_CONTEXT_SIZE * 2))
+#define dbc_bulkout_enq(d)		\
+	xhci_trb_virt_to_dma((d)->ring_out->enq_seg, (d)->ring_out->enqueue)
+#define dbc_bulkin_enq(d)		\
+	xhci_trb_virt_to_dma((d)->ring_in->enq_seg, (d)->ring_in->enqueue)
+#define dbc_epctx_info2(t, p, b)	\
+	cpu_to_le32(EP_TYPE(t) | MAX_PACKET(p) | MAX_BURST(b))
+#define dbc_ep_dma_direction(d)		\
+	((d)->direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE)
+
+#define BULK_OUT			0
+#define BULK_IN				1
+#define EPID_OUT			2
+#define EPID_IN				3
+
+enum evtreturn {
+	EVT_ERR	= -1,
+	EVT_DONE,
+	EVT_GSER,
+	EVT_DISC,
+};
+
+static inline struct dbc_ep *get_in_ep(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	return &dbc->eps[BULK_IN];
+}
+
+static inline struct dbc_ep *get_out_ep(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	return &dbc->eps[BULK_OUT];
+}
+
+#ifdef CONFIG_USB_XHCI_DBGCAP
+int xhci_dbc_init(struct xhci_hcd *xhci);
+void xhci_dbc_exit(struct xhci_hcd *xhci);
+int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci);
+void xhci_dbc_tty_unregister_driver(void);
+int xhci_dbc_tty_register_device(struct xhci_hcd *xhci);
+void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci);
+struct dbc_request *dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags);
+void dbc_free_request(struct dbc_ep *dep, struct dbc_request *req);
+int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, gfp_t gfp_flags);
+#ifdef CONFIG_PM
+int xhci_dbc_suspend(struct xhci_hcd *xhci);
+int xhci_dbc_resume(struct xhci_hcd *xhci);
+#endif /* CONFIG_PM */
+#else
+static inline int xhci_dbc_init(struct xhci_hcd *xhci)
+{
+	return 0;
+}
+
+static inline void xhci_dbc_exit(struct xhci_hcd *xhci)
+{
+}
+
+static inline int xhci_dbc_suspend(struct xhci_hcd *xhci)
+{
+	return 0;
+}
+
+static inline int xhci_dbc_resume(struct xhci_hcd *xhci)
+{
+	return 0;
+}
+#endif /* CONFIG_USB_XHCI_DBGCAP */
+#endif /* __LINUX_XHCI_DBGCAP_H */
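
The dbc_bulkout_ctx()/dbc_bulkin_ctx() macros above rely on the fixed layout of
the Debug Capability Context: the info context occupies the first
DBC_CONTEXT_SIZE bytes, followed by the bulk OUT and bulk IN endpoint contexts.
A sketch of that arithmetic, with a hypothetical helper name:

	/* Layout sketch: offsets inside the Debug Capability Context. */
	static void __maybe_unused dbc_ctx_layout_example(struct xhci_dbc *dbc)
	{
		u8 *base = dbc->ctx->bytes;
		struct dbc_info_context *info;
		struct xhci_ep_ctx *out, *in;

		info = (struct dbc_info_context *)base;			  /* offset 0   */
		out = (struct xhci_ep_ctx *)(base + DBC_CONTEXT_SIZE);	  /* offset 64  */
		in = (struct xhci_ep_ctx *)(base + DBC_CONTEXT_SIZE * 2); /* offset 128 */
	}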
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
new file mode 100644
index 0000000..aff79ff
--- /dev/null
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-dbgtty.c - tty glue for xHCI debug capability
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include "xhci.h"
+#include "xhci-dbgcap.h"
+
+static unsigned int
+dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
+{
+	unsigned int		len;
+
+	len = kfifo_len(&port->write_fifo);
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = kfifo_out(&port->write_fifo, packet, size);
+	return size;
+}
+
+static int dbc_start_tx(struct dbc_port *port)
+	__releases(&port->port_lock)
+	__acquires(&port->port_lock)
+{
+	int			len;
+	struct dbc_request	*req;
+	int			status = 0;
+	bool			do_tty_wake = false;
+	struct list_head	*pool = &port->write_pool;
+
+	while (!list_empty(pool)) {
+		req = list_entry(pool->next, struct dbc_request, list_pool);
+		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+		if (len == 0)
+			break;
+		do_tty_wake = true;
+
+		req->length = len;
+		list_del(&req->list_pool);
+
+		spin_unlock(&port->port_lock);
+		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			list_add(&req->list_pool, pool);
+			break;
+		}
+	}
+
+	if (do_tty_wake && port->port.tty)
+		tty_wakeup(port->port.tty);
+
+	return status;
+}
+
+static void dbc_start_rx(struct dbc_port *port)
+	__releases(&port->port_lock)
+	__acquires(&port->port_lock)
+{
+	struct dbc_request	*req;
+	int			status;
+	struct list_head	*pool = &port->read_pool;
+
+	while (!list_empty(pool)) {
+		if (!port->port.tty)
+			break;
+
+		req = list_entry(pool->next, struct dbc_request, list_pool);
+		list_del(&req->list_pool);
+		req->length = DBC_MAX_PACKET;
+
+		spin_unlock(&port->port_lock);
+		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			list_add(&req->list_pool, pool);
+			break;
+		}
+	}
+}
+
+static void
+dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
+{
+	unsigned long		flags;
+	struct xhci_dbc		*dbc = xhci->dbc;
+	struct dbc_port		*port = &dbc->port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add_tail(&req->list_pool, &port->read_queue);
+	tasklet_schedule(&port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
+{
+	unsigned long		flags;
+	struct xhci_dbc		*dbc = xhci->dbc;
+	struct dbc_port		*port = &dbc->port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list_pool, &port->write_pool);
+	switch (req->status) {
+	case 0:
+		dbc_start_tx(port);
+		break;
+	case -ESHUTDOWN:
+		break;
+	default:
+		xhci_warn(xhci, "unexpected write complete status %d\n",
+			  req->status);
+		break;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
+{
+	kfree(req->buf);
+	dbc_free_request(dep, req);
+}
+
+static int
+xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
+			void (*fn)(struct xhci_hcd *, struct dbc_request *))
+{
+	int			i;
+	struct dbc_request	*req;
+
+	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
+		req = dbc_alloc_request(dep, GFP_ATOMIC);
+		if (!req)
+			break;
+
+		req->length = DBC_MAX_PACKET;
+		req->buf = kmalloc(req->length, GFP_KERNEL);
+		if (!req->buf) {
+			xhci_dbc_free_req(dep, req);
+			break;
+		}
+
+		req->complete = fn;
+		list_add_tail(&req->list_pool, head);
+	}
+
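+	/*
+	 * Partial allocation is tolerated: the pool is usable as long as
+	 * at least one request was set up, so only a completely empty
+	 * list is treated as an error.
+	 */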
+	return list_empty(head) ? -ENOMEM : 0;
+}
+
+static void
+xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
+{
+	struct dbc_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct dbc_request, list_pool);
+		list_del(&req->list_pool);
+		xhci_dbc_free_req(dep, req);
+	}
+}
+
+static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct dbc_port		*port = driver->driver_state;
+
+	tty->driver_data = port;
+
+	return tty_port_install(&port->port, driver, tty);
+}
+
+static int dbc_tty_open(struct tty_struct *tty, struct file *file)
+{
+	struct dbc_port		*port = tty->driver_data;
+
+	return tty_port_open(&port->port, tty, file);
+}
+
+static void dbc_tty_close(struct tty_struct *tty, struct file *file)
+{
+	struct dbc_port		*port = tty->driver_data;
+
+	tty_port_close(&port->port, tty, file);
+}
+
+static int dbc_tty_write(struct tty_struct *tty,
+			 const unsigned char *buf,
+			 int count)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (count)
+		count = kfifo_in(&port->write_fifo, buf, count);
+	dbc_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return count;
+}
+
+static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+	int			status;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	status = kfifo_put(&port->write_fifo, ch);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+}
+
+static void dbc_tty_flush_chars(struct tty_struct *tty)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	dbc_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int dbc_tty_write_room(struct tty_struct *tty)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+	int			room = 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	room = kfifo_avail(&port->write_fifo);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return room;
+}
+
+static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+	int			chars = 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	chars = kfifo_len(&port->write_fifo);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return chars;
+}
+
+static void dbc_tty_unthrottle(struct tty_struct *tty)
+{
+	struct dbc_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	tasklet_schedule(&port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static const struct tty_operations dbc_tty_ops = {
+	.install		= dbc_tty_install,
+	.open			= dbc_tty_open,
+	.close			= dbc_tty_close,
+	.write			= dbc_tty_write,
+	.put_char		= dbc_tty_put_char,
+	.flush_chars		= dbc_tty_flush_chars,
+	.write_room		= dbc_tty_write_room,
+	.chars_in_buffer	= dbc_tty_chars_in_buffer,
+	.unthrottle		= dbc_tty_unthrottle,
+};
+
+static struct tty_driver *dbc_tty_driver;
+
+int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
+{
+	int			status;
+	struct xhci_dbc		*dbc = xhci->dbc;
+
+	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
+					  TTY_DRIVER_DYNAMIC_DEV);
+	if (IS_ERR(dbc_tty_driver)) {
+		status = PTR_ERR(dbc_tty_driver);
+		dbc_tty_driver = NULL;
+		return status;
+	}
+
+	dbc_tty_driver->driver_name = "dbc_serial";
+	dbc_tty_driver->name = "ttyDBC";
+
+	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	dbc_tty_driver->init_termios = tty_std_termios;
+	dbc_tty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	dbc_tty_driver->init_termios.c_ispeed = 9600;
+	dbc_tty_driver->init_termios.c_ospeed = 9600;
+	dbc_tty_driver->driver_state = &dbc->port;
+
+	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
+
+	status = tty_register_driver(dbc_tty_driver);
+	if (status) {
+		xhci_err(xhci,
+			 "can't register dbc tty driver, err %d\n", status);
+		put_tty_driver(dbc_tty_driver);
+		dbc_tty_driver = NULL;
+	}
+
+	return status;
+}
+
+void xhci_dbc_tty_unregister_driver(void)
+{
+	if (dbc_tty_driver) {
+		tty_unregister_driver(dbc_tty_driver);
+		put_tty_driver(dbc_tty_driver);
+		dbc_tty_driver = NULL;
+	}
+}
+
+static void dbc_rx_push(unsigned long _port)
+{
+	struct dbc_request	*req;
+	struct tty_struct	*tty;
+	unsigned long		flags;
+	bool			do_push = false;
+	bool			disconnect = false;
+	struct dbc_port		*port = (void *)_port;
+	struct list_head	*queue = &port->read_queue;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	tty = port->port.tty;
+	while (!list_empty(queue)) {
+		req = list_first_entry(queue, struct dbc_request, list_pool);
+
+		if (tty && tty_throttled(tty))
+			break;
+
+		switch (req->status) {
+		case 0:
+			break;
+		case -ESHUTDOWN:
+			disconnect = true;
+			break;
+		default:
+			pr_warn("ttyDBC0: unexpected RX status %d\n",
+				req->status);
+			break;
+		}
+
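+		/*
+		 * Push the payload into the tty flip buffer; n_read
+		 * remembers how much of a request was already consumed so
+		 * that a throttled tty can resume mid-buffer on a later
+		 * pass.
+		 */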
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned int	n, size = req->actual;
+			int		count;
+
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(&port->port, packet,
+						       size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				port->n_read += count;
+				break;
+			}
+			port->n_read = 0;
+		}
+
+		list_move(&req->list_pool, &port->read_pool);
+	}
+
+	if (do_push)
+		tty_flip_buffer_push(&port->port);
+
+	if (!list_empty(queue) && tty) {
+		if (!tty_throttled(tty)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+			else
+				pr_warn("ttyDBC0: RX not scheduled?\n");
+		}
+	}
+
+	if (!disconnect)
+		dbc_start_rx(port);
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
+{
+	unsigned long	flags;
+	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	dbc_start_rx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+}
+
+static const struct tty_port_operations dbc_port_ops = {
+	.activate =	dbc_port_activate,
+};
+
+static void
+xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
+{
+	tty_port_init(&port->port);
+	spin_lock_init(&port->port_lock);
+	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_LIST_HEAD(&port->write_pool);
+
+	port->in =		get_in_ep(xhci);
+	port->out =		get_out_ep(xhci);
+	port->port.ops =	&dbc_port_ops;
+	port->n_read =		0;
+}
+
+static void
+xhci_dbc_tty_exit_port(struct dbc_port *port)
+{
+	tasklet_kill(&port->push);
+	tty_port_destroy(&port->port);
+}
+
+int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
+{
+	int			ret;
+	struct device		*tty_dev;
+	struct xhci_dbc		*dbc = xhci->dbc;
+	struct dbc_port		*port = &dbc->port;
+
+	xhci_dbc_tty_init_port(xhci, port);
+	tty_dev = tty_port_register_device(&port->port,
+					   dbc_tty_driver, 0, NULL);
+	if (IS_ERR(tty_dev)) {
+		ret = PTR_ERR(tty_dev);
+		goto register_fail;
+	}
+
+	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+	if (ret)
+		goto buf_alloc_fail;
+
+	ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
+				      dbc_read_complete);
+	if (ret)
+		goto request_fail;
+
+	ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
+				      dbc_write_complete);
+	if (ret)
+		goto request_fail;
+
+	port->registered = true;
+
+	return 0;
+
+request_fail:
+	xhci_dbc_free_requests(port->in, &port->read_pool);
+	xhci_dbc_free_requests(port->out, &port->write_pool);
+	kfifo_free(&port->write_fifo);
+
+buf_alloc_fail:
+	tty_unregister_device(dbc_tty_driver, 0);
+
+register_fail:
+	xhci_dbc_tty_exit_port(port);
+
+	xhci_err(xhci, "can't register tty port, err %d\n", ret);
+
+	return ret;
+}
+
+void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
+{
+	struct xhci_dbc		*dbc = xhci->dbc;
+	struct dbc_port		*port = &dbc->port;
+
+	tty_unregister_device(dbc_tty_driver, 0);
+	xhci_dbc_tty_exit_port(port);
+	port->registered = false;
+
+	kfifo_free(&port->write_fifo);
+	xhci_dbc_free_requests(get_in_ep(xhci), &port->read_pool);
+	xhci_dbc_free_requests(get_in_ep(xhci), &port->read_queue);
+	xhci_dbc_free_requests(get_out_ep(xhci), &port->write_pool);
+}
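
Once registered, the port appears as /dev/ttyDBC0 on the debug target. A minimal
userspace sketch of opening it as a raw, byte-transparent serial port (the baud
rate in the termios settings is not meaningful for this link):

	#include <fcntl.h>
	#include <termios.h>
	#include <unistd.h>

	/* Open the DbC tty in raw mode; returns the fd, or -1 on error. */
	int open_dbc_tty(void)
	{
		struct termios tio;
		int fd = open("/dev/ttyDBC0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return fd;

		tcgetattr(fd, &tio);
		cfmakeraw(&tio);
		tcsetattr(fd, TCSANOW, &tio);

		return fd;
	}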
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
new file mode 100644
index 0000000..cadc013
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-debugfs.c - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "xhci.h"
+#include "xhci-debugfs.h"
+
+static const struct debugfs_reg32 xhci_cap_regs[] = {
+	dump_register(CAPLENGTH),
+	dump_register(HCSPARAMS1),
+	dump_register(HCSPARAMS2),
+	dump_register(HCSPARAMS3),
+	dump_register(HCCPARAMS1),
+	dump_register(DOORBELLOFF),
+	dump_register(RUNTIMEOFF),
+	dump_register(HCCPARAMS2),
+};
+
+static const struct debugfs_reg32 xhci_op_regs[] = {
+	dump_register(USBCMD),
+	dump_register(USBSTS),
+	dump_register(PAGESIZE),
+	dump_register(DNCTRL),
+	dump_register(CRCR),
+	dump_register(DCBAAP_LOW),
+	dump_register(DCBAAP_HIGH),
+	dump_register(CONFIG),
+};
+
+static const struct debugfs_reg32 xhci_runtime_regs[] = {
+	dump_register(MFINDEX),
+	dump_register(IR0_IMAN),
+	dump_register(IR0_IMOD),
+	dump_register(IR0_ERSTSZ),
+	dump_register(IR0_ERSTBA_LOW),
+	dump_register(IR0_ERSTBA_HIGH),
+	dump_register(IR0_ERDP_LOW),
+	dump_register(IR0_ERDP_HIGH),
+};
+
+static const struct debugfs_reg32 xhci_extcap_legsup[] = {
+	dump_register(EXTCAP_USBLEGSUP),
+	dump_register(EXTCAP_USBLEGCTLSTS),
+};
+
+static const struct debugfs_reg32 xhci_extcap_protocol[] = {
+	dump_register(EXTCAP_REVISION),
+	dump_register(EXTCAP_NAME),
+	dump_register(EXTCAP_PORTINFO),
+	dump_register(EXTCAP_PORTTYPE),
+	dump_register(EXTCAP_MANTISSA1),
+	dump_register(EXTCAP_MANTISSA2),
+	dump_register(EXTCAP_MANTISSA3),
+	dump_register(EXTCAP_MANTISSA4),
+	dump_register(EXTCAP_MANTISSA5),
+	dump_register(EXTCAP_MANTISSA6),
+};
+
+static const struct debugfs_reg32 xhci_extcap_dbc[] = {
+	dump_register(EXTCAP_DBC_CAPABILITY),
+	dump_register(EXTCAP_DBC_DOORBELL),
+	dump_register(EXTCAP_DBC_ERSTSIZE),
+	dump_register(EXTCAP_DBC_ERST_LOW),
+	dump_register(EXTCAP_DBC_ERST_HIGH),
+	dump_register(EXTCAP_DBC_ERDP_LOW),
+	dump_register(EXTCAP_DBC_ERDP_HIGH),
+	dump_register(EXTCAP_DBC_CONTROL),
+	dump_register(EXTCAP_DBC_STATUS),
+	dump_register(EXTCAP_DBC_PORTSC),
+	dump_register(EXTCAP_DBC_CONT_LOW),
+	dump_register(EXTCAP_DBC_CONT_HIGH),
+	dump_register(EXTCAP_DBC_DEVINFO1),
+	dump_register(EXTCAP_DBC_DEVINFO2),
+};
+
+static struct dentry *xhci_debugfs_root;
+
+static struct xhci_regset *xhci_debugfs_alloc_regset(struct xhci_hcd *xhci)
+{
+	struct xhci_regset	*regset;
+
+	regset = kzalloc(sizeof(*regset), GFP_KERNEL);
+	if (!regset)
+		return NULL;
+
+	/*
+	 * Regsets are allocated and freed strictly in order, so no
+	 * locking is needed here.
+	 */
+	INIT_LIST_HEAD(&regset->list);
+	list_add_tail(&regset->list, &xhci->regset_list);
+
+	return regset;
+}
+
+static void xhci_debugfs_free_regset(struct xhci_regset *regset)
+{
+	if (!regset)
+		return;
+
+	list_del(&regset->list);
+	kfree(regset);
+}
+
+static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
+				const struct debugfs_reg32 *regs,
+				size_t nregs, struct dentry *parent,
+				const char *fmt, ...)
+{
+	struct xhci_regset	*rgs;
+	va_list			args;
+	struct debugfs_regset32	*regset;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+
+	rgs = xhci_debugfs_alloc_regset(xhci);
+	if (!rgs)
+		return;
+
+	va_start(args, fmt);
+	vsnprintf(rgs->name, sizeof(rgs->name), fmt, args);
+	va_end(args);
+
+	regset = &rgs->regset;
+	regset->regs = regs;
+	regset->nregs = nregs;
+	regset->base = hcd->regs + base;
+
+	debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
+}
+
+static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
+				       const struct debugfs_reg32 *regs,
+				       size_t n, const char *cap_name)
+{
+	u32			offset;
+	int			index = 0;
+	size_t			psic, nregs = n;
+	void __iomem		*base = &xhci->cap_regs->hc_capbase;
+
+	offset = xhci_find_next_ext_cap(base, 0, cap_id);
+	while (offset) {
+		if (cap_id == XHCI_EXT_CAPS_PROTOCOL) {
+			psic = XHCI_EXT_PORT_PSIC(readl(base + offset + 8));
+			nregs = min(4 + psic, n);
+		}
+
+		xhci_debugfs_regset(xhci, offset, regs, nregs,
+				    xhci->debugfs_root, "%s:%02d",
+				    cap_name, index);
+		offset = xhci_find_next_ext_cap(base, offset, cap_id);
+		index++;
+	}
+}
+
+static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
+{
+	dma_addr_t		dma;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
+
+	dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+	seq_printf(s, "%pad\n", &dma);
+
+	return 0;
+}
+
+static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
+{
+	dma_addr_t		dma;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
+
+	dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+	seq_printf(s, "%pad\n", &dma);
+
+	return 0;
+}
+
+static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
+{
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
+
+	seq_printf(s, "%d\n", ring->cycle_state);
+
+	return 0;
+}
+
+static void xhci_ring_dump_segment(struct seq_file *s,
+				   struct xhci_segment *seg)
+{
+	int			i;
+	dma_addr_t		dma;
+	union xhci_trb		*trb;
+
+	for (i = 0; i < TRBS_PER_SEGMENT; i++) {
+		trb = &seg->trbs[i];
+		dma = seg->dma + i * sizeof(*trb);
+		seq_printf(s, "%pad: %s\n", &dma,
+			   xhci_decode_trb(trb->generic.field[0],
+					   trb->generic.field[1],
+					   trb->generic.field[2],
+					   trb->generic.field[3]));
+	}
+}
+
+static int xhci_ring_trb_show(struct seq_file *s, void *unused)
+{
+	int			i;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
+	struct xhci_segment	*seg = ring->first_seg;
+
+	for (i = 0; i < ring->num_segs; i++) {
+		xhci_ring_dump_segment(s, seg);
+		seg = seg->next;
+	}
+
+	return 0;
+}
+
+static struct xhci_file_map ring_files[] = {
+	{"enqueue",		xhci_ring_enqueue_show, },
+	{"dequeue",		xhci_ring_dequeue_show, },
+	{"cycle",		xhci_ring_cycle_show, },
+	{"trbs",		xhci_ring_trb_show, },
+};
+
+static int xhci_ring_open(struct inode *inode, struct file *file)
+{
+	int			i;
+	struct xhci_file_map	*f_map;
+	const char		*file_name = file_dentry(file)->d_iname;
+
+	for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
+		f_map = &ring_files[i];
+
+		if (strcmp(f_map->name, file_name) == 0)
+			break;
+	}
+
+	return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_ring_fops = {
+	.open			= xhci_ring_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static int xhci_slot_context_show(struct seq_file *s, void *unused)
+{
+	struct xhci_hcd		*xhci;
+	struct xhci_slot_ctx	*slot_ctx;
+	struct xhci_slot_priv	*priv = s->private;
+	struct xhci_virt_device	*dev = priv->dev;
+
+	xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+	seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
+		   xhci_decode_slot_context(slot_ctx->dev_info,
+					    slot_ctx->dev_info2,
+					    slot_ctx->tt_info,
+					    slot_ctx->dev_state));
+
+	return 0;
+}
+
+static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
+{
+	int			dci;
+	dma_addr_t		dma;
+	struct xhci_hcd		*xhci;
+	struct xhci_ep_ctx	*ep_ctx;
+	struct xhci_slot_priv	*priv = s->private;
+	struct xhci_virt_device	*dev = priv->dev;
+
+	xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+
+	for (dci = 1; dci < 32; dci++) {
+		ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
+		dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+		seq_printf(s, "%pad: %s\n", &dma,
+			   xhci_decode_ep_context(ep_ctx->ep_info,
+						  ep_ctx->ep_info2,
+						  ep_ctx->deq,
+						  ep_ctx->tx_info));
+	}
+
+	return 0;
+}
+
+static int xhci_device_name_show(struct seq_file *s, void *unused)
+{
+	struct xhci_slot_priv	*priv = s->private;
+	struct xhci_virt_device	*dev = priv->dev;
+
+	seq_printf(s, "%s\n", dev_name(&dev->udev->dev));
+
+	return 0;
+}
+
+static struct xhci_file_map context_files[] = {
+	{"name",		xhci_device_name_show, },
+	{"slot-context",	xhci_slot_context_show, },
+	{"ep-context",		xhci_endpoint_context_show, },
+};
+
+static int xhci_context_open(struct inode *inode, struct file *file)
+{
+	int			i;
+	struct xhci_file_map	*f_map;
+	const char		*file_name = file_dentry(file)->d_iname;
+
+	for (i = 0; i < ARRAY_SIZE(context_files); i++) {
+		f_map = &context_files[i];
+
+		if (strcmp(f_map->name, file_name) == 0)
+			break;
+	}
+
+	return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_context_fops = {
+	.open			= xhci_context_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+
+static int xhci_portsc_show(struct seq_file *s, void *unused)
+{
+	struct xhci_port	*port = s->private;
+	u32			portsc;
+
+	portsc = readl(port->addr);
+	seq_printf(s, "%s\n", xhci_decode_portsc(portsc));
+
+	return 0;
+}
+
+static int xhci_port_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, xhci_portsc_show, inode->i_private);
+}
+
+static ssize_t xhci_port_write(struct file *file,  const char __user *ubuf,
+			       size_t count, loff_t *ppos)
+{
+	struct seq_file         *s = file->private_data;
+	struct xhci_port	*port = s->private;
+	struct xhci_hcd		*xhci = hcd_to_xhci(port->rhub->hcd);
+	char                    buf[32] = { };
+	u32			portsc;
+	unsigned long		flags;
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (!strncmp(buf, "compliance", 10)) {
+		/* If CTC is clear, compliance is enabled by default */
+		if (!HCC2_CTC(xhci->hcc_params2))
+			return count;
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* compliance mode can only be enabled on ports in RxDetect */
+		portsc = readl(port->addr);
+		if ((portsc & PORT_PLS_MASK) != XDEV_RXDETECT) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			return -EPERM;
+		}
+		portsc = xhci_port_state_to_neutral(portsc);
+		portsc &= ~PORT_PLS_MASK;
+		portsc |= PORT_LINK_STROBE | XDEV_COMP_MODE;
+		writel(portsc, port->addr);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	} else {
+		return -EINVAL;
+	}
+	return count;
+}
+
+static const struct file_operations port_fops = {
+	.open			= xhci_port_open,
+	.write                  = xhci_port_write,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
+				      struct xhci_file_map *files,
+				      size_t nentries, void *data,
+				      struct dentry *parent,
+				      const struct file_operations *fops)
+{
+	int			i;
+
+	for (i = 0; i < nentries; i++)
+		debugfs_create_file(files[i].name, 0444, parent, data, fops);
+}
+
+static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
+						   struct xhci_ring **ring,
+						   const char *name,
+						   struct dentry *parent)
+{
+	struct dentry		*dir;
+
+	dir = debugfs_create_dir(name, parent);
+	xhci_debugfs_create_files(xhci, ring_files, ARRAY_SIZE(ring_files),
+				  ring, dir, &xhci_ring_fops);
+
+	return dir;
+}
+
+static void xhci_debugfs_create_context_files(struct xhci_hcd *xhci,
+					      struct dentry *parent,
+					      int slot_id)
+{
+	struct xhci_virt_device	*dev = xhci->devs[slot_id];
+
+	xhci_debugfs_create_files(xhci, context_files,
+				  ARRAY_SIZE(context_files),
+				  dev->debugfs_private,
+				  parent, &xhci_context_fops);
+}
+
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+				  struct xhci_virt_device *dev,
+				  int ep_index)
+{
+	struct xhci_ep_priv	*epriv;
+	struct xhci_slot_priv	*spriv = dev->debugfs_private;
+
+	if (spriv->eps[ep_index])
+		return;
+
+	epriv = kzalloc(sizeof(*epriv), GFP_KERNEL);
+	if (!epriv)
+		return;
+
+	snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
+	epriv->root = xhci_debugfs_create_ring_dir(xhci,
+						   &dev->eps[ep_index].ring,
+						   epriv->name,
+						   spriv->root);
+	spriv->eps[ep_index] = epriv;
+}
+
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+				  struct xhci_virt_device *dev,
+				  int ep_index)
+{
+	struct xhci_ep_priv	*epriv;
+	struct xhci_slot_priv	*spriv = dev->debugfs_private;
+
+	if (!spriv || !spriv->eps[ep_index])
+		return;
+
+	epriv = spriv->eps[ep_index];
+	debugfs_remove_recursive(epriv->root);
+	spriv->eps[ep_index] = NULL;
+	kfree(epriv);
+}
+
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_slot_priv	*priv;
+	struct xhci_virt_device	*dev = xhci->devs[slot_id];
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return;
+
+	snprintf(priv->name, sizeof(priv->name), "%02d", slot_id);
+	priv->root = debugfs_create_dir(priv->name, xhci->debugfs_slots);
+	priv->dev = dev;
+	dev->debugfs_private = priv;
+
+	xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
+				     "ep00", priv->root);
+
+	xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
+}
+
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
+{
+	int			i;
+	struct xhci_slot_priv	*priv;
+	struct xhci_virt_device	*dev = xhci->devs[slot_id];
+
+	if (!dev || !dev->debugfs_private)
+		return;
+
+	priv = dev->debugfs_private;
+
+	debugfs_remove_recursive(priv->root);
+
+	for (i = 0; i < 31; i++)
+		kfree(priv->eps[i]);
+
+	kfree(priv);
+	dev->debugfs_private = NULL;
+}
+
+static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
+				      struct dentry *parent)
+{
+	unsigned int		num_ports;
+	char			port_name[8];
+	struct xhci_port	*port;
+	struct dentry		*dir;
+
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+	parent = debugfs_create_dir("ports", parent);
+
+	while (num_ports--) {
+		scnprintf(port_name, sizeof(port_name), "port%02d",
+			  num_ports + 1);
+		dir = debugfs_create_dir(port_name, parent);
+		port = &xhci->hw_ports[num_ports];
+		debugfs_create_file("portsc", 0644, dir, port, &port_fops);
+	}
+}
+
+void xhci_debugfs_init(struct xhci_hcd *xhci)
+{
+	struct device		*dev = xhci_to_hcd(xhci)->self.controller;
+
+	xhci->debugfs_root = debugfs_create_dir(dev_name(dev),
+						xhci_debugfs_root);
+
+	INIT_LIST_HEAD(&xhci->regset_list);
+
+	xhci_debugfs_regset(xhci,
+			    0,
+			    xhci_cap_regs, ARRAY_SIZE(xhci_cap_regs),
+			    xhci->debugfs_root, "reg-cap");
+
+	xhci_debugfs_regset(xhci,
+			    HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)),
+			    xhci_op_regs, ARRAY_SIZE(xhci_op_regs),
+			    xhci->debugfs_root, "reg-op");
+
+	xhci_debugfs_regset(xhci,
+			    readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK,
+			    xhci_runtime_regs, ARRAY_SIZE(xhci_runtime_regs),
+			    xhci->debugfs_root, "reg-runtime");
+
+	xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_LEGACY,
+				   xhci_extcap_legsup,
+				   ARRAY_SIZE(xhci_extcap_legsup),
+				   "reg-ext-legsup");
+
+	xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_PROTOCOL,
+				   xhci_extcap_protocol,
+				   ARRAY_SIZE(xhci_extcap_protocol),
+				   "reg-ext-protocol");
+
+	xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_DEBUG,
+				   xhci_extcap_dbc,
+				   ARRAY_SIZE(xhci_extcap_dbc),
+				   "reg-ext-dbc");
+
+	xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
+				     "command-ring",
+				     xhci->debugfs_root);
+
+	xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
+				     "event-ring",
+				     xhci->debugfs_root);
+
+	xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
+
+	xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
+}
+
+void xhci_debugfs_exit(struct xhci_hcd *xhci)
+{
+	struct xhci_regset	*rgs, *tmp;
+
+	debugfs_remove_recursive(xhci->debugfs_root);
+	xhci->debugfs_root = NULL;
+	xhci->debugfs_slots = NULL;
+
+	list_for_each_entry_safe(rgs, tmp, &xhci->regset_list, list)
+		xhci_debugfs_free_regset(rgs);
+}
+
+void __init xhci_debugfs_create_root(void)
+{
+	xhci_debugfs_root = debugfs_create_dir("xhci", usb_debug_root);
+}
+
+void __exit xhci_debugfs_remove_root(void)
+{
+	debugfs_remove_recursive(xhci_debugfs_root);
+	xhci_debugfs_root = NULL;
+}
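
Additional register banks follow the same pattern: declare a debugfs_reg32 table
and hand it to xhci_debugfs_regset() from xhci_debugfs_init(). A sketch with a
made-up vendor bank; the register names and the 0xc100 offset are illustrative
only:

	/* Hypothetical vendor register bank, for illustration. */
	static const struct debugfs_reg32 xhci_vendor_regs[] = {
		{ .name = "VENDOR_CTRL",   .offset = 0x00 },
		{ .name = "VENDOR_STATUS", .offset = 0x04 },
	};

		/* ... then, inside xhci_debugfs_init(): */
		xhci_debugfs_regset(xhci, 0xc100,
				    xhci_vendor_regs, ARRAY_SIZE(xhci_vendor_regs),
				    xhci->debugfs_root, "reg-vendor");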
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
new file mode 100644
index 0000000..ac5bc40
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xhci-debugfs.h - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#ifndef __LINUX_XHCI_DEBUGFS_H
+#define __LINUX_XHCI_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#define DEBUGFS_NAMELEN 32
+
+#define REG_CAPLENGTH					0x00
+#define REG_HCSPARAMS1					0x04
+#define REG_HCSPARAMS2					0x08
+#define REG_HCSPARAMS3					0x0c
+#define REG_HCCPARAMS1					0x10
+#define REG_DOORBELLOFF					0x14
+#define REG_RUNTIMEOFF					0x18
+#define REG_HCCPARAMS2					0x1c
+
+#define	REG_USBCMD					0x00
+#define REG_USBSTS					0x04
+#define REG_PAGESIZE					0x08
+#define REG_DNCTRL					0x14
+#define REG_CRCR					0x18
+#define REG_DCBAAP_LOW					0x30
+#define REG_DCBAAP_HIGH					0x34
+#define REG_CONFIG					0x38
+
+#define REG_MFINDEX					0x00
+#define REG_IR0_IMAN					0x20
+#define REG_IR0_IMOD					0x24
+#define REG_IR0_ERSTSZ					0x28
+#define REG_IR0_ERSTBA_LOW				0x30
+#define REG_IR0_ERSTBA_HIGH				0x34
+#define REG_IR0_ERDP_LOW				0x38
+#define REG_IR0_ERDP_HIGH				0x3c
+
+#define REG_EXTCAP_USBLEGSUP				0x00
+#define REG_EXTCAP_USBLEGCTLSTS				0x04
+
+#define REG_EXTCAP_REVISION				0x00
+#define REG_EXTCAP_NAME					0x04
+#define REG_EXTCAP_PORTINFO				0x08
+#define REG_EXTCAP_PORTTYPE				0x0c
+#define REG_EXTCAP_MANTISSA1				0x10
+#define REG_EXTCAP_MANTISSA2				0x14
+#define REG_EXTCAP_MANTISSA3				0x18
+#define REG_EXTCAP_MANTISSA4				0x1c
+#define REG_EXTCAP_MANTISSA5				0x20
+#define REG_EXTCAP_MANTISSA6				0x24
+
+#define REG_EXTCAP_DBC_CAPABILITY			0x00
+#define REG_EXTCAP_DBC_DOORBELL				0x04
+#define REG_EXTCAP_DBC_ERSTSIZE				0x08
+#define REG_EXTCAP_DBC_ERST_LOW				0x10
+#define REG_EXTCAP_DBC_ERST_HIGH			0x14
+#define REG_EXTCAP_DBC_ERDP_LOW				0x18
+#define REG_EXTCAP_DBC_ERDP_HIGH			0x1c
+#define REG_EXTCAP_DBC_CONTROL				0x20
+#define REG_EXTCAP_DBC_STATUS				0x24
+#define REG_EXTCAP_DBC_PORTSC				0x28
+#define REG_EXTCAP_DBC_CONT_LOW				0x30
+#define REG_EXTCAP_DBC_CONT_HIGH			0x34
+#define REG_EXTCAP_DBC_DEVINFO1				0x38
+#define REG_EXTCAP_DBC_DEVINFO2				0x3c
+
+#define dump_register(nm)				\
+{							\
+	.name	= __stringify(nm),			\
+	.offset	= REG_ ##nm,				\
+}
+
+struct xhci_regset {
+	char			name[DEBUGFS_NAMELEN];
+	struct debugfs_regset32	regset;
+	size_t			nregs;
+	struct dentry		*parent;
+	struct list_head	list;
+};
+
+struct xhci_file_map {
+	const char		*name;
+	int			(*show)(struct seq_file *s, void *unused);
+};
+
+struct xhci_ep_priv {
+	char			name[DEBUGFS_NAMELEN];
+	struct dentry		*root;
+};
+
+struct xhci_slot_priv {
+	char			name[DEBUGFS_NAMELEN];
+	struct dentry		*root;
+	struct xhci_ep_priv	*eps[31];
+	struct xhci_virt_device	*dev;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void xhci_debugfs_init(struct xhci_hcd *xhci);
+void xhci_debugfs_exit(struct xhci_hcd *xhci);
+void __init xhci_debugfs_create_root(void);
+void __exit xhci_debugfs_remove_root(void);
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+				  struct xhci_virt_device *virt_dev,
+				  int ep_index);
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+				  struct xhci_virt_device *virt_dev,
+				  int ep_index);
+#else
+static inline void xhci_debugfs_init(struct xhci_hcd *xhci) { }
+static inline void xhci_debugfs_exit(struct xhci_hcd *xhci) { }
+static inline void __init xhci_debugfs_create_root(void) { }
+static inline void __exit xhci_debugfs_remove_root(void) { }
+static inline void xhci_debugfs_create_slot(struct xhci_hcd *x, int s) { }
+static inline void xhci_debugfs_remove_slot(struct xhci_hcd *x, int s) { }
+static inline void
+xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+			     struct xhci_virt_device *virt_dev,
+			     int ep_index) { }
+static inline void
+xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+			     struct xhci_virt_device *virt_dev,
+			     int ep_index) { }
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __LINUX_XHCI_DEBUGFS_H */
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
new file mode 100644
index 0000000..399113f
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XHCI extended capability handling
+ *
+ * Copyright (c) 2017 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/platform_device.h>
+#include "xhci.h"
+
+#define USB_SW_DRV_NAME		"intel_xhci_usb_sw"
+#define USB_SW_RESOURCE_SIZE	0x400
+
+static void xhci_intel_unregister_pdev(void *arg)
+{
+	platform_device_unregister(arg);
+}
+
+static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct device *dev = hcd->self.controller;
+	struct platform_device *pdev;
+	struct resource	res = { 0, };
+	int ret;
+
+	pdev = platform_device_alloc(USB_SW_DRV_NAME, PLATFORM_DEVID_NONE);
+	if (!pdev) {
+		xhci_err(xhci, "couldn't allocate %s platform device\n",
+			 USB_SW_DRV_NAME);
+		return -ENOMEM;
+	}
+
+	res.start = hcd->rsrc_start + cap_offset;
+	res.end	  = res.start + USB_SW_RESOURCE_SIZE - 1;
+	res.name  = USB_SW_DRV_NAME;
+	res.flags = IORESOURCE_MEM;
+
+	ret = platform_device_add_resources(pdev, &res, 1);
+	if (ret) {
+		dev_err(dev, "couldn't add resources to intel_xhci_usb_sw pdev\n");
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	pdev->dev.parent = dev;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		dev_err(dev, "couldn't register intel_xhci_usb_sw pdev\n");
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	ret = devm_add_action_or_reset(dev, xhci_intel_unregister_pdev, pdev);
+	if (ret) {
+		dev_err(dev, "couldn't add unregister action for intel_xhci_usb_sw pdev\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int xhci_ext_cap_init(struct xhci_hcd *xhci)
+{
+	void __iomem *base = &xhci->cap_regs->hc_capbase;
+	u32 offset, val;
+	int ret;
+
+	offset = xhci_find_next_ext_cap(base, 0, 0);
+
+	while (offset) {
+		val = readl(base + offset);
+
+		switch (XHCI_EXT_CAPS_ID(val)) {
+		case XHCI_EXT_CAPS_VENDOR_INTEL:
+			if (xhci->quirks & XHCI_INTEL_USB_ROLE_SW) {
+				ret = xhci_create_intel_xhci_sw_pdev(xhci,
+								     offset);
+				if (ret)
+					return ret;
+			}
+			break;
+		}
+		offset = xhci_find_next_ext_cap(base, offset, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_ext_cap_init);
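
The MMIO resource carved out above is consumed by a separate platform driver
that binds to USB_SW_DRV_NAME (in the tree this is the Intel xHCI USB
role-switch driver). A skeletal sketch of the consumer side, trimmed down to
the binding itself:

	/* Skeletal consumer of the "intel_xhci_usb_sw" platform device. */
	static int intel_xhci_usb_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);

		return PTR_ERR_OR_ZERO(regs);
	}

	static struct platform_driver intel_xhci_usb_driver = {
		.driver = { .name = "intel_xhci_usb_sw" },
		.probe = intel_xhci_usb_probe,
	};
	module_platform_driver(intel_xhci_usb_driver);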
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 0000000..268328c
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+/* Up to 16 ms to halt an HC */
+#define XHCI_MAX_HALT_USEC	(16*1000)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT		(1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET	0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p)	(((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET		0x00
+#define XHCI_STS_OFFSET		0x04
+
+#define XHCI_MAX_EXT_CAPS		50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p)	(((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p)	(((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p)	(((p)>>8)&0xff)
+#define	XHCI_EXT_CAPS_VAL(p)	((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY	1
+#define XHCI_EXT_CAPS_PROTOCOL	2
+#define XHCI_EXT_CAPS_PM	3
+#define XHCI_EXT_CAPS_VIRT	4
+#define XHCI_EXT_CAPS_ROUTE	5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG	10
+/* Vendor caps */
+#define XHCI_EXT_CAPS_VENDOR_INTEL	192
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED	(1 << 16)
+#define XHCI_HC_OS_OWNED	(1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET	(0x00)
+
+/* USB Legacy Support Control and Status Register  - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET	(0x04)
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define	XHCI_LEGACY_DISABLE_SMI		((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS		(0x7 << 29)
+
+/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
+#define XHCI_L1C               (1 << 16)
+
+/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
+#define XHCI_HLC               (1 << 19)
+#define XHCI_BLC               (1 << 20)
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define XHCI_CMD_RUN		(1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE		(1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE		(1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE		(1 << 10)
+
+#define XHCI_IRQS		(XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR		(1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * xhci_find_next_ext_cap - find the offset of an extended capability
+ * @base:	PCI MMIO registers base address.
+ * @start:	address at which to start looking (0 or HCC_PARAMS to start at
+ *		the beginning of the list)
+ * @id:		extended capability ID to search for, or 0 for the next
+ *		capability
+ *
+ * Returns the offset of the next matching extended capability structure.
+ * Some capabilities can occur several times, e.g., XHCI_EXT_CAPS_PROTOCOL,
+ * and this provides a way to find them all.
+ */
+static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id)
+{
+	u32 val;
+	u32 next;
+	u32 offset;
+
+	offset = start;
+	if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
+		val = readl(base + XHCI_HCC_PARAMS_OFFSET);
+		if (val == ~0)
+			return 0;
+		offset = XHCI_HCC_EXT_CAPS(val) << 2;
+		if (!offset)
+			return 0;
+	}
+	do {
+		val = readl(base + offset);
+		if (val == ~0)
+			return 0;
+		if (offset != start && (id == 0 || XHCI_EXT_CAPS_ID(val) == id))
+			return offset;
+
+		next = XHCI_EXT_CAPS_NEXT(val);
+		offset += next << 2;
+	} while (next);
+
+	return 0;
+}
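
Because the previous offset can be passed back in as start, repeated
capabilities are easy to enumerate. For example, a sketch that counts the
supported-protocol capability blocks of a controller; base must point at the
capability registers:

	static int __maybe_unused count_protocol_caps(void __iomem *base)
	{
		int count = 0;
		int offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);

		while (offset) {
			count++;
			offset = xhci_find_next_ext_cap(base, offset,
							XHCI_EXT_CAPS_PROTOCOL);
		}

		return count;
	}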
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
new file mode 100644
index 0000000..3c4abb5
--- /dev/null
+++ b/drivers/usb/host/xhci-histb.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2017-2018 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Jianguo Sun <sunjianguo1@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "xhci.h"
+
+#define GTXTHRCFG		0xc108
+#define GRXTHRCFG		0xc10c
+#define REG_GUSB2PHYCFG0	0xc200
+#define BIT_UTMI_8_16		BIT(3)
+#define BIT_UTMI_ULPI		BIT(4)
+#define BIT_FREECLK_EXIST	BIT(30)
+
+#define REG_GUSB3PIPECTL0	0xc2c0
+#define USB3_DEEMPHASIS_MASK	GENMASK(2, 1)
+#define USB3_DEEMPHASIS0	BIT(1)
+#define USB3_TX_MARGIN1		BIT(4)
+
+struct xhci_hcd_histb {
+	struct device		*dev;
+	struct usb_hcd		*hcd;
+	void __iomem		*ctrl;
+	struct clk		*bus_clk;
+	struct clk		*utmi_clk;
+	struct clk		*pipe_clk;
+	struct clk		*suspend_clk;
+	struct reset_control	*soft_reset;
+};
+
+static inline struct xhci_hcd_histb *hcd_to_histb(struct usb_hcd *hcd)
+{
+	return dev_get_drvdata(hcd->self.controller);
+}
+
+static int xhci_histb_config(struct xhci_hcd_histb *histb)
+{
+	struct device_node *np = histb->dev->of_node;
+	u32 regval;
+
+	if (of_property_match_string(np, "phys-names", "inno") >= 0) {
+		/* USB2 PHY: select the 8-bit UTMI interface */
+		regval = readl(histb->ctrl + REG_GUSB2PHYCFG0);
+		regval &= ~BIT_UTMI_ULPI;
+		regval &= ~(BIT_UTMI_8_16);
+		regval &= ~BIT_FREECLK_EXIST;
+		writel(regval, histb->ctrl + REG_GUSB2PHYCFG0);
+	}
+
+	if (of_property_match_string(np, "phys-names", "combo") >= 0) {
+		/*
+		 * Write 0x010c0012 to GUSB3PIPECTL0:
+		 * GUSB3PIPECTL0[5:3] = 010: Tx Margin = 900mV,
+		 * to decrease the TX voltage
+		 * GUSB3PIPECTL0[2:1] = 01: Tx Deemphasis = -3.5dB,
+		 * as recommended by the xHCI spec
+		 */
+		regval = readl(histb->ctrl + REG_GUSB3PIPECTL0);
+		regval &= ~USB3_DEEMPHASIS_MASK;
+		regval |= USB3_DEEMPHASIS0;
+		regval |= USB3_TX_MARGIN1;
+		writel(regval, histb->ctrl + REG_GUSB3PIPECTL0);
+	}
+
+	writel(0x23100000, histb->ctrl + GTXTHRCFG);
+	writel(0x23100000, histb->ctrl + GRXTHRCFG);
+
+	return 0;
+}
+
+static int xhci_histb_clks_get(struct xhci_hcd_histb *histb)
+{
+	struct device *dev = histb->dev;
+
+	histb->bus_clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(histb->bus_clk)) {
+		dev_err(dev, "fail to get bus clk\n");
+		return PTR_ERR(histb->bus_clk);
+	}
+
+	histb->utmi_clk = devm_clk_get(dev, "utmi");
+	if (IS_ERR(histb->utmi_clk)) {
+		dev_err(dev, "fail to get utmi clk\n");
+		return PTR_ERR(histb->utmi_clk);
+	}
+
+	histb->pipe_clk = devm_clk_get(dev, "pipe");
+	if (IS_ERR(histb->pipe_clk)) {
+		dev_err(dev, "fail to get pipe clk\n");
+		return PTR_ERR(histb->pipe_clk);
+	}
+
+	histb->suspend_clk = devm_clk_get(dev, "suspend");
+	if (IS_ERR(histb->suspend_clk)) {
+		dev_err(dev, "fail to get suspend clk\n");
+		return PTR_ERR(histb->suspend_clk);
+	}
+
+	return 0;
+}
+
+static int xhci_histb_host_enable(struct xhci_hcd_histb *histb)
+{
+	int ret;
+
+	ret = clk_prepare_enable(histb->bus_clk);
+	if (ret) {
+		dev_err(histb->dev, "failed to enable bus clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(histb->utmi_clk);
+	if (ret) {
+		dev_err(histb->dev, "failed to enable utmi clk\n");
+		goto err_utmi_clk;
+	}
+
+	ret = clk_prepare_enable(histb->pipe_clk);
+	if (ret) {
+		dev_err(histb->dev, "failed to enable pipe clk\n");
+		goto err_pipe_clk;
+	}
+
+	ret = clk_prepare_enable(histb->suspend_clk);
+	if (ret) {
+		dev_err(histb->dev, "failed to enable suspend clk\n");
+		goto err_suspend_clk;
+	}
+
+	reset_control_deassert(histb->soft_reset);
+
+	return 0;
+
+err_suspend_clk:
+	clk_disable_unprepare(histb->pipe_clk);
+err_pipe_clk:
+	clk_disable_unprepare(histb->utmi_clk);
+err_utmi_clk:
+	clk_disable_unprepare(histb->bus_clk);
+
+	return ret;
+}
+
+static void xhci_histb_host_disable(struct xhci_hcd_histb *histb)
+{
+	reset_control_assert(histb->soft_reset);
+
+	clk_disable_unprepare(histb->suspend_clk);
+	clk_disable_unprepare(histb->pipe_clk);
+	clk_disable_unprepare(histb->utmi_clk);
+	clk_disable_unprepare(histb->bus_clk);
+}
+
+static void xhci_histb_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	/*
+	 * As of now, platform drivers don't provide MSI support, so make
+	 * sure here that the generic code does not try to make a pci_dev
+	 * from our dev struct in order to set up MSI.
+	 */
+	xhci->quirks |= XHCI_PLAT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_histb_setup(struct usb_hcd *hcd)
+{
+	struct xhci_hcd_histb *histb = hcd_to_histb(hcd);
+	int ret;
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		ret = xhci_histb_config(histb);
+		if (ret)
+			return ret;
+	}
+
+	return xhci_gen_setup(hcd, xhci_histb_quirks);
+}
+
+static const struct xhci_driver_overrides xhci_histb_overrides __initconst = {
+	.reset = xhci_histb_setup,
+};
+
+static struct hc_driver __read_mostly xhci_histb_hc_driver;
+static int xhci_histb_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct xhci_hcd_histb *histb;
+	const struct hc_driver *driver;
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+	struct resource *res;
+	int irq;
+	int ret = -ENODEV;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	driver = &xhci_histb_hc_driver;
+	histb = devm_kzalloc(dev, sizeof(*histb), GFP_KERNEL);
+	if (!histb)
+		return -ENOMEM;
+
+	histb->dev = dev;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	histb->ctrl = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(histb->ctrl))
+		return PTR_ERR(histb->ctrl);
+
+	ret = xhci_histb_clks_get(histb);
+	if (ret)
+		return ret;
+
+	histb->soft_reset = devm_reset_control_get(dev, "soft");
+	if (IS_ERR(histb->soft_reset)) {
+		dev_err(dev, "failed to get soft reset\n");
+		return PTR_ERR(histb->soft_reset);
+	}
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto disable_pm;
+
+	hcd = usb_create_hcd(driver, dev, dev_name(dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto disable_pm;
+	}
+
+	hcd->regs = histb->ctrl;
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	histb->hcd = hcd;
+	dev_set_drvdata(hcd->self.controller, histb);
+
+	ret = xhci_histb_host_enable(histb);
+	if (ret)
+		goto put_hcd;
+
+	xhci = hcd_to_xhci(hcd);
+
+	device_wakeup_enable(hcd->self.controller);
+
+	xhci->main_hcd = hcd;
+	xhci->shared_hcd = usb_create_shared_hcd(driver, dev, dev_name(dev),
+						 hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto disable_host;
+	}
+
+	if (device_property_read_bool(dev, "usb2-lpm-disable"))
+		xhci->quirks |= XHCI_HW_LPM_DISABLE;
+
+	if (device_property_read_bool(dev, "usb3-lpm-capable"))
+		xhci->quirks |= XHCI_LPM_SUPPORT;
+
+	/* imod_interval is the interrupt moderation value in nanoseconds. */
+	xhci->imod_interval = 40000;
+	device_property_read_u32(dev, "imod-interval-ns",
+				 &xhci->imod_interval);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto put_usb3_hcd;
+
+	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
+	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto dealloc_usb2_hcd;
+
+	device_enable_async_suspend(dev);
+	pm_runtime_put_noidle(dev);
+
+	/*
+	 * Prevent runtime pm from being on as default, users should enable
+	 * runtime pm using power/control in sysfs.
+	 */
+	pm_runtime_forbid(dev);
+
+	return 0;
+
+dealloc_usb2_hcd:
+	usb_remove_hcd(hcd);
+put_usb3_hcd:
+	usb_put_hcd(xhci->shared_hcd);
+disable_host:
+	xhci_histb_host_disable(histb);
+put_hcd:
+	usb_put_hcd(hcd);
+disable_pm:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static int xhci_histb_remove(struct platform_device *dev)
+{
+	struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
+	struct usb_hcd *hcd = histb->hcd;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct usb_hcd *shared_hcd = xhci->shared_hcd;
+
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+	usb_remove_hcd(shared_hcd);
+	xhci->shared_hcd = NULL;
+	device_wakeup_disable(&dev->dev);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(shared_hcd);
+
+	xhci_histb_host_disable(histb);
+	usb_put_hcd(hcd);
+	pm_runtime_put_sync(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
+	return 0;
+}
+
+static int __maybe_unused xhci_histb_suspend(struct device *dev)
+{
+	struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = histb->hcd;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	ret = xhci_suspend(xhci, device_may_wakeup(dev));
+
+	if (!device_may_wakeup(dev))
+		xhci_histb_host_disable(histb);
+
+	return ret;
+}
+
+static int __maybe_unused xhci_histb_resume(struct device *dev)
+{
+	struct xhci_hcd_histb *histb = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = histb->hcd;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!device_may_wakeup(dev))
+		xhci_histb_host_enable(histb);
+
+	return xhci_resume(xhci, 0);
+}
+
+static const struct dev_pm_ops xhci_histb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(xhci_histb_suspend, xhci_histb_resume)
+};
+#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &xhci_histb_pm_ops : NULL)
+
+#ifdef CONFIG_OF
+static const struct of_device_id histb_xhci_of_match[] = {
+	{ .compatible = "hisilicon,hi3798cv200-xhci"},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, histb_xhci_of_match);
+#endif
+
+static struct platform_driver histb_xhci_driver = {
+	.probe	= xhci_histb_probe,
+	.remove	= xhci_histb_remove,
+	.driver	= {
+		.name = "xhci-histb",
+		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(histb_xhci_of_match),
+	},
+};
+MODULE_ALIAS("platform:xhci-histb");
+
+static int __init xhci_histb_init(void)
+{
+	xhci_init_driver(&xhci_histb_hc_driver, &xhci_histb_overrides);
+	return platform_driver_register(&histb_xhci_driver);
+}
+module_init(xhci_histb_init);
+
+static void __exit xhci_histb_exit(void)
+{
+	platform_driver_unregister(&histb_xhci_driver);
+}
+module_exit(xhci_histb_exit);
+
+MODULE_DESCRIPTION("HiSilicon STB xHCI Host Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 0000000..01b5818
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+
+#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
+#define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+			 PORT_RC | PORT_PLC | PORT_PE)
+
+/* USB 3 BOS descriptor and capability descriptors, combined.
+ * Fields will be adjusted and added later in xhci_create_usb3_bos_desc()
+ */
+static u8 usb_bos_descriptor[] = {
+	USB_DT_BOS_SIZE,		/*  __u8 bLength, 5 bytes */
+	USB_DT_BOS,			/*  __u8 bDescriptorType */
+	0x0F, 0x00,			/*  __le16 wTotalLength, 15 bytes */
+	0x1,				/*  __u8 bNumDeviceCaps */
+	/* First device capability, SuperSpeed */
+	USB_DT_USB_SS_CAP_SIZE,		/*  __u8 bLength, 10 bytes */
+	USB_DT_DEVICE_CAPABILITY,	/* Device Capability */
+	USB_SS_CAP_TYPE,		/* bDevCapabilityType, SUPERSPEED_USB */
+	0x00,				/* bmAttributes, LTM off by default */
+	USB_5GBPS_OPERATION, 0x00,	/* wSpeedsSupported, 5Gbps only */
+	0x03,				/* bFunctionalitySupport,
+					   USB 3.0 speed only */
+	0x00,				/* bU1DevExitLat, set later. */
+	0x00, 0x00,			/* __le16 bU2DevExitLat, set later. */
+	/* Second device capability, SuperSpeedPlus */
+	0x1c,				/* bLength 28, will be adjusted later */
+	USB_DT_DEVICE_CAPABILITY,	/* Device Capability */
+	USB_SSP_CAP_TYPE,		/* bDevCapabilityType SUPERSPEED_PLUS */
+	0x00,				/* bReserved 0 */
+	0x23, 0x00, 0x00, 0x00,		/* bmAttributes, SSAC=3 SSIC=1 */
+	0x01, 0x00,			/* wFunctionalitySupport */
+	0x00, 0x00,			/* wReserved 0 */
+	/* Default Sublink Speed Attributes, overwrite if custom PSI exists */
+	0x34, 0x00, 0x05, 0x00,		/* 5Gbps, symmetric, rx, ID = 4 */
+	0xb4, 0x00, 0x05, 0x00,		/* 5Gbps, symmetric, tx, ID = 4 */
+	0x35, 0x40, 0x0a, 0x00,		/* 10Gbps, SSP, symmetric, rx, ID = 5 */
+	0xb5, 0x40, 0x0a, 0x00,		/* 10Gbps, SSP, symmetric, tx, ID = 5 */
+};
+
+static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+				     u16 wLength)
+{
+	int i, ssa_count;
+	u32 temp;
+	u16 desc_size, ssp_cap_size, ssa_size = 0;
+	bool usb3_1 = false;
+
+	desc_size = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+	ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
+
+	/* does xhci support USB 3.1 Enhanced SuperSpeed */
+	if (xhci->usb3_rhub.min_rev >= 0x01) {
+		/* does xhci provide a PSI table for SSA speed attributes? */
+		if (xhci->usb3_rhub.psi_count) {
+			/* two SSA entries for each unique PSI ID, RX and TX */
+			ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
+			ssa_size = ssa_count * sizeof(u32);
+			ssp_cap_size -= 16; /* skip copying the default SSA */
+		}
+		desc_size += ssp_cap_size;
+		usb3_1 = true;
+	}
+	memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
+
+	if (usb3_1) {
+		/* modify bos descriptor bNumDeviceCaps and wTotalLength */
+		buf[4] += 1;
+		put_unaligned_le16(desc_size + ssa_size, &buf[2]);
+	}
+
+	if (wLength < USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE)
+		return wLength;
+
+	/* Indicate whether the host has LTM support. */
+	temp = readl(&xhci->cap_regs->hcc_params);
+	if (HCC_LTC(temp))
+		buf[8] |= USB_LTM_SUPPORT;
+
+	/* Set the U1 and U2 exit latencies. */
+	if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
+		temp = readl(&xhci->cap_regs->hcs_params3);
+		buf[12] = HCS_U1_LATENCY(temp);
+		put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+	}
+
+	/* If PSI table exists, add the custom speed attributes from it */
+	if (usb3_1 && xhci->usb3_rhub.psi_count) {
+		u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
+		int offset;
+
+		ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+
+		if (wLength < desc_size)
+			return wLength;
+		buf[ssp_cap_base] = ssp_cap_size + ssa_size;
+
+		/* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
+		bm_attrib = (ssa_count - 1) & 0x1f;
+		bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
+		put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
+
+		if (wLength < desc_size + ssa_size)
+			return wLength;
+		/*
+		 * Create the Sublink Speed Attributes (SSA) array.
+		 * The xhci PSI field and USB 3.1 SSA fields are very similar,
+		 * but link type bits 7:6 differ for values 01b and 10b.
+		 * xhci has also only one PSI entry for a symmetric link when
+		 * USB 3.1 requires two SSA entries (RX and TX) for every link
+		 */
+		offset = desc_size;
+		for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+			psi = xhci->usb3_rhub.psi[i];
+			psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+			psi_exp = XHCI_EXT_PORT_PSIE(psi);
+			psi_mant = XHCI_EXT_PORT_PSIM(psi);
+
+			/* Shift to Gbps and set SSP Link BIT(14) if 10Gbps */
+			for (; psi_exp < 3; psi_exp++)
+				psi_mant /= 1000;
+			if (psi_mant >= 10)
+				psi |= BIT(14);
+
+			if ((psi & PLT_MASK) == PLT_SYM) {
+			/* Symmetric, create SSA RX and TX from one PSI entry */
+				put_unaligned_le32(psi, &buf[offset]);
+				psi |= 1 << 7;  /* turn entry to TX */
+				offset += 4;
+				if (offset >= desc_size + ssa_size)
+					return desc_size + ssa_size;
+			} else if ((psi & PLT_MASK) == PLT_ASYM_RX) {
+				/* Asymmetric RX, flip bits 7:6 for SSA */
+				psi ^= PLT_MASK;
+			}
+			put_unaligned_le32(psi, &buf[offset]);
+			offset += 4;
+			if (offset >= desc_size + ssa_size)
+				return desc_size + ssa_size;
+		}
+	}
+	/* ssa_size is 0 for hosts other than usb 3.1 */
+	return desc_size + ssa_size;
+}
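+
+/*
+ * Worked example for the PSI normalization loop above (an illustrative
+ * sketch, not from the original file).  A PSI dword with PSIE = 2 (Mb/s)
+ * and PSIM = 5000 is divided by 1000 once, giving mantissa 5 at exponent
+ * 3 (Gb/s); since 5 < 10, BIT(14) (the SSP link protocol bit) stays
+ * clear.  PSIE = 2 with PSIM = 10000 normalizes to 10Gbps and does set
+ * BIT(14).  Note the normalized mantissa is only used for this test; the
+ * SSA entry keeps the original PSI mantissa and exponent fields.
+ */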
+
+static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc, int ports)
+{
+	u16 temp;
+
+	desc->bPwrOn2PwrGood = 10;	/* 2ms units: 20ms; xhci 5.4.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 0;
+	/* Bits 1:0 - support per-port power switching, or power always on */
+	if (HCC_PPC(xhci->hcc_params))
+		temp |= HUB_CHAR_INDV_PORT_LPSM;
+	else
+		temp |= HUB_CHAR_NO_LPSM;
+	/* Bit  2 - root hubs are not part of a compound device */
+	/* Bits 4:3 - individual port over current protection */
+	temp |= HUB_CHAR_INDV_PORT_OCPM;
+	/* Bits 6:5 - no TTs in root ports */
+	/* Bit  7 - no port indicators */
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/* Fill in the USB 2.0 roothub descriptor */
+static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc)
+{
+	int ports;
+	u16 temp;
+	__u8 port_removable[(USB_MAXCHILDREN + 1 + 7) / 8];
+	u32 portsc;
+	unsigned int i;
+	struct xhci_hub *rhub;
+
+	rhub = &xhci->usb2_rhub;
+	ports = rhub->num_ports;
+	xhci_common_hub_descriptor(xhci, desc, ports);
+	desc->bDescriptorType = USB_DT_HUB;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
+
+	/* The Device Removable bits are reported on a byte granularity.
+	 * If the port doesn't exist within that byte, the bit is set to 0.
+	 */
+	memset(port_removable, 0, sizeof(port_removable));
+	for (i = 0; i < ports; i++) {
+		portsc = readl(rhub->ports[i]->addr);
+		/* If a device is removable, PORTSC reports a 0, same as in the
+		 * hub descriptor DeviceRemovable bits.
+		 */
+		if (portsc & PORT_DEV_REMOVE)
+			/* This math is hairy because bit 0 of DeviceRemovable
+			 * is reserved, and bit 1 is for port 1, etc.
+			 */
+			port_removable[(i + 1) / 8] |= 1 << ((i + 1) % 8);
+	}
+
+	/* ch11.h defines a hub descriptor that has room for USB_MAXCHILDREN
+	 * ports on it.  The USB 2.0 specification says that there are two
+	 * variable length fields at the end of the hub descriptor:
+	 * DeviceRemovable and PortPwrCtrlMask.  But since we can have fewer than
+	 * USB_MAXCHILDREN ports, we may need to use the DeviceRemovable array
+	 * to set PortPwrCtrlMask bits.  PortPwrCtrlMask must always be set to
+	 * 0xFF, so we initialize both arrays (DeviceRemovable and
+	 * PortPwrCtrlMask) to 0xFF.  Then we set the DeviceRemovable for each
+	 * set of ports that actually exist.
+	 */
+	memset(desc->u.hs.DeviceRemovable, 0xff,
+			sizeof(desc->u.hs.DeviceRemovable));
+	memset(desc->u.hs.PortPwrCtrlMask, 0xff,
+			sizeof(desc->u.hs.PortPwrCtrlMask));
+
+	for (i = 0; i < (ports + 1 + 7) / 8; i++)
+		memset(&desc->u.hs.DeviceRemovable[i], port_removable[i],
+				sizeof(__u8));
+}
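+
+/*
+ * Indexing sketch for the DeviceRemovable math above (illustrative, not
+ * from the original file): bit 0 of the bitmap is reserved, so one-based
+ * port N lands at byte N / 8, bit N % 8.  E.g. port 1 is byte 0 bit 1,
+ * port 8 is byte 1 bit 0, and port 9 is byte 1 bit 1.
+ */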
+
+/* Fill in the USB 3.0 roothub descriptor */
+static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc)
+{
+	int ports;
+	u16 port_removable;
+	u32 portsc;
+	unsigned int i;
+	struct xhci_hub *rhub;
+
+	rhub = &xhci->usb3_rhub;
+	ports = rhub->num_ports;
+	xhci_common_hub_descriptor(xhci, desc, ports);
+	desc->bDescriptorType = USB_DT_SS_HUB;
+	desc->bDescLength = USB_DT_SS_HUB_SIZE;
+
+	/* header decode latency should be zero for roothubs,
+	 * see section 4.23.5.2.
+	 */
+	desc->u.ss.bHubHdrDecLat = 0;
+	desc->u.ss.wHubDelay = 0;
+
+	port_removable = 0;
+	/* bit 0 is reserved, bit 1 is for port 1, etc. */
+	for (i = 0; i < ports; i++) {
+		portsc = readl(rhub->ports[i]->addr);
+		if (portsc & PORT_DEV_REMOVE)
+			port_removable |= 1 << (i + 1);
+	}
+
+	desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
+}
+
+static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		struct usb_hub_descriptor *desc)
+{
+
+	if (hcd->speed >= HCD_USB3)
+		xhci_usb3_hub_descriptor(hcd, xhci, desc);
+	else
+		xhci_usb2_hub_descriptor(hcd, xhci, desc);
+
+}
+
+static unsigned int xhci_port_speed(unsigned int port_status)
+{
+	if (DEV_LOWSPEED(port_status))
+		return USB_PORT_STAT_LOW_SPEED;
+	if (DEV_HIGHSPEED(port_status))
+		return USB_PORT_STAT_HIGH_SPEED;
+	/*
+	 * FIXME: Yes, we should check for full speed, but the core uses that as
+	 * a default in portspeed() in usb/core/hub.c (which is the only place
+	 * USB_PORT_STAT_*_SPEED is used).
+	 */
+	return 0;
+}
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define	XHCI_PORT_RO	((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS	((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define	XHCI_PORT_RW1S	((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS	((1<<1) | (0x7f<<17))
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define	XHCI_PORT_RW	((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define	XHCI_PORT_RZ	((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 xhci_port_state_to_neutral(u32 state)
+{
+	/* Save read-only status and port state */
+	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
+}
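+
+/*
+ * Usage sketch (illustrative, not from the original file): any
+ * read-modify-write of PORTSC should go through the helper above, since
+ * writing a '1' back to a set RW1CS change bit (e.g. PORT_CSC) would
+ * clear that change bit as a side effect:
+ *
+ *	temp = readl(port->addr);
+ *	temp = xhci_port_state_to_neutral(temp);
+ *	writel(temp | PORT_POWER, port->addr);
+ */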
+
+/*
+ * find slot id based on port number.
+ * @port: The one-based port number from one of the two split roothubs.
+ */
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		u16 port)
+{
+	int slot_id;
+	int i;
+	enum usb_device_speed speed;
+
+	slot_id = 0;
+	for (i = 0; i < MAX_HC_SLOTS; i++) {
+		if (!xhci->devs[i] || !xhci->devs[i]->udev)
+			continue;
+		speed = xhci->devs[i]->udev->speed;
+		if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
+				&& xhci->devs[i]->fake_port == port) {
+			slot_id = i;
+			break;
+		}
+	}
+
+	return slot_id;
+}
+
+/*
+ * Stop a device.
+ * Issues stop endpoint commands for endpoints 0 to 30, and waits for the
+ * last command to complete.
+ * @suspend: set to 1 if the suspend bit should be set in the commands.
+ */
+static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *cmd;
+	unsigned long flags;
+	int ret;
+	int i;
+
+	ret = 0;
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev)
+		return -ENODEV;
+
+	trace_xhci_stop_device(virt_dev);
+
+	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
+	if (!cmd)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = LAST_EP_INDEX; i > 0; i--) {
+		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+			struct xhci_ep_ctx *ep_ctx;
+			struct xhci_command *command;
+
+			ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+			/* Check ep is running, required by AMD SNPS 3.1 xHC */
+			if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
+				continue;
+
+			command = xhci_alloc_command(xhci, false, GFP_NOWAIT);
+			if (!command) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				ret = -ENOMEM;
+				goto cmd_cleanup;
+			}
+
+			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+						       i, suspend);
+			if (ret) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_free_command(xhci, command);
+				goto cmd_cleanup;
+			}
+		}
+	}
+	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto cmd_cleanup;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for last stop endpoint command to finish */
+	wait_for_completion(cmd->completion);
+
+	if (cmd->status == COMP_COMMAND_ABORTED ||
+	    cmd->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+
+cmd_cleanup:
+	xhci_free_command(xhci, cmd);
+	return ret;
+}
+
+/*
+ * Ring the device: ring all of its endpoint doorbells unconditionally.
+ */
+void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
+{
+	int i, s;
+	struct xhci_virt_ep *ep;
+
+	for (i = 0; i < LAST_EP_INDEX + 1; i++) {
+		ep = &xhci->devs[slot_id]->eps[i];
+
+		if (ep->ep_state & EP_HAS_STREAMS) {
+			for (s = 1; s < ep->stream_info->num_streams; s++)
+				xhci_ring_ep_doorbell(xhci, slot_id, i, s);
+		} else if (ep->ring && ep->ring->dequeue) {
+			xhci_ring_ep_doorbell(xhci, slot_id, i, 0);
+		}
+	}
+
+	return;
+}
+
+static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
+{
+	/* Don't allow the USB core to disable SuperSpeed ports. */
+	if (hcd->speed >= HCD_USB3) {
+		xhci_dbg(xhci, "Ignoring request to disable "
+				"SuperSpeed port.\n");
+		return;
+	}
+
+	if (xhci->quirks & XHCI_BROKEN_PORT_PED) {
+		xhci_dbg(xhci,
+			 "Broken Port Enabled/Disabled, ignoring port disable request.\n");
+		return;
+	}
+
+	/* Write 1 to disable the port */
+	writel(port_status | PORT_PE, addr);
+	port_status = readl(addr);
+	xhci_dbg(xhci, "disable port, actual port %d status  = 0x%x\n",
+			wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
+{
+	char *port_change_bit;
+	u32 status;
+
+	switch (wValue) {
+	case USB_PORT_FEAT_C_RESET:
+		status = PORT_RC;
+		port_change_bit = "reset";
+		break;
+	case USB_PORT_FEAT_C_BH_PORT_RESET:
+		status = PORT_WRC;
+		port_change_bit = "warm(BH) reset";
+		break;
+	case USB_PORT_FEAT_C_CONNECTION:
+		status = PORT_CSC;
+		port_change_bit = "connect";
+		break;
+	case USB_PORT_FEAT_C_OVER_CURRENT:
+		status = PORT_OCC;
+		port_change_bit = "over-current";
+		break;
+	case USB_PORT_FEAT_C_ENABLE:
+		status = PORT_PEC;
+		port_change_bit = "enable/disable";
+		break;
+	case USB_PORT_FEAT_C_SUSPEND:
+		status = PORT_PLC;
+		port_change_bit = "suspend/resume";
+		break;
+	case USB_PORT_FEAT_C_PORT_LINK_STATE:
+		status = PORT_PLC;
+		port_change_bit = "link state";
+		break;
+	case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+		status = PORT_CEC;
+		port_change_bit = "config error";
+		break;
+	default:
+		/* Should never happen */
+		return;
+	}
+	/* Change bits are all write 1 to clear */
+	writel(port_status | status, addr);
+	port_status = readl(addr);
+	xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
+			port_change_bit, wIndex, port_status);
+}
+
+struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+
+	if (hcd->speed >= HCD_USB3)
+		return &xhci->usb3_rhub;
+	return &xhci->usb2_rhub;
+}
+
+/*
+ * xhci_set_port_power() must be called with xhci->lock held.
+ * It will release and re-acquire the lock while calling the ACPI
+ * method.
+ */
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+				u16 index, bool on, unsigned long *flags)
+{
+	struct xhci_hub *rhub;
+	struct xhci_port *port;
+	u32 temp;
+
+	rhub = xhci_get_rhub(hcd);
+	port = rhub->ports[index];
+	temp = readl(port->addr);
+	temp = xhci_port_state_to_neutral(temp);
+	if (on) {
+		/* Power on */
+		writel(temp | PORT_POWER, port->addr);
+		temp = readl(port->addr);
+		xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n",
+						index, temp);
+	} else {
+		/* Power off */
+		writel(temp & ~PORT_POWER, port->addr);
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, *flags);
+	temp = usb_acpi_power_manageable(hcd->self.root_hub,
+					index);
+	if (temp)
+		usb_acpi_set_power_state(hcd->self.root_hub,
+			index, on);
+	spin_lock_irqsave(&xhci->lock, *flags);
+}
+
+static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
+	u16 test_mode, u16 wIndex)
+{
+	u32 temp;
+	struct xhci_port *port;
+
+	/* xhci only supports test mode for usb2 ports */
+	port = xhci->usb2_rhub.ports[wIndex];
+	temp = readl(port->addr + PORTPMSC);
+	temp |= test_mode << PORT_TEST_MODE_SHIFT;
+	writel(temp, port->addr + PORTPMSC);
+	xhci->test_mode = test_mode;
+	if (test_mode == TEST_FORCE_EN)
+		xhci_start(xhci);
+}
+
+static int xhci_enter_test_mode(struct xhci_hcd *xhci,
+				u16 test_mode, u16 wIndex, unsigned long *flags)
+{
+	int i, retval;
+
+	/* Disable all Device Slots */
+	xhci_dbg(xhci, "Disable all slots\n");
+	spin_unlock_irqrestore(&xhci->lock, *flags);
+	for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+		if (!xhci->devs[i])
+			continue;
+
+		retval = xhci_disable_slot(xhci, i);
+		if (retval)
+			xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
+				 i, retval);
+	}
+	spin_lock_irqsave(&xhci->lock, *flags);
+	/* Put all ports into the disabled state by clearing PP */
+	xhci_dbg(xhci, "Disable all ports (PP = 0)\n");
+	/* Power off USB3 ports */
+	for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
+		xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
+	/* Power off USB2 ports */
+	for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
+		xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
+	/* Stop the controller */
+	xhci_dbg(xhci, "Stop controller\n");
+	retval = xhci_halt(xhci);
+	if (retval)
+		return retval;
+	/* Disable runtime PM for test mode */
+	pm_runtime_forbid(xhci_to_hcd(xhci)->self.controller);
+	/* Set PORTPMSC.PTC field to enter selected test mode */
+	/* Port is selected by wIndex. port_id = wIndex + 1 */
+	xhci_dbg(xhci, "Enter Test Mode: %d, Port_id=%d\n",
+					test_mode, wIndex + 1);
+	xhci_port_set_test_mode(xhci, test_mode, wIndex);
+	return retval;
+}
+
+static int xhci_exit_test_mode(struct xhci_hcd *xhci)
+{
+	int retval;
+
+	if (!xhci->test_mode) {
+		xhci_err(xhci, "Not in test mode, do nothing.\n");
+		return 0;
+	}
+	if (xhci->test_mode == TEST_FORCE_EN &&
+		!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+		retval = xhci_halt(xhci);
+		if (retval)
+			return retval;
+	}
+	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
+	xhci->test_mode = 0;
+	return xhci_reset(xhci);
+}
+
+void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
+			 u32 link_state)
+{
+	u32 temp;
+
+	temp = readl(port->addr);
+	temp = xhci_port_state_to_neutral(temp);
+	temp &= ~PORT_PLS_MASK;
+	temp |= PORT_LINK_STROBE | link_state;
+	writel(temp, port->addr);
+}
+
+static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
+				      struct xhci_port *port, u16 wake_mask)
+{
+	u32 temp;
+
+	temp = readl(port->addr);
+	temp = xhci_port_state_to_neutral(temp);
+
+	if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
+		temp |= PORT_WKCONN_E;
+	else
+		temp &= ~PORT_WKCONN_E;
+
+	if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT)
+		temp |= PORT_WKDISC_E;
+	else
+		temp &= ~PORT_WKDISC_E;
+
+	if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT)
+		temp |= PORT_WKOC_E;
+	else
+		temp &= ~PORT_WKOC_E;
+
+	writel(temp, port->addr);
+}
+
+/* Test and clear port RWC bit */
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
+			     u32 port_bit)
+{
+	u32 temp;
+
+	temp = readl(port->addr);
+	if (temp & port_bit) {
+		temp = xhci_port_state_to_neutral(temp);
+		temp |= port_bit;
+		writel(temp, port->addr);
+	}
+}
+
+/* Updates Link Status for USB 2.1 port */
+static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
+{
+	if ((status_reg & PORT_PLS_MASK) == XDEV_U2)
+		*status |= USB_PORT_STAT_L1;
+}
+
+/* Updates Link Status for SuperSpeed port */
+static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+		u32 *status, u32 status_reg)
+{
+	u32 pls = status_reg & PORT_PLS_MASK;
+
+	/* Resume state is an xHCI internal state.
+	 * Do not report it to the usb core; instead, pretend to be U3,
+	 * so the usb core knows it's not ready for transfer.
+	 */
+	if (pls == XDEV_RESUME) {
+		*status |= USB_SS_PORT_LS_U3;
+		return;
+	}
+
+	/* When the CAS bit is set, a warm reset
+	 * should be performed on the port.
+	 */
+	if (status_reg & PORT_CAS) {
+		/* The CAS bit can be set while the port is
+		 * in any link state.
+		 * Only roothubs have CAS bit, so we
+		 * pretend to be in compliance mode
+		 * unless we're already in compliance
+		 * or the inactive state.
+		 */
+		if (pls != USB_SS_PORT_LS_COMP_MOD &&
+		    pls != USB_SS_PORT_LS_SS_INACTIVE) {
+			pls = USB_SS_PORT_LS_COMP_MOD;
+		}
+		/* Return also connection bit -
+		 * hub state machine resets port
+		 * when this bit is set.
+		 */
+		pls |= USB_PORT_STAT_CONNECTION;
+	} else {
+		/*
+		 * If CAS bit isn't set but the Port is already at
+		 * Compliance Mode, fake a connection so the USB core
+		 * notices the Compliance state and resets the port.
+		 * This resolves an issue generated by the SN65LVPE502CP
+		 * in which sometimes the port enters compliance mode
+		 * caused by a delay on the host-device negotiation.
+		 */
+		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+				(pls == USB_SS_PORT_LS_COMP_MOD))
+			pls |= USB_PORT_STAT_CONNECTION;
+	}
+
+	/* update status field */
+	*status |= pls;
+}
+
+/*
+ * Function for Compliance Mode Quirk.
+ *
+ * This function checks whether all xHC USB3 ports have entered U0; if so,
+ * the compliance mode recovery timer is deleted. A port won't enter
+ * compliance mode if it has previously entered U0.
+ */
+static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
+				    u16 wIndex)
+{
+	u32 all_ports_seen_u0 = ((1 << xhci->usb3_rhub.num_ports) - 1);
+	bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
+
+	if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
+		return;
+
+	if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
+		xhci->port_status_u0 |= 1 << wIndex;
+		if (xhci->port_status_u0 == all_ports_seen_u0) {
+			del_timer_sync(&xhci->comp_mode_recovery_timer);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"All USB3 ports have entered U0 already!");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Compliance Mode Recovery Timer Deleted.");
+		}
+	}
+}
+
+static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li)
+{
+	u32 ext_stat = 0;
+	int speed_id;
+
+	/* only support rx and tx lane counts of 1 in usb3.1 spec */
+	speed_id = DEV_PORT_SPEED(raw_port_status);
+	ext_stat |= speed_id;		/* bits 3:0, RX speed id */
+	ext_stat |= speed_id << 4;	/* bits 7:4, TX speed id */
+
+	ext_stat |= PORT_RX_LANES(port_li) << 8;  /* bits 11:8 Rx lane count */
+	ext_stat |= PORT_TX_LANES(port_li) << 12; /* bits 15:12 Tx lane count */
+
+	return ext_stat;
+}
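+
+/*
+ * Packing sketch for the extended port status above (illustrative, not
+ * from the original file): a speed ID of 5 replicated into the RX and TX
+ * nibbles gives 0x55 in the low byte, and the PORTLI lane counts fill
+ * bits 15:8.
+ */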
+
+/*
+ * Converts a raw xHCI port status into the format that external USB 2.0 or USB
+ * 3.0 hubs use.
+ *
+ * Possible side effects:
+ *  - Mark a port as being done with device resume,
+ *    and ring the endpoint doorbells.
+ *  - Stop the Synopsys redriver Compliance Mode polling.
+ *  - Drop and reacquire the xHCI lock, in order to wait for port resume.
+ */
+static u32 xhci_get_port_status(struct usb_hcd *hcd,
+		struct xhci_bus_state *bus_state,
+	u16 wIndex, u32 raw_port_status,
+		unsigned long flags)
+	__releases(&xhci->lock)
+	__acquires(&xhci->lock)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 status = 0;
+	int slot_id;
+	struct xhci_hub *rhub;
+	struct xhci_port *port;
+
+	rhub = xhci_get_rhub(hcd);
+	port = rhub->ports[wIndex];
+
+	/* wPortChange bits */
+	if (raw_port_status & PORT_CSC)
+		status |= USB_PORT_STAT_C_CONNECTION << 16;
+	if (raw_port_status & PORT_PEC)
+		status |= USB_PORT_STAT_C_ENABLE << 16;
+	if ((raw_port_status & PORT_OCC))
+		status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+	if ((raw_port_status & PORT_RC))
+		status |= USB_PORT_STAT_C_RESET << 16;
+	/* USB3.0 only */
+	if (hcd->speed >= HCD_USB3) {
+		/* Port link change with port in resume state should not be
+		 * reported to usbcore, as this is an internal state to be
+		 * handled by xhci driver. Reporting PLC to usbcore may
+		 * cause usbcore clearing PLC first and port change event
+		 * irq won't be generated.
+		 */
+		if ((raw_port_status & PORT_PLC) &&
+			(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
+			status |= USB_PORT_STAT_C_LINK_STATE << 16;
+		if ((raw_port_status & PORT_WRC))
+			status |= USB_PORT_STAT_C_BH_RESET << 16;
+		if ((raw_port_status & PORT_CEC))
+			status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+	}
+
+	if (hcd->speed < HCD_USB3) {
+		if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3
+				&& (raw_port_status & PORT_POWER))
+			status |= USB_PORT_STAT_SUSPEND;
+	}
+	if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
+		!DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
+		if ((raw_port_status & PORT_RESET) ||
+				!(raw_port_status & PORT_PE))
+			return 0xffffffff;
+		/* did port event handler already start resume timing? */
+		if (!bus_state->resume_done[wIndex]) {
+			/* If not, maybe we are in a host initiated resume? */
+			if (test_bit(wIndex, &bus_state->resuming_ports)) {
+				/* Host initiated resume doesn't time the resume
+				 * signalling using resume_done[].
+				 * It manually sets RESUME state, sleeps 20ms
+				 * and sets U0 state. This should probably be
+				 * changed, but not right now.
+				 */
+			} else {
+				/* port resume was discovered now and here,
+				 * start resume timing
+				 */
+				unsigned long timeout = jiffies +
+					msecs_to_jiffies(USB_RESUME_TIMEOUT);
+
+				set_bit(wIndex, &bus_state->resuming_ports);
+				bus_state->resume_done[wIndex] = timeout;
+				mod_timer(&hcd->rh_timer, timeout);
+				usb_hcd_start_port_resume(&hcd->self, wIndex);
+			}
+		/* Has resume been signalled for USB_RESUME_TIMEOUT yet? */
+		} else if (time_after_eq(jiffies,
+					 bus_state->resume_done[wIndex])) {
+			int time_left;
+
+			xhci_dbg(xhci, "Resume USB2 port %d\n",
+					wIndex + 1);
+			bus_state->resume_done[wIndex] = 0;
+			clear_bit(wIndex, &bus_state->resuming_ports);
+
+			set_bit(wIndex, &bus_state->rexit_ports);
+
+			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+			xhci_set_link_state(xhci, port, XDEV_U0);
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			time_left = wait_for_completion_timeout(
+					&bus_state->rexit_done[wIndex],
+					msecs_to_jiffies(
+						XHCI_MAX_REXIT_TIMEOUT_MS));
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			if (time_left) {
+				slot_id = xhci_find_slot_id_by_port(hcd,
+						xhci, wIndex + 1);
+				if (!slot_id) {
+					xhci_dbg(xhci, "slot_id is zero\n");
+					return 0xffffffff;
+				}
+				xhci_ring_device(xhci, slot_id);
+			} else {
+				int port_status = readl(port->addr);
+				xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+						XHCI_MAX_REXIT_TIMEOUT_MS,
+						port_status);
+				status |= USB_PORT_STAT_SUSPEND;
+				clear_bit(wIndex, &bus_state->rexit_ports);
+			}
+
+			usb_hcd_end_port_resume(&hcd->self, wIndex);
+			bus_state->port_c_suspend |= 1 << wIndex;
+			bus_state->suspended_ports &= ~(1 << wIndex);
+		} else {
+			/*
+			 * Resume has been signaling for less than
+			 * USB_RESUME_TIMEOUT. Report the port status as SUSPEND,
+			 * and let usbcore check the port status again and clear
+			 * the resume signaling later.
+			 */
+			status |= USB_PORT_STAT_SUSPEND;
+		}
+	}
+	/*
+	 * Clear stale usb2 resume signalling variables in case the port changed
+	 * state during resume signalling, for example on error.
+	 */
+	if ((bus_state->resume_done[wIndex] ||
+	     test_bit(wIndex, &bus_state->resuming_ports)) &&
+	    (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
+	    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
+		bus_state->resume_done[wIndex] = 0;
+		clear_bit(wIndex, &bus_state->resuming_ports);
+		usb_hcd_end_port_resume(&hcd->self, wIndex);
+	}
+
+	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
+	    (raw_port_status & PORT_POWER)) {
+		if (bus_state->suspended_ports & (1 << wIndex)) {
+			bus_state->suspended_ports &= ~(1 << wIndex);
+			if (hcd->speed < HCD_USB3)
+				bus_state->port_c_suspend |= 1 << wIndex;
+		}
+		bus_state->resume_done[wIndex] = 0;
+		clear_bit(wIndex, &bus_state->resuming_ports);
+	}
+	if (raw_port_status & PORT_CONNECT) {
+		status |= USB_PORT_STAT_CONNECTION;
+		status |= xhci_port_speed(raw_port_status);
+	}
+	if (raw_port_status & PORT_PE)
+		status |= USB_PORT_STAT_ENABLE;
+	if (raw_port_status & PORT_OC)
+		status |= USB_PORT_STAT_OVERCURRENT;
+	if (raw_port_status & PORT_RESET)
+		status |= USB_PORT_STAT_RESET;
+	if (raw_port_status & PORT_POWER) {
+		if (hcd->speed >= HCD_USB3)
+			status |= USB_SS_PORT_STAT_POWER;
+		else
+			status |= USB_PORT_STAT_POWER;
+	}
+	/* Update Port Link State */
+	if (hcd->speed >= HCD_USB3) {
+		xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
+		/*
+		 * Verify if all USB3 Ports Have entered U0 already.
+		 * Delete Compliance Mode Timer if so.
+		 */
+		xhci_del_comp_mod_timer(xhci, raw_port_status, wIndex);
+	} else {
+		xhci_hub_report_usb2_link_state(&status, raw_port_status);
+	}
+	if (bus_state->port_c_suspend & (1 << wIndex))
+		status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+	return status;
+}
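+
+/*
+ * Format sketch (illustrative, not from the original file): the returned
+ * value packs wPortStatus into bits 15:0 and wPortChange into bits 31:16.
+ * E.g. 0x00010103 on a USB2 root port means powered, enabled and
+ * connected, with a pending connect change.
+ */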
+
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+		u16 wIndex, char *buf, u16 wLength)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int max_ports;
+	unsigned long flags;
+	u32 temp, status;
+	int retval = 0;
+	int slot_id;
+	struct xhci_bus_state *bus_state;
+	u16 link_state = 0;
+	u16 wake_mask = 0;
+	u16 timeout = 0;
+	u16 test_mode = 0;
+	struct xhci_hub *rhub;
+	struct xhci_port **ports;
+
+	rhub = xhci_get_rhub(hcd);
+	ports = rhub->ports;
+	max_ports = rhub->num_ports;
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	switch (typeReq) {
+	case GetHubStatus:
+		/* No power source, over-current reported per port */
+		memset(buf, 0, 4);
+		break;
+	case GetHubDescriptor:
+		/* Check to make sure userspace is asking for the USB 3.0 hub
+		 * descriptor for the USB 3.0 roothub.  If not, we stall the
+		 * endpoint, like external hubs do.
+		 */
+		if (hcd->speed >= HCD_USB3 &&
+				(wLength < USB_DT_SS_HUB_SIZE ||
+				 wValue != (USB_DT_SS_HUB << 8))) {
+			xhci_dbg(xhci, "Wrong hub descriptor type for "
+					"USB 3.0 roothub.\n");
+			goto error;
+		}
+		xhci_hub_descriptor(hcd, xhci,
+				(struct usb_hub_descriptor *) buf);
+		break;
+	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+		if ((wValue & 0xff00) != (USB_DT_BOS << 8))
+			goto error;
+
+		if (hcd->speed < HCD_USB3)
+			goto error;
+
+		retval = xhci_create_usb3_bos_desc(xhci, buf, wLength);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return retval;
+	case GetPortStatus:
+		if (!wIndex || wIndex > max_ports)
+			goto error;
+		wIndex--;
+		temp = readl(ports[wIndex]->addr);
+		if (temp == ~(u32)0) {
+			xhci_hc_died(xhci);
+			retval = -ENODEV;
+			break;
+		}
+		trace_xhci_get_port_status(wIndex, temp);
+		status = xhci_get_port_status(hcd, bus_state, wIndex, temp,
+					      flags);
+		if (status == 0xffffffff)
+			goto error;
+
+		xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n",
+				wIndex, temp);
+		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+
+		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+		/* for USB 3.1 extended port status, return an additional 4 bytes */
+		if (wValue == 0x02) {
+			u32 port_li;
+
+			if (hcd->speed < HCD_USB31 || wLength != 8) {
+				xhci_err(xhci, "get ext port status invalid parameter\n");
+				retval = -EINVAL;
+				break;
+			}
+			port_li = readl(ports[wIndex]->addr + PORTLI);
+			status = xhci_get_ext_port_status(temp, port_li);
+			/* status is already CPU-endian; write it out LE */
+			put_unaligned_le32(status, &buf[4]);
+		}
+		break;
+	case SetPortFeature:
+		if (wValue == USB_PORT_FEAT_LINK_STATE)
+			link_state = (wIndex & 0xff00) >> 3;
+		if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
+			wake_mask = wIndex & 0xff00;
+		if (wValue == USB_PORT_FEAT_TEST)
+			test_mode = (wIndex & 0xff00) >> 8;
+		/* The MSB of wIndex is the U1/U2 timeout */
+		timeout = (wIndex & 0xff00) >> 8;
+		wIndex &= 0xff;
+		if (!wIndex || wIndex > max_ports)
+			goto error;
+		wIndex--;
+		temp = readl(ports[wIndex]->addr);
+		if (temp == ~(u32)0) {
+			xhci_hc_died(xhci);
+			retval = -ENODEV;
+			break;
+		}
+		temp = xhci_port_state_to_neutral(temp);
+		/* FIXME: What new port features do we need to support? */
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			temp = readl(ports[wIndex]->addr);
+			if ((temp & PORT_PLS_MASK) != XDEV_U0) {
+				/* Resume the port to U0 first */
+				xhci_set_link_state(xhci, ports[wIndex],
+							XDEV_U0);
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				msleep(10);
+				spin_lock_irqsave(&xhci->lock, flags);
+			}
+			/* Per the spec, software should not attempt to suspend
+			 * a port unless the port reports that it is in the
+			 * enabled (PED = '1', PLS < '3') state.
+			 */
+			temp = readl(ports[wIndex]->addr);
+			if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
+				|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
+				xhci_warn(xhci, "USB core suspending device not in U0/U1/U2.\n");
+				goto error;
+			}
+
+			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+					wIndex + 1);
+			if (!slot_id) {
+				xhci_warn(xhci, "slot_id is zero\n");
+				goto error;
+			}
+			/* unlock to execute stop endpoint commands */
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_stop_device(xhci, slot_id, 1);
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			xhci_set_link_state(xhci, ports[wIndex], XDEV_U3);
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			msleep(10); /* wait for the device to enter U3 */
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			temp = readl(ports[wIndex]->addr);
+			bus_state->suspended_ports |= 1 << wIndex;
+			break;
+		case USB_PORT_FEAT_LINK_STATE:
+			temp = readl(ports[wIndex]->addr);
+			/* Disable port */
+			if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+				xhci_dbg(xhci, "Disable port %d\n", wIndex);
+				temp = xhci_port_state_to_neutral(temp);
+				/*
+				 * Clear all change bits, so that we get a new
+				 * connection event.
+				 */
+				temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+					PORT_OCC | PORT_RC | PORT_PLC |
+					PORT_CEC;
+				writel(temp | PORT_PE, ports[wIndex]->addr);
+				temp = readl(ports[wIndex]->addr);
+				break;
+			}
+
+			/* Put link in RxDetect (enable port) */
+			if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+				xhci_dbg(xhci, "Enable port %d\n", wIndex);
+				xhci_set_link_state(xhci, ports[wIndex],
+							link_state);
+				temp = readl(ports[wIndex]->addr);
+				break;
+			}
+
+			/*
+			 * For xHCI 1.1 according to section 4.19.1.2.4.1 a
+			 * root hub port's transition to compliance mode upon
+			 * detecting LFPS timeout may be controlled by a
+			 * Compliance Transition Enabled (CTE) flag (not
+			 * software visible). This flag is set by writing 0xA
+			 * to PORTSC PLS field which will allow transition to
+			 * compliance mode the next time LFPS timeout is
+			 * encountered. A warm reset will clear it.
+			 *
+			 * The CTE flag is only supported if the HCCPARAMS2 CTC
+			 * flag is set, otherwise, the compliance substate is
+			 * automatically entered as on 1.0 and prior.
+			 */
+			if (link_state == USB_SS_PORT_LS_COMP_MOD) {
+				if (!HCC2_CTC(xhci->hcc_params2)) {
+					xhci_dbg(xhci, "CTC flag is 0, port already supports entering compliance mode\n");
+					break;
+				}
+
+				if ((temp & PORT_CONNECT)) {
+					xhci_warn(xhci, "Can't set compliance mode when port is connected\n");
+					goto error;
+				}
+
+				xhci_dbg(xhci, "Enable compliance mode transition for port %d\n",
+						wIndex);
+				xhci_set_link_state(xhci, ports[wIndex],
+						link_state);
+
+				temp = readl(ports[wIndex]->addr);
+				break;
+			}
+			/* Port must be enabled */
+			if (!(temp & PORT_PE)) {
+				retval = -ENODEV;
+				break;
+			}
+			/* Can't set port link state above '3' (U3) */
+			if (link_state > USB_SS_PORT_LS_U3) {
+				xhci_warn(xhci, "Cannot set port %d link state %d\n",
+					 wIndex, link_state);
+				goto error;
+			}
+			if (link_state == USB_SS_PORT_LS_U3) {
+				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+						wIndex + 1);
+				if (slot_id) {
+					/* unlock to execute stop endpoint
+					 * commands */
+					spin_unlock_irqrestore(&xhci->lock,
+								flags);
+					xhci_stop_device(xhci, slot_id, 1);
+					spin_lock_irqsave(&xhci->lock, flags);
+				}
+			}
+
+			xhci_set_link_state(xhci, ports[wIndex], link_state);
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			msleep(20); /* wait for the device to enter the state */
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			temp = readl(ports[wIndex]->addr);
+			if (link_state == USB_SS_PORT_LS_U3)
+				bus_state->suspended_ports |= 1 << wIndex;
+			break;
+		case USB_PORT_FEAT_POWER:
+			/*
+			 * Turn on ports, even if there isn't per-port switching.
+			 * HC will report connect events even before this is set.
+			 * However, hub_wq will ignore the roothub events until
+			 * the roothub is registered.
+			 */
+			xhci_set_port_power(xhci, hcd, wIndex, true, &flags);
+			break;
+		case USB_PORT_FEAT_RESET:
+			temp = (temp | PORT_RESET);
+			writel(temp, ports[wIndex]->addr);
+
+			temp = readl(ports[wIndex]->addr);
+			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
+			break;
+		case USB_PORT_FEAT_REMOTE_WAKE_MASK:
+			xhci_set_remote_wake_mask(xhci, ports[wIndex],
+						  wake_mask);
+			temp = readl(ports[wIndex]->addr);
+			xhci_dbg(xhci, "set port remote wake mask, "
+					"actual port %d status  = 0x%x\n",
+					wIndex, temp);
+			break;
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			temp |= PORT_WR;
+			writel(temp, ports[wIndex]->addr);
+			temp = readl(ports[wIndex]->addr);
+			break;
+		case USB_PORT_FEAT_U1_TIMEOUT:
+			if (hcd->speed < HCD_USB3)
+				goto error;
+			temp = readl(ports[wIndex]->addr + PORTPMSC);
+			temp &= ~PORT_U1_TIMEOUT_MASK;
+			temp |= PORT_U1_TIMEOUT(timeout);
+			writel(temp, ports[wIndex]->addr + PORTPMSC);
+			break;
+		case USB_PORT_FEAT_U2_TIMEOUT:
+			if (hcd->speed < HCD_USB3)
+				goto error;
+			temp = readl(ports[wIndex]->addr + PORTPMSC);
+			temp &= ~PORT_U2_TIMEOUT_MASK;
+			temp |= PORT_U2_TIMEOUT(timeout);
+			writel(temp, ports[wIndex]->addr + PORTPMSC);
+			break;
+		case USB_PORT_FEAT_TEST:
+			/* 4.19.6 Port Test Modes (USB2 Test Mode) */
+			if (hcd->speed != HCD_USB2)
+				goto error;
+			if (test_mode > TEST_FORCE_EN || test_mode < TEST_J)
+				goto error;
+			retval = xhci_enter_test_mode(xhci, test_mode, wIndex,
+						      &flags);
+			break;
+		default:
+			goto error;
+		}
+		/* unblock any posted writes */
+		temp = readl(ports[wIndex]->addr);
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > max_ports)
+			goto error;
+		wIndex--;
+		temp = readl(ports[wIndex]->addr);
+		if (temp == ~(u32)0) {
+			xhci_hc_died(xhci);
+			retval = -ENODEV;
+			break;
+		}
+		/* FIXME: What new port features do we need to support? */
+		temp = xhci_port_state_to_neutral(temp);
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			temp = readl(ports[wIndex]->addr);
+			xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
+			xhci_dbg(xhci, "PORTSC %04x\n", temp);
+			if (temp & PORT_RESET)
+				goto error;
+			if ((temp & PORT_PLS_MASK) == XDEV_U3) {
+				if ((temp & PORT_PE) == 0)
+					goto error;
+
+				set_bit(wIndex, &bus_state->resuming_ports);
+				usb_hcd_start_port_resume(&hcd->self, wIndex);
+				xhci_set_link_state(xhci, ports[wIndex],
+						    XDEV_RESUME);
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				msleep(USB_RESUME_TIMEOUT);
+				spin_lock_irqsave(&xhci->lock, flags);
+				xhci_set_link_state(xhci, ports[wIndex],
+							XDEV_U0);
+				clear_bit(wIndex, &bus_state->resuming_ports);
+				usb_hcd_end_port_resume(&hcd->self, wIndex);
+			}
+			bus_state->port_c_suspend |= 1 << wIndex;
+
+			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+					wIndex + 1);
+			if (!slot_id) {
+				xhci_dbg(xhci, "slot_id is zero\n");
+				goto error;
+			}
+			xhci_ring_device(xhci, slot_id);
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			bus_state->port_c_suspend &= ~(1 << wIndex);
+			/* fall through */
+		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_BH_PORT_RESET:
+		case USB_PORT_FEAT_C_CONNECTION:
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_PORT_LINK_STATE:
+		case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+					ports[wIndex]->addr, temp);
+			break;
+		case USB_PORT_FEAT_ENABLE:
+			xhci_disable_port(hcd, xhci, wIndex,
+					ports[wIndex]->addr, temp);
+			break;
+		case USB_PORT_FEAT_POWER:
+			xhci_set_port_power(xhci, hcd, wIndex, false, &flags);
+			break;
+		case USB_PORT_FEAT_TEST:
+			retval = xhci_exit_test_mode(xhci);
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core point of view.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	unsigned long flags;
+	u32 temp, status;
+	u32 mask;
+	int i, retval;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int max_ports;
+	struct xhci_bus_state *bus_state;
+	bool reset_change = false;
+	struct xhci_hub *rhub;
+	struct xhci_port **ports;
+
+	rhub = xhci_get_rhub(hcd);
+	ports = rhub->ports;
+	max_ports = rhub->num_ports;
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+	/* Initial status is no changes */
+	retval = (max_ports + 8) / 8;
+	memset(buf, 0, retval);
+
+	/*
+	 * Inform the usbcore about resume-in-progress by returning
+	 * a non-zero value even if there are no status changes.
+	 */
+	status = bus_state->resuming_ports;
+
+	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* For each port, did anything change?  If so, set that bit in buf. */
+	for (i = 0; i < max_ports; i++) {
+		temp = readl(ports[i]->addr);
+		if (temp == ~(u32)0) {
+			xhci_hc_died(xhci);
+			retval = -ENODEV;
+			break;
+		}
+		trace_xhci_hub_status_data(i, temp);
+
+		if ((temp & mask) != 0 ||
+			(bus_state->port_c_suspend & 1 << i) ||
+			(bus_state->resume_done[i] && time_after_eq(
+			    jiffies, bus_state->resume_done[i]))) {
+			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+			status = 1;
+		}
+		if ((temp & PORT_RC))
+			reset_change = true;
+	}
+	if (!status && !reset_change) {
+		xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return status ? retval : 0;
+}
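+
+/*
+ * Bitmap sketch (illustrative, not from the original file): as on an
+ * external hub's status change endpoint, bit 0 of buf is reserved for the
+ * hub itself, so a change on one-based port N sets bit N.  A change on
+ * port 1 alone yields buf[0] = 0x02.
+ */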
+
+#ifdef CONFIG_PM
+
+int xhci_bus_suspend(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int max_ports, port_index;
+	struct xhci_bus_state *bus_state;
+	unsigned long flags;
+	struct xhci_hub *rhub;
+	struct xhci_port **ports;
+	u32 portsc_buf[USB_MAXCHILDREN];
+	bool wake_enabled;
+
+	rhub = xhci_get_rhub(hcd);
+	ports = rhub->ports;
+	max_ports = rhub->num_ports;
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+	wake_enabled = hcd->self.root_hub->do_remote_wakeup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (wake_enabled) {
+		if (bus_state->resuming_ports ||	/* USB2 */
+		    bus_state->port_remote_wakeup) {	/* USB3 */
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_dbg(xhci, "suspend failed because a port is resuming\n");
+			return -EBUSY;
+		}
+	}
+	/*
+	 * Prepare ports for suspend, but don't write anything before all ports
+	 * are checked and we know bus suspend can proceed
+	 */
+	bus_state->bus_suspended = 0;
+	port_index = max_ports;
+	while (port_index--) {
+		u32 t1, t2;
+
+		t1 = readl(ports[port_index]->addr);
+		t2 = xhci_port_state_to_neutral(t1);
+		portsc_buf[port_index] = 0;
+
+		/* Bail out if a USB3 port has a new device in link training */
+		if ((hcd->speed >= HCD_USB3) &&
+		    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+			bus_state->bus_suspended = 0;
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+			return -EBUSY;
+		}
+
+		/* suspend ports in U0, or bail out for new connect changes */
+		if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+			if ((t1 & PORT_CSC) && wake_enabled) {
+				bus_state->bus_suspended = 0;
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+				return -EBUSY;
+			}
+			xhci_dbg(xhci, "port %d not suspended\n", port_index);
+			t2 &= ~PORT_PLS_MASK;
+			t2 |= PORT_LINK_STROBE | XDEV_U3;
+			set_bit(port_index, &bus_state->bus_suspended);
+		}
+		/* USB core sets remote wake mask for USB 3.0 hubs,
+		 * including the USB 3.0 roothub, but only if CONFIG_PM
+		 * is enabled, so also enable remote wake here.
+		 */
+		if (wake_enabled) {
+			if (t1 & PORT_CONNECT) {
+				t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+				t2 &= ~PORT_WKCONN_E;
+			} else {
+				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+				t2 &= ~PORT_WKDISC_E;
+			}
+
+			if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+			    (hcd->speed < HCD_USB3)) {
+				if (usb_amd_pt_check_port(hcd->self.controller,
+							  port_index))
+					t2 &= ~PORT_WAKE_BITS;
+			}
+		} else
+			t2 &= ~PORT_WAKE_BITS;
+
+		t1 = xhci_port_state_to_neutral(t1);
+		if (t1 != t2)
+			portsc_buf[port_index] = t2;
+	}
+
+	/* write port settings, stopping and suspending ports if needed */
+	port_index = max_ports;
+	while (port_index--) {
+		if (!portsc_buf[port_index])
+			continue;
+		if (test_bit(port_index, &bus_state->bus_suspended)) {
+			int slot_id;
+
+			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+							    port_index + 1);
+			if (slot_id) {
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_stop_device(xhci, slot_id, 1);
+				spin_lock_irqsave(&xhci->lock, flags);
+			}
+		}
+		writel(portsc_buf[port_index], ports[port_index]->addr);
+	}
+	hcd->state = HC_STATE_SUSPENDED;
+	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
+}
+
+/*
+ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged
+ * in S3: warm reset a USB3 device stuck in polling or compliance mode after
+ * resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(struct xhci_port *port)
+{
+	u32 portsc;
+
+	portsc = readl(port->addr);
+
+	/* if any of these are set we are not stuck */
+	if (portsc & (PORT_CONNECT | PORT_CAS))
+		return false;
+
+	if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+	    ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+		return false;
+
+	/* clear wakeup/change bits, and do a warm port reset */
+	portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+	portsc |= PORT_WR;
+	writel(portsc, port->addr);
+	/* flush write */
+	readl(port->addr);
+	return true;
+}
+
+int xhci_bus_resume(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct xhci_bus_state *bus_state;
+	unsigned long flags;
+	int max_ports, port_index;
+	int slot_id;
+	int sret;
+	u32 next_state;
+	u32 temp, portsc;
+	struct xhci_hub *rhub;
+	struct xhci_port **ports;
+
+	rhub = xhci_get_rhub(hcd);
+	ports = rhub->ports;
+	max_ports = rhub->num_ports;
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+	if (time_before(jiffies, bus_state->next_statechange))
+		msleep(5);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	/* delay the irqs */
+	temp = readl(&xhci->op_regs->command);
+	temp &= ~CMD_EIE;
+	writel(temp, &xhci->op_regs->command);
+
+	/* bus specific resume for ports we suspended at bus_suspend */
+	if (hcd->speed >= HCD_USB3)
+		next_state = XDEV_U0;
+	else
+		next_state = XDEV_RESUME;
+
+	port_index = max_ports;
+	while (port_index--) {
+		portsc = readl(ports[port_index]->addr);
+
+		/* warm reset CAS limited ports stuck in polling/compliance */
+		if ((xhci->quirks & XHCI_MISSING_CAS) &&
+		    (hcd->speed >= HCD_USB3) &&
+		    xhci_port_missing_cas_quirk(ports[port_index])) {
+			xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+			clear_bit(port_index, &bus_state->bus_suspended);
+			continue;
+		}
+		/* resume if we suspended the link, and it is still suspended */
+		if (test_bit(port_index, &bus_state->bus_suspended))
+			switch (portsc & PORT_PLS_MASK) {
+			case XDEV_U3:
+				portsc = xhci_port_state_to_neutral(portsc);
+				portsc &= ~PORT_PLS_MASK;
+				portsc |= PORT_LINK_STROBE | next_state;
+				break;
+			case XDEV_RESUME:
+				/* resume already initiated */
+				break;
+			default:
+				/* not in a resumable state, ignore it */
+				clear_bit(port_index,
+					  &bus_state->bus_suspended);
+				break;
+			}
+		/* disable wake for all ports, write new link state if needed */
+		portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+		writel(portsc, ports[port_index]->addr);
+	}
+
+	/* USB2 specific resume signaling delay and U0 link state transition */
+	if (hcd->speed < HCD_USB3) {
+		if (bus_state->bus_suspended) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			msleep(USB_RESUME_TIMEOUT);
+			spin_lock_irqsave(&xhci->lock, flags);
+		}
+		for_each_set_bit(port_index, &bus_state->bus_suspended,
+				 BITS_PER_LONG) {
+			/* Clear PLC to poll it later for U0 transition */
+			xhci_test_and_clear_bit(xhci, ports[port_index],
+						PORT_PLC);
+			xhci_set_link_state(xhci, ports[port_index], XDEV_U0);
+		}
+	}
+
+	/* poll for U0 link state complete, both USB2 and USB3 */
+	for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
+		sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
+				      PORT_PLC, 10 * 1000);
+		if (sret) {
+			xhci_warn(xhci, "port %d resume PLC timeout\n",
+				  port_index);
+			continue;
+		}
+		xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
+		slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
+		if (slot_id)
+			xhci_ring_device(xhci, slot_id);
+	}
+	(void) readl(&xhci->op_regs->command);
+
+	bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
+	/* re-enable irqs */
+	temp = readl(&xhci->op_regs->command);
+	temp |= CMD_EIE;
+	writel(temp, &xhci->op_regs->command);
+	temp = readl(&xhci->op_regs->command);
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
+}
+
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct xhci_bus_state *bus_state;
+
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+	/* USB3 port wakeups are reported via usb_wakeup_notification() */
+	return bus_state->resuming_ports;	/* USB2 ports only */
+}
+
+#endif	/* CONFIG_PM */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 0000000..b1f27aa
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,2566 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-debugfs.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+					       unsigned int cycle_state,
+					       unsigned int max_packet,
+					       gfp_t flags)
+{
+	struct xhci_segment *seg;
+	dma_addr_t	dma;
+	int		i;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
+	if (!seg)
+		return NULL;
+
+	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
+	if (!seg->trbs) {
+		kfree(seg);
+		return NULL;
+	}
+
+	if (max_packet) {
+		seg->bounce_buf = kzalloc_node(max_packet, flags,
+					dev_to_node(dev));
+		if (!seg->bounce_buf) {
+			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+			kfree(seg);
+			return NULL;
+		}
+	}
+	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+	if (cycle_state == 0) {
+		for (i = 0; i < TRBS_PER_SEGMENT; i++)
+			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+	}
+	seg->dma = dma;
+	seg->next = NULL;
+
+	return seg;
+}
+
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+	if (seg->trbs) {
+		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+		seg->trbs = NULL;
+	}
+	kfree(seg->bounce_buf);
+	kfree(seg);
+}
+
+static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
+				struct xhci_segment *first)
+{
+	struct xhci_segment *seg;
+
+	seg = first->next;
+	while (seg != first) {
+		struct xhci_segment *next = seg->next;
+		xhci_segment_free(xhci, seg);
+		seg = next;
+	}
+	xhci_segment_free(xhci, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment.  The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+		struct xhci_segment *next, enum xhci_ring_type type)
+{
+	u32 val;
+
+	if (!prev || !next)
+		return;
+	prev->next = next;
+	if (type != TYPE_EVENT) {
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+			cpu_to_le64(next->dma);
+
+		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val &= ~TRB_TYPE_BITMASK;
+		val |= TRB_TYPE(TRB_LINK);
+		/* Always set the chain bit with 0.95 hardware */
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(type == TYPE_ISOC &&
+				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
+			val |= TRB_CHAIN;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+	}
+}
+
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *first, struct xhci_segment *last,
+		unsigned int num_segs)
+{
+	struct xhci_segment *next;
+
+	if (!ring || !first || !last)
+		return;
+
+	next = ring->enq_seg->next;
+	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
+	xhci_link_segments(xhci, last, next, ring->type);
+	ring->num_segs += num_segs;
+	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
+	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
+		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+			&= ~cpu_to_le32(LINK_TOGGLE);
+		last->trbs[TRBS_PER_SEGMENT-1].link.control
+			|= cpu_to_le32(LINK_TOGGLE);
+		ring->last_seg = last;
+	}
+}
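+
+/*
+ * Splice sketch (illustrative, not from the original file): expanding a
+ * two-segment ring A -> B -> A at enqueue segment A with new segments
+ * first..last gives
+ *
+ *	A -> first -> ... -> last -> B -> A
+ *
+ * and, if A was also the last segment, the LINK_TOGGLE bit moves from A's
+ * link TRB to last's link TRB.
+ */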
+
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to.  We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from.  We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses.  For example, say I
+ * have segments of size 1KB, that are always 1KB aligned.  A segment may
+ * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ *	0x10c90fff >> 10 = 0x43243
+ *	0x10c912c0 >> 10 = 0x43244
+ *	0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key.  On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
+ */
+static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+		struct xhci_ring *ring,
+		struct xhci_segment *seg,
+		gfp_t mem_flags)
+{
+	unsigned long key;
+	int ret;
+
+	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+	/* Skip any segments that were already added. */
+	if (radix_tree_lookup(trb_address_map, key))
+		return 0;
+
+	ret = radix_tree_maybe_preload(mem_flags);
+	if (ret)
+		return ret;
+	ret = radix_tree_insert(trb_address_map,
+			key, ring);
+	radix_tree_preload_end();
+	return ret;
+}
+
+static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+		struct xhci_segment *seg)
+{
+	unsigned long key;
+
+	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+	if (radix_tree_lookup(trb_address_map, key))
+		radix_tree_delete(trb_address_map, key);
+}
+
+static int xhci_update_stream_segment_mapping(
+		struct radix_tree_root *trb_address_map,
+		struct xhci_ring *ring,
+		struct xhci_segment *first_seg,
+		struct xhci_segment *last_seg,
+		gfp_t mem_flags)
+{
+	struct xhci_segment *seg;
+	struct xhci_segment *failed_seg;
+	int ret;
+
+	if (WARN_ON_ONCE(trb_address_map == NULL))
+		return 0;
+
+	seg = first_seg;
+	do {
+		ret = xhci_insert_segment_mapping(trb_address_map,
+				ring, seg, mem_flags);
+		if (ret)
+			goto remove_streams;
+		if (seg == last_seg)
+			return 0;
+		seg = seg->next;
+	} while (seg != first_seg);
+
+	return 0;
+
+remove_streams:
+	failed_seg = seg;
+	seg = first_seg;
+	do {
+		xhci_remove_segment_mapping(trb_address_map, seg);
+		if (seg == failed_seg)
+			return ret;
+		seg = seg->next;
+	} while (seg != first_seg);
+
+	return ret;
+}
+
+static void xhci_remove_stream_mapping(struct xhci_ring *ring)
+{
+	struct xhci_segment *seg;
+
+	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
+		return;
+
+	seg = ring->first_seg;
+	do {
+		xhci_remove_segment_mapping(ring->trb_address_map, seg);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+}
+
+static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+{
+	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+			ring->first_seg, ring->last_seg, mem_flags);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	if (!ring)
+		return;
+
+	trace_xhci_ring_free(ring);
+
+	if (ring->first_seg) {
+		if (ring->type == TYPE_STREAM)
+			xhci_remove_stream_mapping(ring);
+		xhci_free_segments_for_ring(xhci, ring->first_seg);
+	}
+
+	kfree(ring);
+}
+
+static void xhci_initialize_ring_info(struct xhci_ring *ring,
+					unsigned int cycle_state)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 *
+	 * New rings are initialized with cycle state equal to 1; if we are
+	 * handling ring expansion, set the cycle state to match the old ring.
+	 */
+	ring->cycle_state = cycle_state;
+
+	/*
+	 * Each segment has a link TRB, and we leave one extra TRB for SW
+	 * accounting purposes.
+	 */
+	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+}
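+
+/*
+ * For example, assuming TRBS_PER_SEGMENT is 256, a freshly initialized
+ * two-segment ring starts with 2 * 255 - 1 = 509 free TRBs: each segment
+ * donates one TRB to the link TRB, and one more TRB is held back for the
+ * SW accounting described above.
+ */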
+
+/* Allocate segments and link them for a ring */
+static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+		struct xhci_segment **first, struct xhci_segment **last,
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+{
+	struct xhci_segment *prev;
+
+	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+	if (!prev)
+		return -ENOMEM;
+	num_segs--;
+
+	*first = prev;
+	while (num_segs > 0) {
+		struct xhci_segment	*next;
+
+		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+		if (!next) {
+			prev = *first;
+			while (prev) {
+				next = prev->next;
+				xhci_segment_free(xhci, prev);
+				prev = next;
+			}
+			return -ENOMEM;
+		}
+		xhci_link_segments(xhci, prev, next, type);
+
+		prev = next;
+		num_segs--;
+	}
+	xhci_link_segments(xhci, prev, *first, type);
+	*last = prev;
+
+	return 0;
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+{
+	struct xhci_ring	*ring;
+	int ret;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
+	if (!ring)
+		return NULL;
+
+	ring->num_segs = num_segs;
+	ring->bounce_buf_len = max_packet;
+	INIT_LIST_HEAD(&ring->td_list);
+	ring->type = type;
+	if (num_segs == 0)
+		return ring;
+
+	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
+			&ring->last_seg, num_segs, cycle_state, type,
+			max_packet, flags);
+	if (ret)
+		goto fail;
+
+	/* Only event ring does not use link TRB */
+	if (type != TYPE_EVENT) {
+		/* See section 4.9.2.1 and 6.4.4.1 */
+		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+			cpu_to_le32(LINK_TOGGLE);
+	}
+	xhci_initialize_ring_info(ring, cycle_state);
+	trace_xhci_ring_alloc(ring);
+	return ring;
+
+fail:
+	kfree(ring);
+	return NULL;
+}
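+
+/*
+ * A minimal usage sketch: the default endpoint 0 transfer ring is created
+ * in xhci_alloc_virt_device() below as a two-segment control ring with an
+ * initial cycle state of 1 and no bounce buffer:
+ *
+ *	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ */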
+
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index)
+{
+	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+	virt_dev->eps[ep_index].ring = NULL;
+}
+
+/*
+ * Expand an existing ring.
+ * Allocate new segments, at least as many as the ring already has, and link
+ * them into the existing ring.
+ */
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+				unsigned int num_trbs, gfp_t flags)
+{
+	struct xhci_segment	*first;
+	struct xhci_segment	*last;
+	unsigned int		num_segs;
+	unsigned int		num_segs_needed;
+	int			ret;
+
+	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+				(TRBS_PER_SEGMENT - 1);
+
+	/* Allocate enough segments for the new TRBs, or at least double the ring */
+	num_segs = ring->num_segs > num_segs_needed ?
+			ring->num_segs : num_segs_needed;
+
+	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
+			num_segs, ring->cycle_state, ring->type,
+			ring->bounce_buf_len, flags);
+	if (ret)
+		return -ENOMEM;
+
+	if (ring->type == TYPE_STREAM)
+		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+						ring, first, last, flags);
+	if (ret) {
+		struct xhci_segment *next;
+		do {
+			next = first->next;
+			xhci_segment_free(xhci, first);
+			if (first == last)
+				break;
+			first = next;
+		} while (true);
+		return ret;
+	}
+
+	xhci_link_rings(xhci, ring, first, last, num_segs);
+	trace_xhci_ring_expansion(ring);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+			"ring expansion succeed, now has %d segments",
+			ring->num_segs);
+
+	return 0;
+}
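+
+/*
+ * A rough sketch of the caller's pattern (see prepare_ring() in
+ * xhci-ring.c): when a TD needs more TRBs than are currently free, the
+ * ring is expanded under xhci->lock before enqueuing, roughly:
+ *
+ *	if (num_trbs > ring->num_trbs_free)
+ *		xhci_ring_expansion(xhci, ring,
+ *				num_trbs - ring->num_trbs_free, mem_flags);
+ */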
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+						    int type, gfp_t flags)
+{
+	struct xhci_container_ctx *ctx;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
+		return NULL;
+
+	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
+	if (!ctx)
+		return NULL;
+
+	ctx->type = type;
+	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+	if (type == XHCI_CTX_TYPE_INPUT)
+		ctx->size += CTX_SIZE(xhci->hcc_params);
+
+	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
+	if (!ctx->bytes) {
+		kfree(ctx);
+		return NULL;
+	}
+	return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+			     struct xhci_container_ctx *ctx)
+{
+	if (!ctx)
+		return;
+	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+	kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(
+					      struct xhci_container_ctx *ctx)
+{
+	if (ctx->type != XHCI_CTX_TYPE_INPUT)
+		return NULL;
+
+	return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+					struct xhci_container_ctx *ctx)
+{
+	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+		return (struct xhci_slot_ctx *)ctx->bytes;
+
+	return (struct xhci_slot_ctx *)
+		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+				    struct xhci_container_ctx *ctx,
+				    unsigned int ep_index)
+{
+	/* increment ep index by offset of start of ep ctx array */
+	ep_index++;
+	if (ctx->type == XHCI_CTX_TYPE_INPUT)
+		ep_index++;
+
+	return (struct xhci_ep_ctx *)
+		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
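+
+/*
+ * Layout sketch: with 32-byte contexts (CTX_SIZE == 32), the slot context
+ * sits at offset 0 of a device context, so endpoint 0 (ep_index 0) lands
+ * at offset 32.  An input context carries the input control context first,
+ * pushing the same endpoint to offset 64.  With 64-byte contexts the
+ * offsets double.
+ */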
+
+
+/***************** Streams structures manipulation *************************/
+
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
+
+	if (size > MEDIUM_STREAM_ARRAY_SIZE)
+		dma_free_coherent(dev, size,
+				stream_ctx, dma);
+	else if (size <= SMALL_STREAM_ARRAY_SIZE)
+		dma_pool_free(xhci->small_streams_pool,
+				stream_ctx, dma);
+	else
+		dma_pool_free(xhci->medium_streams_pool,
+				stream_ctx, dma);
+}
+
+/*
+ * The stream context array for each endpoint with bulk streams enabled can
+ * vary in size, based on:
+ *  - how many streams the endpoint supports,
+ *  - the maximum primary stream array size the host controller supports,
+ *  - and how many streams the device driver asks for.
+ *
+ * The stream context array must be a power of 2, and can be as small as
+ * 64 bytes or as large as 1MB.
+ */
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs, dma_addr_t *dma,
+		gfp_t mem_flags)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
+
+	if (size > MEDIUM_STREAM_ARRAY_SIZE)
+		return dma_alloc_coherent(dev, size,
+				dma, mem_flags);
+	else if (size <= SMALL_STREAM_ARRAY_SIZE)
+		return dma_pool_alloc(xhci->small_streams_pool,
+				mem_flags, dma);
+	else
+		return dma_pool_alloc(xhci->medium_streams_pool,
+				mem_flags, dma);
+}
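+
+/*
+ * Sizing example: each struct xhci_stream_ctx is 16 bytes, so 16 stream
+ * context entries fit in the 256-byte small-streams pool, 64 entries fit
+ * in the 1KB medium-streams pool, and anything larger falls back to
+ * dma_alloc_coherent() above.
+ */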
+
+struct xhci_ring *xhci_dma_to_transfer_ring(
+		struct xhci_virt_ep *ep,
+		u64 address)
+{
+	if (ep->ep_state & EP_HAS_STREAMS)
+		return radix_tree_lookup(&ep->stream_info->trb_address_map,
+				address >> TRB_SEGMENT_SHIFT);
+	return ep->ring;
+}
+
+struct xhci_ring *xhci_stream_id_to_ring(
+		struct xhci_virt_device *dev,
+		unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep = &dev->eps[ep_index];
+
+	if (stream_id == 0)
+		return ep->ring;
+	if (!ep->stream_info)
+		return NULL;
+
+	if (stream_id >= ep->stream_info->num_streams)
+		return NULL;
+	return ep->stream_info->stream_rings[stream_id];
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.  The
+ * number of requested streams includes stream 0, which cannot be used by device
+ * drivers.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use.  This is because the number of
+ * stream context array entries must be a power of two.
+ */
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t mem_flags)
+{
+	struct xhci_stream_info *stream_info;
+	u32 cur_stream;
+	struct xhci_ring *cur_ring;
+	u64 addr;
+	int ret;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	xhci_dbg(xhci, "Allocating %u streams and %u "
+			"stream context array entries.\n",
+			num_streams, num_stream_ctxs);
+	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
+		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
+		return NULL;
+	}
+	xhci->cmd_ring_reserved_trbs++;
+
+	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
+			dev_to_node(dev));
+	if (!stream_info)
+		goto cleanup_trbs;
+
+	stream_info->num_streams = num_streams;
+	stream_info->num_stream_ctxs = num_stream_ctxs;
+
+	/* Initialize the array of virtual pointers to stream rings. */
+	stream_info->stream_rings = kcalloc_node(
+			num_streams, sizeof(struct xhci_ring *), mem_flags,
+			dev_to_node(dev));
+	if (!stream_info->stream_rings)
+		goto cleanup_info;
+
+	/* Initialize the array of DMA addresses for stream rings for the HW. */
+	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
+			num_stream_ctxs, &stream_info->ctx_array_dma,
+			mem_flags);
+	if (!stream_info->stream_ctx_array)
+		goto cleanup_ring_array;
+	memset(stream_info->stream_ctx_array, 0,
+			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+
+	/* Allocate everything needed to free the stream rings later */
+	stream_info->free_streams_command =
+		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
+	if (!stream_info->free_streams_command)
+		goto cleanup_ctx;
+
+	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+
+	/* Allocate rings for all the streams that the driver will use,
+	 * and add their segment DMA addresses to the radix tree.
+	 * Stream 0 is reserved.
+	 */
+
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+		stream_info->stream_rings[cur_stream] =
+			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+					mem_flags);
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (!cur_ring)
+			goto cleanup_rings;
+		cur_ring->stream_id = cur_stream;
+		cur_ring->trb_address_map = &stream_info->trb_address_map;
+		/* Set deq ptr, cycle bit, and stream context type */
+		addr = cur_ring->first_seg->dma |
+			SCT_FOR_CTX(SCT_PRI_TR) |
+			cur_ring->cycle_state;
+		stream_info->stream_ctx_array[cur_stream].stream_ring =
+			cpu_to_le64(addr);
+		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
+				cur_stream, (unsigned long long) addr);
+
+		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
+		if (ret) {
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+			goto cleanup_rings;
+		}
+	}
+	/* Leave the other unused stream ring pointers in the stream context
+	 * array initialized to zero.  This will cause the xHC to give us an
+	 * error if the device asks for a stream ID we don't have set up (if it
+	 * was any other way, the host controller would assume the ring is
+	 * "empty" and wait forever for data to be queued to that stream ID).
+	 */
+
+	return stream_info;
+
+cleanup_rings:
+	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (cur_ring) {
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+		}
+	}
+	xhci_free_command(xhci, stream_info->free_streams_command);
+cleanup_ctx:
+	xhci_free_stream_ctx(xhci, num_stream_ctxs,
+			stream_info->stream_ctx_array,
+			stream_info->ctx_array_dma);
+cleanup_ring_array:
+	kfree(stream_info->stream_rings);
+cleanup_info:
+	kfree(stream_info);
+cleanup_trbs:
+	xhci->cmd_ring_reserved_trbs--;
+	return NULL;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_stream_info *stream_info)
+{
+	u32 max_primary_streams;
+	/* MaxPStreams is the number of stream context array entries, not the
+	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
+	 * fls(0) = 0, fls(1) = 1, fls(2) = 2, fls(4) = 3, etc.
+	 */
+	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+	xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+			"Setting number of stream ctx array entries to %u",
+			1 << (max_primary_streams + 1));
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
+}
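+
+/*
+ * Worked example: with a 256-entry stream context array,
+ * fls(256) - 2 = 7, so MaxPStreams is written as 7 and the host decodes
+ * the array size as 2^(7 + 1) = 256 entries.
+ */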
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field to 0.
+ * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
+ * not at the beginning of the ring).
+ */
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
+		struct xhci_virt_ep *ep)
+{
+	dma_addr_t addr;
+	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
+	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
+	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
+}
+
+/* Frees all stream contexts associated with the endpoint.
+ *
+ * The caller should fix up the endpoint context streams fields.
+ */
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+		struct xhci_stream_info *stream_info)
+{
+	int cur_stream;
+	struct xhci_ring *cur_ring;
+
+	if (!stream_info)
+		return;
+
+	for (cur_stream = 1; cur_stream < stream_info->num_streams;
+			cur_stream++) {
+		cur_ring = stream_info->stream_rings[cur_stream];
+		if (cur_ring) {
+			xhci_ring_free(xhci, cur_ring);
+			stream_info->stream_rings[cur_stream] = NULL;
+		}
+	}
+	xhci_free_command(xhci, stream_info->free_streams_command);
+	xhci->cmd_ring_reserved_trbs--;
+	if (stream_info->stream_ctx_array)
+		xhci_free_stream_ctx(xhci,
+				stream_info->num_stream_ctxs,
+				stream_info->stream_ctx_array,
+				stream_info->ctx_array_dma);
+
+	kfree(stream_info->stream_rings);
+	kfree(stream_info);
+}
+
+
+/***************** Device context manipulation *************************/
+
+static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
+		    0);
+	ep->xhci = xhci;
+}
+
+static void xhci_free_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int slot_id)
+{
+	struct list_head *tt_list_head;
+	struct xhci_tt_bw_info *tt_info, *next;
+	bool slot_found = false;
+
+	/* If the device never made it past the Set Address stage,
+	 * it may not have the real_port set correctly.
+	 */
+	if (virt_dev->real_port == 0 ||
+			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad real port.\n");
+		return;
+	}
+
+	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+		/* Multi-TT hubs will have more than one entry */
+		if (tt_info->slot_id == slot_id) {
+			slot_found = true;
+			list_del(&tt_info->tt_list);
+			kfree(tt_info);
+		} else if (slot_found) {
+			break;
+		}
+	}
+}
+
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags)
+{
+	struct xhci_tt_bw_info		*tt_info;
+	unsigned int			num_ports;
+	int				i, j;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	if (!tt->multi)
+		num_ports = 1;
+	else
+		num_ports = hdev->maxchild;
+
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
+				dev_to_node(dev));
+		if (!tt_info)
+			goto free_tts;
+		INIT_LIST_HEAD(&tt_info->tt_list);
+		list_add(&tt_info->tt_list,
+				&xhci->rh_bw[virt_dev->real_port - 1].tts);
+		tt_info->slot_id = virt_dev->udev->slot_id;
+		if (tt->multi)
+			tt_info->ttport = i+1;
+		bw_table = &tt_info->bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+	return 0;
+
+free_tts:
+	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
+	return -ENOMEM;
+}
+
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Should be called with xhci->lock held if there is any chance the TT lists
+ * will be manipulated by the configure endpoint, allocate device, or update
+ * hub functions while this function is removing the TT entries from the list.
+ */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *dev;
+	int i;
+	int old_active_eps = 0;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || !xhci->devs[slot_id])
+		return;
+
+	dev = xhci->devs[slot_id];
+
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+	if (!dev)
+		return;
+
+	trace_xhci_free_virt_device(dev);
+
+	if (dev->tt_info)
+		old_active_eps = dev->tt_info->active_eps;
+
+	for (i = 0; i < 31; i++) {
+		if (dev->eps[i].ring)
+			xhci_ring_free(xhci, dev->eps[i].ring);
+		if (dev->eps[i].stream_info)
+			xhci_free_stream_info(xhci,
+					dev->eps[i].stream_info);
+		/* Endpoints on the TT/root port lists should have been removed
+		 * when usb_disable_device() was called for the device.
+		 * We can't drop them anyway, because the udev might have gone
+		 * away by this point, and we can't tell what speed it was.
+		 */
+		if (!list_empty(&dev->eps[i].bw_endpoint_list))
+			xhci_warn(xhci, "Slot %u endpoint %u "
+					"not removed from BW list!\n",
+					slot_id, i);
+	}
+	/* If this is a hub, free the TT(s) from the TT list */
+	xhci_free_tt_info(xhci, dev, slot_id);
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
+
+	if (dev->in_ctx)
+		xhci_free_container_ctx(xhci, dev->in_ctx);
+	if (dev->out_ctx)
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+
+	if (dev->udev && dev->udev->slot_id)
+		dev->udev->slot_id = 0;
+	kfree(xhci->devs[slot_id]);
+	xhci->devs[slot_id] = NULL;
+}
+
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the children first.  Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *vdev;
+	struct list_head *tt_list_head;
+	struct xhci_tt_bw_info *tt_info, *next;
+	int i;
+
+	vdev = xhci->devs[slot_id];
+	if (!vdev)
+		return;
+
+	if (vdev->real_port == 0 ||
+			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad vdev->real_port.\n");
+		goto out;
+	}
+
+	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+		/* is this a hub device that added a tt_info to the tts list? */
+		if (tt_info->slot_id == slot_id) {
+			/* are any devices using this tt_info? */
+			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+				vdev = xhci->devs[i];
+				if (vdev && (vdev->tt_info == tt_info))
+					xhci_free_virt_devices_depth_first(
+						xhci, i);
+			}
+		}
+	}
+out:
+	/* we are now at a leaf device */
+	xhci_debugfs_remove_slot(xhci, slot_id);
+	xhci_free_virt_device(xhci, slot_id);
+}
+
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+		struct usb_device *udev, gfp_t flags)
+{
+	struct xhci_virt_device *dev;
+	int i;
+
+	/* Slot ID 0 is reserved */
+	if (slot_id == 0 || xhci->devs[slot_id]) {
+		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+		return 0;
+	}
+
+	dev = kzalloc(sizeof(*dev), flags);
+	if (!dev)
+		return 0;
+
+	/* Allocate the (output) device context that will be used in the HC. */
+	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+	if (!dev->out_ctx)
+		goto fail;
+
+	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dev->out_ctx->dma);
+
+	/* Allocate the (input) device context for address device command */
+	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
+	if (!dev->in_ctx)
+		goto fail;
+
+	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dev->in_ctx->dma);
+
+	/* Initialize the cancellation list and watchdog timers for each ep */
+	for (i = 0; i < 31; i++) {
+		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
+	}
+
+	/* Allocate endpoint 0 ring */
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+	if (!dev->eps[0].ring)
+		goto fail;
+
+	dev->udev = udev;
+
+	/* Point to output device context in dcbaa. */
+	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+		 slot_id,
+		 &xhci->dcbaa->dev_context_ptrs[slot_id],
+		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+
+	trace_xhci_alloc_virt_device(dev);
+
+	xhci->devs[slot_id] = dev;
+
+	return 1;
+fail:
+	if (dev->in_ctx)
+		xhci_free_container_ctx(xhci, dev->in_ctx);
+	if (dev->out_ctx)
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+	kfree(dev);
+
+	return 0;
+}
+
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+		struct usb_device *udev)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_ep_ctx	*ep0_ctx;
+	struct xhci_ring	*ep_ring;
+
+	virt_dev = xhci->devs[udev->slot_id];
+	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
+	ep_ring = virt_dev->eps[0].ring;
+	/*
+	 * FIXME we don't keep track of the dequeue pointer very well after a
+	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
+	 * host to our enqueue pointer.  This should only be called after a
+	 * configured device has reset, so all control transfers should have
+	 * been completed or cancelled before the reset.
+	 */
+	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+							ep_ring->enqueue)
+				   | ep_ring->cycle_state);
+}
+
+/*
+ * The xHCI roothub may have ports of differing speeds in any order in the port
+ * status registers.
+ *
+ * The xHCI hardware wants to know the roothub port number that the USB device
+ * is attached to (or the roothub port its ancestor hub is attached to).  All we
+ * know is the index of that port under either the USB 2.0 or the USB 3.0
+ * roothub, but that doesn't give us the real index into the HW port status
+ * registers.  Call xhci_find_raw_port_number() to get the real index.
+ */
+static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
+		struct usb_device *udev)
+{
+	struct usb_device *top_dev;
+	struct usb_hcd *hcd;
+
+	if (udev->speed >= USB_SPEED_SUPER)
+		hcd = xhci->shared_hcd;
+	else
+		hcd = xhci->main_hcd;
+
+	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+			top_dev = top_dev->parent)
+		/* Found device below root hub */;
+
+	return	xhci_find_raw_port_number(hcd, top_dev->portnum);
+}
+
+/* Setup an xHCI virtual device for a Set Address command */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+	struct xhci_virt_device *dev;
+	struct xhci_ep_ctx	*ep0_ctx;
+	struct xhci_slot_ctx    *slot_ctx;
+	u32			port_num;
+	u32			max_packets;
+	struct usb_device *top_dev;
+
+	dev = xhci->devs[udev->slot_id];
+	/* Slot ID 0 is reserved */
+	if (udev->slot_id == 0 || !dev) {
+		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+				udev->slot_id);
+		return -EINVAL;
+	}
+	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
+
+	/* 3) Only the control endpoint is valid - one endpoint context */
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
+	switch (udev->speed) {
+	case USB_SPEED_SUPER_PLUS:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_SUPER:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+		max_packets = MAX_PACKET(512);
+		break;
+	case USB_SPEED_HIGH:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+		max_packets = MAX_PACKET(64);
+		break;
+	/* USB core guesses at a 64-byte max packet first for FS devices */
+	case USB_SPEED_FULL:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+		max_packets = MAX_PACKET(64);
+		break;
+	case USB_SPEED_LOW:
+		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
+		max_packets = MAX_PACKET(8);
+		break;
+	case USB_SPEED_WIRELESS:
+		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+		return -EINVAL;
+	default:
+		/* Speed was set earlier, this shouldn't happen. */
+		return -EINVAL;
+	}
+	/* Find the root hub port this device is under */
+	port_num = xhci_find_real_port_number(xhci, udev);
+	if (!port_num)
+		return -EINVAL;
+	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
+	/* Set the port number in the virt_device to the faked port number */
+	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+			top_dev = top_dev->parent)
+		/* Found device below root hub */;
+	dev->fake_port = top_dev->portnum;
+	dev->real_port = port_num;
+	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
+	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+
+	/* Find the right bandwidth table that this device will be a part of.
+	 * If this is a full speed device attached directly to a root port (or a
+	 * descendant of one), it counts as a primary bandwidth domain, not a
+	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
+	 * will never be created for the HS root hub.
+	 */
+	if (!udev->tt || !udev->tt->hub->parent) {
+		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+	} else {
+		struct xhci_root_port_bw_info *rh_bw;
+		struct xhci_tt_bw_info *tt_bw;
+
+		rh_bw = &xhci->rh_bw[port_num - 1];
+		/* Find the right TT. */
+		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
+			if (tt_bw->slot_id != udev->tt->hub->slot_id)
+				continue;
+
+			if (!dev->udev->tt->multi ||
+					(udev->tt->multi &&
+					 tt_bw->ttport == dev->udev->ttport)) {
+				dev->bw_table = &tt_bw->bw_table;
+				dev->tt_info = tt_bw;
+				break;
+			}
+		}
+		if (!dev->tt_info)
+			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
+	}
+
+	/* Is this a LS/FS device under an external HS hub? */
+	if (udev->tt && udev->tt->hub->parent) {
+		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+						(udev->ttport << 8));
+		if (udev->tt->multi)
+			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	}
+	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+	/* Step 4 - ring already allocated */
+	/* Step 5 */
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+
+	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+					 max_packets);
+
+	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+				   dev->eps[0].ring->cycle_state);
+
+	trace_xhci_setup_addressable_virt_device(dev);
+
+	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+	return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval;
+
+	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+	if (interval != ep->desc.bInterval - 1)
+		dev_warn(&udev->dev,
+			 "ep %#x - rounding interval to %d %sframes\n",
+			 ep->desc.bEndpointAddress,
+			 1 << interval,
+			 udev->speed == USB_SPEED_FULL ? "" : "micro");
+
+	if (udev->speed == USB_SPEED_FULL) {
+		/*
+		 * Full speed isoc endpoints specify interval in frames,
+		 * not microframes. We are using microframes everywhere,
+		 * so adjust accordingly.
+		 */
+		interval += 3;	/* 1 frame = 2^3 uframes */
+	}
+
+	return interval;
+}
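+
+/*
+ * Worked example: a full-speed isoc endpoint with bInterval = 4 has a
+ * period of 2^(4 - 1) = 8 frames.  The clamp above yields interval = 3,
+ * and the frame-to-microframe adjustment makes it 6, i.e. a service
+ * interval of 2^6 * 125us = 8ms, which is the same 8 frames.
+ */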
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+		struct usb_host_endpoint *ep, unsigned int desc_interval,
+		unsigned int min_exponent, unsigned int max_exponent)
+{
+	unsigned int interval;
+
+	interval = fls(desc_interval) - 1;
+	interval = clamp_val(interval, min_exponent, max_exponent);
+	if ((1 << interval) != desc_interval)
+		dev_dbg(&udev->dev,
+			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+			 ep->desc.bEndpointAddress,
+			 1 << interval,
+			 desc_interval);
+
+	return interval;
+}
+
+static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	if (ep->desc.bInterval == 0)
+		return 0;
+	return xhci_microframes_to_exponent(udev, ep,
+			ep->desc.bInterval, 0, 15);
+}
+
+
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	return xhci_microframes_to_exponent(udev, ep,
+			ep->desc.bInterval * 8, 3, 10);
+}
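+
+/*
+ * Worked example: a low-speed interrupt endpoint with bInterval = 10
+ * (frames) becomes 80 microframes; fls(80) - 1 = 6, so the interval is
+ * rounded down to 2^6 = 64 microframes (8ms), within the 3..10 exponent
+ * range enforced by the clamp.
+ */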
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes".  If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ */
+static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval = 0;
+
+	switch (udev->speed) {
+	case USB_SPEED_HIGH:
+		/* Max NAK rate */
+		if (usb_endpoint_xfer_control(&ep->desc) ||
+		    usb_endpoint_xfer_bulk(&ep->desc)) {
+			interval = xhci_parse_microframe_interval(udev, ep);
+			break;
+		}
+		/* Fall through - SS and HS isoc/int have same decoding */
+
+	case USB_SPEED_SUPER_PLUS:
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+		    usb_endpoint_xfer_isoc(&ep->desc)) {
+			interval = xhci_parse_exponent_interval(udev, ep);
+		}
+		break;
+
+	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_isoc(&ep->desc)) {
+			interval = xhci_parse_exponent_interval(udev, ep);
+			break;
+		}
+		/*
+		 * Fall through for interrupt endpoint interval decoding
+		 * since it uses the same rules as low speed interrupt
+		 * endpoints.
+		 */
+		/* fall through */
+
+	case USB_SPEED_LOW:
+		if (usb_endpoint_xfer_int(&ep->desc) ||
+		    usb_endpoint_xfer_isoc(&ep->desc)) {
+
+			interval = xhci_parse_frame_interval(udev, ep);
+		}
+		break;
+
+	default:
+		BUG();
+	}
+	return interval;
+}
+
+/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 xhci_get_endpoint_mult(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	if (udev->speed < USB_SPEED_SUPER ||
+			!usb_endpoint_xfer_isoc(&ep->desc))
+		return 0;
+	return ep->ss_ep_comp.bmAttributes;
+}
+
+static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
+				       struct usb_host_endpoint *ep)
+{
+	/* Super speed and Plus have max burst in ep companion desc */
+	if (udev->speed >= USB_SPEED_SUPER)
+		return ep->ss_ep_comp.bMaxBurst;
+
+	if (udev->speed == USB_SPEED_HIGH &&
+	    (usb_endpoint_xfer_isoc(&ep->desc) ||
+	     usb_endpoint_xfer_int(&ep->desc)))
+		return usb_endpoint_maxp_mult(&ep->desc) - 1;
+
+	return 0;
+}
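+
+/*
+ * For example, a high-bandwidth high-speed isoc endpoint that encodes
+ * "3 transactions per microframe" in bits 12:11 of wMaxPacketSize makes
+ * usb_endpoint_maxp_mult() return 3, so Max Burst is written as 2
+ * (burst values are zero-based).
+ */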
+
+static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
+{
+	int in;
+
+	in = usb_endpoint_dir_in(&ep->desc);
+
+	switch (usb_endpoint_type(&ep->desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		return CTRL_EP;
+	case USB_ENDPOINT_XFER_BULK:
+		return in ? BULK_IN_EP : BULK_OUT_EP;
+	case USB_ENDPOINT_XFER_ISOC:
+		return in ? ISOC_IN_EP : ISOC_OUT_EP;
+	case USB_ENDPOINT_XFER_INT:
+		return in ? INT_IN_EP : INT_OUT_EP;
+	}
+	return 0;
+}
+
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the max packet size multiplied by the burst size
+ * and the mult value.
+ */
+static u32 xhci_get_max_esit_payload(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	int max_burst;
+	int max_packet;
+
+	/* Only applies for interrupt or isochronous endpoints */
+	if (usb_endpoint_xfer_control(&ep->desc) ||
+			usb_endpoint_xfer_bulk(&ep->desc))
+		return 0;
+
+	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
+	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
+	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
+		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
+	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
+	else if (udev->speed >= USB_SPEED_SUPER)
+		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
+
+	max_packet = usb_endpoint_maxp(&ep->desc);
+	max_burst = usb_endpoint_maxp_mult(&ep->desc);
+	/* max_burst here is the 1-based transactions-per-microframe count */
+	return max_packet * max_burst;
+}
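+
+/*
+ * For example, a high-speed interrupt endpoint with a 1024-byte max packet
+ * and 3 transactions per microframe has a max ESIT payload of
+ * 1024 * 3 = 3072 bytes.
+ */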
+
+/* Set up an endpoint with two ring segments.  Do not allocate stream rings.
+ * Drivers will have to call usb_alloc_streams() to do that.
+ */
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *udev,
+		struct usb_host_endpoint *ep,
+		gfp_t mem_flags)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_ring *ep_ring;
+	unsigned int max_packet;
+	enum xhci_ring_type ring_type;
+	u32 max_esit_payload;
+	u32 endpoint_type;
+	unsigned int max_burst;
+	unsigned int interval;
+	unsigned int mult;
+	unsigned int avg_trb_len;
+	unsigned int err_count = 0;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+
+	endpoint_type = xhci_get_endpoint_type(ep);
+	if (!endpoint_type)
+		return -EINVAL;
+
+	ring_type = usb_endpoint_type(&ep->desc);
+
+	/*
+	 * Get values to fill the endpoint context, mostly from ep descriptor.
+	 * The average TRB buffer length for bulk endpoints is unclear as we
+	 * have no clue on scatter gather list entry size. For Isoc and Int,
+	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
+	 */
+	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
+	interval = xhci_get_endpoint_interval(udev, ep);
+
+	/* Periodic endpoint bInterval limit quirk */
+	if (usb_endpoint_xfer_int(&ep->desc) ||
+	    usb_endpoint_xfer_isoc(&ep->desc)) {
+		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
+		    udev->speed >= USB_SPEED_HIGH &&
+		    interval >= 7) {
+			interval = 6;
+		}
+	}
+
+	mult = xhci_get_endpoint_mult(udev, ep);
+	max_packet = usb_endpoint_maxp(&ep->desc);
+	max_burst = xhci_get_endpoint_max_burst(udev, ep);
+	avg_trb_len = max_esit_payload;
+
+	/* FIXME dig Mult and streams info out of ep companion desc */
+
+	/* Allow 3 retries for everything but isoc, set CErr = 3 */
+	if (!usb_endpoint_xfer_isoc(&ep->desc))
+		err_count = 3;
+	/* Some devices get this wrong */
+	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
+		max_packet = 512;
+	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
+	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+		avg_trb_len = 8;
+	/* xHCI 1.1 with LEC support doesn't use the mult field; treat it as RsvdZ */
+	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
+		mult = 0;
+
+	/* Set up the endpoint ring */
+	virt_dev->eps[ep_index].new_ring =
+		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	if (!virt_dev->eps[ep_index].new_ring)
+		return -ENOMEM;
+
+	virt_dev->eps[ep_index].skip = false;
+	ep_ring = virt_dev->eps[ep_index].new_ring;
+
+	/* Fill the endpoint context */
+	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+				      EP_INTERVAL(interval) |
+				      EP_MULT(mult));
+	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+				       MAX_PACKET(max_packet) |
+				       MAX_BURST(max_burst) |
+				       ERROR_COUNT(err_count));
+	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
+				  ep_ring->cycle_state);
+
+	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+				      EP_AVG_TRB_LENGTH(avg_trb_len));
+
+	return 0;
+}
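+
+/*
+ * For example (hypothetical values): a high-speed bulk OUT endpoint ends
+ * up with EP_TYPE = BULK_OUT_EP, CErr = 3, Max Burst = 0, Max Packet Size
+ * forced to 512, and an interval decoded from the descriptor's NAK rate.
+ */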
+
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+
+	ep_ctx->ep_info = 0;
+	ep_ctx->ep_info2 = 0;
+	ep_ctx->deq = 0;
+	ep_ctx->tx_info = 0;
+	/* Don't free the endpoint ring until the set interface or configuration
+	 * request succeeds.
+	 */
+}
+
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
+{
+	bw_info->ep_interval = 0;
+	bw_info->mult = 0;
+	bw_info->num_packets = 0;
+	bw_info->max_packet_size = 0;
+	bw_info->type = 0;
+	bw_info->max_esit_payload = 0;
+}
+
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_input_control_ctx *ctrl_ctx,
+		struct xhci_virt_device *virt_dev)
+{
+	struct xhci_bw_info *bw_info;
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int ep_type;
+	int i;
+
+	for (i = 1; i < 31; i++) {
+		bw_info = &virt_dev->eps[i].bw_info;
+
+		/* We can't tell what endpoint type is being dropped, but
+		 * unconditionally clearing the bandwidth info for non-periodic
+		 * endpoints should be harmless because the info will never be
+		 * set in the first place.
+		 */
+		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
+			/* Dropped endpoint */
+			xhci_clear_endpoint_bw_info(bw_info);
+			continue;
+		}
+
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
+			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+
+			/* Ignore non-periodic endpoints */
+			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+					ep_type != ISOC_IN_EP &&
+					ep_type != INT_IN_EP)
+				continue;
+
+			/* Added or changed endpoint */
+			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
+					le32_to_cpu(ep_ctx->ep_info));
+			/* Number of packets and mult are zero-based in the
+			 * input context, but we want one-based for the
+			 * interval table.
+			 */
+			bw_info->mult = CTX_TO_EP_MULT(
+					le32_to_cpu(ep_ctx->ep_info)) + 1;
+			bw_info->num_packets = CTX_TO_MAX_BURST(
+					le32_to_cpu(ep_ctx->ep_info2)) + 1;
+			bw_info->max_packet_size = MAX_PACKET_DECODED(
+					le32_to_cpu(ep_ctx->ep_info2));
+			bw_info->type = ep_type;
+			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
+					le32_to_cpu(ep_ctx->tx_info));
+		}
+	}
+}
+
+/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
+ * Useful when you want to change one particular aspect of the endpoint and then
+ * issue a configure endpoint command.
+ */
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		unsigned int ep_index)
+{
+	struct xhci_ep_ctx *out_ep_ctx;
+	struct xhci_ep_ctx *in_ep_ctx;
+
+	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+
+	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
+	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
+	in_ep_ctx->deq = out_ep_ctx->deq;
+	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+	if (xhci->quirks & XHCI_MTK_HOST) {
+		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
+		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
+	}
+}
+
+/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
+ * Useful when you want to change one particular aspect of the device slot and
+ * then issue a configure endpoint command.  Only the context entries field
+ * matters, but we'll copy the whole thing anyway.
+ */
+void xhci_slot_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx)
+{
+	struct xhci_slot_ctx *in_slot_ctx;
+	struct xhci_slot_ctx *out_slot_ctx;
+
+	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
+
+	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
+	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
+	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
+	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
+}
+
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int i;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocating %d scratchpad buffers", num_sp);
+
+	if (!num_sp)
+		return 0;
+
+	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
+				dev_to_node(dev));
+	if (!xhci->scratchpad)
+		goto fail_sp;
+
+	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
+				     num_sp * sizeof(u64),
+				     &xhci->scratchpad->sp_dma, flags);
+	if (!xhci->scratchpad->sp_array)
+		goto fail_sp2;
+
+	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
+					flags, dev_to_node(dev));
+	if (!xhci->scratchpad->sp_buffers)
+		goto fail_sp3;
+
+	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
+	for (i = 0; i < num_sp; i++) {
+		dma_addr_t dma;
+		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
+				flags);
+		if (!buf)
+			goto fail_sp4;
+
+		xhci->scratchpad->sp_array[i] = dma;
+		xhci->scratchpad->sp_buffers[i] = buf;
+	}
+
+	return 0;
+
+ fail_sp4:
+	for (i = i - 1; i >= 0; i--) {
+		dma_free_coherent(dev, xhci->page_size,
+				    xhci->scratchpad->sp_buffers[i],
+				    xhci->scratchpad->sp_array[i]);
+	}
+
+	kfree(xhci->scratchpad->sp_buffers);
+
+ fail_sp3:
+	dma_free_coherent(dev, num_sp * sizeof(u64),
+			    xhci->scratchpad->sp_array,
+			    xhci->scratchpad->sp_dma);
+
+ fail_sp2:
+	kfree(xhci->scratchpad);
+	xhci->scratchpad = NULL;
+
+ fail_sp:
+	return -ENOMEM;
+}
+
+static void scratchpad_free(struct xhci_hcd *xhci)
+{
+	int num_sp;
+	int i;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	if (!xhci->scratchpad)
+		return;
+
+	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+	for (i = 0; i < num_sp; i++) {
+		dma_free_coherent(dev, xhci->page_size,
+				    xhci->scratchpad->sp_buffers[i],
+				    xhci->scratchpad->sp_array[i]);
+	}
+	kfree(xhci->scratchpad->sp_buffers);
+	dma_free_coherent(dev, num_sp * sizeof(u64),
+			    xhci->scratchpad->sp_array,
+			    xhci->scratchpad->sp_dma);
+	kfree(xhci->scratchpad);
+	xhci->scratchpad = NULL;
+}
+
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags)
+{
+	struct xhci_command *command;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
+	if (!command)
+		return NULL;
+
+	if (allocate_completion) {
+		command->completion =
+			kzalloc_node(sizeof(struct completion), mem_flags,
+				dev_to_node(dev));
+		if (!command->completion) {
+			kfree(command);
+			return NULL;
+		}
+		init_completion(command->completion);
+	}
+
+	command->status = 0;
+	INIT_LIST_HEAD(&command->cmd_list);
+	return command;
+}
+
+struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags)
+{
+	struct xhci_command *command;
+
+	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
+	if (!command)
+		return NULL;
+
+	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+						   mem_flags);
+	if (!command->in_ctx) {
+		kfree(command->completion);
+		kfree(command);
+		return NULL;
+	}
+	return command;
+}
+
+void xhci_urb_free_priv(struct urb_priv *urb_priv)
+{
+	kfree(urb_priv);
+}
+
+void xhci_free_command(struct xhci_hcd *xhci,
+		struct xhci_command *command)
+{
+	xhci_free_container_ctx(xhci,
+			command->in_ctx);
+	kfree(command->completion);
+	kfree(command);
+}
+
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+		    struct xhci_ring *evt_ring,
+		    struct xhci_erst *erst,
+		    gfp_t flags)
+{
+	size_t size;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct xhci_erst_entry *entry;
+
+	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
+	erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+					    size, &erst->erst_dma_addr, flags);
+	if (!erst->entries)
+		return -ENOMEM;
+
+	erst->num_entries = evt_ring->num_segs;
+
+	seg = evt_ring->first_seg;
+	for (val = 0; val < evt_ring->num_segs; val++) {
+		entry = &erst->entries[val];
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	return 0;
+}
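+
+/*
+ * Each ERST entry is 16 bytes (segment base, segment size, reserved), so a
+ * single-segment event ring produces one entry whose seg_size is
+ * TRBS_PER_SEGMENT.
+ */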
+
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+	size_t size;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
+	if (erst->entries)
+		dma_free_coherent(dev, size,
+				erst->entries,
+				erst->erst_dma_addr);
+	erst->entries = NULL;
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
+	int i, j, num_ports;
+
+	cancel_delayed_work_sync(&xhci->cmd_timer);
+
+	xhci_free_erst(xhci, &xhci->erst);
+
+	if (xhci->event_ring)
+		xhci_ring_free(xhci, xhci->event_ring);
+	xhci->event_ring = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+
+	if (xhci->lpm_command)
+		xhci_free_command(xhci, xhci->lpm_command);
+	xhci->lpm_command = NULL;
+	if (xhci->cmd_ring)
+		xhci_ring_free(xhci, xhci->cmd_ring);
+	xhci->cmd_ring = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
+	xhci_cleanup_command_queue(xhci);
+
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
+		}
+	}
+
+	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+		xhci_free_virt_devices_depth_first(xhci, i);
+
+	dma_pool_destroy(xhci->segment_pool);
+	xhci->segment_pool = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
+
+	dma_pool_destroy(xhci->device_pool);
+	xhci->device_pool = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
+
+	dma_pool_destroy(xhci->small_streams_pool);
+	xhci->small_streams_pool = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed small stream array pool");
+
+	dma_pool_destroy(xhci->medium_streams_pool);
+	xhci->medium_streams_pool = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed medium stream array pool");
+
+	if (xhci->dcbaa)
+		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
+				xhci->dcbaa, xhci->dcbaa->dma);
+	xhci->dcbaa = NULL;
+
+	scratchpad_free(xhci);
+
+	if (!xhci->rh_bw)
+		goto no_bw;
+
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_tt_bw_info *tt, *n;
+		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+			list_del(&tt->tt_list);
+			kfree(tt);
+		}
+	}
+
+no_bw:
+	xhci->cmd_ring_reserved_trbs = 0;
+	xhci->usb2_rhub.num_ports = 0;
+	xhci->usb3_rhub.num_ports = 0;
+	xhci->num_active_eps = 0;
+	kfree(xhci->usb2_rhub.ports);
+	kfree(xhci->usb3_rhub.ports);
+	kfree(xhci->hw_ports);
+	kfree(xhci->rh_bw);
+	kfree(xhci->ext_caps);
+
+	xhci->usb2_rhub.ports = NULL;
+	xhci->usb3_rhub.ports = NULL;
+	xhci->hw_ports = NULL;
+	xhci->rh_bw = NULL;
+	xhci->ext_caps = NULL;
+
+	xhci->page_size = 0;
+	xhci->page_shift = 0;
+	xhci->bus_state[0].bus_suspended = 0;
+	xhci->bus_state[1].bus_suspended = 0;
+}
+
+static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
+		struct xhci_segment *input_seg,
+		union xhci_trb *start_trb,
+		union xhci_trb *end_trb,
+		dma_addr_t input_dma,
+		struct xhci_segment *result_seg,
+		char *test_name, int test_number)
+{
+	unsigned long long start_dma;
+	unsigned long long end_dma;
+	struct xhci_segment *seg;
+
+	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
+	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
+
+	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
+	if (seg != result_seg) {
+		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
+				test_name, test_number);
+		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+				"input DMA 0x%llx\n",
+				input_seg,
+				(unsigned long long) input_dma);
+		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
+				"ending TRB %p (0x%llx DMA)\n",
+				start_trb, start_dma,
+				end_trb, end_dma);
+		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+				result_seg, seg);
+		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
+			  true);
+		return -1;
+	}
+	return 0;
+}
+
+/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
+{
+	struct {
+		dma_addr_t		input_dma;
+		struct xhci_segment	*result_seg;
+	} simple_test_vector[] = {
+		/* A zeroed DMA field should fail */
+		{ 0, NULL },
+		/* One TRB before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 16, NULL },
+		/* One byte before the ring start should fail */
+		{ xhci->event_ring->first_seg->dma - 1, NULL },
+		/* Starting TRB should succeed */
+		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+		/* Ending TRB should succeed */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+			xhci->event_ring->first_seg },
+		/* One byte after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+		/* One TRB after the ring end should fail */
+		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+		/* An address of all ones should fail */
+		{ (dma_addr_t) (~0), NULL },
+	};
+	struct {
+		struct xhci_segment	*input_seg;
+		union xhci_trb		*start_trb;
+		union xhci_trb		*end_trb;
+		dma_addr_t		input_dma;
+		struct xhci_segment	*result_seg;
+	} complex_test_vector[] = {
+		/* Test feeding a valid DMA address from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid end TRB from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->event_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* Test feeding a valid start and end TRB from a different ring */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = xhci->cmd_ring->first_seg->trbs,
+			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+			.input_dma = xhci->cmd_ring->first_seg->dma,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[0],
+			.end_trb = &xhci->event_ring->first_seg->trbs[3],
+			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[6],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but after this wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+		/* TRB in this ring, but before this wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+			.result_seg = NULL,
+		},
+		/* TRB not in this ring, and we have a wrapped TD */
+		{	.input_seg = xhci->event_ring->first_seg,
+			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+			.end_trb = &xhci->event_ring->first_seg->trbs[1],
+			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
+			.result_seg = NULL,
+		},
+	};
+
+	unsigned int num_tests;
+	int i, ret;
+
+	num_tests = ARRAY_SIZE(simple_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				xhci->event_ring->first_seg,
+				xhci->event_ring->first_seg->trbs,
+				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+				simple_test_vector[i].input_dma,
+				simple_test_vector[i].result_seg,
+				"Simple", i);
+		if (ret < 0)
+			return ret;
+	}
+
+	num_tests = ARRAY_SIZE(complex_test_vector);
+	for (i = 0; i < num_tests; i++) {
+		ret = xhci_test_trb_in_td(xhci,
+				complex_test_vector[i].input_seg,
+				complex_test_vector[i].start_trb,
+				complex_test_vector[i].end_trb,
+				complex_test_vector[i].input_dma,
+				complex_test_vector[i].result_seg,
+				"Complex", i);
+		if (ret < 0)
+			return ret;
+	}
+	xhci_dbg(xhci, "TRB math tests passed.\n");
+	return 0;
+}
+
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+	u64 temp;
+	dma_addr_t deq;
+
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+			xhci->event_ring->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci, "WARN something wrong with SW event ring "
+				"dequeue ptr.\n");
+	/* Update HC event ring dequeue pointer */
+	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	temp &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write event ring dequeue pointer, "
+			"preserving EHB bit");
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue);
+}
+
+static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+		__le32 __iomem *addr, int max_caps)
+{
+	u32 temp, port_offset, port_count;
+	int i;
+	u8 major_revision, minor_revision;
+	struct xhci_hub *rhub;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	temp = readl(addr);
+	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+	minor_revision = XHCI_EXT_PORT_MINOR(temp);
+
+	if (major_revision == 0x03) {
+		rhub = &xhci->usb3_rhub;
+	} else if (major_revision <= 0x02) {
+		rhub = &xhci->usb2_rhub;
+	} else {
+		xhci_warn(xhci, "Ignoring unknown port speed, "
+				"Ext Cap %p, revision = 0x%x\n",
+				addr, major_revision);
+		/* Ignore port protocols we can't understand. FIXME */
+		return;
+	}
+	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
+
+	if (rhub->min_rev < minor_revision)
+		rhub->min_rev = minor_revision;
+
+	/* Port offset and count in the third dword, see section 7.2 */
+	temp = readl(addr + 2);
+	port_offset = XHCI_EXT_PORT_OFF(temp);
+	port_count = XHCI_EXT_PORT_COUNT(temp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Ext Cap %p, port offset = %u, "
+			"count = %u, revision = 0x%x",
+			addr, port_offset, port_count, major_revision);
+	/* Port count includes the current port offset */
+	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
+		/* WTF? "Valid values are ‘1’ to MaxPorts" */
+		return;
+
+	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
+	if (rhub->psi_count) {
+		rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
+				    GFP_KERNEL, dev_to_node(dev));
+		if (!rhub->psi)
+			rhub->psi_count = 0;
+
+		rhub->psi_uid_count++;
+		for (i = 0; i < rhub->psi_count; i++) {
+			rhub->psi[i] = readl(addr + 4 + i);
+
+			/* count unique ID values; two consecutive entries can
+			 * have the same ID if the link is asymmetric
+			 */
+			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
+				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
+				rhub->psi_uid_count++;
+
+			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
+				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
+				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
+				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
+				  XHCI_EXT_PORT_LP(rhub->psi[i]),
+				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
+		}
+	}
+	/* cache usb2 port capabilities */
+	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
+		xhci->ext_caps[xhci->num_ext_caps++] = temp;
+
+	/* Check the host's USB2 LPM capability */
+	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
+			(temp & XHCI_L1C)) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 0.96: support USB2 software lpm");
+		xhci->sw_lpm_support = 1;
+	}
+
+	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 1.0: support USB2 software lpm");
+		xhci->sw_lpm_support = 1;
+		if (temp & XHCI_HLC) {
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"xHCI 1.0: support USB2 hardware lpm");
+			xhci->hw_lpm_support = 1;
+		}
+	}
+
+	port_offset--;
+	for (i = port_offset; i < (port_offset + port_count); i++) {
+		struct xhci_port *hw_port = &xhci->hw_ports[i];
+		/* Duplicate entry.  Ignore the port if the revisions differ. */
+		if (hw_port->rhub) {
+			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+					" port %u\n", addr, i);
+			xhci_warn(xhci, "Port was marked as USB %u, "
+					"duplicated as USB %u\n",
+					hw_port->rhub->maj_rev, major_revision);
+			/* Only adjust the roothub port counts if we haven't
+			 * found a similar duplicate.
+			 */
+			if (hw_port->rhub != rhub &&
+				 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
+				hw_port->rhub->num_ports--;
+				hw_port->hcd_portnum = DUPLICATE_ENTRY;
+			}
+			continue;
+		}
+		hw_port->rhub = rhub;
+		rhub->num_ports++;
+	}
+	/* FIXME: Should we disable ports not in the Extended Capabilities? */
+}
+
+static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
+					struct xhci_hub *rhub, gfp_t flags)
+{
+	int port_index = 0;
+	int i;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	if (!rhub->num_ports)
+		return;
+	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+			flags, dev_to_node(dev));
+	if (!rhub->ports)
+		return;
+	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+		if (xhci->hw_ports[i].rhub != rhub ||
+		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
+			continue;
+		xhci->hw_ports[i].hcd_portnum = port_index;
+		rhub->ports[port_index] = &xhci->hw_ports[i];
+		port_index++;
+		if (port_index == rhub->num_ports)
+			break;
+	}
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be.  We can't count on the port
+ * speed bits in the PORTSC register being correct until a device is connected,
+ * but we need to set up the two fake roothubs with the correct number of USB
+ * 3.0 and USB 2.0 ports at host controller initialization time.
+ */
+static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+{
+	void __iomem *base;
+	u32 offset;
+	unsigned int num_ports;
+	int i, j;
+	int cap_count = 0;
+	u32 cap_start;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
+				flags, dev_to_node(dev));
+	if (!xhci->hw_ports)
+		return -ENOMEM;
+
+	for (i = 0; i < num_ports; i++) {
+		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
+			NUM_PORT_REGS * i;
+		xhci->hw_ports[i].hw_portnum = i;
+	}
+
+	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
+				   dev_to_node(dev));
+	if (!xhci->rh_bw)
+		return -ENOMEM;
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
+		bw_table = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+	base = &xhci->cap_regs->hc_capbase;
+
+	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
+	if (!cap_start) {
+		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
+		return -ENODEV;
+	}
+
+	offset = cap_start;
+	/* count extended protocol capability entries for later caching */
+	while (offset) {
+		cap_count++;
+		offset = xhci_find_next_ext_cap(base, offset,
+						      XHCI_EXT_CAPS_PROTOCOL);
+	}
+
+	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
+				flags, dev_to_node(dev));
+	if (!xhci->ext_caps)
+		return -ENOMEM;
+
+	offset = cap_start;
+
+	while (offset) {
+		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
+		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
+		    num_ports)
+			break;
+		offset = xhci_find_next_ext_cap(base, offset,
+						XHCI_EXT_CAPS_PROTOCOL);
+	}
+	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
+		xhci_warn(xhci, "No ports on the roothubs?\n");
+		return -ENODEV;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
+		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
+
+	/* Place limits on the number of roothub ports so that the hub
+	 * descriptors aren't longer than the USB core will allocate.
+	 */
+	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 3.0 roothub ports to %u.",
+				USB_SS_MAXPORTS);
+		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
+	}
+	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 2.0 roothub ports to %u.",
+				USB_MAXCHILDREN);
+		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
+	}
+
+	/*
+	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
+	 * Not sure how the USB core will handle a hub with no ports...
+	 */
+
+	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
+	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
+
+	return 0;
+}
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	dma_addr_t	dma;
+	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
+	unsigned int	val, val2;
+	u64		val_64;
+	u32		page_size, temp;
+	int		i, ret;
+
+	INIT_LIST_HEAD(&xhci->cmd_list);
+
+	/* init command timeout work */
+	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+	init_completion(&xhci->cmd_ring_stop_completion);
+
+	page_size = readl(&xhci->op_regs->page_size);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size register = 0x%x", page_size);
+	for (i = 0; i < 16; i++) {
+		if ((0x1 & page_size) != 0)
+			break;
+		page_size = page_size >> 1;
+	}
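+	/* e.g. a raw value of 0x1 gives i == 0, i.e. 4K pages supported */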
+	if (i < 16)
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size of %iK", (1 << (i+12)) / 1024);
+	else
+		xhci_warn(xhci, "WARN: no supported page size\n");
+	/* Use 4K pages, since that's common and the minimum the HC supports */
+	xhci->page_shift = 12;
+	xhci->page_size = 1 << xhci->page_shift;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"HCD page size set to %iK", xhci->page_size / 1024);
+
+	/*
+	 * Program the Number of Device Slots Enabled field in the CONFIG
+	 * register with the max value of slots the HC can handle.
+	 */
+	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// xHC can handle at most %d device slots.", val);
+	val2 = readl(&xhci->op_regs->config_reg);
+	val |= (val2 & ~HCS_SLOTS_MASK);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting Max device slots reg = 0x%x.", val);
+	writel(val, &xhci->op_regs->config_reg);
+
+	/*
+	 * xHCI section 5.4.6 - doorbell array must be
+	 * "physically contiguous and 64-byte (cache line) aligned".
+	 */
+	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+			flags);
+	if (!xhci->dcbaa)
+		goto fail;
+	memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
+	xhci->dcbaa->dma = dma;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Device context base array address = 0x%llx (DMA), %p (virt)",
+			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+
+	/*
+	 * Initialize the ring segment pool.  The ring must be a contiguous
+	 * structure composed of TRBs.  The TRBs must be 16-byte aligned;
+	 * however, the command ring segment needs 64-byte aligned segments
+	 * and our use of dma addresses in the trb_address_map radix tree needs
+	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
+	 */
+	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+
+	/* See Table 46 and Note on Figure 55 */
+	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+			2112, 64, xhci->page_size);
+	if (!xhci->segment_pool || !xhci->device_pool)
+		goto fail;
+
+	/* Linear stream context arrays don't have any boundary restrictions,
+	 * and only need to be 16-byte aligned.
+	 */
+	xhci->small_streams_pool =
+		dma_pool_create("xHCI 256 byte stream ctx arrays",
+			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+	xhci->medium_streams_pool =
+		dma_pool_create("xHCI 1KB stream ctx arrays",
+			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
+	 * will be allocated with dma_alloc_coherent()
+	 */
+
+	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+		goto fail;
+
+	/* Set up the command ring to have one segment for now. */
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+	if (!xhci->cmd_ring)
+		goto fail;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocated command ring at %p", xhci->cmd_ring);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
+			(unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+	/* Set the address in the Command Ring Control register */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
+		xhci->cmd_ring->cycle_state;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%016llx", val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+
+	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
+	if (!xhci->lpm_command)
+		goto fail;
+
+	/* Reserve one command ring TRB for disabling LPM.
+	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
+	 * disabling LPM, we only need to reserve one TRB for all devices.
+	 */
+	xhci->cmd_ring_reserved_trbs++;
+
+	val = readl(&xhci->cap_regs->db_off);
+	val &= DBOFF_MASK;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr", val);
+	xhci->dba = (void __iomem *) xhci->cap_regs + val;
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
+
+	/*
+	 * Event ring setup: Allocate a normal ring, but also setup
+	 * the event ring segment table (ERST).  Section 4.9.3.
+	 */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
+					0, flags);
+	if (!xhci->event_ring)
+		goto fail;
+	if (xhci_check_trb_in_td_math(xhci) < 0)
+		goto fail;
+
+	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+	if (ret)
+		goto fail;
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl(&xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
+			val);
+	writel(val, &xhci->ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST entries to point to event ring.");
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST base address for ir_set 0 = 0x%llx",
+			(unsigned long long)xhci->erst.erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	xhci_set_hc_event_deq(xhci);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set 0.");
+
+	/*
+	 * XXX: Might need to set the Interrupter Moderation Register to
+	 * something other than the default (~1ms minimum between interrupts).
+	 * See section 5.5.1.2.
+	 */
+	for (i = 0; i < MAX_HC_SLOTS; i++)
+		xhci->devs[i] = NULL;
+	for (i = 0; i < USB_MAXCHILDREN; i++) {
+		xhci->bus_state[0].resume_done[i] = 0;
+		xhci->bus_state[1].resume_done[i] = 0;
+		/* Only the USB 2.0 completions will ever be used. */
+		init_completion(&xhci->bus_state[1].rexit_done[i]);
+	}
+
+	if (scratchpad_alloc(xhci, flags))
+		goto fail;
+	if (xhci_setup_port_arrays(xhci, flags))
+		goto fail;
+
+	/* Enable USB 3.0 device notifications for function remote wake, which
+	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
+	 * U3 (device suspend).
+	 */
+	temp = readl(&xhci->op_regs->dev_notification);
+	temp &= ~DEV_NOTE_MASK;
+	temp |= DEV_NOTE_FWAKE;
+	writel(temp, &xhci->op_regs->dev_notification);
+
+	return 0;
+
+fail:
+	xhci_halt(xhci);
+	xhci_reset(xhci);
+	xhci_mem_cleanup(xhci);
+	return -ENOMEM;
+}
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
new file mode 100644
index 0000000..fa33d6e
--- /dev/null
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author:
+ *  Zhigang.Wei <zhigang.wei@mediatek.com>
+ *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xhci.h"
+#include "xhci-mtk.h"
+
+#define SS_BW_BOUNDARY	51000
+/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
+#define HS_BW_BOUNDARY	6144
+/* usb2 spec section 11.18.1: at most 188 FS bytes per microframe */
+#define FS_PAYLOAD_MAX 188
+
+/* mtk scheduler bitmasks */
+#define EP_BPKTS(p)	((p) & 0x3f)
+#define EP_BCSCOUNT(p)	(((p) & 0x7) << 8)
+#define EP_BBM(p)	((p) << 11)
+#define EP_BOFFSET(p)	((p) & 0x3fff)
+#define EP_BREPEAT(p)	(((p) & 0x7fff) << 16)
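+
+/*
+ * Resulting layout of the two reserved ep-context DWs filled in by
+ * xhci_mtk_add_ep_quirk() below, derived from the masks above:
+ *   reserved[0]: BPKTS in bits [5:0], BCSCOUNT in [10:8], BBM in [11]
+ *   reserved[1]: BOFFSET in bits [13:0], BREPEAT in [30:16]
+ */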
+
+static int is_fs_or_ls(enum usb_device_speed speed)
+{
+	return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
+}
+
+/*
+ * get the index of the bandwidth domain array which @ep belongs to.
+ *
+ * the bandwidth domain array is saved in @sch_array of struct xhci_hcd_mtk;
+ * each HS root port is treated as a single bandwidth domain,
+ * but each SS root port is treated as two bandwidth domains, one for IN eps,
+ * one for OUT eps.
+ * @real_port value is defined as follows according to the xHCI spec:
+ * 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc.
+ * so the bandwidth domain array is organized as follows for simplification:
+ * SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
+ */
+static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
+	struct usb_host_endpoint *ep)
+{
+	struct xhci_virt_device *virt_dev;
+	int bw_index;
+
+	virt_dev = xhci->devs[udev->slot_id];
+
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (usb_endpoint_dir_out(&ep->desc))
+			bw_index = (virt_dev->real_port - 1) * 2;
+		else
+			bw_index = (virt_dev->real_port - 1) * 2 + 1;
+	} else {
+		/* add one more for each SS port */
+		bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
+	}
+
+	return bw_index;
+}
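+
+/*
+ * Worked example (hypothetical controller with two SS root ports):
+ * the domains are SS0-OUT(0), SS0-IN(1), SS1-OUT(2), SS1-IN(3), HS0(4), ...
+ * so an OUT ep on the second SS port (real_port == 2) maps to
+ * (2 - 1) * 2 = 2, and an ep on the first HS port (real_port == 3 per
+ * the numbering above) maps to 3 + 2 - 1 = 4.
+ */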
+
+static void setup_sch_info(struct usb_device *udev,
+		struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep)
+{
+	u32 ep_type;
+	u32 ep_interval;
+	u32 max_packet_size;
+	u32 max_burst;
+	u32 mult;
+	u32 esit_pkts;
+
+	ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+	ep_interval = CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
+	max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+	max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
+	mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
+
+	sch_ep->esit = 1 << ep_interval;
+	sch_ep->offset = 0;
+	sch_ep->burst_mode = 0;
+
+	if (udev->speed == USB_SPEED_HIGH) {
+		sch_ep->cs_count = 0;
+
+		/*
+		 * usb_20 spec section 5.9:
+		 * a single microframe is enough for HS synchronous endpoints
+		 * in an interval
+		 */
+		sch_ep->num_budget_microframes = 1;
+		sch_ep->repeat = 0;
+
+		/*
+		 * xHCI spec section 6.2.3.4:
+		 * @max_burst is the number of additional transaction
+		 * opportunities per microframe
+		 */
+		sch_ep->pkts = max_burst + 1;
+		sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
+	} else if (udev->speed == USB_SPEED_SUPER) {
+		/* usb3_r1 spec section4.4.7 & 4.4.8 */
+		sch_ep->cs_count = 0;
+		esit_pkts = (mult + 1) * (max_burst + 1);
+		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
+			sch_ep->pkts = esit_pkts;
+			sch_ep->num_budget_microframes = 1;
+			sch_ep->repeat = 0;
+		}
+
+		if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
+			if (esit_pkts <= sch_ep->esit)
+				sch_ep->pkts = 1;
+			else
+				sch_ep->pkts = roundup_pow_of_two(esit_pkts)
+					/ sch_ep->esit;
+
+			sch_ep->num_budget_microframes =
+				DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
+
+			if (sch_ep->num_budget_microframes > 1)
+				sch_ep->repeat = 1;
+			else
+				sch_ep->repeat = 0;
+		}
+		sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
+	} else if (is_fs_or_ls(udev->speed)) {
+
+		/*
+		 * usb_20 spec section 11.18.4:
+		 * assume the worst case
+		 */
+		sch_ep->repeat = 0;
+		sch_ep->pkts = 1; /* at most one packet for each microframe */
+		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
+			sch_ep->cs_count = 3; /* at most 3 CS are needed */
+			/* one for SS and one for budgeted transaction */
+			sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
+			sch_ep->bw_cost_per_microframe = max_packet_size;
+		}
+		if (ep_type == ISOC_OUT_EP) {
+
+			/*
+			 * the best case FS budget assumes that 188 FS bytes
+			 * occur in each microframe
+			 */
+			sch_ep->num_budget_microframes = DIV_ROUND_UP(
+				max_packet_size, FS_PAYLOAD_MAX);
+			sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
+			sch_ep->cs_count = sch_ep->num_budget_microframes;
+		}
+		if (ep_type == ISOC_IN_EP) {
+			/* at most need additional two CS. */
+			sch_ep->cs_count = DIV_ROUND_UP(
+				max_packet_size, FS_PAYLOAD_MAX) + 2;
+			sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
+			sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
+		}
+	}
+}
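+
+/*
+ * Illustrative numbers (hypothetical HS isoc ep with Interval field 3,
+ * max_burst 2, max_packet 1024): esit = 1 << 3 = 8 uframes,
+ * pkts = max_burst + 1 = 3, one budget uframe per interval, and
+ * bw_cost_per_microframe = 1024 * 3 = 3072 bytes.
+ */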
+
+/* Get maximum bandwidth when we schedule at offset slot. */
+static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
+	struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+	u32 num_esit;
+	u32 max_bw = 0;
+	int i;
+	int j;
+
+	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+	for (i = 0; i < num_esit; i++) {
+		u32 base = offset + i * sch_ep->esit;
+
+		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+			if (sch_bw->bus_bw[base + j] > max_bw)
+				max_bw = sch_bw->bus_bw[base + j];
+		}
+	}
+	return max_bw;
+}
+
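+/*
+ * Both get_max_bw() and update_bus_bw() walk the same slots: an ep with
+ * esit == 8 touches bus_bw[base], bus_bw[base + 8], ..., covering all
+ * XHCI_MTK_MAX_ESIT / 8 == 8 service intervals of the 64-uframe window.
+ */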
+static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
+	struct mu3h_sch_ep_info *sch_ep, int bw_cost)
+{
+	u32 num_esit;
+	u32 base;
+	int i;
+	int j;
+
+	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+	for (i = 0; i < num_esit; i++) {
+		base = sch_ep->offset + i * sch_ep->esit;
+		for (j = 0; j < sch_ep->num_budget_microframes; j++)
+			sch_bw->bus_bw[base + j] += bw_cost;
+	}
+}
+
+static int check_sch_bw(struct usb_device *udev,
+	struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+	u32 offset;
+	u32 esit;
+	u32 num_budget_microframes;
+	u32 min_bw;
+	u32 min_index;
+	u32 worst_bw;
+	u32 bw_boundary;
+
+	if (sch_ep->esit > XHCI_MTK_MAX_ESIT)
+		sch_ep->esit = XHCI_MTK_MAX_ESIT;
+
+	esit = sch_ep->esit;
+	num_budget_microframes = sch_ep->num_budget_microframes;
+
+	/*
+	 * Search through all possible scheduling microframes, and find
+	 * the one whose worst-case bandwidth is minimal.
+	 */
+	min_bw = ~0;
+	min_index = 0;
+	for (offset = 0; offset < esit; offset++) {
+		if ((offset + num_budget_microframes) > sch_ep->esit)
+			break;
+
+		/*
+		 * usb_20 spec section 11.18:
+		 * a Start-Split must never be scheduled in Y6
+		 */
+		if (is_fs_or_ls(udev->speed) && (offset % 8 == 6))
+			continue;
+
+		worst_bw = get_max_bw(sch_bw, sch_ep, offset);
+		if (min_bw > worst_bw) {
+			min_bw = worst_bw;
+			min_index = offset;
+		}
+		if (min_bw == 0)
+			break;
+	}
+	sch_ep->offset = min_index;
+
+	bw_boundary = (udev->speed == USB_SPEED_SUPER)
+				? SS_BW_BOUNDARY : HS_BW_BOUNDARY;
+
+	/* check bandwidth */
+	if (min_bw + sch_ep->bw_cost_per_microframe > bw_boundary)
+		return -ERANGE;
+
+	/* update bus bandwidth info */
+	update_bus_bw(sch_bw, sch_ep, sch_ep->bw_cost_per_microframe);
+
+	return 0;
+}
+
+static bool need_bw_sch(struct usb_host_endpoint *ep,
+	enum usb_device_speed speed, int has_tt)
+{
+	/* only for periodic endpoints */
+	if (usb_endpoint_xfer_control(&ep->desc)
+		|| usb_endpoint_xfer_bulk(&ep->desc))
+		return false;
+
+	/*
+	 * LS & FS periodic endpoints whose device is not behind a TT are
+	 * also ignored: the root hub will schedule them directly, but the
+	 * @bpkts field of the endpoint context needs to be set to 1.
+	 */
+	if (is_fs_or_ls(speed) && !has_tt)
+		return false;
+
+	return true;
+}
+
+int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
+	struct mu3h_sch_bw_info *sch_array;
+	int num_usb_bus;
+	int i;
+
+	/* ss IN and OUT are separated */
+	num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
+
+	sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
+	if (!sch_array)
+		return -ENOMEM;
+
+	for (i = 0; i < num_usb_bus; i++)
+		INIT_LIST_HEAD(&sch_array[i].bw_ep_list);
+
+	mtk->sch_array = sch_array;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
+
+void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk)
+{
+	kfree(mtk->sch_array);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_sch_exit);
+
+int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+	struct xhci_hcd *xhci;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_virt_device *virt_dev;
+	struct mu3h_sch_bw_info *sch_bw;
+	struct mu3h_sch_ep_info *sch_ep;
+	struct mu3h_sch_bw_info *sch_array;
+	unsigned int ep_index;
+	int bw_index;
+	int ret = 0;
+
+	xhci = hcd_to_xhci(hcd);
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+	sch_array = mtk->sch_array;
+
+	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
+		__func__, usb_endpoint_type(&ep->desc), udev->speed,
+		usb_endpoint_maxp(&ep->desc),
+		usb_endpoint_dir_in(&ep->desc), ep);
+
+	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
+		/*
+		 * set @bpkts to 1 if it is an LS or FS periodic endpoint
+		 * whose device is not connected through an external HS hub
+		 */
+		if (usb_endpoint_xfer_int(&ep->desc)
+			|| usb_endpoint_xfer_isoc(&ep->desc))
+			ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
+
+		return 0;
+	}
+
+	bw_index = get_bw_index(xhci, udev, ep);
+	sch_bw = &sch_array[bw_index];
+
+	sch_ep = kzalloc(sizeof(*sch_ep), GFP_NOIO);
+	if (!sch_ep)
+		return -ENOMEM;
+
+	setup_sch_info(udev, ep_ctx, sch_ep);
+
+	ret = check_sch_bw(udev, sch_bw, sch_ep);
+	if (ret) {
+		xhci_err(xhci, "Not enough bandwidth!\n");
+		kfree(sch_ep);
+		return -ENOSPC;
+	}
+
+	list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+	sch_ep->ep = ep;
+
+	ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+		| EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
+	ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+		| EP_BREPEAT(sch_ep->repeat));
+
+	xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+			sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+			sch_ep->offset, sch_ep->repeat);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk);
+
+void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+	struct xhci_hcd *xhci;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_virt_device *virt_dev;
+	struct mu3h_sch_bw_info *sch_array;
+	struct mu3h_sch_bw_info *sch_bw;
+	struct mu3h_sch_ep_info *sch_ep;
+	int bw_index;
+
+	xhci = hcd_to_xhci(hcd);
+	virt_dev = xhci->devs[udev->slot_id];
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	sch_array = mtk->sch_array;
+
+	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n",
+		__func__, usb_endpoint_type(&ep->desc), udev->speed,
+		usb_endpoint_maxp(&ep->desc),
+		usb_endpoint_dir_in(&ep->desc), ep);
+
+	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
+		return;
+
+	bw_index = get_bw_index(xhci, udev, ep);
+	sch_bw = &sch_array[bw_index];
+
+	list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+		if (sch_ep->ep == ep) {
+			update_bus_bw(sch_bw, sch_ep,
+				-sch_ep->bw_cost_per_microframe);
+			list_del(&sch_ep->endpoint);
+			kfree(sch_ep);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
new file mode 100644
index 0000000..60987c7
--- /dev/null
+++ b/drivers/usb/host/xhci-mtk.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek xHCI Host Controller Driver
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author:
+ *  Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "xhci.h"
+#include "xhci-mtk.h"
+
+/* ip_pw_ctrl0 register */
+#define CTRL0_IP_SW_RST	BIT(0)
+
+/* ip_pw_ctrl1 register */
+#define CTRL1_IP_HOST_PDN	BIT(0)
+
+/* ip_pw_ctrl2 register */
+#define CTRL2_IP_DEV_PDN	BIT(0)
+
+/* ip_pw_sts1 register */
+#define STS1_IP_SLEEP_STS	BIT(30)
+#define STS1_U3_MAC_RST	BIT(16)
+#define STS1_XHCI_RST		BIT(11)
+#define STS1_SYS125_RST	BIT(10)
+#define STS1_REF_RST		BIT(8)
+#define STS1_SYSPLL_STABLE	BIT(0)
+
+/* ip_xhci_cap register */
+#define CAP_U3_PORT_NUM(p)	((p) & 0xff)
+#define CAP_U2_PORT_NUM(p)	(((p) >> 8) & 0xff)
+
+/* u3_ctrl_p register */
+#define CTRL_U3_PORT_HOST_SEL	BIT(2)
+#define CTRL_U3_PORT_PDN	BIT(1)
+#define CTRL_U3_PORT_DIS	BIT(0)
+
+/* u2_ctrl_p register */
+#define CTRL_U2_PORT_HOST_SEL	BIT(2)
+#define CTRL_U2_PORT_PDN	BIT(1)
+#define CTRL_U2_PORT_DIS	BIT(0)
+
+/* u2_phy_pll register */
+#define CTRL_U2_FORCE_PLL_STB	BIT(28)
+
+/* usb remote wakeup registers in syscon */
+/* mt8173 etc */
+#define PERI_WK_CTRL1	0x4
+#define WC1_IS_C(x)	(((x) & 0xf) << 26)  /* cycle debounce */
+#define WC1_IS_EN	BIT(25)
+#define WC1_IS_P	BIT(6)  /* polarity for ip sleep */
+
+/* mt2712 etc */
+#define PERI_SSUSB_SPM_CTRL	0x0
+#define SSC_IP_SLEEP_EN	BIT(4)
+#define SSC_SPM_INT_EN		BIT(1)
+
+enum ssusb_uwk_vers {
+	SSUSB_UWK_V1 = 1,
+	SSUSB_UWK_V2,
+};
+
+static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
+{
+	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+	u32 value, check_val;
+	int u3_ports_disabled = 0;
+	int ret;
+	int i;
+
+	if (!mtk->has_ippc)
+		return 0;
+
+	/* power on host ip */
+	value = readl(&ippc->ip_pw_ctr1);
+	value &= ~CTRL1_IP_HOST_PDN;
+	writel(value, &ippc->ip_pw_ctr1);
+
+	/* power on and enable u3 ports except skipped ones */
+	for (i = 0; i < mtk->num_u3_ports; i++) {
+		if ((0x1 << i) & mtk->u3p_dis_msk) {
+			u3_ports_disabled++;
+			continue;
+		}
+
+		value = readl(&ippc->u3_ctrl_p[i]);
+		value &= ~(CTRL_U3_PORT_PDN | CTRL_U3_PORT_DIS);
+		value |= CTRL_U3_PORT_HOST_SEL;
+		writel(value, &ippc->u3_ctrl_p[i]);
+	}
+
+	/* power on and enable all u2 ports */
+	for (i = 0; i < mtk->num_u2_ports; i++) {
+		value = readl(&ippc->u2_ctrl_p[i]);
+		value &= ~(CTRL_U2_PORT_PDN | CTRL_U2_PORT_DIS);
+		value |= CTRL_U2_PORT_HOST_SEL;
+		writel(value, &ippc->u2_ctrl_p[i]);
+	}
+
+	/*
+	 * wait for the clocks to stabilize, and for the clock-domain resets
+	 * to deassert, after powering on and enabling the ports
+	 */
+	check_val = STS1_SYSPLL_STABLE | STS1_REF_RST |
+			STS1_SYS125_RST | STS1_XHCI_RST;
+
+	if (mtk->num_u3_ports > u3_ports_disabled)
+		check_val |= STS1_U3_MAC_RST;
+
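+	/* readl_poll_timeout(): check roughly every 100us, give up after 20ms */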
+	ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
+			  (check_val == (value & check_val)), 100, 20000);
+	if (ret) {
+		dev_err(mtk->dev, "clocks are not stable (0x%x)\n", value);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
+{
+	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+	u32 value;
+	int ret;
+	int i;
+
+	if (!mtk->has_ippc)
+		return 0;
+
+	/* power down u3 ports except skipped ones */
+	for (i = 0; i < mtk->num_u3_ports; i++) {
+		if ((0x1 << i) & mtk->u3p_dis_msk)
+			continue;
+
+		value = readl(&ippc->u3_ctrl_p[i]);
+		value |= CTRL_U3_PORT_PDN;
+		writel(value, &ippc->u3_ctrl_p[i]);
+	}
+
+	/* power down all u2 ports */
+	for (i = 0; i < mtk->num_u2_ports; i++) {
+		value = readl(&ippc->u2_ctrl_p[i]);
+		value |= CTRL_U2_PORT_PDN;
+		writel(value, &ippc->u2_ctrl_p[i]);
+	}
+
+	/* power down host ip */
+	value = readl(&ippc->ip_pw_ctr1);
+	value |= CTRL1_IP_HOST_PDN;
+	writel(value, &ippc->ip_pw_ctr1);
+
+	/* wait for host ip to sleep */
+	ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
+			  (value & STS1_IP_SLEEP_STS), 100, 100000);
+	if (ret) {
+		dev_err(mtk->dev, "ip sleep failed!!!\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
+{
+	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+	u32 value;
+
+	if (!mtk->has_ippc)
+		return 0;
+
+	/* reset whole ip */
+	value = readl(&ippc->ip_pw_ctr0);
+	value |= CTRL0_IP_SW_RST;
+	writel(value, &ippc->ip_pw_ctr0);
+	udelay(1);
+	value = readl(&ippc->ip_pw_ctr0);
+	value &= ~CTRL0_IP_SW_RST;
+	writel(value, &ippc->ip_pw_ctr0);
+
+	/*
+	 * the device ip is powered on by default; power it down,
+	 * otherwise ip-sleep will fail
+	 */
+	value = readl(&ippc->ip_pw_ctr2);
+	value |= CTRL2_IP_DEV_PDN;
+	writel(value, &ippc->ip_pw_ctr2);
+
+	value = readl(&ippc->ip_xhci_cap);
+	mtk->num_u3_ports = CAP_U3_PORT_NUM(value);
+	mtk->num_u2_ports = CAP_U2_PORT_NUM(value);
+	dev_dbg(mtk->dev, "%s u2p:%d, u3p:%d\n", __func__,
+			mtk->num_u2_ports, mtk->num_u3_ports);
+
+	return xhci_mtk_host_enable(mtk);
+}
+
+/* ignore the error if the clock does not exist */
+static struct clk *optional_clk_get(struct device *dev, const char *id)
+{
+	struct clk *opt_clk;
+
+	opt_clk = devm_clk_get(dev, id);
+	/* ignore error number except EPROBE_DEFER */
+	if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
+		opt_clk = NULL;
+
+	return opt_clk;
+}
+
+static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
+{
+	struct device *dev = mtk->dev;
+
+	mtk->sys_clk = devm_clk_get(dev, "sys_ck");
+	if (IS_ERR(mtk->sys_clk)) {
+		dev_err(dev, "fail to get sys_ck\n");
+		return PTR_ERR(mtk->sys_clk);
+	}
+
+	mtk->ref_clk = optional_clk_get(dev, "ref_ck");
+	if (IS_ERR(mtk->ref_clk))
+		return PTR_ERR(mtk->ref_clk);
+
+	mtk->mcu_clk = optional_clk_get(dev, "mcu_ck");
+	if (IS_ERR(mtk->mcu_clk))
+		return PTR_ERR(mtk->mcu_clk);
+
+	mtk->dma_clk = optional_clk_get(dev, "dma_ck");
+	return PTR_ERR_OR_ZERO(mtk->dma_clk);
+}
+
+static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(mtk->ref_clk);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable ref_clk\n");
+		goto ref_clk_err;
+	}
+
+	ret = clk_prepare_enable(mtk->sys_clk);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable sys_clk\n");
+		goto sys_clk_err;
+	}
+
+	ret = clk_prepare_enable(mtk->mcu_clk);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable mcu_clk\n");
+		goto mcu_clk_err;
+	}
+
+	ret = clk_prepare_enable(mtk->dma_clk);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable dma_clk\n");
+		goto dma_clk_err;
+	}
+
+	return 0;
+
+dma_clk_err:
+	clk_disable_unprepare(mtk->mcu_clk);
+mcu_clk_err:
+	clk_disable_unprepare(mtk->sys_clk);
+sys_clk_err:
+	clk_disable_unprepare(mtk->ref_clk);
+ref_clk_err:
+	return ret;
+}
+
+static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk)
+{
+	clk_disable_unprepare(mtk->dma_clk);
+	clk_disable_unprepare(mtk->mcu_clk);
+	clk_disable_unprepare(mtk->sys_clk);
+	clk_disable_unprepare(mtk->ref_clk);
+}
+
+/* only clocks can be turned off in ip-sleep wakeup mode */
+static void usb_wakeup_ip_sleep_set(struct xhci_hcd_mtk *mtk, bool enable)
+{
+	u32 reg, msk, val;
+
+	switch (mtk->uwk_vers) {
+	case SSUSB_UWK_V1:
+		reg = mtk->uwk_reg_base + PERI_WK_CTRL1;
+		msk = WC1_IS_EN | WC1_IS_C(0xf) | WC1_IS_P;
+		val = enable ? (WC1_IS_EN | WC1_IS_C(0x8)) : 0;
+		break;
+	case SSUSB_UWK_V2:
+		reg = mtk->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
+		msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
+		val = enable ? msk : 0;
+		break;
+	default:
+		return;
+	}
+	regmap_update_bits(mtk->uwk, reg, msk, val);
+}
+
+static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
+				struct device_node *dn)
+{
+	struct of_phandle_args args;
+	int ret;
+
+	/* Wakeup function is optional */
+	mtk->uwk_en = of_property_read_bool(dn, "wakeup-source");
+	if (!mtk->uwk_en)
+		return 0;
+
+	ret = of_parse_phandle_with_fixed_args(dn,
+				"mediatek,syscon-wakeup", 2, 0, &args);
+	if (ret)
+		return ret;
+
+	mtk->uwk_reg_base = args.args[0];
+	mtk->uwk_vers = args.args[1];
+	mtk->uwk = syscon_node_to_regmap(args.np);
+	of_node_put(args.np);
+	dev_info(mtk->dev, "uwk - reg:0x%x, version:%d\n",
+			mtk->uwk_reg_base, mtk->uwk_vers);
+
+	return PTR_ERR_OR_ZERO(mtk->uwk);
+}
+
+static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
+{
+	if (mtk->uwk_en)
+		usb_wakeup_ip_sleep_set(mtk, enable);
+}
+
+static int xhci_mtk_setup(struct usb_hcd *hcd);
+static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
+	.reset = xhci_mtk_setup,
+};
+
+static struct hc_driver __read_mostly xhci_mtk_hc_driver;
+
+static int xhci_mtk_ldos_enable(struct xhci_hcd_mtk *mtk)
+{
+	int ret;
+
+	ret = regulator_enable(mtk->vbus);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable vbus\n");
+		return ret;
+	}
+
+	ret = regulator_enable(mtk->vusb33);
+	if (ret) {
+		dev_err(mtk->dev, "failed to enable vusb33\n");
+		regulator_disable(mtk->vbus);
+		return ret;
+	}
+	return 0;
+}
+
+static void xhci_mtk_ldos_disable(struct xhci_hcd_mtk *mtk)
+{
+	regulator_disable(mtk->vbus);
+	regulator_disable(mtk->vusb33);
+}
+
+static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+
+	/*
+	 * As of now platform drivers don't provide MSI support, so we
+	 * ensure here that the generic code does not try to make a pci_dev
+	 * from our dev struct in order to set up MSI
+	 */
+	xhci->quirks |= XHCI_PLAT;
+	xhci->quirks |= XHCI_MTK_HOST;
+	/*
+	 * MTK host controller gives a spurious successful event after a
+	 * short transfer. Ignore it.
+	 */
+	xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+	if (mtk->lpm_support)
+		xhci->quirks |= XHCI_LPM_SUPPORT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_mtk_setup(struct usb_hcd *hcd)
+{
+	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+	int ret;
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		ret = xhci_mtk_ssusb_config(mtk);
+		if (ret)
+			return ret;
+	}
+
+	ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+	if (ret)
+		return ret;
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		ret = xhci_mtk_sch_init(mtk);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int xhci_mtk_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct xhci_hcd_mtk *mtk;
+	const struct hc_driver *driver;
+	struct xhci_hcd *xhci;
+	struct resource *res;
+	struct usb_hcd *hcd;
+	int ret = -ENODEV;
+	int irq;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	driver = &xhci_mtk_hc_driver;
+	mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
+	if (!mtk)
+		return -ENOMEM;
+
+	mtk->dev = dev;
+	mtk->vbus = devm_regulator_get(dev, "vbus");
+	if (IS_ERR(mtk->vbus)) {
+		dev_err(dev, "fail to get vbus\n");
+		return PTR_ERR(mtk->vbus);
+	}
+
+	mtk->vusb33 = devm_regulator_get(dev, "vusb33");
+	if (IS_ERR(mtk->vusb33)) {
+		dev_err(dev, "fail to get vusb33\n");
+		return PTR_ERR(mtk->vusb33);
+	}
+
+	ret = xhci_mtk_clks_get(mtk);
+	if (ret)
+		return ret;
+
+	mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
+	/* optional property, ignore the error if it does not exist */
+	of_property_read_u32(node, "mediatek,u3p-dis-msk",
+			     &mtk->u3p_dis_msk);
+
+	ret = usb_wakeup_of_property_parse(mtk, node);
+	if (ret) {
+		dev_err(dev, "failed to parse uwk property\n");
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+	device_enable_async_suspend(dev);
+
+	ret = xhci_mtk_ldos_enable(mtk);
+	if (ret)
+		goto disable_pm;
+
+	ret = xhci_mtk_clks_enable(mtk);
+	if (ret)
+		goto disable_ldos;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto disable_clk;
+	}
+
+	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret)
+		goto disable_clk;
+
+	hcd = usb_create_hcd(driver, dev, dev_name(dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto disable_clk;
+	}
+
+	/*
+	 * USB 2.0 roothub is stored in the platform_device.
+	 * Swap it with mtk HCD.
+	 */
+	mtk->hcd = platform_get_drvdata(pdev);
+	platform_set_drvdata(pdev, mtk);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
+	hcd->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto put_usb2_hcd;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+	if (res) {	/* ippc register is optional */
+		mtk->ippc_regs = devm_ioremap_resource(dev, res);
+		if (IS_ERR(mtk->ippc_regs)) {
+			ret = PTR_ERR(mtk->ippc_regs);
+			goto put_usb2_hcd;
+		}
+		mtk->has_ippc = true;
+	} else {
+		mtk->has_ippc = false;
+	}
+
+	device_init_wakeup(dev, true);
+
+	xhci = hcd_to_xhci(hcd);
+	xhci->main_hcd = hcd;
+
+	/*
+	 * imod_interval is the interrupt moderation value in nanoseconds.
+	 * On MTK's controller the increment interval is 8 times as large
+	 * as that defined in the xHCI spec.
+	 */
+	xhci->imod_interval = 5000;
+	device_property_read_u32(dev, "imod-interval-ns", &xhci->imod_interval);
+
+	xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
+			dev_name(dev), hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto disable_device_wakeup;
+	}
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto put_usb3_hcd;
+
+	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
+	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto dealloc_usb2_hcd;
+
+	return 0;
+
+dealloc_usb2_hcd:
+	usb_remove_hcd(hcd);
+
+put_usb3_hcd:
+	xhci_mtk_sch_exit(mtk);
+	usb_put_hcd(xhci->shared_hcd);
+
+disable_device_wakeup:
+	device_init_wakeup(dev, false);
+
+put_usb2_hcd:
+	usb_put_hcd(hcd);
+
+disable_clk:
+	xhci_mtk_clks_disable(mtk);
+
+disable_ldos:
+	xhci_mtk_ldos_disable(mtk);
+
+disable_pm:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int xhci_mtk_remove(struct platform_device *dev)
+{
+	struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
+	struct usb_hcd	*hcd = mtk->hcd;
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct usb_hcd  *shared_hcd = xhci->shared_hcd;
+
+	usb_remove_hcd(shared_hcd);
+	xhci->shared_hcd = NULL;
+	device_init_wakeup(&dev->dev, false);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(shared_hcd);
+	usb_put_hcd(hcd);
+	xhci_mtk_sch_exit(mtk);
+	xhci_mtk_clks_disable(mtk);
+	xhci_mtk_ldos_disable(mtk);
+	pm_runtime_put_sync(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
+	return 0;
+}
+
+/*
+ * If ip-sleep fails while all clocks are disabled, a register access will
+ * hang the AHB bus, so stop polling the roothubs to avoid register access
+ * during bus suspend.  There is no need to check whether ip-sleep failed:
+ * if it did, SPM will wake the system up immediately after system suspend
+ * completes, which is exactly what we want.
+ */
+static int __maybe_unused xhci_mtk_suspend(struct device *dev)
+{
+	struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = mtk->hcd;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	xhci_dbg(xhci, "%s: stop port polling\n", __func__);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	del_timer_sync(&hcd->rh_timer);
+	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	del_timer_sync(&xhci->shared_hcd->rh_timer);
+
+	xhci_mtk_host_disable(mtk);
+	xhci_mtk_clks_disable(mtk);
+	usb_wakeup_set(mtk, true);
+	return 0;
+}
+
+static int __maybe_unused xhci_mtk_resume(struct device *dev)
+{
+	struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+	struct usb_hcd *hcd = mtk->hcd;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	usb_wakeup_set(mtk, false);
+	xhci_mtk_clks_enable(mtk);
+	xhci_mtk_host_enable(mtk);
+
+	xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	usb_hcd_poll_rh_status(xhci->shared_hcd);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
+	return 0;
+}
+
+static const struct dev_pm_ops xhci_mtk_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(xhci_mtk_suspend, xhci_mtk_resume)
+};
+#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &xhci_mtk_pm_ops : NULL)
+
+#ifdef CONFIG_OF
+static const struct of_device_id mtk_xhci_of_match[] = {
+	{ .compatible = "mediatek,mt8173-xhci"},
+	{ .compatible = "mediatek,mtk-xhci"},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mtk_xhci_of_match);
+#endif
+
+static struct platform_driver mtk_xhci_driver = {
+	.probe	= xhci_mtk_probe,
+	.remove	= xhci_mtk_remove,
+	.driver	= {
+		.name = "xhci-mtk",
+		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(mtk_xhci_of_match),
+	},
+};
+MODULE_ALIAS("platform:xhci-mtk");
+
+static int __init xhci_mtk_init(void)
+{
+	xhci_init_driver(&xhci_mtk_hc_driver, &xhci_mtk_overrides);
+	return platform_driver_register(&mtk_xhci_driver);
+}
+module_init(xhci_mtk_init);
+
+static void __exit xhci_mtk_exit(void)
+{
+	platform_driver_unregister(&mtk_xhci_driver);
+}
+module_exit(xhci_mtk_exit);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek xHCI Host Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
new file mode 100644
index 0000000..cc59d80
--- /dev/null
+++ b/drivers/usb/host/xhci-mtk.h
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author:
+ *  Zhigang.Wei <zhigang.wei@mediatek.com>
+ *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
+ */
+
+#ifndef _XHCI_MTK_H_
+#define _XHCI_MTK_H_
+
+#include "xhci.h"
+
+/*
+ * To simplify the scheduler algorithm, set an upper limit for the ESIT:
+ * if a periodic ep's ESIT is larger than @XHCI_MTK_MAX_ESIT, round it
+ * down to the limit value, which means allocating more bandwidth to it.
+ * 64 uframes * 125us/uframe gives an 8ms scheduling window.
+ */
+#define XHCI_MTK_MAX_ESIT	64
+
+/**
+ * struct mu3h_sch_bw_info: schedule information for bandwidth domain
+ *
+ * @bus_bw: array to keep track of bandwidth already used at each uframes
+ * @bw_ep_list: eps in the bandwidth domain
+ *
+ * treat a HS root port as a bandwidth domain, but treat a SS root port as
+ * two bandwidth domains, one for IN eps and another for OUT eps.
+ */
+struct mu3h_sch_bw_info {
+	u32 bus_bw[XHCI_MTK_MAX_ESIT];
+	struct list_head bw_ep_list;
+};
+
+/**
+ * struct mu3h_sch_ep_info: schedule information for endpoint
+ *
+ * @esit: unit is 125us, equal to 1 << Interval field in ep-context
+ * @num_budget_microframes: number of consecutive uframes
+ *		(@repeat==1) scheduled within the interval
+ * @bw_cost_per_microframe: bandwidth cost per microframe
+ * @endpoint: linked into the bandwidth domain it belongs to
+ * @ep: address of usb_host_endpoint struct
+ * @offset: the first uframe within the interval at which the transfer
+ *		is scheduled
+ * @repeat: the gap between two scheduled uframes within an interval.
+ *		in the simple algorithm, only 0 or 1 is assigned; 0 means
+ *		using only one uframe in an interval, and 1 means using
+ *		@num_budget_microframes consecutive uframes
+ * @pkts: number of packets to be transferred in the scheduled uframes
+ * @cs_count: number of CS that host will trigger
+ * @burst_mode: burst mode for scheduling. 0: normal burst mode,
+ *		distribute the bMaxBurst+1 packets for a single burst
+ *		according to @pkts and @repeat, repeating the burst multiple
+ *		times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
+ *		according to @pkts and @repeat. normal mode is used by
+ *		default
+ */
+struct mu3h_sch_ep_info {
+	u32 esit;
+	u32 num_budget_microframes;
+	u32 bw_cost_per_microframe;
+	struct list_head endpoint;
+	void *ep;
+	/*
+	 * mtk xHCI scheduling information put into reserved DWs
+	 * in ep context
+	 */
+	u32 offset;
+	u32 repeat;
+	u32 pkts;
+	u32 cs_count;
+	u32 burst_mode;
+};
+
+#define MU3C_U3_PORT_MAX 4
+#define MU3C_U2_PORT_MAX 5
+
+/**
+ * struct mu3c_ippc_regs: MTK ssusb ip port control registers
+ * @ip_pw_ctr0~3: ip power and clock control registers
+ * @ip_pw_sts1~2: ip power and clock status registers
+ * @ip_xhci_cap: ip xHCI capability register
+ * @u3_ctrl_p[x]: ip usb3 port x control register, only low 4 bytes are used
+ * @u2_ctrl_p[x]: ip usb2 port x control register, only low 4 bytes are used
+ * @u2_phy_pll: usb2 phy pll control register
+ */
+struct mu3c_ippc_regs {
+	__le32 ip_pw_ctr0;
+	__le32 ip_pw_ctr1;
+	__le32 ip_pw_ctr2;
+	__le32 ip_pw_ctr3;
+	__le32 ip_pw_sts1;
+	__le32 ip_pw_sts2;
+	__le32 reserved0[3];
+	__le32 ip_xhci_cap;
+	__le32 reserved1[2];
+	__le64 u3_ctrl_p[MU3C_U3_PORT_MAX];
+	__le64 u2_ctrl_p[MU3C_U2_PORT_MAX];
+	__le32 reserved2;
+	__le32 u2_phy_pll;
+	__le32 reserved3[33]; /* 0x80 ~ 0xff */
+};
+
+struct xhci_hcd_mtk {
+	struct device *dev;
+	struct usb_hcd *hcd;
+	struct mu3h_sch_bw_info *sch_array;
+	struct mu3c_ippc_regs __iomem *ippc_regs;
+	bool has_ippc;
+	int num_u2_ports;
+	int num_u3_ports;
+	int u3p_dis_msk;
+	struct regulator *vusb33;
+	struct regulator *vbus;
+	struct clk *sys_clk;	/* sys and mac clock */
+	struct clk *ref_clk;
+	struct clk *mcu_clk;
+	struct clk *dma_clk;
+	struct regmap *pericfg;
+	struct phy **phys;
+	int num_phys;
+	bool lpm_support;
+	/* usb remote wakeup */
+	bool uwk_en;
+	struct regmap *uwk;
+	u32 uwk_reg_base;
+	u32 uwk_vers;
+};
+
+static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
+{
+	return dev_get_drvdata(hcd->self.controller);
+}
+
+#if IS_ENABLED(CONFIG_USB_XHCI_MTK)
+int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk);
+void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk);
+int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep);
+void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep);
+
+#else
+static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+	return 0;
+}
+
+static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+}
+
+#endif
+
+#endif		/* _XHCI_MTK_H_ */
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
new file mode 100644
index 0000000..32e1585
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Marvell
+ * Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "xhci-mvebu.h"
+
+#define USB3_MAX_WINDOWS	4
+#define USB3_WIN_CTRL(w)	(0x0 + ((w) * 8))
+#define USB3_WIN_BASE(w)	(0x4 + ((w) * 8))
+
+static void xhci_mvebu_mbus_config(void __iomem *base,
+			const struct mbus_dram_target_info *dram)
+{
+	int win;
+
+	/* Clear all existing windows */
+	for (win = 0; win < USB3_MAX_WINDOWS; win++) {
+		writel(0, base + USB3_WIN_CTRL(win));
+		writel(0, base + USB3_WIN_BASE(win));
+	}
+
+	/* Program each DRAM CS in a separate window */
+	for (win = 0; win < dram->num_cs; win++) {
+		const struct mbus_dram_window *cs = dram->cs + win;
+
+		writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       base + USB3_WIN_CTRL(win));
+
+		writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win));
+	}
+}
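+
+/*
+ * Example of the window encoding above (hypothetical values): a 256MB CS
+ * at base 0x0 with mbus_attr 0x0d and target id 0x0 yields
+ * WIN_CTRL = (0x0fffffff & 0xffff0000) | (0x0d << 8) | (0x0 << 4) | 1
+ *          = 0x0fff0d01, and WIN_BASE = 0x00000000.
+ */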
+
+int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource	*res;
+	void __iomem *base;
+	const struct mbus_dram_target_info *dram;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -ENODEV;
+
+	/*
+	 * We don't use devm_ioremap() because this mapping should
+	 * only exist for the duration of this probe function.
+	 */
+	base = ioremap(res->start, resource_size(res));
+	if (!base)
+		return -ENODEV;
+
+	dram = mv_mbus_dram_info();
+	xhci_mvebu_mbus_config(base, dram);
+
+	/*
+	 * This memory area was only needed to configure the MBus
+	 * windows, and is therefore no longer useful.
+	 */
+	iounmap(base);
+
+	return 0;
+}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
new file mode 100644
index 0000000..09791df
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ */
+
+#ifndef __LINUX_XHCI_MVEBU_H
+#define __LINUX_XHCI_MVEBU_H
+
+struct usb_hcd;
+
+#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
+int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+#else
+static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
+{
+	return 0;
+}
+#endif
+#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 0000000..09bf6b4
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+
+#define SSIC_PORT_NUM		2
+#define SSIC_PORT_CFG2		0x880c
+#define SSIC_PORT_CFG2_OFFSET	0x30
+#define PROG_DONE		(1 << 30)
+#define SSIC_PORT_UNUSED	(1 << 31)
+
+/* Device for a quirk */
+#define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009	0x1009
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400	0x1400
+
+#define PCI_VENDOR_ID_ETRON		0x1b6f
+#define PCI_DEVICE_ID_EJ168		0x7023
+
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI	0x9cb1
+#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+#define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
+
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+
+static const char hcd_name[] = "xhci_hcd";
+
+static struct hc_driver __read_mostly xhci_pci_hc_driver;
+
+static int xhci_pci_setup(struct usb_hcd *hcd);
+
+static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
+	.reset = xhci_pci_setup,
+};
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
+{
+	/*
+	 * TODO: Implement finding debug ports later.
+	 * TODO: see if there are any quirks that need to be added to handle
+	 * new extended capabilities.
+	 */
+
+	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+	if (!pci_set_mwi(pdev))
+		xhci_dbg(xhci, "MWI active\n");
+
+	xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
+	return 0;
+}
+
+static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	struct pci_dev		*pdev = to_pci_dev(dev);
+
+	/* Look for vendor-specific quirks */
+	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+			(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+			 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
+		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+				pdev->revision == 0x0) {
+			xhci->quirks |= XHCI_RESET_EP_QUIRK;
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic xHC needs configure"
+				" endpoint cmd after reset endpoint");
+		}
+		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+				pdev->revision == 0x4) {
+			xhci->quirks |= XHCI_SLOW_SUSPEND;
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic xHC revision %u"
+				"must be suspended extra slowly",
+				pdev->revision);
+		}
+		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+			xhci->quirks |= XHCI_BROKEN_STREAMS;
+		/* Fresco Logic confirms: all revisions of this chip do not
+		 * support MSI, even though some of them claim to in their PCI
+		 * capabilities.
+		 */
+		xhci->quirks |= XHCI_BROKEN_MSI;
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic revision %u "
+				"has broken MSI implementation",
+				pdev->revision);
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+	}
+
+	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+		xhci->quirks |= XHCI_BROKEN_STREAMS;
+
+	if (pdev->vendor == PCI_VENDOR_ID_NEC)
+		xhci->quirks |= XHCI_NEC_HOST;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+		xhci->quirks |= XHCI_AMD_0x96_HOST;
+
+	/* AMD PLL quirk */
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+		xhci->quirks |= XHCI_AMD_PLL_FIX;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+		(pdev->device == 0x15e0 ||
+		 pdev->device == 0x15e1 ||
+		 pdev->device == 0x43bb))
+		xhci->quirks |= XHCI_SUSPEND_DELAY;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
+	if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+		((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+		xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+		xhci->quirks |= XHCI_LPM_SUPPORT;
+		xhci->quirks |= XHCI_INTEL_HOST;
+		xhci->quirks |= XHCI_AVOID_BEI;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+		xhci->limit_active_eps = 64;
+		xhci->quirks |= XHCI_SW_BW_CHECKING;
+		/*
+		 * PPT desktop boards DH77EB and DH77DF will power back on after
+		 * a few seconds of being shutdown.  The fix for this is to
+		 * switch the ports from xHCI to EHCI on shutdown.  We can't use
+		 * DMI information to find those particular boards (since each
+		 * vendor will change the board name), so we have to key off all
+		 * PPT chipsets.
+		 */
+		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
+		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
+		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
+		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
+		xhci->quirks |= XHCI_MISSING_CAS;
+
+	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+			pdev->device == PCI_DEVICE_ID_EJ168) {
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+		xhci->quirks |= XHCI_BROKEN_STREAMS;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+	    pdev->device == 0x0014) {
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+		xhci->quirks |= XHCI_ZERO_64B_REGS;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+	    pdev->device == 0x0015) {
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
+		xhci->quirks |= XHCI_ZERO_64B_REGS;
+	}
+	if (pdev->vendor == PCI_VENDOR_ID_VIA)
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
+
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
+	if (pdev->vendor == PCI_VENDOR_ID_VIA &&
+			pdev->device == 0x3432)
+		xhci->quirks |= XHCI_BROKEN_STREAMS;
+
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+			pdev->device == 0x1042)
+		xhci->quirks |= XHCI_BROKEN_STREAMS;
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+			pdev->device == 0x1142)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+		xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
+	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
+	if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
+	     pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
+	     pdev->device == 0x9026)
+		xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
+
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Resetting on resume");
+}
+
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+	static const guid_t intel_dsm_guid =
+		GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
+			  0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
+	union acpi_object *obj;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
+				NULL);
+	ACPI_FREE(obj);
+}
+#else
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+	struct xhci_hcd		*xhci;
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	int			retval;
+
+	xhci = hcd_to_xhci(hcd);
+	if (!xhci->sbrn)
+		pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+
+	/* imod_interval is the interrupt moderation value in nanoseconds. */
+	xhci->imod_interval = 40000;
+
+	retval = xhci_gen_setup(hcd, xhci_pci_quirks);
+	if (retval)
+		return retval;
+
+	if (!usb_hcd_is_primary_hcd(hcd))
+		return 0;
+
+	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+	/* Find any debug ports */
+	return xhci_pci_reinit(xhci, pdev);
+}
+
+/*
+ * We need to register our own PCI probe function (instead of the USB core's
+ * function) in order to create a second roothub under xHCI.
+ */
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int retval;
+	struct xhci_hcd *xhci;
+	struct hc_driver *driver;
+	struct usb_hcd *hcd;
+
+	driver = (struct hc_driver *)id->driver_data;
+
+	/* Prevent runtime suspending between USB-2 and USB-3 initialization */
+	pm_runtime_get_noresume(&dev->dev);
+
+	/* Register the USB 2.0 roothub.
+	 * FIXME: USB core must know to register the USB 2.0 roothub first.
+	 * This is sort of silly, because we could just set the HCD driver flags
+	 * to say USB 2.0, but I'm not sure what the implications would be in
+	 * the other parts of the HCD code.
+	 */
+	retval = usb_hcd_pci_probe(dev, id);
+
+	if (retval)
+		goto put_runtime_pm;
+
+	/* USB 2.0 roothub is stored in the PCI device now. */
+	hcd = dev_get_drvdata(&dev->dev);
+	xhci = hcd_to_xhci(hcd);
+	xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
+				pci_name(dev), hcd);
+	if (!xhci->shared_hcd) {
+		retval = -ENOMEM;
+		goto dealloc_usb2_hcd;
+	}
+
+	retval = xhci_ext_cap_init(xhci);
+	if (retval)
+		goto put_usb3_hcd;
+
+	retval = usb_add_hcd(xhci->shared_hcd, dev->irq,
+			IRQF_SHARED);
+	if (retval)
+		goto put_usb3_hcd;
+	/* Roothub already marked as USB 3.0 speed */
+
+	if (!(xhci->quirks & XHCI_BROKEN_STREAMS) &&
+			HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
+	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+		xhci_pme_acpi_rtd3_enable(dev);
+
+	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+	pm_runtime_put_noidle(&dev->dev);
+
+	return 0;
+
+put_usb3_hcd:
+	usb_put_hcd(xhci->shared_hcd);
+dealloc_usb2_hcd:
+	usb_hcd_pci_remove(dev);
+put_runtime_pm:
+	pm_runtime_put_noidle(&dev->dev);
+	return retval;
+}
+
+static void xhci_pci_remove(struct pci_dev *dev)
+{
+	struct xhci_hcd *xhci;
+
+	xhci = hcd_to_xhci(pci_get_drvdata(dev));
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
+	if (xhci->shared_hcd) {
+		usb_remove_hcd(xhci->shared_hcd);
+		usb_put_hcd(xhci->shared_hcd);
+		xhci->shared_hcd = NULL;
+	}
+
+	/* Workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		pci_set_power_state(dev, PCI_D3hot);
+
+	usb_hcd_pci_remove(dev);
+}
+
+#ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working, the SSIC
+ * port needs to be marked as "unused" before putting the xHCI into D3,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c. After
+ * D3 exit, the SSIC port needs to be marked as "used" again. Without
+ * this change, the xHCI might not enter the D3 state.
+ */
+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	u32 val;
+	void __iomem *reg;
+	int i;
+
+	for (i = 0; i < SSIC_PORT_NUM; i++) {
+		reg = (void __iomem *) xhci->cap_regs +
+				SSIC_PORT_CFG2 +
+				i * SSIC_PORT_CFG2_OFFSET;
+
+		/* Notify SSIC that SSIC profile programming is not done. */
+		val = readl(reg) & ~PROG_DONE;
+		writel(val, reg);
+
+		/* Mark SSIC port as unused(suspend) or used(resume) */
+		val = readl(reg);
+		if (suspend)
+			val |= SSIC_PORT_UNUSED;
+		else
+			val &= ~SSIC_PORT_UNUSED;
+		writel(val, reg);
+
+		/* Notify SSIC that SSIC profile programming is done */
+		val = readl(reg) | PROG_DONE;
+		writel(val, reg);
+		readl(reg);
+	}
+}
+
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	void __iomem *reg;
+	u32 val;
+
+	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+	val = readl(reg);
+	writel(val | BIT(28), reg);
+	readl(reg);
+}
+
+static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	int			ret;
+
+	/*
+	 * Systems with the TI redriver that loses port status change events
+	 * need to have the registers polled during D3, so avoid D3cold.
+	 */
+	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+		pci_d3cold_disable(pdev);
+
+	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+		xhci_pme_quirk(hcd);
+
+	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+		xhci_ssic_port_unused_quirk(hcd, true);
+
+	ret = xhci_suspend(xhci, do_wakeup);
+	if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+		xhci_ssic_port_unused_quirk(hcd, false);
+
+	return ret;
+}
+
+static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	int			retval = 0;
+
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively.  That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * It should not matter whether the EHCI or xHCI controller is
+	 * resumed first. It's enough to do the switchover in xHCI because
+	 * USB core won't notice anything as the hub driver doesn't start
+	 * running again until after all the devices (including both EHCI and
+	 * xHCI host controllers) have been resumed.
+	 */
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+		usb_enable_intel_xhci_ports(pdev);
+
+	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+		xhci_ssic_port_unused_quirk(hcd, false);
+
+	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+		xhci_pme_quirk(hcd);
+
+	retval = xhci_resume(xhci, hibernated);
+	return retval;
+}
+#endif /* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+	/* handle any USB 3.0 xHCI controller */
+	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
+	.driver_data =	(unsigned long) &xhci_pci_hc_driver,
+	},
+	{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver xhci_pci_driver = {
+	.name =		(char *) hcd_name,
+	.id_table =	pci_ids,
+
+	.probe =	xhci_pci_probe,
+	.remove =	xhci_pci_remove,
+	/* suspend and resume implemented later */
+
+	.shutdown =	usb_hcd_pci_shutdown,
+#ifdef CONFIG_PM
+	.driver = {
+		.pm = &usb_hcd_pci_pm_ops
+	},
+#endif
+};
+
+static int __init xhci_pci_init(void)
+{
+	xhci_init_driver(&xhci_pci_hc_driver, &xhci_pci_overrides);
+#ifdef CONFIG_PM
+	xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend;
+	xhci_pci_hc_driver.pci_resume = xhci_pci_resume;
+#endif
+	return pci_register_driver(&xhci_pci_driver);
+}
+module_init(xhci_pci_init);
+
+static void __exit xhci_pci_exit(void)
+{
+	pci_unregister_driver(&xhci_pci_driver);
+}
+module_exit(xhci_pci_exit);
+
+MODULE_DESCRIPTION("xHCI PCI Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
new file mode 100644
index 0000000..e5da8ce
--- /dev/null
+++ b/drivers/usb/host/xhci-plat.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-plat.c - xHCI host controller driver platform Bus Glue.
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * A lot of code borrowed from the Linux xHCI driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/usb/phy.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+
+#include "xhci.h"
+#include "xhci-plat.h"
+#include "xhci-mvebu.h"
+#include "xhci-rcar.h"
+
+static struct hc_driver __read_mostly xhci_plat_hc_driver;
+
+static int xhci_plat_setup(struct usb_hcd *hcd);
+static int xhci_plat_start(struct usb_hcd *hcd);
+
+static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
+	.extra_priv_size = sizeof(struct xhci_plat_priv),
+	.reset = xhci_plat_setup,
+	.start = xhci_plat_start,
+};
+
+static void xhci_priv_plat_start(struct usb_hcd *hcd)
+{
+	struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+	if (priv->plat_start)
+		priv->plat_start(hcd);
+}
+
+static int xhci_priv_init_quirk(struct usb_hcd *hcd)
+{
+	struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+	if (!priv->init_quirk)
+		return 0;
+
+	return priv->init_quirk(hcd);
+}
+
+static int xhci_priv_resume_quirk(struct usb_hcd *hcd)
+{
+	struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+	if (!priv->resume_quirk)
+		return 0;
+
+	return priv->resume_quirk(hcd);
+}
+
+static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	/*
+	 * As of now, platform drivers don't provide MSI support, so we
+	 * ensure here that the generic code does not try to make a pci_dev
+	 * from our dev struct in order to set up MSI.
+	 */
+	xhci->quirks |= XHCI_PLAT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_plat_setup(struct usb_hcd *hcd)
+{
+	int ret;
+
+	ret = xhci_priv_init_quirk(hcd);
+	if (ret)
+		return ret;
+
+	return xhci_gen_setup(hcd, xhci_plat_quirks);
+}
+
+static int xhci_plat_start(struct usb_hcd *hcd)
+{
+	xhci_priv_plat_start(hcd);
+	return xhci_run(hcd);
+}
+
+#ifdef CONFIG_OF
+static const struct xhci_plat_priv xhci_plat_marvell_armada = {
+	.init_quirk = xhci_mvebu_mbus_init_quirk,
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
+	.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1,
+	.init_quirk = xhci_rcar_init_quirk,
+	.plat_start = xhci_rcar_start,
+	.resume_quirk = xhci_rcar_resume_quirk,
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
+	.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
+	.init_quirk = xhci_rcar_init_quirk,
+	.plat_start = xhci_rcar_start,
+	.resume_quirk = xhci_rcar_resume_quirk,
+};
+
+static const struct of_device_id usb_xhci_of_match[] = {
+	{
+		.compatible = "generic-xhci",
+	}, {
+		.compatible = "xhci-platform",
+	}, {
+		.compatible = "marvell,armada-375-xhci",
+		.data = &xhci_plat_marvell_armada,
+	}, {
+		.compatible = "marvell,armada-380-xhci",
+		.data = &xhci_plat_marvell_armada,
+	}, {
+		.compatible = "renesas,xhci-r8a7790",
+		.data = &xhci_plat_renesas_rcar_gen2,
+	}, {
+		.compatible = "renesas,xhci-r8a7791",
+		.data = &xhci_plat_renesas_rcar_gen2,
+	}, {
+		.compatible = "renesas,xhci-r8a7793",
+		.data = &xhci_plat_renesas_rcar_gen2,
+	}, {
+		.compatible = "renesas,xhci-r8a7795",
+		.data = &xhci_plat_renesas_rcar_gen3,
+	}, {
+		.compatible = "renesas,xhci-r8a7796",
+		.data = &xhci_plat_renesas_rcar_gen3,
+	}, {
+		.compatible = "renesas,rcar-gen2-xhci",
+		.data = &xhci_plat_renesas_rcar_gen2,
+	}, {
+		.compatible = "renesas,rcar-gen3-xhci",
+		.data = &xhci_plat_renesas_rcar_gen3,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
+static int xhci_plat_probe(struct platform_device *pdev)
+{
+	const struct xhci_plat_priv *priv_match;
+	const struct hc_driver	*driver;
+	struct device		*sysdev, *tmpdev;
+	struct xhci_hcd		*xhci;
+	struct resource         *res;
+	struct usb_hcd		*hcd;
+	struct clk              *clk;
+	struct clk              *reg_clk;
+	int			ret;
+	int			irq;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	driver = &xhci_plat_hc_driver;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	/*
+	 * sysdev must point to a device that is known to the system firmware
+	 * or PCI hardware. We handle these three cases here:
+	 * 1. xhci_plat comes from firmware
+	 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+	 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+	 */
+	for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
+		if (is_of_node(sysdev->fwnode) ||
+			is_acpi_device_node(sysdev->fwnode))
+			break;
+#ifdef CONFIG_PCI
+		else if (sysdev->bus == &pci_bus_type)
+			break;
+#endif
+	}
+
+	if (!sysdev)
+		sysdev = &pdev->dev;
+
+	/* Try to set 64-bit DMA first */
+	if (WARN_ON(!sysdev->dma_mask))
+		/* Platform did not initialize dma_mask */
+		ret = dma_coerce_mask_and_coherent(sysdev,
+						   DMA_BIT_MASK(64));
+	else
+		ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+
+	/* If setting 64-bit DMA mask fails, fall back to 32-bit DMA mask */
+	if (ret) {
+		ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32));
+		if (ret)
+			return ret;
+	}
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+			       dev_name(&pdev->dev), NULL);
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto disable_runtime;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hcd->regs)) {
+		ret = PTR_ERR(hcd->regs);
+		goto put_hcd;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	/*
+	 * Not all platforms have clocks, so it is not an error if a
+	 * clock does not exist.
+	 */
+	reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (!IS_ERR(reg_clk)) {
+		ret = clk_prepare_enable(reg_clk);
+		if (ret)
+			goto put_hcd;
+	} else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto put_hcd;
+	}
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			goto disable_reg_clk;
+	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto disable_reg_clk;
+	}
+
+	xhci = hcd_to_xhci(hcd);
+	priv_match = of_device_get_match_data(&pdev->dev);
+	if (priv_match) {
+		struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+		/* Just copy data for now */
+		*priv = *priv_match;
+	}
+
+	device_wakeup_enable(hcd->self.controller);
+
+	xhci->clk = clk;
+	xhci->reg_clk = reg_clk;
+	xhci->main_hcd = hcd;
+	xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+			dev_name(&pdev->dev), hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto disable_clk;
+	}
+
+	/* imod_interval is the interrupt moderation value in nanoseconds. */
+	xhci->imod_interval = 40000;
+
+	/* Iterate over all parent nodes for finding quirks */
+	for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
+
+		if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
+			xhci->quirks |= XHCI_HW_LPM_DISABLE;
+
+		if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
+			xhci->quirks |= XHCI_LPM_SUPPORT;
+
+		if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+			xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+		device_property_read_u32(tmpdev, "imod-interval-ns",
+					 &xhci->imod_interval);
+	}
+
+	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+	if (IS_ERR(hcd->usb_phy)) {
+		ret = PTR_ERR(hcd->usb_phy);
+		if (ret == -EPROBE_DEFER)
+			goto put_usb3_hcd;
+		hcd->usb_phy = NULL;
+	} else {
+		ret = usb_phy_init(hcd->usb_phy);
+		if (ret)
+			goto put_usb3_hcd;
+		hcd->skip_phy_initialization = 1;
+	}
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto disable_usb_phy;
+
+	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
+	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+	if (ret)
+		goto dealloc_usb2_hcd;
+
+	device_enable_async_suspend(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+
+	/*
+	 * Prevent runtime pm from being on as default, users should enable
+	 * runtime pm using power/control in sysfs.
+	 */
+	pm_runtime_forbid(&pdev->dev);
+
+	return 0;
+
+dealloc_usb2_hcd:
+	usb_remove_hcd(hcd);
+
+disable_usb_phy:
+	usb_phy_shutdown(hcd->usb_phy);
+
+put_usb3_hcd:
+	usb_put_hcd(xhci->shared_hcd);
+
+disable_clk:
+	clk_disable_unprepare(clk);
+
+disable_reg_clk:
+	clk_disable_unprepare(reg_clk);
+
+put_hcd:
+	usb_put_hcd(hcd);
+
+disable_runtime:
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return ret;
+}
+
+static int xhci_plat_remove(struct platform_device *dev)
+{
+	struct usb_hcd	*hcd = platform_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct clk *clk = xhci->clk;
+	struct clk *reg_clk = xhci->reg_clk;
+	struct usb_hcd *shared_hcd = xhci->shared_hcd;
+
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+	usb_remove_hcd(shared_hcd);
+	xhci->shared_hcd = NULL;
+	usb_phy_shutdown(hcd->usb_phy);
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(shared_hcd);
+
+	clk_disable_unprepare(clk);
+	clk_disable_unprepare(reg_clk);
+	usb_put_hcd(hcd);
+
+	pm_runtime_set_suspended(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
+	return 0;
+}
+
+static int __maybe_unused xhci_plat_suspend(struct device *dev)
+{
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+
+	/*
+	 * xhci_suspend() needs `do_wakeup` to know whether the host is
+	 * allowed to do wakeup during suspend. Since xhci_plat_suspend is
+	 * currently only designed for system suspend, device_may_wakeup()
+	 * is enough to determine whether the host is allowed to do wakeup.
+	 * This needs to be reconsidered if the scope of xhci_plat_suspend
+	 * is ever enlarged, e.g. to also cover runtime suspend.
+	 */
+	return xhci_suspend(xhci, device_may_wakeup(dev));
+}
+
+static int __maybe_unused xhci_plat_resume(struct device *dev)
+{
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	ret = xhci_priv_resume_quirk(hcd);
+	if (ret)
+		return ret;
+
+	return xhci_resume(xhci, 0);
+}
+
+static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+{
+	struct usb_hcd  *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci_suspend(xhci, true);
+}
+
+static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
+{
+	struct usb_hcd  *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci_resume(xhci, 0);
+}
+
+static const struct dev_pm_ops xhci_plat_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+
+	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
+			   xhci_plat_runtime_resume,
+			   NULL)
+};
+
+static const struct acpi_device_id usb_xhci_acpi_match[] = {
+	/* XHCI-compliant USB Controller */
+	{ "PNP0D10", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
+
+static struct platform_driver usb_xhci_driver = {
+	.probe	= xhci_plat_probe,
+	.remove	= xhci_plat_remove,
+	.driver	= {
+		.name = "xhci-hcd",
+		.pm = &xhci_plat_pm_ops,
+		.of_match_table = of_match_ptr(usb_xhci_of_match),
+		.acpi_match_table = ACPI_PTR(usb_xhci_acpi_match),
+	},
+};
+MODULE_ALIAS("platform:xhci-hcd");
+
+static int __init xhci_plat_init(void)
+{
+	xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
+	return platform_driver_register(&usb_xhci_driver);
+}
+module_init(xhci_plat_init);
+
+static void __exit xhci_plat_exit(void)
+{
+	platform_driver_unregister(&usb_xhci_driver);
+}
+module_exit(xhci_plat_exit);
+
+MODULE_DESCRIPTION("xHCI Platform Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
new file mode 100644
index 0000000..ae29f22
--- /dev/null
+++ b/drivers/usb/host/xhci-plat.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xhci-plat.h - xHCI host controller driver platform Bus Glue.
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ */
+
+#ifndef _XHCI_PLAT_H
+#define _XHCI_PLAT_H
+
+#include "xhci.h"	/* for hcd_to_xhci() */
+
+struct xhci_plat_priv {
+	const char *firmware_name;
+	void (*plat_start)(struct usb_hcd *);
+	int (*init_quirk)(struct usb_hcd *);
+	int (*resume_quirk)(struct usb_hcd *);
+};
+
+#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
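+
+/*
+ * Note: the storage behind xhci->priv is reserved through the
+ * .extra_priv_size member of xhci_plat_overrides in xhci-plat.c, so the
+ * cast above is only valid for HCDs created through the platform glue.
+ */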
+#endif	/* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
new file mode 100644
index 0000000..a6e4637
--- /dev/null
+++ b/drivers/usb/host/xhci-rcar.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver for R-Car SoCs
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/usb/phy.h>
+#include <linux/sys_soc.h>
+
+#include "xhci.h"
+#include "xhci-plat.h"
+#include "xhci-rcar.h"
+
+/*
+ * - The V3 firmware is for almost all R-Car Gen3 SoCs (except r8a7795 ES1.x).
+ * - The V2 firmware is for r8a7795 ES1.x.
+ * - The V2 firmware can also be used on R-Car Gen2, but it causes
+ *   performance degradation there, so this driver keeps using the V1
+ *   firmware on R-Car Gen2.
+ * - The V1 firmware cannot be used on R-Car Gen3.
+ */
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1);
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2);
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
+
+/*** Register Offset ***/
+#define RCAR_USB3_AXH_STA	0x104	/* AXI Host Control Status */
+#define RCAR_USB3_INT_ENA	0x224	/* Interrupt Enable */
+#define RCAR_USB3_DL_CTRL	0x250	/* FW Download Control & Status */
+#define RCAR_USB3_FW_DATA0	0x258	/* FW Data0 */
+
+#define RCAR_USB3_LCLK		0xa44	/* LCLK Select */
+#define RCAR_USB3_CONF1		0xa48	/* USB3.0 Configuration1 */
+#define RCAR_USB3_CONF2		0xa5c	/* USB3.0 Configuration2 */
+#define RCAR_USB3_CONF3		0xaa8	/* USB3.0 Configuration3 */
+#define RCAR_USB3_RX_POL	0xab0	/* USB3.0 RX Polarity */
+#define RCAR_USB3_TX_POL	0xab8	/* USB3.0 TX Polarity */
+
+/*** Register Settings ***/
+/* AXI Host Control Status */
+#define RCAR_USB3_AXH_STA_B3_PLL_ACTIVE		0x00010000
+#define RCAR_USB3_AXH_STA_B2_PLL_ACTIVE		0x00000001
+#define RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK (RCAR_USB3_AXH_STA_B3_PLL_ACTIVE | \
+					   RCAR_USB3_AXH_STA_B2_PLL_ACTIVE)
+
+/* Interrupt Enable */
+#define RCAR_USB3_INT_XHC_ENA	0x00000001
+#define RCAR_USB3_INT_PME_ENA	0x00000002
+#define RCAR_USB3_INT_HSE_ENA	0x00000004
+#define RCAR_USB3_INT_ENA_VAL	(RCAR_USB3_INT_XHC_ENA | \
+				RCAR_USB3_INT_PME_ENA | RCAR_USB3_INT_HSE_ENA)
+
+/* FW Download Control & Status */
+#define RCAR_USB3_DL_CTRL_ENABLE	0x00000001
+#define RCAR_USB3_DL_CTRL_FW_SUCCESS	0x00000010
+#define RCAR_USB3_DL_CTRL_FW_SET_DATA0	0x00000100
+
+/* LCLK Select */
+#define RCAR_USB3_LCLK_ENA_VAL	0x01030001
+
+/* USB3.0 Configuration */
+#define RCAR_USB3_CONF1_VAL	0x00030204
+#define RCAR_USB3_CONF2_VAL	0x00030300
+#define RCAR_USB3_CONF3_VAL	0x13802007
+
+/* USB3.0 Polarity */
+#define RCAR_USB3_RX_POL_VAL	BIT(21)
+#define RCAR_USB3_TX_POL_VAL	BIT(4)
+
+/* For soc_device_attribute */
+#define RCAR_XHCI_FIRMWARE_V2   BIT(0) /* FIRMWARE V2 */
+#define RCAR_XHCI_FIRMWARE_V3   BIT(1) /* FIRMWARE V3 */
+
+static const struct soc_device_attribute rcar_quirks_match[]  = {
+	{
+		.soc_id = "r8a7795", .revision = "ES1.*",
+		.data = (void *)RCAR_XHCI_FIRMWARE_V2,
+	},
+	{ /* sentinel */ },
+};
+
+static void xhci_rcar_start_gen2(struct usb_hcd *hcd)
+{
+	/* LCLK Select */
+	writel(RCAR_USB3_LCLK_ENA_VAL, hcd->regs + RCAR_USB3_LCLK);
+	/* USB3.0 Configuration */
+	writel(RCAR_USB3_CONF1_VAL, hcd->regs + RCAR_USB3_CONF1);
+	writel(RCAR_USB3_CONF2_VAL, hcd->regs + RCAR_USB3_CONF2);
+	writel(RCAR_USB3_CONF3_VAL, hcd->regs + RCAR_USB3_CONF3);
+	/* USB3.0 Polarity */
+	writel(RCAR_USB3_RX_POL_VAL, hcd->regs + RCAR_USB3_RX_POL);
+	writel(RCAR_USB3_TX_POL_VAL, hcd->regs + RCAR_USB3_TX_POL);
+}
+
+static int xhci_rcar_is_gen2(struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+
+	return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
+		of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
+		of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
+		of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+}
+
+static int xhci_rcar_is_gen3(struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+
+	return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
+		of_device_is_compatible(node, "renesas,xhci-r8a7796") ||
+		of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
+}
+
+void xhci_rcar_start(struct usb_hcd *hcd)
+{
+	u32 temp;
+
+	if (hcd->regs != NULL) {
+		/* Interrupt Enable */
+		temp = readl(hcd->regs + RCAR_USB3_INT_ENA);
+		temp |= RCAR_USB3_INT_ENA_VAL;
+		writel(temp, hcd->regs + RCAR_USB3_INT_ENA);
+		if (xhci_rcar_is_gen2(hcd->self.controller))
+			xhci_rcar_start_gen2(hcd);
+	}
+}
+
+static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	void __iomem *regs = hcd->regs;
+	struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+	const struct firmware *fw;
+	int retval, index, j, time;
+	int timeout = 10000;
+	u32 data, val, temp;
+	u32 quirks = 0;
+	const struct soc_device_attribute *attr;
+	const char *firmware_name;
+
+	attr = soc_device_match(rcar_quirks_match);
+	if (attr)
+		quirks = (uintptr_t)attr->data;
+
+	if (quirks & RCAR_XHCI_FIRMWARE_V2)
+		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
+	else if (quirks & RCAR_XHCI_FIRMWARE_V3)
+		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
+	else
+		firmware_name = priv->firmware_name;
+
+	/* request R-Car USB3.0 firmware */
+	retval = request_firmware(&fw, firmware_name, dev);
+	if (retval)
+		return retval;
+
+	/* download R-Car USB3.0 firmware */
+	temp = readl(regs + RCAR_USB3_DL_CTRL);
+	temp |= RCAR_USB3_DL_CTRL_ENABLE;
+	writel(temp, regs + RCAR_USB3_DL_CTRL);
+
+	for (index = 0; index < fw->size; index += 4) {
+		/* to avoid reading beyond the end of the buffer */
+		for (data = 0, j = 3; j >= 0; j--) {
+			if ((j + index) < fw->size)
+				data |= fw->data[index + j] << (8 * j);
+		}
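+		/*
+		 * Illustrative example: with fw->data[index..index + 3] =
+		 * {0xAA, 0xBB, 0xCC, 0xDD}, the loop above assembles
+		 * data = 0xDDCCBBAA, i.e. the firmware bytes are packed
+		 * little-endian into the 32-bit FW_DATA0 word.
+		 */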
+		writel(data, regs + RCAR_USB3_FW_DATA0);
+		temp = readl(regs + RCAR_USB3_DL_CTRL);
+		temp |= RCAR_USB3_DL_CTRL_FW_SET_DATA0;
+		writel(temp, regs + RCAR_USB3_DL_CTRL);
+
+		for (time = 0; time < timeout; time++) {
+			val = readl(regs + RCAR_USB3_DL_CTRL);
+			if ((val & RCAR_USB3_DL_CTRL_FW_SET_DATA0) == 0)
+				break;
+			udelay(1);
+		}
+		if (time == timeout) {
+			retval = -ETIMEDOUT;
+			break;
+		}
+	}
+
+	temp = readl(regs + RCAR_USB3_DL_CTRL);
+	temp &= ~RCAR_USB3_DL_CTRL_ENABLE;
+	writel(temp, regs + RCAR_USB3_DL_CTRL);
+
+	for (time = 0; time < timeout; time++) {
+		val = readl(regs + RCAR_USB3_DL_CTRL);
+		if (val & RCAR_USB3_DL_CTRL_FW_SUCCESS) {
+			retval = 0;
+			break;
+		}
+		udelay(1);
+	}
+	if (time == timeout)
+		retval = -ETIMEDOUT;
+
+	release_firmware(fw);
+
+	return retval;
+}
+
+static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
+{
+	int timeout = 1000;
+	u32 val, mask = RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK;
+
+	while (timeout > 0) {
+		val = readl(hcd->regs + RCAR_USB3_AXH_STA);
+		if ((val & mask) == mask)
+			return true;
+		udelay(1);
+		timeout--;
+	}
+
+	return false;
+}
+
+/* The USB PHY must be initialized before this function is called */
+int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	/* If hcd->regs is NULL, don't call any of the functions below */
+	if (!hcd->regs)
+		return 0;
+
+	/*
+	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+	 * to 1. However, these SoCs don't support 64-bit address memory
+	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+	 * xhci_gen_setup().
+	 */
+	if (xhci_rcar_is_gen2(hcd->self.controller) ||
+			xhci_rcar_is_gen3(hcd->self.controller))
+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+
+	if (!xhci_rcar_wait_for_pll_active(hcd))
+		return -ETIMEDOUT;
+
+	return xhci_rcar_download_firmware(hcd);
+}
+
+int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+{
+	int ret;
+
+	ret = xhci_rcar_download_firmware(hcd);
+	if (!ret)
+		xhci_rcar_start(hcd);
+
+	return ret;
+}
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
new file mode 100644
index 0000000..804b6ab
--- /dev/null
+++ b/drivers/usb/host/xhci-rcar.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/usb/host/xhci-rcar.h
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ */
+
+#ifndef _XHCI_RCAR_H
+#define _XHCI_RCAR_H
+
+#define XHCI_RCAR_FIRMWARE_NAME_V1	"r8a779x_usb3_v1.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V2	"r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3	"r8a779x_usb3_v3.dlmem"
+
+#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
+void xhci_rcar_start(struct usb_hcd *hcd);
+int xhci_rcar_init_quirk(struct usb_hcd *hcd);
+int xhci_rcar_resume_quirk(struct usb_hcd *hcd);
+#else
+static inline void xhci_rcar_start(struct usb_hcd *hcd)
+{
+}
+
+static inline int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+{
+	return 0;
+}
+#endif
+#endif /* _XHCI_RCAR_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 0000000..9ae17a6
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,4046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
+ *    least one free TRB in the ring.  This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB.  If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules).  You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
+ *    and endpoint rings.  If HC is the producer for the event ring,
+ *    it generates an interrupt according to interrupt moderation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer.  SW is the consumer for the event ring, and it
+ *    updates the event ring dequeue pointer.  HC is the consumer for the
+ *    command and endpoint rings; it generates events on the event ring
+ *    for these.
+ */
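+
+/*
+ * Illustrative sketch (not part of the driver): how a consumer applies the
+ * cycle bit rules above to decide whether it owns a TRB. The helper name
+ * is hypothetical; the real event-ring handling appears later in this
+ * file.
+ *
+ *	static bool consumer_owns_trb(struct xhci_ring *ring,
+ *				      union xhci_trb *trb)
+ *	{
+ *		u32 cycle = le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE;
+ *
+ *		return cycle == ring->cycle_state;
+ *	}
+ */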
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-mtk.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+		union xhci_trb *trb)
+{
+	unsigned long segment_offset;
+
+	if (!seg || !trb || trb < seg->trbs)
+		return 0;
+	/* offset in TRBs */
+	segment_offset = trb - seg->trbs;
+	if (segment_offset >= TRBS_PER_SEGMENT)
+		return 0;
+	return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+static bool trb_is_noop(union xhci_trb *trb)
+{
+	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool trb_is_link(union xhci_trb *trb)
+{
+	return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
+{
+	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+static bool last_trb_on_ring(struct xhci_ring *ring,
+			struct xhci_segment *seg, union xhci_trb *trb)
+{
+	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool link_trb_toggles_cycle(union xhci_trb *trb)
+{
+	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static bool last_td_in_urb(struct xhci_td *td)
+{
+	struct urb_priv *urb_priv = td->urb->hcpriv;
+
+	return urb_priv->num_tds_done == urb_priv->num_tds;
+}
+
+static void inc_td_cnt(struct urb *urb)
+{
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	urb_priv->num_tds_done++;
+}
+
+static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
+{
+	if (trb_is_link(trb)) {
+		/* unchain chained link TRBs */
+		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+	} else {
+		trb->generic.field[0] = 0;
+		trb->generic.field[1] = 0;
+		trb->generic.field[2] = 0;
+		/* Preserve only the cycle bit of this TRB */
+		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+	}
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment.  This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+		struct xhci_ring *ring,
+		struct xhci_segment **seg,
+		union xhci_trb **trb)
+{
+	if (trb_is_link(*trb)) {
+		*seg = (*seg)->next;
+		*trb = ((*seg)->trbs);
+	} else {
+		(*trb)++;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ */
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	/* event ring doesn't have link trbs, check for last trb */
+	if (ring->type == TYPE_EVENT) {
+		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+			ring->dequeue++;
+			goto out;
+		}
+		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+			ring->cycle_state ^= 1;
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		goto out;
+	}
+
+	/* All other rings have link trbs */
+	if (!trb_is_link(ring->dequeue)) {
+		ring->dequeue++;
+		ring->num_trbs_free++;
+	}
+	while (trb_is_link(ring->dequeue)) {
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+	}
+
+out:
+	trace_xhci_inc_deq(ring);
+
+	return;
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.  This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
+			bool more_trbs_coming)
+{
+	u32 chain;
+	union xhci_trb *next;
+
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+	/* If this is not event ring, there is one less usable TRB */
+	if (!trb_is_link(ring->enqueue))
+		ring->num_trbs_free--;
+	next = ++(ring->enqueue);
+
+	/* Update the enqueue pointer further if that was a link TRB */
+	while (trb_is_link(next)) {
+
+		/*
+		 * If the caller doesn't plan on enqueueing more TDs before
+		 * ringing the doorbell, then we don't want to give the link TRB
+		 * to the hardware just yet. We'll give the link TRB back in
+		 * prepare_ring() just before we enqueue the TD at the top of
+		 * the ring.
+		 */
+		if (!chain && !more_trbs_coming)
+			break;
+
+		/* If we're not dealing with 0.95 hardware or isoc rings on
+		 * AMD 0.96 host, carry over the chain bit of the previous TRB
+		 * (which may mean the chain bit is cleared).
+		 */
+		if (!(ring->type == TYPE_ISOC &&
+		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
+		    !xhci_link_trb_quirk(xhci)) {
+			next->link.control &= cpu_to_le32(~TRB_CHAIN);
+			next->link.control |= cpu_to_le32(chain);
+		}
+		/* Give this link TRB to the hardware */
+		wmb();
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (link_trb_toggles_cycle(next))
+			ring->cycle_state ^= 1;
+
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+
+	trace_xhci_inc_enq(ring);
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment. See rules above.
+ */
+static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		unsigned int num_trbs)
+{
+	int num_trbs_in_deq_seg;
+
+	if (ring->num_trbs_free < num_trbs)
+		return 0;
+
+	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+			return 0;
+	}
+
+	return 1;
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+		return;
+
+	xhci_dbg(xhci, "// Ding dong!\n");
+	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+	/* Flush PCI posted writes */
+	readl(&xhci->dba->doorbell[0]);
+}
+
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
+
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+{
+	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
+					cmd_list);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+					 struct xhci_command *cur_cmd)
+{
+	struct xhci_command *i_cmd;
+
+	/* Turn all aborted commands in list to no-ops, then restart */
+	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
+
+		if (i_cmd->status != COMP_COMMAND_ABORTED)
+			continue;
+
+		i_cmd->status = COMP_COMMAND_RING_STOPPED;
+
+		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+			 i_cmd->command_trb);
+
+		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
+
+		/*
+		 * The caller waiting for completion is notified when the
+		 * command completion event is received for these no-op
+		 * commands.
+		 */
+	}
+
+	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+	/* ring command ring doorbell to restart the command ring */
+	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+		xhci->current_cmd = cur_cmd;
+		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+		xhci_ring_cmd_db(xhci);
+	}
+}
+
+/* Must be called with xhci->lock held; releases and re-acquires the lock */
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
+{
+	u64 temp_64;
+	int ret;
+
+	xhci_dbg(xhci, "Abort command ring\n");
+
+	reinit_completion(&xhci->cmd_ring_stop_completion);
+
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			&xhci->op_regs->cmd_ring);
+
+	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
+	 * completion of the Command Abort operation. If CRR is not negated in 5
+	 * seconds then driver handles it as if host died (-ENODEV).
+	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
+	 * and try to recover a -ETIMEDOUT with a host controller reset.
+	 */
+	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+	if (ret < 0) {
+		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
+		xhci_halt(xhci);
+		xhci_hc_died(xhci);
+		return ret;
+	}
+	/*
+	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+	 * but the completion event is never sent. Wait 2 secs (arbitrary
+	 * number) to handle those cases after negation of CMD_RING_RUNNING.
+	 */
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
+					  msecs_to_jiffies(2000));
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!ret) {
+		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
+		xhci_cleanup_command_queue(xhci);
+	} else {
+		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
+	}
+	return 0;
+}
+
+void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
+		unsigned int slot_id,
+		unsigned int ep_index,
+		unsigned int stream_id)
+{
+	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	unsigned int ep_state = ep->ep_state;
+
+	/* Don't ring the doorbell for this endpoint if there are pending
+	 * cancellations because we don't want to interrupt processing.
+	 * We don't want to restart any stream rings if there's a set dequeue
+	 * pointer command pending because the device can choose to start any
+	 * stream once the endpoint is on the HW schedule.
+	 */
+	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+	    (ep_state & EP_HALTED))
+		return;
+	writel(DB_VALUE(ep_index, stream_id), db_addr);
+	/* The CPU has better things to do at this point than wait for a
+	 * write-posting flush.  It'll get there soon enough.
+	 */
+}
+
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+		unsigned int slot_id,
+		unsigned int ep_index)
+{
+	unsigned int stream_id;
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+
+	/* A ring has pending URBs if its TD list is not empty */
+	if (!(ep->ep_state & EP_HAS_STREAMS)) {
+		if (ep->ring && !(list_empty(&ep->ring->td_list)))
+			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+		return;
+	}
+
+	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+			stream_id++) {
+		struct xhci_stream_info *stream_info = ep->stream_info;
+		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
+						stream_id);
+	}
+}
+
+/* Get the right ring for the given slot_id, ep_index and stream_id.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id)
+{
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	/* Common case: no streams */
+	if (!(ep->ep_state & EP_HAS_STREAMS))
+		return ep->ring;
+
+	if (stream_id == 0) {
+		xhci_warn(xhci,
+				"WARN: Slot ID %u, ep index %u has streams, "
+				"but URB has no stream ID.\n",
+				slot_id, ep_index);
+		return NULL;
+	}
+
+	if (stream_id < ep->stream_info->num_streams)
+		return ep->stream_info->stream_rings[stream_id];
+
+	xhci_warn(xhci,
+			"WARN: Slot ID %u, ep index %u has "
+			"stream IDs 1 to %u allocated, "
+			"but stream ID %u is requested.\n",
+			slot_id, ep_index,
+			ep->stream_info->num_streams - 1,
+			stream_id);
+	return NULL;
+}
+
+/*
+ * Get the hw dequeue pointer xHC stopped on, either directly from the
+ * endpoint context, or, if streams are in use, from the stream context.
+ * The lowest four bits of the returned hw_dequeue contain the cycle state
+ * and possibly the stream context type.
+ */
+static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
+			   unsigned int ep_index, unsigned int stream_id)
+{
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_stream_ctx *st_ctx;
+	struct xhci_virt_ep *ep;
+
+	ep = &vdev->eps[ep_index];
+
+	if (ep->ep_state & EP_HAS_STREAMS) {
+		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+		return le64_to_cpu(st_ctx->stream_ring);
+	}
+	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+	return le64_to_cpu(ep_ctx->deq);
+}
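+
+/*
+ * For reference, a sketch (illustrative names only) of how the callers
+ * below split the value returned by xhci_get_hw_deq():
+ *
+ *	u64 hw_dequeue = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
+ *	u32 cycle = hw_dequeue & 0x1;        (consumer cycle state)
+ *	u64 deq_dma = hw_dequeue & ~0xfULL;  (16-byte aligned TRB address)
+ */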
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, stream id, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ *  - First we update our new ring state to be the same as when the xHC stopped.
+ *  - Then we traverse the ring to find the segment that contains
+ *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
+ *    any link TRBs with the toggle cycle bit set.
+ *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *    if we've moved it past a link TRB with the toggle cycle bit set.
+ *
+ * Some of the uses of xhci_generic_trb are grotty, but if they're done
+ * with correct __le32 accesses they should work fine.  Only users of this are
+ * in here.
+ */
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id, struct xhci_td *cur_td,
+		struct xhci_dequeue_state *state)
+{
+	struct xhci_virt_device *dev = xhci->devs[slot_id];
+	struct xhci_virt_ep *ep = &dev->eps[ep_index];
+	struct xhci_ring *ep_ring;
+	struct xhci_segment *new_seg;
+	union xhci_trb *new_deq;
+	dma_addr_t addr;
+	u64 hw_dequeue;
+	bool cycle_found = false;
+	bool td_last_trb_found = false;
+
+	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+			ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN can't find new dequeue state "
+				"for invalid stream ID %u.\n",
+				stream_id);
+		return;
+	}
+	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding endpoint context");
+
+	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
+	new_seg = ep_ring->deq_seg;
+	new_deq = ep_ring->dequeue;
+	state->new_cycle_state = hw_dequeue & 0x1;
+	state->stream_id = stream_id;
+
+	/*
+	 * We want to find the pointer, segment and cycle state of the new trb
+	 * (the one after current TD's last_trb). We know the cycle state at
+	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+	 * found.
+	 */
+	do {
+		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
+		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
+			cycle_found = true;
+			if (td_last_trb_found)
+				break;
+		}
+		if (new_deq == cur_td->last_trb)
+			td_last_trb_found = true;
+
+		if (cycle_found && trb_is_link(new_deq) &&
+		    link_trb_toggles_cycle(new_deq))
+			state->new_cycle_state ^= 0x1;
+
+		next_trb(xhci, ep_ring, &new_seg, &new_deq);
+
+		/* Search wrapped around, bail out */
+		if (new_deq == ep->ring->dequeue) {
+			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
+			state->new_deq_seg = NULL;
+			state->new_deq_ptr = NULL;
+			return;
+		}
+
+	} while (!cycle_found || !td_last_trb_found);
+
+	state->new_deq_seg = new_seg;
+	state->new_deq_ptr = new_deq;
+
+	/* Don't update the ring cycle state for the producer (us). */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Cycle state = 0x%x", state->new_cycle_state);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue segment = %p (virtual)",
+			state->new_deq_seg);
+	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue pointer = 0x%llx (DMA)",
+			(unsigned long long) addr);
+}
+
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		       struct xhci_td *td, bool flip_cycle)
+{
+	struct xhci_segment *seg	= td->start_seg;
+	union xhci_trb *trb		= td->first_trb;
+
+	while (1) {
+		trb_to_noop(trb, TRB_TR_NOOP);
+
+		/* flip cycle if asked to */
+		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+		if (trb == td->last_trb)
+			break;
+
+		next_trb(xhci, ep_ring, &seg, &trb);
+	}
+}
+
+static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	ep->ep_state &= ~EP_STOP_CMD_PENDING;
+	/* Can't del_timer_sync in interrupt */
+	del_timer(&ep->stop_cmd_timer);
+}
+
+/*
+ * Must be called with xhci->lock held in interrupt context,
+ * releases and re-acquires xhci->lock
+ */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+				     struct xhci_td *cur_td, int status)
+{
+	struct urb	*urb		= cur_td->urb;
+	struct urb_priv	*urb_priv	= urb->hcpriv;
+	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+			if (xhci->quirks & XHCI_AMD_PLL_FIX)
+				usb_amd_quirk_pll_enable();
+		}
+	}
+	xhci_urb_free_priv(urb_priv);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock(&xhci->lock);
+	trace_xhci_urb_giveback(urb);
+	usb_hcd_giveback_urb(hcd, urb, status);
+	spin_lock(&xhci->lock);
+}
+
+static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+		struct xhci_ring *ring, struct xhci_td *td)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct xhci_segment *seg = td->bounce_seg;
+	struct urb *urb = td->urb;
+
+	if (!ring || !seg || !urb)
+		return;
+
+	if (usb_urb_dir_out(urb)) {
+		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+				 DMA_TO_DEVICE);
+		return;
+	}
+
+	/* for IN transfers we need to copy the data from bounce to sg */
+	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
+			     seg->bounce_len, seg->bounce_offs);
+	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+			 DMA_FROM_DEVICE);
+	seg->bounce_len = 0;
+	seg->bounce_offs = 0;
+}
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring.  There are two ways to do that:
+ *
+ *  1. If the HW was in the middle of processing the TD that needs to be
+ *     cancelled, then we must move the ring's dequeue pointer past the last TRB
+ *     in the TD with a Set Dequeue Pointer Command.
+ *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ *     bit cleared) so that the HW will skip over them.
+ */
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+		union xhci_trb *trb, struct xhci_event_cmd *event)
+{
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
+	struct xhci_td *cur_td = NULL;
+	struct xhci_td *last_unlinked_td;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_virt_device *vdev;
+	u64 hw_deq;
+	struct xhci_dequeue_state deq_state;
+
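+	/* A stop endpoint command issued to suspend a port (SP bit set)
+	 * has no cancelled TDs to clean up.
+	 */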
+	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+		if (!xhci->devs[slot_id])
+			xhci_warn(xhci, "Stop endpoint command "
+				"completion for disabled slot %u\n",
+				slot_id);
+		return;
+	}
+
+	memset(&deq_state, 0, sizeof(deq_state));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+
+	vdev = xhci->devs[slot_id];
+	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+	trace_xhci_handle_cmd_stop_ep(ep_ctx);
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
+			struct xhci_td, cancelled_td_list);
+
+	if (list_empty(&ep->cancelled_td_list)) {
+		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+		return;
+	}
+
+	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
+	 * We have the xHCI lock, so nothing can modify this list until we drop
+	 * it.  We're also in the event handler, so we can't get re-interrupted
+	 * if another Stop Endpoint command completes.
+	 */
+	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Removing canceled TD starting at 0x%llx (dma).",
+				(unsigned long long)xhci_trb_virt_to_dma(
+					cur_td->start_seg, cur_td->first_trb));
+		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+		if (!ep_ring) {
+			/* This shouldn't happen unless a driver is mucking
+			 * with the stream ID after submission.  This will
+			 * leave the TD on the hardware ring, and the hardware
+			 * will try to execute it, and may access a buffer
+			 * that has already been freed.  In the best case, the
+			 * hardware will execute it, and the event handler will
+			 * ignore the completion event for that TD, since it was
+			 * removed from the td_list for that endpoint.  In
+			 * short, don't muck with the stream ID after
+			 * submission.
+			 */
+			xhci_warn(xhci, "WARN Cancelled URB %p "
+					"has invalid stream ID %u.\n",
+					cur_td->urb,
+					cur_td->urb->stream_id);
+			goto remove_finished_td;
+		}
+		/*
+		 * If we stopped on the TD we need to cancel, then we have to
+		 * move the xHC endpoint ring dequeue pointer past this TD.
+		 */
+		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
+					 cur_td->urb->stream_id);
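+		/* strip the cycle state and reserved bits from the hw dequeue pointer */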
+		hw_deq &= ~0xf;
+
+		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
+			      cur_td->last_trb, hw_deq, false)) {
+			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+						    cur_td->urb->stream_id,
+						    cur_td, &deq_state);
+		} else {
+			td_to_noop(xhci, ep_ring, cur_td, false);
+		}
+
+remove_finished_td:
+		/*
+		 * The event handler won't see a completion for this TD anymore,
+		 * so remove it from the endpoint ring's TD list.  Keep it in
+		 * the cancelled TD list for URB completion later.
+		 */
+		list_del_init(&cur_td->td_list);
+	}
+
+	xhci_stop_watchdog_timer_in_irq(xhci, ep);
+
+	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
+					     &deq_state);
+		xhci_ring_cmd_db(xhci);
+	} else {
+		/* Otherwise ring the doorbell(s) to restart queued transfers */
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+	}
+
+	/*
+	 * Drop the lock and complete the URBs in the cancelled TD list.
+	 * New TDs to be cancelled might be added to the end of the list before
+	 * we can complete all the URBs for the TDs we already unlinked.
+	 * So stop when we've completed the URB for the last TD we unlinked.
+	 */
+	do {
+		cur_td = list_first_entry(&ep->cancelled_td_list,
+				struct xhci_td, cancelled_td_list);
+		list_del_init(&cur_td->cancelled_td_list);
+
+		/* Clean up the cancelled URB */
+		/* Doesn't matter what we pass for status, since the core will
+		 * just overwrite it (because the URB has been unlinked).
+		 */
+		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+
+		/* Stop processing the cancelled list if the watchdog timer
+		 * fired and marked the host as dying.
+		 */
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			return;
+	} while (cur_td != last_unlinked_td);
+
+	/* Return to the event handler with xhci->lock re-acquired */
+}
+
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	struct xhci_td *cur_td;
+	struct xhci_td *tmp;
+
+	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
+		list_del_init(&cur_td->td_list);
+
+		if (!list_empty(&cur_td->cancelled_td_list))
+			list_del_init(&cur_td->cancelled_td_list);
+
+		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
+
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+	}
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+		int slot_id, int ep_index)
+{
+	struct xhci_td *cur_td;
+	struct xhci_td *tmp;
+	struct xhci_virt_ep *ep;
+	struct xhci_ring *ring;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	if ((ep->ep_state & EP_HAS_STREAMS) ||
+			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
+		int stream_id;
+
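+		/* stream ID 0 is reserved; per-stream rings start at 1 */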
+		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+				stream_id++) {
+			ring = ep->stream_info->stream_rings[stream_id];
+			if (!ring)
+				continue;
+
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Killing URBs for slot ID %u, ep index %u, stream %u",
+					slot_id, ep_index, stream_id);
+			xhci_kill_ring_urbs(xhci, ring);
+		}
+	} else {
+		ring = ep->ring;
+		if (!ring)
+			return;
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Killing URBs for slot ID %u, ep index %u",
+				slot_id, ep_index);
+		xhci_kill_ring_urbs(xhci, ring);
+	}
+
+	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
+			cancelled_td_list) {
+		list_del_init(&cur_td->cancelled_td_list);
+		inc_td_cnt(cur_td->urb);
+
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+	}
+}
+
+/*
+ * Host controller died; register reads return 0xffffffff.
+ * Complete pending commands, mark them ABORTED.
+ * URBs need to be given back as usb core might be waiting with device locks
+ * held for the URBs to finish during device disconnect, blocking host remove.
+ *
+ * Call with xhci->lock held.
+ * The lock is released and re-acquired while giving back the urb.
+ */
+void xhci_hc_died(struct xhci_hcd *xhci)
+{
+	int i, j;
+
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return;
+
+	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
+	xhci->xhc_state |= XHCI_STATE_DYING;
+
+	xhci_cleanup_command_queue(xhci);
+
+	/* return any pending urbs, remove may be waiting for them */
+	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+		if (!xhci->devs[i])
+			continue;
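+		/* each slot has up to 31 endpoint contexts (EP0 plus 15 IN/OUT pairs) */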
+		for (j = 0; j < 31; j++)
+			xhci_kill_endpoint_urbs(xhci, i, j);
+	}
+
+	/* inform usb core hc died if PCI remove isn't already handling it */
+	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
+		usb_hc_died(xhci_to_hcd(xhci));
+}
+
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead.  The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->xhc_state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called.  Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back.  So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and checking if a new timer is
+ * pending.
+ */
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
+{
+	struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
+	struct xhci_hcd *xhci = ep->xhci;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* bail out if cmd completed but raced with stop ep watchdog timer. */
+	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
+	    timer_pending(&ep->stop_cmd_timer)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
+		return;
+	}
+
+	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+	ep->ep_state &= ~EP_STOP_CMD_PENDING;
+
+	xhci_halt(xhci);
+
+	/*
+	 * handle a stop endpoint cmd timeout as if host died (-ENODEV).
+	 * In the future we could distinguish between -ENODEV and -ETIMEDOUT
+	 * and try to recover a -ETIMEDOUT with a host controller reset
+	 */
+	xhci_hc_died(xhci);
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"xHCI host controller is dead.");
+}
+
+static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
+		struct xhci_virt_device *dev,
+		struct xhci_ring *ep_ring,
+		unsigned int ep_index)
+{
+	union xhci_trb *dequeue_temp;
+	int num_trbs_free_temp;
+	bool revert = false;
+
+	num_trbs_free_temp = ep_ring->num_trbs_free;
+	dequeue_temp = ep_ring->dequeue;
+
+	/* If we get two back-to-back stalls, and the first stalled transfer
+	 * ends just before a link TRB, the dequeue pointer will be left on
+	 * the link TRB by the code in the while loop.  So we have to update
+	 * the dequeue pointer one segment further, or we'll jump off
+	 * the segment into la-la-land.
+	 */
+	if (trb_is_link(ep_ring->dequeue)) {
+		ep_ring->deq_seg = ep_ring->deq_seg->next;
+		ep_ring->dequeue = ep_ring->deq_seg->trbs;
+	}
+
+	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
+		/* We have more usable TRBs */
+		ep_ring->num_trbs_free++;
+		ep_ring->dequeue++;
+		if (trb_is_link(ep_ring->dequeue)) {
+			if (ep_ring->dequeue ==
+					dev->eps[ep_index].queued_deq_ptr)
+				break;
+			ep_ring->deq_seg = ep_ring->deq_seg->next;
+			ep_ring->dequeue = ep_ring->deq_seg->trbs;
+		}
+		if (ep_ring->dequeue == dequeue_temp) {
+			revert = true;
+			break;
+		}
+	}
+
+	if (revert) {
+		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
+		ep_ring->num_trbs_free = num_trbs_free_temp;
+	}
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again.  We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+		union xhci_trb *trb, u32 cmd_comp_code)
+{
+	unsigned int ep_index;
+	unsigned int stream_id;
+	struct xhci_ring *ep_ring;
+	struct xhci_virt_device *dev;
+	struct xhci_virt_ep *ep;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+	dev = xhci->devs[slot_id];
+	ep = &dev->eps[ep_index];
+
+	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
+				stream_id);
+		/* XXX: Harmless??? */
+		goto cleanup;
+	}
+
+	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+	trace_xhci_handle_cmd_set_deq(slot_ctx);
+	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
+
+	if (cmd_comp_code != COMP_SUCCESS) {
+		unsigned int ep_state;
+		unsigned int slot_state;
+
+		switch (cmd_comp_code) {
+		case COMP_TRB_ERROR:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
+			break;
+		case COMP_CONTEXT_STATE_ERROR:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
+			ep_state = GET_EP_CTX_STATE(ep_ctx);
+			slot_state = le32_to_cpu(slot_ctx->dev_state);
+			slot_state = GET_SLOT_STATE(slot_state);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Slot state = %u, EP state = %u",
+					slot_state, ep_state);
+			break;
+		case COMP_SLOT_NOT_ENABLED_ERROR:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+					slot_id);
+			break;
+		default:
+			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+					cmd_comp_code);
+			break;
+		}
+		/* OK what do we do now?  The endpoint state is hosed, and we
+		 * should never get to this point if the synchronization between
+		 * queueing and endpoint state is correct.  This might happen
+		 * if the device gets disconnected after we've finished
+		 * cancelling URBs, which might not be an error...
+		 */
+	} else {
+		u64 deq;
+		/* 4.6.10 deq ptr is written to the stream ctx for streams */
+		if (ep->ep_state & EP_HAS_STREAMS) {
+			struct xhci_stream_ctx *ctx =
+				&ep->stream_info->stream_ctx_array[stream_id];
+			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+		} else {
+			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+		}
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+					 ep->queued_deq_ptr) == deq) {
+			/* Update the ring's dequeue segment and dequeue pointer
+			 * to reflect the new position.
+			 */
+			update_ring_for_set_deq_completion(xhci, dev,
+				ep_ring, ep_index);
+		} else {
+			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
+			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+				  ep->queued_deq_seg, ep->queued_deq_ptr);
+		}
+	}
+
+cleanup:
+	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+	dev->eps[ep_index].queued_deq_seg = NULL;
+	dev->eps[ep_index].queued_deq_ptr = NULL;
+	/* Restart any rings with pending URBs */
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+}
+
+static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		union xhci_trb *trb, u32 cmd_comp_code)
+{
+	struct xhci_virt_device *vdev;
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int ep_index;
+
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	vdev = xhci->devs[slot_id];
+	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+	trace_xhci_handle_cmd_reset_ep(ep_ctx);
+
+	/* This command will only fail if the endpoint wasn't halted,
+	 * but we don't care.
+	 */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+		"Ignoring reset ep completion code of %u", cmd_comp_code);
+
+	/* HW with the reset endpoint quirk needs to have a configure endpoint
+	 * command complete before the endpoint can be used.  Queue that here
+	 * because the HW can't handle two commands being queued in a row.
+	 */
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+		struct xhci_command *command;
+
+		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+		if (!command)
+			return;
+
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Queueing configure endpoint command");
+		xhci_queue_configure_endpoint(xhci, command,
+				xhci->devs[slot_id]->in_ctx->dma, slot_id,
+				false);
+		xhci_ring_cmd_db(xhci);
+	} else {
+		/* Clear our internal halted state */
+		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+	}
+}
+
+static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_command *command, u32 cmd_comp_code)
+{
+	if (cmd_comp_code == COMP_SUCCESS)
+		command->slot_id = slot_id;
+	else
+		command->slot_id = 0;
+}
+
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_slot_ctx *slot_ctx;
+
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev)
+		return;
+
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	trace_xhci_handle_cmd_disable_slot(slot_ctx);
+
+	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+		/* Delete default control endpoint resources */
+		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+	xhci_free_virt_device(xhci, slot_id);
+}
+
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	u32 add_flags, drop_flags;
+
+	/*
+	 * Configure endpoint commands can come from the USB core
+	 * configuration or alt setting changes, or because the HW
+	 * needed an extra configure endpoint command after a reset
+	 * endpoint command or streams were being configured.
+	 * If the command was for a halted endpoint, the xHCI driver
+	 * is not waiting on the configure endpoint command.
+	 */
+	virt_dev = xhci->devs[slot_id];
+	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "Could not get input context, bad type.\n");
+		return;
+	}
+
+	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+	/* Input ctx add_flags are the endpoint index plus one */
+	ep_index = xhci_last_valid_endpoint(add_flags) - 1;
+
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
+	trace_xhci_handle_cmd_config_ep(ep_ctx);
+
+	/* A usb_set_interface() call directly after clearing a halted
+	 * condition may race on this quirky hardware.  Not worth
+	 * worrying about, since this is prototype hardware.  Not sure
+	 * if this will work for streams, but streams support was
+	 * untested on this prototype.
+	 */
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+			ep_index != (unsigned int) -1 &&
+			add_flags - SLOT_FLAG == drop_flags) {
+		ep_state = virt_dev->eps[ep_index].ep_state;
+		if (!(ep_state & EP_HALTED))
+			return;
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Completed config ep cmd - "
+				"last ep index = %d, state = %d",
+				ep_index, ep_state);
+		/* Clear internal halted state and restart ring(s) */
+		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+		return;
+	}
+	return;
+}
+
+static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *vdev;
+	struct xhci_slot_ctx *slot_ctx;
+
+	vdev = xhci->devs[slot_id];
+	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+	trace_xhci_handle_cmd_addr_dev(slot_ctx);
+}
+
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_event_cmd *event)
+{
+	struct xhci_virt_device *vdev;
+	struct xhci_slot_ctx *slot_ctx;
+
+	vdev = xhci->devs[slot_id];
+	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+	trace_xhci_handle_cmd_reset_dev(slot_ctx);
+
+	xhci_dbg(xhci, "Completed reset device command.\n");
+	if (!xhci->devs[slot_id])
+		xhci_warn(xhci, "Reset device command completion "
+				"for disabled slot %u\n", slot_id);
+}
+
+static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	if (!(xhci->quirks & XHCI_NEC_HOST)) {
+		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
+		return;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"NEC firmware version %2x.%02x",
+			NEC_FW_MAJOR(le32_to_cpu(event->status)),
+			NEC_FW_MINOR(le32_to_cpu(event->status)));
+}
+
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+	list_del(&cmd->cmd_list);
+
+	if (cmd->completion) {
+		cmd->status = status;
+		complete(cmd->completion);
+	} else {
+		kfree(cmd);
+	}
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+	struct xhci_command *cur_cmd, *tmp_cmd;
+	xhci->current_cmd = NULL;
+	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
+}
+
+void xhci_handle_command_timeout(struct work_struct *work)
+{
+	struct xhci_hcd *xhci;
+	unsigned long flags;
+	u64 hw_ring_state;
+
+	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/*
+	 * If timeout work is pending, or current_cmd is NULL, it means we
+	 * raced with command completion. Command is handled so just return.
+	 */
+	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return;
+	}
+	/* mark this command to be cancelled */
+	xhci->current_cmd->status = COMP_COMMAND_ABORTED;
+
+	/* Make sure command ring is running before aborting it */
+	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if (hw_ring_state == ~(u64)0) {
+		xhci_hc_died(xhci);
+		goto time_out_completed;
+	}
+
+	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+	    (hw_ring_state & CMD_RING_RUNNING))  {
+		/* Prevent new doorbell, and start command abort */
+		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+		xhci_dbg(xhci, "Command timeout\n");
+		xhci_abort_cmd_ring(xhci, flags);
+		goto time_out_completed;
+	}
+
+	/* host removed. Bail out */
+	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+		xhci_dbg(xhci, "host removed, ring start fail?\n");
+		xhci_cleanup_command_queue(xhci);
+
+		goto time_out_completed;
+	}
+
+	/* command timeout on stopped ring, ring can't be aborted */
+	xhci_dbg(xhci, "Command timeout on stopped ring\n");
+	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+
+time_out_completed:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return;
+}
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	u64 cmd_dma;
+	dma_addr_t cmd_dequeue_dma;
+	u32 cmd_comp_code;
+	union xhci_trb *cmd_trb;
+	struct xhci_command *cmd;
+	u32 cmd_type;
+
+	cmd_dma = le64_to_cpu(event->cmd_trb);
+	cmd_trb = xhci->cmd_ring->dequeue;
+
+	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
+
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+			cmd_trb);
+	/*
+	 * Check whether the completion event is for our internal kept
+	 * command.
+	 */
+	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
+		xhci_warn(xhci,
+			  "ERROR mismatched command completion event\n");
+		return;
+	}
+
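+	/* the xHC completes commands in order; this event is for the list head */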
+	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
+
+	cancel_delayed_work(&xhci->cmd_timer);
+
+	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+
+	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
+	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
+		complete_all(&xhci->cmd_ring_stop_completion);
+		return;
+	}
+
+	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+		xhci_err(xhci,
+			 "Command completion event does not match command\n");
+		return;
+	}
+
+	/*
+	 * Host aborted the command ring, check if the current command was
+	 * supposed to be aborted, otherwise continue normally.
+	 * The command ring is stopped now, but the xHC will issue a Command
+	 * Ring Stopped event which will cause us to restart it.
+	 */
+	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+		if (cmd->status == COMP_COMMAND_ABORTED) {
+			if (xhci->current_cmd == cmd)
+				xhci->current_cmd = NULL;
+			goto event_handled;
+		}
+	}
+
+	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+	switch (cmd_type) {
+	case TRB_ENABLE_SLOT:
+		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
+		break;
+	case TRB_DISABLE_SLOT:
+		xhci_handle_cmd_disable_slot(xhci, slot_id);
+		break;
+	case TRB_CONFIG_EP:
+		if (!cmd->completion)
+			xhci_handle_cmd_config_ep(xhci, slot_id, event,
+						  cmd_comp_code);
+		break;
+	case TRB_EVAL_CONTEXT:
+		break;
+	case TRB_ADDR_DEV:
+		xhci_handle_cmd_addr_dev(xhci, slot_id);
+		break;
+	case TRB_STOP_RING:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		if (!cmd->completion)
+			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
+		break;
+	case TRB_SET_DEQ:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
+		break;
+	case TRB_CMD_NOOP:
+		/* Is this an aborted command turned to NO-OP? */
+		if (cmd->status == COMP_COMMAND_RING_STOPPED)
+			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
+		break;
+	case TRB_RESET_EP:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
+		break;
+	case TRB_RESET_DEV:
+		/* SLOT_ID field in reset device cmd completion event TRB is 0.
+		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+		 */
+		slot_id = TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3]));
+		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
+		break;
+	case TRB_NEC_GET_FW:
+		xhci_handle_cmd_nec_get_fw(xhci, event);
+		break;
+	default:
+		/* Skip over unknown commands on the event ring */
+		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
+		break;
+	}
+
+	/* restart timer if this wasn't the last command */
+	if (!list_is_singular(&xhci->cmd_list)) {
+		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
+						struct xhci_command, cmd_list);
+		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+	} else if (xhci->current_cmd == cmd) {
+		xhci->current_cmd = NULL;
+	}
+
+event_handled:
+	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
+	inc_deq(xhci, xhci->cmd_ring);
+}
+
+static void handle_vendor_event(struct xhci_hcd *xhci,
+		union xhci_trb *event)
+{
+	u32 trb_type;
+
+	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
+	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
+	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
+		handle_cmd_completion(xhci, &event->event_cmd);
+}
+
+static void handle_device_notification(struct xhci_hcd *xhci,
+		union xhci_trb *event)
+{
+	u32 slot_id;
+	struct usb_device *udev;
+
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
+	if (!xhci->devs[slot_id]) {
+		xhci_warn(xhci, "Device Notification event for "
+				"unused slot %u\n", slot_id);
+		return;
+	}
+
+	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
+			slot_id);
+	udev = xhci->devs[slot_id]->udev;
+	if (udev && udev->parent)
+		usb_wakeup_notification(udev->parent, udev->portnum);
+}
+
+/*
+ * Quirk handler for errata seen on Cavium ThunderX2 processor xHCI
+ * controller.
+ * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
+ * if a connection to a USB 1 device is followed by another connection
+ * to a USB 2 device.
+ *
+ * Reset the PHY after the USB device is disconnected if device speed
+ * is less than HCD_USB3.
+ * Retry the reset sequence a maximum of 4 times, checking the PLL lock
+ * status each time.
+ */
+static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	u32 pll_lock_check;
+	u32 retry_count = 4;
+
+	do {
+		/* Assert PHY reset */
+		writel(0x6F, hcd->regs + 0x1048);
+		udelay(10);
+		/* De-assert the PHY reset */
+		writel(0x7F, hcd->regs + 0x1048);
+		udelay(200);
+		pll_lock_check = readl(hcd->regs + 0x1070);
+	} while (!(pll_lock_check & 0x1) && --retry_count);
+}
+
+static void handle_port_status(struct xhci_hcd *xhci,
+		union xhci_trb *event)
+{
+	struct usb_hcd *hcd;
+	u32 port_id;
+	u32 portsc, cmd_reg;
+	int max_ports;
+	int slot_id;
+	unsigned int hcd_portnum;
+	struct xhci_bus_state *bus_state;
+	bool bogus_port_status = false;
+	struct xhci_port *port;
+
+	/* Port status change events always have a successful completion code */
+	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+		xhci_warn(xhci,
+			  "WARN: xHC returned failed port status event\n");
+
+	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
+
+	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	if ((port_id <= 0) || (port_id > max_ports)) {
+		xhci_warn(xhci, "Invalid port id %d\n", port_id);
+		inc_deq(xhci, xhci->event_ring);
+		return;
+	}
+
+	port = &xhci->hw_ports[port_id - 1];
+	if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
+		xhci_warn(xhci, "Event for invalid port %u\n", port_id);
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
+	/* We might get interrupts after shared_hcd is removed */
+	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
+	hcd = port->rhub->hcd;
+	bus_state = &xhci->bus_state[hcd_index(hcd)];
+	hcd_portnum = port->hcd_portnum;
+	portsc = readl(port->addr);
+
+	trace_xhci_handle_port_status(hcd_portnum, portsc);
+
+	if (hcd->state == HC_STATE_SUSPENDED) {
+		xhci_dbg(xhci, "resume root hub\n");
+		usb_hcd_resume_root_hub(hcd);
+	}
+
+	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
+		bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
+
+	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
+		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
+
+		cmd_reg = readl(&xhci->op_regs->command);
+		if (!(cmd_reg & CMD_RUN)) {
+			xhci_warn(xhci, "xHC is not running.\n");
+			goto cleanup;
+		}
+
+		if (DEV_SUPERSPEED_ANY(portsc)) {
+			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
+			/* Set a flag to say the port signaled remote wakeup,
+			 * so we can tell the difference between the end of
+			 * device and host initiated resume.
+			 */
+			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
+			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+			xhci_set_link_state(xhci, port, XDEV_U0);
+			/* Need to wait until the next link state change
+			 * indicates the device is actually in U0.
+			 */
+			bogus_port_status = true;
+			goto cleanup;
+		} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
+			xhci_dbg(xhci, "resume HS port %d\n", port_id);
+			bus_state->resume_done[hcd_portnum] = jiffies +
+				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+			set_bit(hcd_portnum, &bus_state->resuming_ports);
+			/* Do the rest in GetPortStatus after the resume delay.
+			 * Avoid polling roothub status before that so that the
+			 * USB device sees an auto-resume latency of ~40ms.
+			 */
+			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+			mod_timer(&hcd->rh_timer,
+				  bus_state->resume_done[hcd_portnum]);
+			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
+			bogus_port_status = true;
+		}
+	}
+
+	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
+			DEV_SUPERSPEED_ANY(portsc)) {
+		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
+		/* We've just brought the device into U0 through either the
+		 * Resume state after a device remote wakeup, or through the
+		 * U3Exit state after a host-initiated resume.  If it's a device
+		 * initiated remote wake, don't pass up the link state change,
+		 * so the roothub behavior is consistent with external
+		 * USB 3.0 hub behavior.
+		 */
+		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
+		if (slot_id && xhci->devs[slot_id])
+			xhci_ring_device(xhci, slot_id);
+		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
+			bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
+			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+			usb_wakeup_notification(hcd->self.root_hub,
+					hcd_portnum + 1);
+			bogus_port_status = true;
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+	 * RExit to a disconnect state).  If so, let the driver know it's
+	 * out of the RExit state.
+	 */
+	if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
+			test_and_clear_bit(hcd_portnum,
+				&bus_state->rexit_ports)) {
+		complete(&bus_state->rexit_done[hcd_portnum]);
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
+	if (hcd->speed < HCD_USB3) {
+		xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+		if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
+		    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
+			xhci_cavium_reset_phy_quirk(xhci);
+	}
+
+cleanup:
+	/* Update event ring dequeue pointer before dropping the lock */
+	inc_deq(xhci, xhci->event_ring);
+
+	/* Don't make the USB core poll the roothub if we got a bad port status
+	 * change event.  Besides, at that point we can't tell which roothub
+	 * (USB 2.0 or USB 3.0) to kick.
+	 */
+	if (bogus_port_status)
+		return;
+
+	/*
+	 * xHCI port-status-change events occur when the "or" of all the
+	 * status-change bits in the portsc register changes from 0 to 1.
+	 * New status changes won't cause an event if any other change
+	 * bits are still set.  When an event occurs, switch over to
+	 * polling to avoid losing status changes.
+	 */
+	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	spin_unlock(&xhci->lock);
+	/* Pass this up to the core */
+	usb_hcd_poll_rh_status(hcd);
+	spin_lock(&xhci->lock);
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment.  If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment.  Otherwise it
+ * returns NULL.
+ */
+struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
+		struct xhci_segment *start_seg,
+		union xhci_trb	*start_trb,
+		union xhci_trb	*end_trb,
+		dma_addr_t	suspect_dma,
+		bool		debug)
+{
+	dma_addr_t start_dma;
+	dma_addr_t end_seg_dma;
+	dma_addr_t end_trb_dma;
+	struct xhci_segment *cur_seg;
+
+	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+	cur_seg = start_seg;
+
+	do {
+		if (start_dma == 0)
+			return NULL;
+		/* We may get an event for a Link TRB in the middle of a TD */
+		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+		/* If the end TRB isn't in this segment, this is set to 0 */
+		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+
+		if (debug)
+			xhci_warn(xhci,
+				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
+				(unsigned long long)suspect_dma,
+				(unsigned long long)start_dma,
+				(unsigned long long)end_trb_dma,
+				(unsigned long long)cur_seg->dma,
+				(unsigned long long)end_seg_dma);
+
+		if (end_trb_dma > 0) {
+			/* The end TRB is in this segment, so suspect should be here */
+			if (start_dma <= end_trb_dma) {
+				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+					return cur_seg;
+			} else {
+				/* Case for one segment with
+				 * a TD wrapped around to the top
+				 */
+				if ((suspect_dma >= start_dma &&
+							suspect_dma <= end_seg_dma) ||
+						(suspect_dma >= cur_seg->dma &&
+						 suspect_dma <= end_trb_dma))
+					return cur_seg;
+			}
+			return NULL;
+		} else {
+			/* Might still be somewhere in this segment */
+			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+				return cur_seg;
+		}
+		cur_seg = cur_seg->next;
+		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+	} while (cur_seg != start_seg);
+
+	return NULL;
+}
+
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id, struct xhci_td *td,
+		enum xhci_ep_reset_type reset_type)
+{
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	struct xhci_command *command;
+	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+	if (!command)
+		return;
+
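+	/* suppress doorbell rings for this endpoint until the reset completes */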
+	ep->ep_state |= EP_HALTED;
+
+	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
+
+	if (reset_type == EP_HARD_RESET) {
+		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
+	}
+	xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring.  The class driver will
+ * cleanup the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		unsigned int trb_comp_code)
+{
+	/* TRB completion codes that may require a manual halt cleanup */
+	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
+			trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
+			trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
+		/* The 0.95 spec says a babbling control endpoint
+		 * is not halted. The 0.96 spec says it is.  Some HW
+		 * claims to be 0.95 compliant, but it halts the control
+		 * endpoint anyway.  Check if a babble halted the
+		 * endpoint.
+		 */
+		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
+			return 1;
+
+	return 0;
+}
+
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+		/* Vendor defined "informational" completion code,
+		 * treat as not-an-error.
+		 */
+		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+				trb_comp_code);
+		xhci_dbg(xhci, "Treating code as success.\n");
+		return 1;
+	}
+	return 0;
+}
+
+static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+		struct xhci_ring *ep_ring, int *status)
+{
+	struct urb *urb = NULL;
+
+	/* Clean up the endpoint's TD list */
+	urb = td->urb;
+
+	/* if a bounce buffer was used to align this td then unmap it */
+	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
+	/* Do one last check of the actual transfer length.
+	 * If the host controller said we transferred more data than the buffer
+	 * length, urb->actual_length will be a very big number (since it's
+	 * unsigned).  Play it safe and say we didn't transfer anything.
+	 */
+	if (urb->actual_length > urb->transfer_buffer_length) {
+		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+			  urb->transfer_buffer_length, urb->actual_length);
+		urb->actual_length = 0;
+		*status = 0;
+	}
+	list_del_init(&td->td_list);
+	/* Was this TD slated to be cancelled but completed anyway? */
+	if (!list_empty(&td->cancelled_td_list))
+		list_del_init(&td->cancelled_td_list);
+
+	inc_td_cnt(urb);
+	/* Giveback the urb when all the tds are completed */
+	if (last_td_in_urb(td)) {
+		if ((urb->actual_length != urb->transfer_buffer_length &&
+		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+				 urb, urb->actual_length,
+				 urb->transfer_buffer_length, *status);
+
+		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
+		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+			*status = 0;
+		xhci_giveback_urb_in_irq(xhci, td, *status);
+	}
+
+	return 0;
+}
+
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_ring *ep_ring;
+	unsigned int slot_id;
+	u32 trb_comp_code;
+	int ep_index;
+
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	xdev = xhci->devs[slot_id];
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+			trb_comp_code == COMP_STOPPED ||
+			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+		/* The Endpoint Stop Command completion will take care of any
+		 * stopped TDs.  A stopped TD may be restarted, so don't update
+		 * the ring dequeue pointer or take this TD off any lists yet.
+		 */
+		return 0;
+	}
+	if (trb_comp_code == COMP_STALL_ERROR ||
+		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+						trb_comp_code)) {
+		/* Issue a reset endpoint command to clear the host side
+		 * halt, followed by a set dequeue command to move the
+		 * dequeue pointer past the TD.
+		 * The class driver clears the device side halt later.
+		 */
+		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+					ep_ring->stream_id, td, EP_HARD_RESET);
+	} else {
+		/* Update ring dequeue pointer */
+		while (ep_ring->dequeue != td->last_trb)
+			inc_deq(xhci, ep_ring);
+		inc_deq(xhci, ep_ring);
+	}
+
+	return xhci_td_cleanup(xhci, td, ep_ring, status);
+}
+
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
+			   union xhci_trb *stop_trb)
+{
+	u32 sum;
+	union xhci_trb *trb = ring->dequeue;
+	struct xhci_segment *seg = ring->deq_seg;
+
+	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+		if (!trb_is_noop(trb) && !trb_is_link(trb))
+			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+	}
+	return sum;
+}
+
+/*
+ * Process control tds, update urb status and actual_length.
+ */
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_virt_device *xdev;
+	unsigned int slot_id;
+	int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 trb_comp_code;
+	u32 remaining, requested;
+	u32 trb_type;
+
+	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	xdev = xhci->devs[slot_id];
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	requested = td->urb->transfer_buffer_length;
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		if (trb_type != TRB_STATUS) {
+			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
+				  (trb_type == TRB_DATA) ? "data" : "setup");
+			*status = -ESHUTDOWN;
+			break;
+		}
+		*status = 0;
+		break;
+	case COMP_SHORT_PACKET:
+		*status = 0;
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
+			td->urb->actual_length = remaining;
+		else
+			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+		goto finish_td;
+	case COMP_STOPPED:
+		switch (trb_type) {
+		case TRB_SETUP:
+			td->urb->actual_length = 0;
+			goto finish_td;
+		case TRB_DATA:
+		case TRB_NORMAL:
+			td->urb->actual_length = requested - remaining;
+			goto finish_td;
+		case TRB_STATUS:
+			td->urb->actual_length = requested;
+			goto finish_td;
+		default:
+			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
+				  trb_type);
+			goto finish_td;
+		}
+	case COMP_STOPPED_LENGTH_INVALID:
+		goto finish_td;
+	default:
+		if (!xhci_requires_manual_halt_cleanup(xhci,
+						       ep_ctx, trb_comp_code))
+			break;
+		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
+			 trb_comp_code, ep_index);
+		/* else fall through */
+	case COMP_STALL_ERROR:
+		/* Did we transfer part of the data (middle) phase? */
+		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
+			td->urb->actual_length = requested - remaining;
+		else if (!td->urb_length_set)
+			td->urb->actual_length = 0;
+		goto finish_td;
+	}
+
+	/* stopped at setup stage, no data transferred */
+	if (trb_type == TRB_SETUP)
+		goto finish_td;
+
+	/*
+	 * if on data stage then update the actual_length of the URB and flag it
+	 * as set, so it won't be overwritten in the event for the last TRB.
+	 */
+	if (trb_type == TRB_DATA ||
+		trb_type == TRB_NORMAL) {
+		td->urb_length_set = true;
+		td->urb->actual_length = requested - remaining;
+		xhci_dbg(xhci, "Waiting for status stage event\n");
+		return 0;
+	}
+
+	/* at status stage */
+	if (!td->urb_length_set)
+		td->urb->actual_length = requested;
+
+finish_td:
+	return finish_td(xhci, td, event, ep, status);
+}
+
+/*
+ * Process isochronous tds, update urb packet status and actual_length.
+ */
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	int idx;
+	struct usb_iso_packet_descriptor *frame;
+	u32 trb_comp_code;
+	bool sum_trbs_for_length = false;
+	u32 remaining, requested, ep_trb_len;
+	int short_framestatus;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	urb_priv = td->urb->hcpriv;
+	idx = urb_priv->num_tds_done;
+	frame = &td->urb->iso_frame_desc[idx];
+	requested = frame->length;
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+		-EREMOTEIO : 0;
+
+	/* handle completion code */
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		if (remaining) {
+			frame->status = short_framestatus;
+			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+				sum_trbs_for_length = true;
+			break;
+		}
+		frame->status = 0;
+		break;
+	case COMP_SHORT_PACKET:
+		frame->status = short_framestatus;
+		sum_trbs_for_length = true;
+		break;
+	case COMP_BANDWIDTH_OVERRUN_ERROR:
+		frame->status = -ECOMM;
+		break;
+	case COMP_ISOCH_BUFFER_OVERRUN:
+	case COMP_BABBLE_DETECTED_ERROR:
+		frame->status = -EOVERFLOW;
+		break;
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+	case COMP_STALL_ERROR:
+		frame->status = -EPROTO;
+		break;
+	case COMP_USB_TRANSACTION_ERROR:
+		frame->status = -EPROTO;
+		if (ep_trb != td->last_trb)
+			return 0;
+		break;
+	case COMP_STOPPED:
+		sum_trbs_for_length = true;
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		/* field normally containing residue now contains transferred */
+		frame->status = short_framestatus;
+		requested = remaining;
+		break;
+	case COMP_STOPPED_LENGTH_INVALID:
+		requested = 0;
+		remaining = 0;
+		break;
+	default:
+		sum_trbs_for_length = true;
+		frame->status = -1;
+		break;
+	}
+
+	if (sum_trbs_for_length)
+		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+			ep_trb_len - remaining;
+	else
+		frame->actual_length = requested;
+
+	td->urb->actual_length += frame->actual_length;
+
+	return finish_td(xhci, td, event, ep, status);
+}
+
+static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+			struct xhci_transfer_event *event,
+			struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct usb_iso_packet_descriptor *frame;
+	int idx;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	urb_priv = td->urb->hcpriv;
+	idx = urb_priv->num_tds_done;
+	frame = &td->urb->iso_frame_desc[idx];
+
+	/* The transfer is partly done. */
+	frame->status = -EXDEV;
+
+	/* calc actual length */
+	frame->actual_length = 0;
+
+	/* Update ring dequeue pointer */
+	while (ep_ring->dequeue != td->last_trb)
+		inc_deq(xhci, ep_ring);
+	inc_deq(xhci, ep_ring);
+
+	return xhci_td_cleanup(xhci, td, ep_ring, status);
+}
+
+/*
+ * Process bulk and interrupt tds, update urb status and actual_length.
+ */
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
+	struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	u32 trb_comp_code;
+	u32 remaining, requested, ep_trb_len;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+	requested = td->urb->transfer_buffer_length;
+
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		/* handle success with untransferred data as short packet */
+		if (ep_trb != td->last_trb || remaining) {
+			xhci_warn(xhci, "WARN Successful completion on short TX\n");
+			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+				 td->urb->ep->desc.bEndpointAddress,
+				 requested, remaining);
+		}
+		*status = 0;
+		break;
+	case COMP_SHORT_PACKET:
+		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+			 td->urb->ep->desc.bEndpointAddress,
+			 requested, remaining);
+		*status = 0;
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		td->urb->actual_length = remaining;
+		goto finish_td;
+	case COMP_STOPPED_LENGTH_INVALID:
+		/* stopped on ep trb with invalid length, exclude it */
+		ep_trb_len	= 0;
+		remaining	= 0;
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	if (ep_trb == td->last_trb)
+		td->urb->actual_length = requested - remaining;
+	else
+		td->urb->actual_length =
+			sum_trb_lengths(xhci, ep_ring, ep_trb) +
+			ep_trb_len - remaining;
+finish_td:
+	if (remaining > requested) {
+		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
+			  remaining);
+		td->urb->actual_length = 0;
+	}
+	return finish_td(xhci, td, event, ep, status);
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+		struct xhci_transfer_event *event)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_virt_ep *ep;
+	struct xhci_ring *ep_ring;
+	unsigned int slot_id;
+	int ep_index;
+	struct xhci_td *td = NULL;
+	dma_addr_t ep_trb_dma;
+	struct xhci_segment *ep_seg;
+	union xhci_trb *ep_trb;
+	int status = -EINPROGRESS;
+	struct xhci_ep_ctx *ep_ctx;
+	struct list_head *tmp;
+	u32 trb_comp_code;
+	int td_num = 0;
+	bool handling_skipped_tds = false;
+
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	ep_trb_dma = le64_to_cpu(event->buffer);
+
+	xdev = xhci->devs[slot_id];
+	if (!xdev) {
+		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
+			 slot_id);
+		goto err_out;
+	}
+
+	ep = &xdev->eps[ep_index];
+	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
+		xhci_err(xhci,
+			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
+			  slot_id, ep_index);
+		goto err_out;
+	}
+
+	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
+	if (!ep_ring) {
+		switch (trb_comp_code) {
+		case COMP_STALL_ERROR:
+		case COMP_USB_TRANSACTION_ERROR:
+		case COMP_INVALID_STREAM_TYPE_ERROR:
+		case COMP_INVALID_STREAM_ID_ERROR:
+			xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
+						     NULL, EP_SOFT_RESET);
+			goto cleanup;
+		case COMP_RING_UNDERRUN:
+		case COMP_RING_OVERRUN:
+		case COMP_STOPPED_LENGTH_INVALID:
+			goto cleanup;
+		default:
+			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+				 slot_id, ep_index);
+			goto err_out;
+		}
+	}
+
+	/* Count the number of TDs on the ring if ep->skip is set */
+	if (ep->skip) {
+		list_for_each(tmp, &ep_ring->td_list)
+			td_num++;
+	}
+
+	/* Look for common error cases */
+	switch (trb_comp_code) {
+	/* Skip codes that require special handling depending on
+	 * transfer type
+	 */
+	case COMP_SUCCESS:
+		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+			break;
+		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+			trb_comp_code = COMP_SHORT_PACKET;
+		else
+			xhci_warn_ratelimited(xhci,
+					      "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
+					      slot_id, ep_index);
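+		/* fallthrough */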
+	case COMP_SHORT_PACKET:
+		break;
+	/* Completion codes for endpoint stopped state */
+	case COMP_STOPPED:
+		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
+			 slot_id, ep_index);
+		break;
+	case COMP_STOPPED_LENGTH_INVALID:
+		xhci_dbg(xhci,
+			 "Stopped on No-op or Link TRB for slot %u ep %u\n",
+			 slot_id, ep_index);
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		xhci_dbg(xhci,
+			 "Stopped with short packet transfer detected for slot %u ep %u\n",
+			 slot_id, ep_index);
+		break;
+	/* Completion codes for endpoint halted state */
+	case COMP_STALL_ERROR:
+		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
+			 ep_index);
+		ep->ep_state |= EP_HALTED;
+		status = -EPIPE;
+		break;
+	case COMP_SPLIT_TRANSACTION_ERROR:
+	case COMP_USB_TRANSACTION_ERROR:
+		xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
+			 slot_id, ep_index);
+		status = -EPROTO;
+		break;
+	case COMP_BABBLE_DETECTED_ERROR:
+		xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
+			 slot_id, ep_index);
+		status = -EOVERFLOW;
+		break;
+	/* Completion codes for endpoint error state */
+	case COMP_TRB_ERROR:
+		xhci_warn(xhci,
+			  "WARN: TRB error for slot %u ep %u on endpoint\n",
+			  slot_id, ep_index);
+		status = -EILSEQ;
+		break;
+	/* completion codes not indicating endpoint state change */
+	case COMP_DATA_BUFFER_ERROR:
+		xhci_warn(xhci,
+			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
+			  slot_id, ep_index);
+		status = -ENOSR;
+		break;
+	case COMP_BANDWIDTH_OVERRUN_ERROR:
+		xhci_warn(xhci,
+			  "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
+			  slot_id, ep_index);
+		break;
+	case COMP_ISOCH_BUFFER_OVERRUN:
+		xhci_warn(xhci,
+			  "WARN: buffer overrun event for slot %u ep %u on endpoint",
+			  slot_id, ep_index);
+		break;
+	case COMP_RING_UNDERRUN:
+		/*
+		 * When the Isoch ring is empty, the xHC will generate
+		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
+		 * Underrun Event for an OUT Isoch endpoint.
+		 */
+		xhci_dbg(xhci, "underrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+				 ep_index);
+		goto cleanup;
+	case COMP_RING_OVERRUN:
+		xhci_dbg(xhci, "overrun event on endpoint\n");
+		if (!list_empty(&ep_ring->td_list))
+			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
+					"still with TDs queued?\n",
+				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+				 ep_index);
+		goto cleanup;
+	case COMP_MISSED_SERVICE_ERROR:
+		/*
+		 * When we encounter a missed service error, one or more
+		 * isoc TDs may have been missed by the xHC.
+		 * Set the skip flag of the endpoint; complete the missed TDs
+		 * as short transfers the next time we process the ep_ring.
+		 */
+		ep->skip = true;
+		xhci_dbg(xhci,
+			 "Miss service interval error for slot %u ep %u, set skip flag\n",
+			 slot_id, ep_index);
+		goto cleanup;
+	case COMP_NO_PING_RESPONSE_ERROR:
+		ep->skip = true;
+		xhci_dbg(xhci,
+			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
+			 slot_id, ep_index);
+		goto cleanup;
+
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		/* needs disable slot command to recover */
+		xhci_warn(xhci,
+			  "WARN: detect an incompatible device for slot %u ep %u",
+			  slot_id, ep_index);
+		status = -EPROTO;
+		break;
+	default:
+		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
+			status = 0;
+			break;
+		}
+		xhci_warn(xhci,
+			  "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
+			  trb_comp_code, slot_id, ep_index);
+		goto cleanup;
+	}
+
+	do {
+		/* This TRB should be in the TD at the head of this ring's
+		 * TD list.
+		 */
+		if (list_empty(&ep_ring->td_list)) {
+			/*
+			 * Don't print warnings if it's due to a stopped
+			 * endpoint generating an extra completion event while
+			 * the device was suspended, or an event for the last
+			 * TRB of a short TD we already got a short event for.
+			 * The short TD is already removed from the TD list.
+			 */
+
+			if (!(trb_comp_code == COMP_STOPPED ||
+			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+			      ep_ring->last_td_was_short)) {
+				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+						ep_index);
+			}
+			if (ep->skip) {
+				ep->skip = false;
+				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
+					 slot_id, ep_index);
+			}
+			goto cleanup;
+		}
+
+		/* We've skipped all the TDs on the ep ring when ep->skip is set */
+		if (ep->skip && td_num == 0) {
+			ep->skip = false;
+			xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
+				 slot_id, ep_index);
+			goto cleanup;
+		}
+
+		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
+				      td_list);
+		if (ep->skip)
+			td_num--;
+
+		/* Is this a TRB in the currently executing TD? */
+		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+				td->last_trb, ep_trb_dma, false);
+
+		/*
+		 * Skip the Force Stopped Event. The event_trb (event_dma) of
+		 * an FSE is not in the current TD pointed to by
+		 * ep_ring->dequeue, because the hardware dequeue pointer is
+		 * still at the previous TRB of the current TD. The previous
+		 * TRB may be a Link TRB or the last TRB of the previous TD.
+		 * The command completion handler will take care of the rest.
+		 */
+		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
+			   trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+			goto cleanup;
+		}
+
+		if (!ep_seg) {
+			if (!ep->skip ||
+			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+				/* Some host controllers give a spurious
+				 * successful event after a short transfer.
+				 * Ignore it.
+				 */
+				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+						ep_ring->last_td_was_short) {
+					ep_ring->last_td_was_short = false;
+					goto cleanup;
+				}
+				/* HC is busted, give up! */
+				xhci_err(xhci,
+					"ERROR Transfer event TRB DMA ptr not "
+					"part of current TD ep_index %d "
+					"comp_code %u\n", ep_index,
+					trb_comp_code);
+				trb_in_td(xhci, ep_ring->deq_seg,
+					  ep_ring->dequeue, td->last_trb,
+					  ep_trb_dma, true);
+				return -ESHUTDOWN;
+			}
+
+			skip_isoc_td(xhci, td, event, ep, &status);
+			goto cleanup;
+		}
+		if (trb_comp_code == COMP_SHORT_PACKET)
+			ep_ring->last_td_was_short = true;
+		else
+			ep_ring->last_td_was_short = false;
+
+		if (ep->skip) {
+			xhci_dbg(xhci,
+				 "Found td. Clear skip flag for slot %u ep %u.\n",
+				 slot_id, ep_index);
+			ep->skip = false;
+		}
+
+		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
+						sizeof(*ep_trb)];
+
+		trace_xhci_handle_transfer(ep_ring,
+				(struct xhci_generic_trb *) ep_trb);
+
+		/*
+		 * No-op TRB could trigger interrupts in a case where
+		 * a URB was killed and a STALL_ERROR happens right
+		 * after the endpoint ring stopped. Reset the halted
+		 * endpoint. Otherwise, the endpoint remains stalled
+		 * indefinitely.
+		 */
+		if (trb_is_noop(ep_trb)) {
+			if (trb_comp_code == COMP_STALL_ERROR ||
+			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+							      trb_comp_code))
+				xhci_cleanup_halted_endpoint(xhci, slot_id,
+							     ep_index,
+							     ep_ring->stream_id,
+							     td, EP_HARD_RESET);
+			goto cleanup;
+		}
+
+		/* update the urb's actual_length and give back to the core */
+		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
+		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
+		else
+			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
+					     &status);
+cleanup:
+		handling_skipped_tds = ep->skip &&
+			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
+			trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
+
+		/*
+		 * Do not update event ring dequeue pointer if we're in a loop
+		 * processing missed tds.
+		 */
+		if (!handling_skipped_tds)
+			inc_deq(xhci, xhci->event_ring);
+
+	/*
+	 * If ep->skip is set, there are missed TDs on the endpoint ring
+	 * that need to be taken care of.
+	 * Process them as short transfers until we reach the TD pointed
+	 * to by the event.
+	 */
+	} while (handling_skipped_tds);
+
+	return 0;
+
+err_out:
+	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+		 (unsigned long long) xhci_trb_virt_to_dma(
+			 xhci->event_ring->deq_seg,
+			 xhci->event_ring->dequeue),
+		 lower_32_bits(le64_to_cpu(event->buffer)),
+		 upper_32_bits(le64_to_cpu(event->buffer)),
+		 le32_to_cpu(event->transfer_len),
+		 le32_to_cpu(event->flags));
+	return -ENODEV;
+}
+
+/*
+ * This function handles all OS-owned events on the event ring.  It may drop
+ * xhci->lock between event processing (e.g. to pass up port status changes).
+ * Returns >0 for "possibly more events to process" (caller should call again),
+ * otherwise 0 if done.  In future, <0 returns should indicate error code.
+ */
+static int xhci_handle_event(struct xhci_hcd *xhci)
+{
+	union xhci_trb *event;
+	int update_ptrs = 1;
+	int ret;
+
+	/* Event ring hasn't been allocated yet. */
+	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+		xhci_err(xhci, "ERROR event ring not ready\n");
+		return -ENOMEM;
+	}
+
+	event = xhci->event_ring->dequeue;
+	/* Does the HC or OS own the TRB? */
+	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
+	    xhci->event_ring->cycle_state)
+		return 0;
+
+	trace_xhci_handle_event(xhci->event_ring, &event->generic);
+
+	/*
+	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
+	 * speculative reads of the event's flags/data below.
+	 */
+	rmb();
+	/* FIXME: Handle more event types. */
+	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_COMPLETION):
+		handle_cmd_completion(xhci, &event->event_cmd);
+		break;
+	case TRB_TYPE(TRB_PORT_STATUS):
+		handle_port_status(xhci, event);
+		update_ptrs = 0;
+		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		ret = handle_tx_event(xhci, &event->trans_event);
+		if (ret >= 0)
+			update_ptrs = 0;
+		break;
+	case TRB_TYPE(TRB_DEV_NOTE):
+		handle_device_notification(xhci, event);
+		break;
+	default:
+		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
+		    TRB_TYPE(48))
+			handle_vendor_event(xhci, event);
+		else
+			xhci_warn(xhci, "ERROR unknown event type %d\n",
+				  TRB_FIELD_TO_TYPE(
+				  le32_to_cpu(event->event_cmd.flags)));
+	}
+	/* Any of the above functions may drop and re-acquire the lock, so check
+	 * to make sure a watchdog timer didn't mark the host as non-responsive.
+	 */
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "xHCI host dying, returning from "
+				"event handler.\n");
+		return 0;
+	}
+
+	if (update_ptrs)
+		/* Update SW event ring dequeue pointer */
+		inc_deq(xhci, xhci->event_ring);
+
+	/* Are there more items on the event ring?  Caller will call us again to
+	 * check.
+	 */
+	return 1;
+}
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	union xhci_trb *event_ring_deq;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
+	dma_addr_t deq;
+	u64 temp_64;
+	u32 status;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* Check if the xHC generated the interrupt, or the irq is shared */
+	status = readl(&xhci->op_regs->status);
+	if (status == ~(u32)0) {
+		xhci_hc_died(xhci);
+		ret = IRQ_HANDLED;
+		goto out;
+	}
+
+	if (!(status & STS_EINT))
+		goto out;
+
+	if (status & STS_FATAL) {
+		xhci_warn(xhci, "WARNING: Host System Error\n");
+		xhci_halt(xhci);
+		ret = IRQ_HANDLED;
+		goto out;
+	}
+
+	/*
+	 * Clear the op reg interrupt status first,
+	 * so we can receive interrupts from other MSI-X interrupters.
+	 * Write 1 to clear the interrupt status.
+	 */
+	status |= STS_EINT;
+	writel(status, &xhci->op_regs->status);
+
+	if (!hcd->msi_enabled) {
+		u32 irq_pending;
+		irq_pending = readl(&xhci->ir_set->irq_pending);
+		irq_pending |= IMAN_IP;
+		writel(irq_pending, &xhci->ir_set->irq_pending);
+	}
+
+	if (xhci->xhc_state & XHCI_STATE_DYING ||
+	    xhci->xhc_state & XHCI_STATE_HALTED) {
+		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+				"Shouldn't IRQs be disabled?\n");
+		/* Clear the event handler busy flag (RW1C);
+		 * the event ring should be empty.
+		 */
+		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+		xhci_write_64(xhci, temp_64 | ERST_EHB,
+				&xhci->ir_set->erst_dequeue);
+		ret = IRQ_HANDLED;
+		goto out;
+	}
+
+	event_ring_deq = xhci->event_ring->dequeue;
+	/* FIXME this should be a delayed service routine
+	 * that clears the EHB.
+	 */
+	while (xhci_handle_event(xhci) > 0) {}
+
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	/* If necessary, update the HW's version of the event ring deq ptr. */
+	if (event_ring_deq != xhci->event_ring->dequeue) {
+		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+				xhci->event_ring->dequeue);
+		if (deq == 0)
+			xhci_warn(xhci, "WARN something wrong with SW event "
+					"ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		temp_64 &= ERST_PTR_MASK;
+		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	temp_64 |= ERST_EHB;
+	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+	ret = IRQ_HANDLED;
+
+out:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return ret;
+}
+
+irqreturn_t xhci_msi_irq(int irq, void *hcd)
+{
+	return xhci_irq(hcd);
+}
+
+/****		Endpoint Ring Operations	****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming:	Will you enqueue more TRBs before calling
+ *			prepare_transfer()?
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool more_trbs_coming,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	struct xhci_generic_trb *trb;
+
+	trb = &ring->enqueue->generic;
+	trb->field[0] = cpu_to_le32(field1);
+	trb->field[1] = cpu_to_le32(field2);
+	trb->field[2] = cpu_to_le32(field3);
+	trb->field[3] = cpu_to_le32(field4);
+
+	trace_xhci_queue_trb(ring, trb);
+
+	inc_enq(xhci, ring, more_trbs_coming);
+}
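+
+/*
+ * Illustrative sketch (not part of the driver): with the layout above, a
+ * caller queueing a transfer No-op TRB on a ring whose cycle state is 1
+ * would pass the type and cycle bit in field4, roughly:
+ *
+ *	queue_trb(xhci, ring, false, 0, 0, TRB_INTR_TARGET(0),
+ *		  TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
+ *
+ * Fields 1 and 2 normally carry the low/high halves of the buffer pointer
+ * and field 3 the length/interrupter target, as the real callers below do.
+ */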
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+	unsigned int num_trbs_needed;
+
+	/* Make sure the endpoint has been added to xHC schedule */
+	switch (ep_state) {
+	case EP_STATE_DISABLED:
+		/*
+		 * USB core changed config/interfaces without notifying us,
+		 * or hardware is reporting the wrong state.
+		 */
+		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+		return -ENOENT;
+	case EP_STATE_ERROR:
+		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
+		/* FIXME event handling code for error needs to clear it */
+		/* XXX not sure if this should be -ENOENT or not */
+		return -EINVAL;
+	case EP_STATE_HALTED:
+		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
+		/* fall through */
+	case EP_STATE_STOPPED:
+	case EP_STATE_RUNNING:
+		break;
+	default:
+		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+		/*
+		 * FIXME issue Configure Endpoint command to try to get the HC
+		 * back into a known state.
+		 */
+		return -EINVAL;
+	}
+
+	while (1) {
+		if (room_on_ring(xhci, ep_ring, num_trbs))
+			break;
+
+		if (ep_ring == xhci->cmd_ring) {
+			xhci_err(xhci, "Command ring expansion is not supported\n");
+			return -ENOMEM;
+		}
+
+		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+				"ERROR no room on ep ring, try ring expansion");
+		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
+					mem_flags)) {
+			xhci_err(xhci, "Ring expansion failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	while (trb_is_link(ep_ring->enqueue)) {
+		/* If we're not dealing with 0.95 hardware or isoc rings
+		 * on AMD 0.96 host, clear the chain bit.
+		 */
+		if (!xhci_link_trb_quirk(xhci) &&
+		    !(ep_ring->type == TYPE_ISOC &&
+		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
+			ep_ring->enqueue->link.control &=
+				cpu_to_le32(~TRB_CHAIN);
+		else
+			ep_ring->enqueue->link.control |=
+				cpu_to_le32(TRB_CHAIN);
+
+		wmb();
+		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (link_trb_toggles_cycle(ep_ring->enqueue))
+			ep_ring->cycle_state ^= 1;
+
+		ep_ring->enq_seg = ep_ring->enq_seg->next;
+		ep_ring->enqueue = ep_ring->enq_seg->trbs;
+	}
+	return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci,
+		struct xhci_virt_device *xdev,
+		unsigned int ep_index,
+		unsigned int stream_id,
+		unsigned int num_trbs,
+		struct urb *urb,
+		unsigned int td_index,
+		gfp_t mem_flags)
+{
+	int ret;
+	struct urb_priv *urb_priv;
+	struct xhci_td	*td;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+				stream_id);
+		return -EINVAL;
+	}
+
+	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
+			   num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	urb_priv = urb->hcpriv;
+	td = &urb_priv->td[td_index];
+
+	INIT_LIST_HEAD(&td->td_list);
+	INIT_LIST_HEAD(&td->cancelled_td_list);
+
+	if (td_index == 0) {
+		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	td->urb = urb;
+	/* Add this TD to the tail of the endpoint ring's TD list */
+	list_add_tail(&td->td_list, &ep_ring->td_list);
+	td->start_seg = ep_ring->enq_seg;
+	td->first_trb = ep_ring->enqueue;
+
+	return 0;
+}
+
+unsigned int count_trbs(u64 addr, u64 len)
+{
+	unsigned int num_trbs;
+
+	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+			TRB_MAX_BUFF_SIZE);
+	if (num_trbs == 0)
+		num_trbs++;
+
+	return num_trbs;
+}
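+
+/*
+ * Worked example (illustrative): TRB buffers must not cross a 64KB
+ * boundary (TRB_MAX_BUFF_SIZE), so the boundary offset of the start
+ * address counts against the first TRB. For addr = 0x1fff0 and
+ * len = 0x20, (addr & (TRB_MAX_BUFF_SIZE - 1)) = 0xfff0, and
+ * DIV_ROUND_UP(0x20 + 0xfff0, 0x10000) = 2, i.e. two TRBs even though
+ * only 32 bytes are transferred.
+ */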
+
+static inline unsigned int count_trbs_needed(struct urb *urb)
+{
+	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
+}
+
+static unsigned int count_sg_trbs_needed(struct urb *urb)
+{
+	struct scatterlist *sg;
+	unsigned int i, len, full_len, num_trbs = 0;
+
+	full_len = urb->transfer_buffer_length;
+
+	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+		len = sg_dma_len(sg);
+		num_trbs += count_trbs(sg_dma_address(sg), len);
+		len = min_t(unsigned int, len, full_len);
+		full_len -= len;
+		if (full_len == 0)
+			break;
+	}
+
+	return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
+{
+	u64 addr, len;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	len = urb->iso_frame_desc[i].length;
+
+	return count_trbs(addr, len);
+}
+
+static void check_trb_math(struct urb *urb, int running_total)
+{
+	if (unlikely(running_total != urb->transfer_buffer_length))
+		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+				"queued %#x (%d), asked for %#x (%d)\n",
+				__func__,
+				urb->ep->desc.bEndpointAddress,
+				running_total, running_total,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index, unsigned int stream_id, int start_cycle,
+		struct xhci_generic_trb *start_trb)
+{
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+	if (start_cycle)
+		start_trb->field[3] |= cpu_to_le32(start_cycle);
+	else
+		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+}
+
+static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
+						struct xhci_ep_ctx *ep_ctx)
+{
+	int xhci_interval;
+	int ep_interval;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
+	ep_interval = urb->interval;
+
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
+
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		dev_dbg_ratelimited(&urb->dev->dev,
+				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+				ep_interval, ep_interval == 1 ? "" : "s",
+				xhci_interval, xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+}
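+
+/*
+ * Worked example (illustrative): for a full-speed interrupt endpoint the
+ * USB core expresses urb->interval in frames, so urb->interval = 10 is
+ * first scaled to 80 microframes before being compared with the
+ * microframe-based interval in the endpoint context. If the xHC context
+ * says 64 microframes instead, the URB is adjusted to 64 and then
+ * converted back to 8 frames for the LS/FS device.
+ */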
+
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ep_ctx *ep_ctx;
+
+	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
+	check_interval(xhci, urb, ep_ctx);
+
+	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
+}
+
+/*
+ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
+ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * For xHCI 0.96 and older, TD size field should be the remaining bytes
+ * including this TRB, right shifted by 10
+ *
+ * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
+			      int trb_buff_len, unsigned int td_total_len,
+			      struct urb *urb, bool more_trbs_coming)
+{
+	u32 maxp, total_packet_count;
+
+	/* MTK xHCI 0.96 contains some features from 1.0 */
+	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
+		return ((td_total_len - transferred) >> 10);
+
+	/* One TRB with a zero-length data packet. */
+	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+	    trb_buff_len == td_total_len)
+		return 0;
+
+	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
+	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
+		trb_buff_len = 0;
+
+	maxp = usb_endpoint_maxp(&urb->ep->desc);
+	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+	/* Queueing functions don't count the current TRB into transferred */
+	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
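+
+/*
+ * Worked example (illustrative), assuming an xHCI 1.0 host and
+ * wMaxPacketSize = 512: for a 3000-byte TD, total_packet_count =
+ * DIV_ROUND_UP(3000, 512) = 6. While queueing the second TRB with
+ * transferred = 1024 and trb_buff_len = 1024, the TD size field is
+ * 6 - (1024 + 1024) / 512 = 2, i.e. two max-packet-sized packets remain
+ * after this TRB. The final TRB of the TD reports 0.
+ */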
+
+
+static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+			 u32 *trb_buff_len, struct xhci_segment *seg)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	unsigned int unalign;
+	unsigned int max_pkt;
+	u32 new_buff_len;
+
+	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
+	unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+	/* we got lucky, last normal TRB data on segment is packet aligned */
+	if (unalign == 0)
+		return 0;
+
+	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
+		 unalign, *trb_buff_len);
+
+	/* is the last normal TRB alignable by splitting it? */
+	if (*trb_buff_len > unalign) {
+		*trb_buff_len -= unalign;
+		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
+		return 0;
+	}
+
+	/*
+	 * We want enqd_len + trb_buff_len to sum up to a number that is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+	 */
+	new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
+		new_buff_len = (urb->transfer_buffer_length - enqd_len);
+
+	/* create a bounce buffer of at most max_pkt bytes for the last TRB */
+	if (usb_urb_dir_out(urb)) {
+		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+				   seg->bounce_buf, new_buff_len, enqd_len);
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_TO_DEVICE);
+	} else {
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_FROM_DEVICE);
+	}
+
+	if (dma_mapping_error(dev, seg->bounce_dma)) {
+		/* try without aligning. Some host controllers survive */
+		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
+		return 0;
+	}
+	*trb_buff_len = new_buff_len;
+	seg->bounce_len = new_buff_len;
+	seg->bounce_offs = enqd_len;
+
+	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
+
+	return 1;
+}
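+
+/*
+ * Worked example (illustrative), assuming max_pkt = 1024: with
+ * enqd_len = 1536 and a last-TRB budget of *trb_buff_len = 300,
+ * unalign = (1536 + 300) % 1024 = 812. Since 300 <= 812 the TRB cannot
+ * be split on a packet boundary, so a bounce buffer is used instead:
+ * new_buff_len = 1024 - (1536 % 1024) = 512 bytes (capped by the bytes
+ * remaining in the URB) are gathered and sent from seg->bounce_buf.
+ */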
+
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	struct xhci_generic_trb *start_trb;
+	struct scatterlist *sg = NULL;
+	bool more_trbs_coming = true;
+	bool need_zero_pkt = false;
+	bool first_trb = true;
+	unsigned int num_trbs;
+	unsigned int start_cycle, num_sgs = 0;
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
+	int sent_len, ret;
+	u32 field, length_field, remainder;
+	u64 addr, send_addr;
+
+	ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ring)
+		return -EINVAL;
+
+	full_len = urb->transfer_buffer_length;
+	/* If we have scatter/gather list, we use it. */
+	if (urb->num_sgs) {
+		num_sgs = urb->num_mapped_sgs;
+		sg = urb->sg;
+		addr = (u64) sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
+		num_trbs = count_sg_trbs_needed(urb);
+	} else {
+		num_trbs = count_trbs_needed(urb);
+		addr = (u64) urb->transfer_dma;
+		block_len = full_len;
+	}
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, 0, mem_flags);
+	if (unlikely(ret < 0))
+		return ret;
+
+	urb_priv = urb->hcpriv;
+
+	/* Deal with URB_ZERO_PACKET - need one more td/trb */
+	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
+		need_zero_pkt = true;
+
+	td = &urb_priv->td[0];
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ring->enqueue->generic;
+	start_cycle = ring->cycle_state;
+	send_addr = addr;
+
+	/* Queue the TRBs, even if they are zero-length */
+	for (enqd_len = 0; first_trb || enqd_len < full_len;
+			enqd_len += trb_buff_len) {
+		field = TRB_TYPE(TRB_NORMAL);
+
+		/* TRB buffer should not cross 64KB boundaries */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
+
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
+
+		/* Don't change the cycle bit of the first TRB until later */
+		if (first_trb) {
+			first_trb = false;
+			if (start_cycle == 0)
+				field |= TRB_CYCLE;
+		} else
+			field |= ring->cycle_state;
+
+		/* Chain all the TRBs together; clear the chain bit in the last
+		 * TRB to indicate it's the last TRB in the chain.
+		 */
+		if (enqd_len + trb_buff_len < full_len) {
+			field |= TRB_CHAIN;
+			if (trb_is_link(ring->enqueue + 1)) {
+				if (xhci_align_td(xhci, urb, enqd_len,
+						  &trb_buff_len,
+						  ring->enq_seg)) {
+					send_addr = ring->enq_seg->bounce_dma;
+					/* assuming TD won't span 2 segs */
+					td->bounce_seg = ring->enq_seg;
+				}
+			}
+		}
+		if (enqd_len + trb_buff_len >= full_len) {
+			field &= ~TRB_CHAIN;
+			field |= TRB_IOC;
+			more_trbs_coming = false;
+			td->last_trb = ring->enqueue;
+		}
+
+		/* Only set interrupt on short packet for IN endpoints */
+		if (usb_urb_dir_in(urb))
+			field |= TRB_ISP;
+
+		/* Set the TRB length, TD size, and interrupter fields. */
+		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+					      full_len, urb, more_trbs_coming);
+
+		length_field = TRB_LEN(trb_buff_len) |
+			TRB_TD_SIZE(remainder) |
+			TRB_INTR_TARGET(0);
+
+		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
+				lower_32_bits(send_addr),
+				upper_32_bits(send_addr),
+				length_field,
+				field);
+
+		addr += trb_buff_len;
+		sent_len = trb_buff_len;
+
+		while (sg && sent_len >= block_len) {
+			/* New sg entry */
+			--num_sgs;
+			sent_len -= block_len;
+			if (num_sgs != 0) {
+				sg = sg_next(sg);
+				block_len = sg_dma_len(sg);
+				addr = (u64) sg_dma_address(sg);
+				addr += sent_len;
+			}
+		}
+		block_len -= sent_len;
+		send_addr = addr;
+	}
+
+	if (need_zero_pkt) {
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				       ep_index, urb->stream_id,
+				       1, urb, 1, mem_flags);
+		urb_priv->td[1].last_trb = ring->enqueue;
+		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+	}
+
+	check_trb_math(urb, enqd_len);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb);
+	return 0;
+}
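+
+/*
+ * Illustrative note (an example, not normative): sent_len can exceed the
+ * bytes left in the current sg entry only when xhci_align_td() enlarged
+ * the last TRB into a bounce buffer that gathers data across sg entries.
+ * E.g. with 100 bytes left in the current entry and sent_len = 512, the
+ * loop above consumes the 100-byte remainder, then a 300-byte entry, and
+ * finally resumes 112 bytes into the next one (addr += sent_len).
+ */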
+
+/* Caller must have locked xhci->lock */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	int num_trbs;
+	int ret;
+	struct usb_ctrlrequest *setup;
+	struct xhci_generic_trb *start_trb;
+	int start_cycle;
+	u32 field;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
+	/*
+	 * Need to copy setup packet into setup TRB, so we can't use the setup
+	 * DMA address.
+	 */
+	if (!urb->setup_packet)
+		return -EINVAL;
+
+	/* 1 TRB for setup, 1 for status */
+	num_trbs = 2;
+	/*
+	 * Don't need to check if we need additional event data and normal TRBs,
+	 * since data in control transfers will never get bigger than 16MB
+	 * XXX: can we get a buffer that crosses 64KB boundaries?
+	 */
+	if (urb->transfer_buffer_length > 0)
+		num_trbs++;
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, 0, mem_flags);
+	if (ret < 0)
+		return ret;
+
+	urb_priv = urb->hcpriv;
+	td = &urb_priv->td[0];
+
+	/*
+	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
+	 * until we've finished creating all the other TRBs.  The ring's cycle
+	 * state may change as we enqueue the other TRBs, so save it too.
+	 */
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue setup TRB - see section 6.4.1.2.1 */
+	/* FIXME better way to translate setup_packet into two u32 fields? */
+	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	field = 0;
+	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+	if (start_cycle == 0)
+		field |= 0x1;
+
+	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
+		if (urb->transfer_buffer_length > 0) {
+			if (setup->bRequestType & USB_DIR_IN)
+				field |= TRB_TX_TYPE(TRB_DATA_IN);
+			else
+				field |= TRB_TX_TYPE(TRB_DATA_OUT);
+		}
+	}
+
+	queue_trb(xhci, ep_ring, true,
+		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
+		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
+		  TRB_LEN(8) | TRB_INTR_TARGET(0),
+		  /* Immediate data in pointer */
+		  field);
+
+	/* If there's data, queue data TRBs */
+	/* Only set interrupt on short packet for IN endpoints */
+	if (usb_urb_dir_in(urb))
+		field = TRB_ISP | TRB_TYPE(TRB_DATA);
+	else
+		field = TRB_TYPE(TRB_DATA);
+
+	if (urb->transfer_buffer_length > 0) {
+		u32 length_field, remainder;
+
+		remainder = xhci_td_remainder(xhci, 0,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				urb, 1);
+		length_field = TRB_LEN(urb->transfer_buffer_length) |
+				TRB_TD_SIZE(remainder) |
+				TRB_INTR_TARGET(0);
+		if (setup->bRequestType & USB_DIR_IN)
+			field |= TRB_DIR_IN;
+		queue_trb(xhci, ep_ring, true,
+				lower_32_bits(urb->transfer_dma),
+				upper_32_bits(urb->transfer_dma),
+				length_field,
+				field | ep_ring->cycle_state);
+	}
+
+	/* Save the DMA address of the last TRB in the TD */
+	td->last_trb = ep_ring->enqueue;
+
+	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+	/* If the device sent data, the status stage is an OUT transfer */
+	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+		field = 0;
+	else
+		field = TRB_DIR_IN;
+	queue_trb(xhci, ep_ring, false,
+			0,
+			0,
+			TRB_INTR_TARGET(0),
+			/* Event on completion */
+			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+	giveback_first_trb(xhci, slot_id, ep_index, 0,
+			start_cycle, start_trb);
+	return 0;
+}
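+
+/*
+ * Illustrative example (not normative): a standard GET_DESCRIPTOR request
+ * queues three TRBs through the function above - a setup TRB carrying the
+ * 8-byte request as immediate data (TRB_IDT), a data TRB with TRB_DIR_IN
+ * set because bRequestType has USB_DIR_IN, and a status TRB in the
+ * opposite direction (OUT, field = 0), per the direction rules in
+ * section 4.11.2.2 of the xHCI spec.
+ */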
+
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD.  Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst.  Basically, for everything but SuperSpeed devices, this field will be
+ * zero.  Only xHCI 1.0 host controllers support this field.
+ */
+static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+		struct urb *urb, unsigned int total_packet_count)
+{
+	unsigned int max_burst;
+
+	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
+		return 0;
+
+	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
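+
+/*
+ * Worked example (illustrative): for a SuperSpeed isoc endpoint with
+ * bMaxBurst = 3 (4 packets per burst) and total_packet_count = 10, the
+ * TD needs DIV_ROUND_UP(10, 4) = 3 bursts, so the zero-based burst count
+ * returned is 2. For LS/FS/HS devices the field is always 0.
+ */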
+
+/*
+ * Returns the number of packets in the last "burst" of packets.  This field is
+ * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
+ */
+static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
+		struct urb *urb, unsigned int total_packet_count)
+{
+	unsigned int max_burst;
+	unsigned int residue;
+
+	if (xhci->hci_version < 0x100)
+		return 0;
+
+	if (urb->dev->speed >= USB_SPEED_SUPER) {
+		/* bMaxBurst is zero based: 0 means 1 packet per burst */
+		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+		residue = total_packet_count % (max_burst + 1);
+		/* If residue is zero, the last burst contains (max_burst + 1)
+		 * number of packets, but the TLBPC field is zero-based.
+		 */
+		if (residue == 0)
+			return max_burst;
+		return residue - 1;
+	}
+	if (total_packet_count == 0)
+		return 0;
+	return total_packet_count - 1;
+}
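+
+/*
+ * Worked example (illustrative): continuing the bMaxBurst = 3,
+ * total_packet_count = 10 case, residue = 10 % 4 = 2, so the last burst
+ * holds 2 packets and the zero-based TLBPC returned is 1. Had the count
+ * been 12, residue would be 0 and the last burst would be full, so the
+ * function returns max_burst (3).
+ */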
+
+/*
+ * Calculate the Frame ID field of the isochronous TRB, which identifies
+ * the target frame that the Interval associated with this Isochronous
+ * Transfer Descriptor will start on. Refer to section 4.11.2.5 in the
+ * xHCI 1.1 spec.
+ *
+ * Returns actual frame id on success, negative value on error.
+ */
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
+		struct urb *urb, int index)
+{
+	int start_frame, ist, ret = 0;
+	int start_frame_id, end_frame_id, current_frame_id;
+
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		start_frame = urb->start_frame + index * urb->interval;
+	else
+		start_frame = (urb->start_frame + index * urb->interval) >> 3;
+
+	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
+	 *
+	 * If bit [3] of IST is cleared to '0', software can add a TRB no
+	 * later than IST[2:0] Microframes before that TRB is scheduled to
+	 * be executed.
+	 * If bit [3] of IST is set to '1', software can add a TRB no later
+	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+
+	/* Software shall not schedule an Isoch TD with a Frame ID value that
+	 * is less than the Start Frame ID or greater than the End Frame ID,
+	 * where:
+	 *
+	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
+	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
+	 *
+	 * Both the End Frame ID and Start Frame ID values are calculated
+	 * in microframes. When software determines the valid Frame ID value,
+	 * the End Frame ID value should be rounded down to the nearest Frame
+	 * boundary, and the Start Frame ID value should be rounded up to the
+	 * nearest Frame boundary.
+	 */
+	current_frame_id = readl(&xhci->run_regs->microframe_index);
+	start_frame_id = roundup(current_frame_id + ist + 1, 8);
+	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
+
+	start_frame &= 0x7ff;
+	start_frame_id = (start_frame_id >> 3) & 0x7ff;
+	end_frame_id = (end_frame_id >> 3) & 0x7ff;
+
+	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
+		 __func__, index, readl(&xhci->run_regs->microframe_index),
+		 start_frame_id, end_frame_id, start_frame);
+
+	if (start_frame_id < end_frame_id) {
+		if (start_frame > end_frame_id ||
+				start_frame < start_frame_id)
+			ret = -EINVAL;
+	} else if (start_frame_id > end_frame_id) {
+		if ((start_frame > end_frame_id &&
+				start_frame < start_frame_id))
+			ret = -EINVAL;
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (index == 0) {
+		if (ret == -EINVAL || start_frame == start_frame_id) {
+			start_frame = start_frame_id + 1;
+			if (urb->dev->speed == USB_SPEED_LOW ||
+					urb->dev->speed == USB_SPEED_FULL)
+				urb->start_frame = start_frame;
+			else
+				urb->start_frame = start_frame << 3;
+			ret = 0;
+		}
+	}
+
+	if (ret) {
+		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
+				start_frame, current_frame_id, index,
+				start_frame_id, end_frame_id);
+		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
+		return ret;
+	}
+
+	return start_frame;
+}
+
+/* This is for isoc transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+	bool more_trbs_coming;
+	struct xhci_virt_ep *xep;
+	int frame_id;
+
+	xep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+		return -EINVAL;
+	}
+	start_addr = (u64) urb->transfer_dma;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	urb_priv = urb->hcpriv;
+	/* Queue the TRBs for each TD, even if they are zero-length */
+	for (i = 0; i < num_tds; i++) {
+		unsigned int total_pkt_count, max_pkt;
+		unsigned int burst_count, last_burst_pkt_count;
+		u32 sia_frame_id;
+
+		first_trb = true;
+		running_total = 0;
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
+		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
+		/* A zero-length transfer still involves at least one packet. */
+		if (total_pkt_count == 0)
+			total_pkt_count++;
+		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
+		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
+							urb, total_pkt_count);
+
+		trbs_per_td = count_isoc_trbs_needed(urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0) {
+			if (i == 0)
+				return ret;
+			goto cleanup;
+		}
+		td = &urb_priv->td[i];
+
+		/* use SIA as default, if frame id is used overwrite it */
+		sia_frame_id = TRB_SIA;
+		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
+		    HCC_CFC(xhci->hcc_params)) {
+			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
+			if (frame_id >= 0)
+				sia_frame_id = TRB_FRAME_ID(frame_id);
+		}
+		/*
+		 * Set isoc specific data for the first TRB in a TD.
+		 * Prevent HW from getting the TRBs by keeping the cycle state
+		 * inverted in the first TD's isoc TRB.
+		 */
+		field = TRB_TYPE(TRB_ISOC) |
+			TRB_TLBPC(last_burst_pkt_count) |
+			sia_frame_id |
+			(i ? ep_ring->cycle_state : !start_cycle);
+
+		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
+		if (!xep->use_extended_tbc)
+			field |= TRB_TBC(burst_count);
+
+		/* fill the rest of the TRB fields, and remaining normal TRBs */
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+
+			/* only first TRB is isoc, overwrite otherwise */
+			if (!first_trb)
+				field = TRB_TYPE(TRB_NORMAL) |
+					ep_ring->cycle_state;
+
+			/* Only set interrupt on short packet for IN EPs */
+			if (usb_urb_dir_in(urb))
+				field |= TRB_ISP;
+
+			/* Set the chain bit for all except the last TRB  */
+			if (j < trbs_per_td - 1) {
+				more_trbs_coming = true;
+				field |= TRB_CHAIN;
+			} else {
+				more_trbs_coming = false;
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+				/* set BEI, except for the last TD */
+				if (xhci->hci_version >= 0x100 &&
+				    !(xhci->quirks & XHCI_AVOID_BEI) &&
+				    i < num_tds - 1)
+					field |= TRB_BEI;
+			}
+			/* Calculate TRB length */
+			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+			/* Set the TRB length, TD size, & interrupter fields. */
+			remainder = xhci_td_remainder(xhci, running_total,
+						   trb_buff_len, td_len,
+						   urb, more_trbs_coming);
+
+			length_field = TRB_LEN(trb_buff_len) |
+				TRB_INTR_TARGET(0);
+
+			/* xhci 1.1 with ETE uses TD Size field for TBC */
+			if (first_trb && xep->use_extended_tbc)
+				length_field |= TRB_TD_SIZE_TBC(burst_count);
+			else
+				length_field |= TRB_TD_SIZE(remainder);
+			first_trb = false;
+
+			queue_trb(xhci, ep_ring, more_trbs_coming,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				field);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_err(xhci, "ISOC TD length mismatch\n");
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
+	/* store the next frame id */
+	if (HCC_CFC(xhci->hcc_params))
+		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
+
+	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+		if (xhci->quirks & XHCI_AMD_PLL_FIX)
+			usb_amd_quirk_pll_disable();
+	}
+	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
+
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb);
+	return 0;
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+
+	for (i--; i >= 0; i--)
+		list_del_init(&urb_priv->td[i].td_list);
+
+	/* Use the first TD as a temporary variable to turn the TDs we've queued
+	 * into No-ops with a software-owned cycle bit. That way the hardware
+	 * won't accidentally start executing bogus TDs when we partially
+	 * overwrite them.  td->first_trb and td->start_seg are already set.
+	 */
+	urb_priv->td[0].last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = urb_priv->td[0].first_trb;
+	ep_ring->enq_seg = urb_priv->td[0].start_seg;
+	ep_ring->cycle_state = start_cycle;
+	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
+	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+	return ret;
+}
+
+/*
+ * Check transfer ring to guarantee there is enough room for the urb.
+ * Update ISO URB start_frame and interval.
+ * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
+ * Contiguous Frame ID is not supported by HC.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int num_tds, num_trbs, i;
+	int ret;
+	struct xhci_virt_ep *xep;
+	int ist;
+
+	xdev = xhci->devs[slot_id];
+	xep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(urb, i);
+
+	/* Check the ring to guarantee there is enough room for the whole urb.
+	 * Do not insert any TDs of the URB into the ring if the check fails.
+	 */
+	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
+			   num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check interval value. This should be done before we start to
+	 * calculate the start frame value.
+	 */
+	check_interval(xhci, urb, ep_ctx);
+
+	/* Calculate the start frame and put it in urb->start_frame. */
+	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+		if (GET_EP_CTX_STATE(ep_ctx) ==	EP_STATE_RUNNING) {
+			urb->start_frame = xep->next_frame_id;
+			goto skip_start_over;
+		}
+	}
+
+	start_frame = readl(&xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+	/*
+	 * Round up to the next frame and consider the time before the TRB
+	 * really gets scheduled by hardware.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+	start_frame += ist + XHCI_CFC_DELAY;
+	start_frame = roundup(start_frame, 8);
+
+	/*
+	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
+	 * is greater than 8 microframes.
+	 */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL) {
+		start_frame = roundup(start_frame, urb->interval << 3);
+		urb->start_frame = start_frame >> 3;
+	} else {
+		start_frame = roundup(start_frame, urb->interval);
+		urb->start_frame = start_frame;
+	}
+
+skip_start_over:
+	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
+
+	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
+}
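+
+/*
+ * Worked example (illustrative, with assumed register values): if
+ * MFINDEX reads 1000 microframes and IST works out to 8, the start is
+ * pushed to 1000 + 8 + XHCI_CFC_DELAY and rounded up to a frame boundary
+ * (a multiple of 8 microframes). For a HS endpoint with
+ * urb->interval = 16 microframes, roundup(..., 16) then lands the first
+ * TD on the next service interval boundary.
+ */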
+
+/****		Command Ring Operations		****/
+
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+			 u32 field1, u32 field2,
+			 u32 field3, u32 field4, bool command_must_succeed)
+{
+	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+	int ret;
+
+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+		(xhci->xhc_state & XHCI_STATE_HALTED)) {
+		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
+		return -ESHUTDOWN;
+	}
+
+	if (!command_must_succeed)
+		reserved_trbs++;
+
+	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
+			reserved_trbs, GFP_ATOMIC);
+	if (ret < 0) {
+		xhci_err(xhci, "ERR: No room for command on command ring\n");
+		if (command_must_succeed)
+			xhci_err(xhci, "ERR: Reserved TRB counting for "
+					"unfailable commands failed.\n");
+		return ret;
+	}
+
+	cmd->command_trb = xhci->cmd_ring->enqueue;
+
+	/* if there are no other commands queued we start the timeout timer */
+	if (list_empty(&xhci->cmd_list)) {
+		xhci->current_cmd = cmd;
+		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+	}
+
+	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
+	return 0;
+}
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 trb_type, u32 slot_id)
+{
+	return queue_command(xhci, cmd, 0, 0, 0,
+			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
+{
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
+			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
+			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
+}
+
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
+}
+
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 slot_id)
+{
+	return queue_command(xhci, cmd, 0, 0, 0,
+			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+		u32 slot_id, bool command_must_succeed)
+{
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
+			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+			command_must_succeed);
+}
+
+/* Queue an evaluate context command TRB */
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
+{
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+			upper_32_bits(in_ctx_ptr), 0,
+			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+			command_must_succeed);
+}
+
+/*
+ * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
+ * activity on an endpoint that is about to be suspended.
+ */
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+			     int slot_id, unsigned int ep_index, int suspend)
+{
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_STOP_RING);
+	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
+
+	return queue_command(xhci, cmd, 0, 0, 0,
+			trb_slot_id | trb_ep_index | type | trb_suspend, false);
+}
+
+/* Set Transfer Ring Dequeue Pointer command */
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state)
+{
+	dma_addr_t addr;
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
+	u32 trb_sct = 0;
+	u32 type = TRB_TYPE(TRB_SET_DEQ);
+	struct xhci_virt_ep *ep;
+	struct xhci_command *cmd;
+	int ret;
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
+		deq_state->new_deq_seg,
+		(unsigned long long)deq_state->new_deq_seg->dma,
+		deq_state->new_deq_ptr,
+		(unsigned long long)xhci_trb_virt_to_dma(
+			deq_state->new_deq_seg, deq_state->new_deq_ptr),
+		deq_state->new_cycle_state);
+
+	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
+				    deq_state->new_deq_ptr);
+	if (addr == 0) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
+		return;
+	}
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+	if ((ep->ep_state & SET_DEQ_PENDING)) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
+		return;
+	}
+
+	/* This function gets called from contexts where it cannot sleep */
+	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+	if (!cmd)
+		return;
+
+	ep->queued_deq_seg = deq_state->new_deq_seg;
+	ep->queued_deq_ptr = deq_state->new_deq_ptr;
+	if (deq_state->stream_id)
+		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+	ret = queue_command(xhci, cmd,
+		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
+		upper_32_bits(addr), trb_stream_id,
+		trb_slot_id | trb_ep_index | type, false);
+	if (ret < 0) {
+		xhci_free_command(xhci, cmd);
+		return;
+	}
+
+	/* Stop the TD queueing code from ringing the doorbell until
+	 * this command completes.  The HC won't set the dequeue pointer
+	 * if the ring is running, and ringing the doorbell starts the
+	 * ring running.
+	 */
+	ep->ep_state |= SET_DEQ_PENDING;
+}
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+			int slot_id, unsigned int ep_index,
+			enum xhci_ep_reset_type reset_type)
+{
+	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 type = TRB_TYPE(TRB_RESET_EP);
+
+	if (reset_type == EP_SOFT_RESET)
+		type |= TRB_TSP;
+
+	return queue_command(xhci, cmd, 0, 0, 0,
+			trb_slot_id | trb_ep_index | type, false);
+}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
new file mode 100644
index 0000000..b1cce98
--- /dev/null
+++ b/drivers/usb/host/xhci-tegra.c
@@ -0,0 +1,1394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVIDIA Tegra xHCI host controller driver
+ *
+ * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <soc/tegra/pmc.h>
+
+#include "xhci.h"
+
+#define TEGRA_XHCI_SS_HIGH_SPEED 120000000
+#define TEGRA_XHCI_SS_LOW_SPEED   12000000
+
+/* FPCI CFG registers */
+#define XUSB_CFG_1				0x004
+#define  XUSB_IO_SPACE_EN			BIT(0)
+#define  XUSB_MEM_SPACE_EN			BIT(1)
+#define  XUSB_BUS_MASTER_EN			BIT(2)
+#define XUSB_CFG_4				0x010
+#define  XUSB_BASE_ADDR_SHIFT			15
+#define  XUSB_BASE_ADDR_MASK			0x1ffff
+#define XUSB_CFG_ARU_C11_CSBRANGE		0x41c
+#define XUSB_CFG_CSB_BASE_ADDR			0x800
+
+/* FPCI mailbox registers */
+#define XUSB_CFG_ARU_MBOX_CMD			0x0e4
+#define  MBOX_DEST_FALC				BIT(27)
+#define  MBOX_DEST_PME				BIT(28)
+#define  MBOX_DEST_SMI				BIT(29)
+#define  MBOX_DEST_XHCI				BIT(30)
+#define  MBOX_INT_EN				BIT(31)
+#define XUSB_CFG_ARU_MBOX_DATA_IN		0x0e8
+#define  CMD_DATA_SHIFT				0
+#define  CMD_DATA_MASK				0xffffff
+#define  CMD_TYPE_SHIFT				24
+#define  CMD_TYPE_MASK				0xff
+#define XUSB_CFG_ARU_MBOX_DATA_OUT		0x0ec
+#define XUSB_CFG_ARU_MBOX_OWNER			0x0f0
+#define  MBOX_OWNER_NONE			0
+#define  MBOX_OWNER_FW				1
+#define  MBOX_OWNER_SW				2
+#define XUSB_CFG_ARU_SMI_INTR			0x428
+#define  MBOX_SMI_INTR_FW_HANG			BIT(1)
+#define  MBOX_SMI_INTR_EN			BIT(3)
+
+/* IPFS registers */
+#define IPFS_XUSB_HOST_CONFIGURATION_0		0x180
+#define  IPFS_EN_FPCI				BIT(0)
+#define IPFS_XUSB_HOST_INTR_MASK_0		0x188
+#define  IPFS_IP_INT_MASK			BIT(16)
+#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0	0x1bc
+
+#define CSB_PAGE_SELECT_MASK			0x7fffff
+#define CSB_PAGE_SELECT_SHIFT			9
+#define CSB_PAGE_OFFSET_MASK			0x1ff
+#define CSB_PAGE_SELECT(addr)	((addr) >> (CSB_PAGE_SELECT_SHIFT) &	\
+				 CSB_PAGE_SELECT_MASK)
+#define CSB_PAGE_OFFSET(addr)	((addr) & CSB_PAGE_OFFSET_MASK)
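+
+/*
+ * Worked example (illustrative): CSB accesses are paged through FPCI.
+ * For XUSB_CSB_MP_ILOAD_ATTR (0x101a00), CSB_PAGE_SELECT() yields
+ * 0x101a00 >> 9 = 0x80d and CSB_PAGE_OFFSET() yields
+ * 0x101a00 & 0x1ff = 0x000, so the page is written to
+ * XUSB_CFG_ARU_C11_CSBRANGE and the register is then accessed at
+ * XUSB_CFG_CSB_BASE_ADDR + 0x000 (see csb_readl()/csb_writel() below).
+ */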
+
+/* Falcon CSB registers */
+#define XUSB_FALC_CPUCTL			0x100
+#define  CPUCTL_STARTCPU			BIT(1)
+#define  CPUCTL_STATE_HALTED			BIT(4)
+#define  CPUCTL_STATE_STOPPED			BIT(5)
+#define XUSB_FALC_BOOTVEC			0x104
+#define XUSB_FALC_DMACTL			0x10c
+#define XUSB_FALC_IMFILLRNG1			0x154
+#define  IMFILLRNG1_TAG_MASK			0xffff
+#define  IMFILLRNG1_TAG_LO_SHIFT		0
+#define  IMFILLRNG1_TAG_HI_SHIFT		16
+#define XUSB_FALC_IMFILLCTL			0x158
+
+/* MP CSB registers */
+#define XUSB_CSB_MP_ILOAD_ATTR			0x101a00
+#define XUSB_CSB_MP_ILOAD_BASE_LO		0x101a04
+#define XUSB_CSB_MP_ILOAD_BASE_HI		0x101a08
+#define XUSB_CSB_MP_L2IMEMOP_SIZE		0x101a10
+#define  L2IMEMOP_SIZE_SRC_OFFSET_SHIFT		8
+#define  L2IMEMOP_SIZE_SRC_OFFSET_MASK		0x3ff
+#define  L2IMEMOP_SIZE_SRC_COUNT_SHIFT		24
+#define  L2IMEMOP_SIZE_SRC_COUNT_MASK		0xff
+#define XUSB_CSB_MP_L2IMEMOP_TRIG		0x101a14
+#define  L2IMEMOP_ACTION_SHIFT			24
+#define  L2IMEMOP_INVALIDATE_ALL		(0x40 << L2IMEMOP_ACTION_SHIFT)
+#define  L2IMEMOP_LOAD_LOCKED_RESULT		(0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MP_APMAP			0x10181c
+#define  APMAP_BOOTPATH				BIT(31)
+
+#define IMEM_BLOCK_SIZE				256
+
+struct tegra_xusb_fw_header {
+	u32 boot_loadaddr_in_imem;
+	u32 boot_codedfi_offset;
+	u32 boot_codetag;
+	u32 boot_codesize;
+	u32 phys_memaddr;
+	u16 reqphys_memsize;
+	u16 alloc_phys_memsize;
+	u32 rodata_img_offset;
+	u32 rodata_section_start;
+	u32 rodata_section_end;
+	u32 main_fnaddr;
+	u32 fwimg_cksum;
+	u32 fwimg_created_time;
+	u32 imem_resident_start;
+	u32 imem_resident_end;
+	u32 idirect_start;
+	u32 idirect_end;
+	u32 l2_imem_start;
+	u32 l2_imem_end;
+	u32 version_id;
+	u8 init_ddirect;
+	u8 reserved[3];
+	u32 phys_addr_log_buffer;
+	u32 total_log_entries;
+	u32 dequeue_ptr;
+	u32 dummy_var[2];
+	u32 fwimg_len;
+	u8 magic[8];
+	u32 ss_low_power_entry_timeout;
+	u8 num_hsic_port;
+	u8 padding[139]; /* Pad to 256 bytes */
+};
+
+struct tegra_xusb_phy_type {
+	const char *name;
+	unsigned int num;
+};
+
+struct tegra_xusb_soc {
+	const char *firmware;
+	const char * const *supply_names;
+	unsigned int num_supplies;
+	const struct tegra_xusb_phy_type *phy_types;
+	unsigned int num_types;
+
+	struct {
+		struct {
+			unsigned int offset;
+			unsigned int count;
+		} usb2, ulpi, hsic, usb3;
+	} ports;
+
+	bool scale_ss_clock;
+};
+
+struct tegra_xusb {
+	struct device *dev;
+	void __iomem *regs;
+	struct usb_hcd *hcd;
+
+	struct mutex lock;
+
+	int xhci_irq;
+	int mbox_irq;
+
+	void __iomem *ipfs_base;
+	void __iomem *fpci_base;
+
+	const struct tegra_xusb_soc *soc;
+
+	struct regulator_bulk_data *supplies;
+
+	struct tegra_xusb_padctl *padctl;
+
+	struct clk *host_clk;
+	struct clk *falcon_clk;
+	struct clk *ss_clk;
+	struct clk *ss_src_clk;
+	struct clk *hs_src_clk;
+	struct clk *fs_src_clk;
+	struct clk *pll_u_480m;
+	struct clk *clk_m;
+	struct clk *pll_e;
+
+	struct reset_control *host_rst;
+	struct reset_control *ss_rst;
+
+	struct phy **phys;
+	unsigned int num_phys;
+
+	/* Firmware loading related */
+	struct {
+		size_t size;
+		void *virt;
+		dma_addr_t phys;
+	} fw;
+};
+
+static struct hc_driver __read_mostly tegra_xhci_hc_driver;
+
+static inline u32 fpci_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	return readl(tegra->fpci_base + offset);
+}
+
+static inline void fpci_writel(struct tegra_xusb *tegra, u32 value,
+			       unsigned int offset)
+{
+	writel(value, tegra->fpci_base + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	return readl(tegra->ipfs_base + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
+			       unsigned int offset)
+{
+	writel(value, tegra->ipfs_base + offset);
+}
+
+static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	u32 page = CSB_PAGE_SELECT(offset);
+	u32 ofs = CSB_PAGE_OFFSET(offset);
+
+	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+
+	return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
+
+static void csb_writel(struct tegra_xusb *tegra, u32 value,
+		       unsigned int offset)
+{
+	u32 page = CSB_PAGE_SELECT(offset);
+	u32 ofs = CSB_PAGE_OFFSET(offset);
+
+	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+	fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
+
+static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
+				 unsigned long rate)
+{
+	unsigned long new_parent_rate, old_parent_rate;
+	struct clk *clk = tegra->ss_src_clk;
+	unsigned int div;
+	int err;
+
+	if (clk_get_rate(clk) == rate)
+		return 0;
+
+	switch (rate) {
+	case TEGRA_XHCI_SS_HIGH_SPEED:
+		/*
+		 * Reparent to PLLU_480M. Set divider first to avoid
+		 * overclocking.
+		 */
+		old_parent_rate = clk_get_rate(clk_get_parent(clk));
+		new_parent_rate = clk_get_rate(tegra->pll_u_480m);
+		div = new_parent_rate / rate;
+
+		err = clk_set_rate(clk, old_parent_rate / div);
+		if (err)
+			return err;
+
+		err = clk_set_parent(clk, tegra->pll_u_480m);
+		if (err)
+			return err;
+
+		/*
+		 * The rate should already be correct, but set it again just
+		 * to be sure.
+		 */
+		err = clk_set_rate(clk, rate);
+		if (err)
+			return err;
+
+		break;
+
+	case TEGRA_XHCI_SS_LOW_SPEED:
+		/* Reparent to CLK_M */
+		err = clk_set_parent(clk, tegra->clk_m);
+		if (err)
+			return err;
+
+		err = clk_set_rate(clk, rate);
+		if (err)
+			return err;
+
+		break;
+
+	default:
+		dev_err(tegra->dev, "Invalid SS rate: %lu Hz\n", rate);
+		return -EINVAL;
+	}
+
+	if (clk_get_rate(clk) != rate) {
+		dev_err(tegra->dev, "SS clock doesn't match requested rate\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long extract_field(u32 value, unsigned int start,
+				   unsigned int count)
+{
+	return (value >> start) & ((1 << count) - 1);
+}
+
+/* Command requests from the firmware */
+enum tegra_xusb_mbox_cmd {
+	MBOX_CMD_MSG_ENABLED = 1,
+	MBOX_CMD_INC_FALC_CLOCK,
+	MBOX_CMD_DEC_FALC_CLOCK,
+	MBOX_CMD_INC_SSPI_CLOCK,
+	MBOX_CMD_DEC_SSPI_CLOCK,
+	MBOX_CMD_SET_BW, /* no ACK/NAK required */
+	MBOX_CMD_SET_SS_PWR_GATING,
+	MBOX_CMD_SET_SS_PWR_UNGATING,
+	MBOX_CMD_SAVE_DFE_CTLE_CTX,
+	MBOX_CMD_AIRPLANE_MODE_ENABLED, /* unused */
+	MBOX_CMD_AIRPLANE_MODE_DISABLED, /* unused */
+	MBOX_CMD_START_HSIC_IDLE,
+	MBOX_CMD_STOP_HSIC_IDLE,
+	MBOX_CMD_DBC_WAKE_STACK, /* unused */
+	MBOX_CMD_HSIC_PRETEND_CONNECT,
+	MBOX_CMD_RESET_SSPI,
+	MBOX_CMD_DISABLE_SS_LFPS_DETECTION,
+	MBOX_CMD_ENABLE_SS_LFPS_DETECTION,
+
+	MBOX_CMD_MAX,
+
+	/* Response message to above commands */
+	MBOX_CMD_ACK = 128,
+	MBOX_CMD_NAK
+};
+
+static const char * const mbox_cmd_name[] = {
+	[  1] = "MSG_ENABLE",
+	[  2] = "INC_FALCON_CLOCK",
+	[  3] = "DEC_FALCON_CLOCK",
+	[  4] = "INC_SSPI_CLOCK",
+	[  5] = "DEC_SSPI_CLOCK",
+	[  6] = "SET_BW",
+	[  7] = "SET_SS_PWR_GATING",
+	[  8] = "SET_SS_PWR_UNGATING",
+	[  9] = "SAVE_DFE_CTLE_CTX",
+	[ 10] = "AIRPLANE_MODE_ENABLED",
+	[ 11] = "AIRPLANE_MODE_DISABLED",
+	[ 12] = "START_HSIC_IDLE",
+	[ 13] = "STOP_HSIC_IDLE",
+	[ 14] = "DBC_WAKE_STACK",
+	[ 15] = "HSIC_PRETEND_CONNECT",
+	[ 16] = "RESET_SSPI",
+	[ 17] = "DISABLE_SS_LFPS_DETECTION",
+	[ 18] = "ENABLE_SS_LFPS_DETECTION",
+	[128] = "ACK",
+	[129] = "NAK",
+};
+
+struct tegra_xusb_mbox_msg {
+	u32 cmd;
+	u32 data;
+};
+
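+/*
+ * Mailbox messages travel as a single 32-bit word: the command sits in the
+ * CMD_TYPE field and the payload in the CMD_DATA field (shift/mask macros
+ * defined elsewhere in this driver). The helpers below convert between that
+ * wire format and struct tegra_xusb_mbox_msg.
+ */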
+static inline u32 tegra_xusb_mbox_pack(const struct tegra_xusb_mbox_msg *msg)
+{
+	return (msg->cmd & CMD_TYPE_MASK) << CMD_TYPE_SHIFT |
+	       (msg->data & CMD_DATA_MASK) << CMD_DATA_SHIFT;
+}
+static inline void tegra_xusb_mbox_unpack(struct tegra_xusb_mbox_msg *msg,
+					  u32 value)
+{
+	msg->cmd = (value >> CMD_TYPE_SHIFT) & CMD_TYPE_MASK;
+	msg->data = (value >> CMD_DATA_SHIFT) & CMD_DATA_MASK;
+}
+
+static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
+{
+	switch (cmd) {
+	case MBOX_CMD_SET_BW:
+	case MBOX_CMD_ACK:
+	case MBOX_CMD_NAK:
+		return false;
+
+	default:
+		return true;
+	}
+}
+
+static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
+				const struct tegra_xusb_mbox_msg *msg)
+{
+	bool wait_for_idle = false;
+	u32 value;
+
+	/*
+	 * Acquire the mailbox. No ownership handshake is needed for ACK/NAK
+	 * replies, because the firmware still owns the mailbox for those.
+	 */
+	if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
+		value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+		if (value != MBOX_OWNER_NONE) {
+			dev_err(tegra->dev, "mailbox is busy\n");
+			return -EBUSY;
+		}
+
+		fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+
+		value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+		if (value != MBOX_OWNER_SW) {
+			dev_err(tegra->dev, "failed to acquire mailbox\n");
+			return -EBUSY;
+		}
+
+		wait_for_idle = true;
+	}
+
+	value = tegra_xusb_mbox_pack(msg);
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+	value |= MBOX_INT_EN | MBOX_DEST_FALC;
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+	if (wait_for_idle) {
+		unsigned long timeout = jiffies + msecs_to_jiffies(250);
+
+		while (time_before(jiffies, timeout)) {
+			value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+			if (value == MBOX_OWNER_NONE)
+				break;
+
+			usleep_range(10, 20);
+		}
+
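+		/*
+		 * If the loop was exited by the timeout, sample the owner
+		 * register once more in case the scheduler delayed us past
+		 * the deadline after the firmware had already released the
+		 * mailbox.
+		 */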
+		if (time_after(jiffies, timeout))
+			value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+
+		if (value != MBOX_OWNER_NONE)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
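+/*
+ * Hard-IRQ half of the mailbox handler: acknowledge the SMI interrupt and
+ * defer message decoding to the threaded handler (tegra_xusb_mbox_thread).
+ */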
+static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
+{
+	struct tegra_xusb *tegra = data;
+	u32 value;
+
+	/* clear mailbox interrupts */
+	value = fpci_readl(tegra, XUSB_CFG_ARU_SMI_INTR);
+	fpci_writel(tegra, value, XUSB_CFG_ARU_SMI_INTR);
+
+	if (value & MBOX_SMI_INTR_FW_HANG)
+		dev_err(tegra->dev, "controller firmware hang\n");
+
+	return IRQ_WAKE_THREAD;
+}
+
+static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
+				   const struct tegra_xusb_mbox_msg *msg)
+{
+	struct tegra_xusb_padctl *padctl = tegra->padctl;
+	const struct tegra_xusb_soc *soc = tegra->soc;
+	struct device *dev = tegra->dev;
+	struct tegra_xusb_mbox_msg rsp;
+	unsigned long mask;
+	unsigned int port;
+	bool idle, enable;
+	int err = 0;
+
+	memset(&rsp, 0, sizeof(rsp));
+
+	switch (msg->cmd) {
+	case MBOX_CMD_INC_FALC_CLOCK:
+	case MBOX_CMD_DEC_FALC_CLOCK:
+		rsp.data = clk_get_rate(tegra->falcon_clk) / 1000;
+		if (rsp.data != msg->data)
+			rsp.cmd = MBOX_CMD_NAK;
+		else
+			rsp.cmd = MBOX_CMD_ACK;
+
+		break;
+
+	case MBOX_CMD_INC_SSPI_CLOCK:
+	case MBOX_CMD_DEC_SSPI_CLOCK:
+		if (tegra->soc->scale_ss_clock) {
+			err = tegra_xusb_set_ss_clk(tegra, msg->data * 1000);
+			if (err < 0)
+				rsp.cmd = MBOX_CMD_NAK;
+			else
+				rsp.cmd = MBOX_CMD_ACK;
+
+			rsp.data = clk_get_rate(tegra->ss_src_clk) / 1000;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+			rsp.data = msg->data;
+		}
+
+		break;
+
+	case MBOX_CMD_SET_BW:
+		/*
+		 * TODO: Request bandwidth once EMC scaling is supported.
+		 * Ignore for now since ACK/NAK is not required for SET_BW
+		 * messages.
+		 */
+		break;
+
+	case MBOX_CMD_SAVE_DFE_CTLE_CTX:
+		err = tegra_xusb_padctl_usb3_save_context(padctl, msg->data);
+		if (err < 0) {
+			dev_err(dev, "failed to save context for USB3#%u: %d\n",
+				msg->data, err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	case MBOX_CMD_START_HSIC_IDLE:
+	case MBOX_CMD_STOP_HSIC_IDLE:
+		if (msg->cmd == MBOX_CMD_STOP_HSIC_IDLE)
+			idle = false;
+		else
+			idle = true;
+
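+		/*
+		 * The message payload carries a port bitmap; the field
+		 * appears to start one bit above the SoC's HSIC port offset
+		 * (hence the "1 +"), mirroring the USB3 case below.
+		 */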
+		mask = extract_field(msg->data, 1 + soc->ports.hsic.offset,
+				     soc->ports.hsic.count);
+
+		for_each_set_bit(port, &mask, 32) {
+			err = tegra_xusb_padctl_hsic_set_idle(padctl, port,
+							      idle);
+			if (err < 0)
+				break;
+		}
+
+		if (err < 0) {
+			dev_err(dev, "failed to set HSIC#%u %s: %d\n", port,
+				idle ? "idle" : "busy", err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	case MBOX_CMD_DISABLE_SS_LFPS_DETECTION:
+	case MBOX_CMD_ENABLE_SS_LFPS_DETECTION:
+		if (msg->cmd == MBOX_CMD_DISABLE_SS_LFPS_DETECTION)
+			enable = false;
+		else
+			enable = true;
+
+		mask = extract_field(msg->data, 1 + soc->ports.usb3.offset,
+				     soc->ports.usb3.count);
+
+		for_each_set_bit(port, &mask, soc->ports.usb3.count) {
+			err = tegra_xusb_padctl_usb3_set_lfps_detect(padctl,
+								     port,
+								     enable);
+			if (err < 0)
+				break;
+		}
+
+		if (err < 0) {
+			dev_err(dev,
+				"failed to %s LFPS detection on USB3#%u: %d\n",
+				enable ? "enable" : "disable", port, err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	default:
+		dev_warn(dev, "unknown message: %#x\n", msg->cmd);
+		break;
+	}
+
+	if (rsp.cmd) {
+		const char *cmd = (rsp.cmd == MBOX_CMD_ACK) ? "ACK" : "NAK";
+
+		err = tegra_xusb_mbox_send(tegra, &rsp);
+		if (err < 0)
+			dev_err(dev, "failed to send %s: %d\n", cmd, err);
+	}
+}
+
+static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
+{
+	struct tegra_xusb *tegra = data;
+	struct tegra_xusb_mbox_msg msg;
+	u32 value;
+
+	mutex_lock(&tegra->lock);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+	tegra_xusb_mbox_unpack(&msg, value);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+	value &= ~MBOX_DEST_SMI;
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+	/* clear mailbox owner if no ACK/NAK is required */
+	if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
+		fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+
+	tegra_xusb_mbox_handle(tegra, &msg);
+
+	mutex_unlock(&tegra->lock);
+	return IRQ_HANDLED;
+}
+
+static void tegra_xusb_ipfs_config(struct tegra_xusb *tegra,
+				   struct resource *regs)
+{
+	u32 value;
+
+	value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
+	value |= IPFS_EN_FPCI;
+	ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
+
+	usleep_range(10, 20);
+
+	/* Program BAR0 space */
+	value = fpci_readl(tegra, XUSB_CFG_4);
+	value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+	value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+	fpci_writel(tegra, value, XUSB_CFG_4);
+
+	usleep_range(100, 200);
+
+	/* Enable bus master */
+	value = fpci_readl(tegra, XUSB_CFG_1);
+	value |= XUSB_IO_SPACE_EN | XUSB_MEM_SPACE_EN | XUSB_BUS_MASTER_EN;
+	fpci_writel(tegra, value, XUSB_CFG_1);
+
+	/* Enable interrupt assertion */
+	value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
+	value |= IPFS_IP_INT_MASK;
+	ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
+
+	/* Set hysteresis */
+	ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
+}
+
+static int tegra_xusb_clk_enable(struct tegra_xusb *tegra)
+{
+	int err;
+
+	err = clk_prepare_enable(tegra->pll_e);
+	if (err < 0)
+		return err;
+
+	err = clk_prepare_enable(tegra->host_clk);
+	if (err < 0)
+		goto disable_plle;
+
+	err = clk_prepare_enable(tegra->ss_clk);
+	if (err < 0)
+		goto disable_host;
+
+	err = clk_prepare_enable(tegra->falcon_clk);
+	if (err < 0)
+		goto disable_ss;
+
+	err = clk_prepare_enable(tegra->fs_src_clk);
+	if (err < 0)
+		goto disable_falc;
+
+	err = clk_prepare_enable(tegra->hs_src_clk);
+	if (err < 0)
+		goto disable_fs_src;
+
+	if (tegra->soc->scale_ss_clock) {
+		err = tegra_xusb_set_ss_clk(tegra, TEGRA_XHCI_SS_HIGH_SPEED);
+		if (err < 0)
+			goto disable_hs_src;
+	}
+
+	return 0;
+
+disable_hs_src:
+	clk_disable_unprepare(tegra->hs_src_clk);
+disable_fs_src:
+	clk_disable_unprepare(tegra->fs_src_clk);
+disable_falc:
+	clk_disable_unprepare(tegra->falcon_clk);
+disable_ss:
+	clk_disable_unprepare(tegra->ss_clk);
+disable_host:
+	clk_disable_unprepare(tegra->host_clk);
+disable_plle:
+	clk_disable_unprepare(tegra->pll_e);
+	return err;
+}
+
+static void tegra_xusb_clk_disable(struct tegra_xusb *tegra)
+{
+	clk_disable_unprepare(tegra->pll_e);
+	clk_disable_unprepare(tegra->host_clk);
+	clk_disable_unprepare(tegra->ss_clk);
+	clk_disable_unprepare(tegra->falcon_clk);
+	clk_disable_unprepare(tegra->fs_src_clk);
+	clk_disable_unprepare(tegra->hs_src_clk);
+}
+
+static int tegra_xusb_phy_enable(struct tegra_xusb *tegra)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < tegra->num_phys; i++) {
+		err = phy_init(tegra->phys[i]);
+		if (err)
+			goto disable_phy;
+
+		err = phy_power_on(tegra->phys[i]);
+		if (err) {
+			phy_exit(tegra->phys[i]);
+			goto disable_phy;
+		}
+	}
+
+	return 0;
+
+disable_phy:
+	while (i--) {
+		phy_power_off(tegra->phys[i]);
+		phy_exit(tegra->phys[i]);
+	}
+
+	return err;
+}
+
+static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
+{
+	unsigned int i;
+
+	for (i = 0; i < tegra->num_phys; i++) {
+		phy_power_off(tegra->phys[i]);
+		phy_exit(tegra->phys[i]);
+	}
+}
+
+static int tegra_xusb_runtime_suspend(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+
+	tegra_xusb_phy_disable(tegra);
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+	tegra_xusb_clk_disable(tegra);
+
+	return 0;
+}
+
+static int tegra_xusb_runtime_resume(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+	int err;
+
+	err = tegra_xusb_clk_enable(tegra);
+	if (err) {
+		dev_err(dev, "failed to enable clocks: %d\n", err);
+		return err;
+	}
+
+	err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
+	if (err) {
+		dev_err(dev, "failed to enable regulators: %d\n", err);
+		goto disable_clk;
+	}
+
+	err = tegra_xusb_phy_enable(tegra);
+	if (err < 0) {
+		dev_err(dev, "failed to enable PHYs: %d\n", err);
+		goto disable_regulator;
+	}
+
+	return 0;
+
+disable_regulator:
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+disable_clk:
+	tegra_xusb_clk_disable(tegra);
+	return err;
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+	unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+	struct tegra_xusb_fw_header *header;
+	struct device *dev = tegra->dev;
+	const struct firmware *fw;
+	unsigned long timeout;
+	time64_t timestamp;
+	struct tm time;
+	u64 address;
+	u32 value;
+	int err;
+
+	err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to request firmware: %d\n", err);
+		return err;
+	}
+
+	/* Load Falcon controller with its firmware. */
+	header = (struct tegra_xusb_fw_header *)fw->data;
+	tegra->fw.size = le32_to_cpu(header->fwimg_len);
+
+	tegra->fw.virt = dma_alloc_coherent(tegra->dev, tegra->fw.size,
+					    &tegra->fw.phys, GFP_KERNEL);
+	if (!tegra->fw.virt) {
+		dev_err(tegra->dev, "failed to allocate memory for firmware\n");
+		release_firmware(fw);
+		return -ENOMEM;
+	}
+
+	header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+	memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
+	release_firmware(fw);
+
+	if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
+		dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
+			 csb_readl(tegra, XUSB_FALC_CPUCTL));
+		return 0;
+	}
+
+	/* Program the size of DFI into ILOAD_ATTR. */
+	csb_writel(tegra, tegra->fw.size, XUSB_CSB_MP_ILOAD_ATTR);
+
+	/*
+	 * Boot code of the firmware reads the ILOAD_BASE registers
+	 * to get to the start of the DFI in system memory.
+	 */
+	address = tegra->fw.phys + sizeof(*header);
+	csb_writel(tegra, address >> 32, XUSB_CSB_MP_ILOAD_BASE_HI);
+	csb_writel(tegra, address, XUSB_CSB_MP_ILOAD_BASE_LO);
+
+	/* Set BOOTPATH to 1 in APMAP. */
+	csb_writel(tegra, APMAP_BOOTPATH, XUSB_CSB_MP_APMAP);
+
+	/* Invalidate L2IMEM. */
+	csb_writel(tegra, L2IMEMOP_INVALIDATE_ALL, XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+	/*
+	 * Initiate fetch of bootcode from system memory into L2IMEM.
+	 * Program bootcode location and size in system memory.
+	 */
+	code_tag_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codetag),
+				       IMEM_BLOCK_SIZE);
+	code_size_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codesize),
+					IMEM_BLOCK_SIZE);
+	code_blocks = code_tag_blocks + code_size_blocks;
+
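+	/*
+	 * Both values are in 256-byte IMEM blocks. As an illustration (not
+	 * figures from a real image): boot_codetag = 0x1100 and
+	 * boot_codesize = 0x8000 would give code_tag_blocks = 17 and
+	 * code_size_blocks = 128, i.e. a fetch of blocks 17..144.
+	 */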
+	value = ((code_tag_blocks & L2IMEMOP_SIZE_SRC_OFFSET_MASK) <<
+			L2IMEMOP_SIZE_SRC_OFFSET_SHIFT) |
+		((code_size_blocks & L2IMEMOP_SIZE_SRC_COUNT_MASK) <<
+			L2IMEMOP_SIZE_SRC_COUNT_SHIFT);
+	csb_writel(tegra, value, XUSB_CSB_MP_L2IMEMOP_SIZE);
+
+	/* Trigger L2IMEM load operation. */
+	csb_writel(tegra, L2IMEMOP_LOAD_LOCKED_RESULT,
+		   XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+	/* Setup Falcon auto-fill. */
+	csb_writel(tegra, code_size_blocks, XUSB_FALC_IMFILLCTL);
+
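+	/*
+	 * The auto-fill tag range runs from the first boot-code block
+	 * (code_tag_blocks, low half-word) to one block past the end
+	 * (code_tag_blocks + code_size_blocks, high half-word).
+	 */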
+	value = ((code_tag_blocks & IMFILLRNG1_TAG_MASK) <<
+			IMFILLRNG1_TAG_LO_SHIFT) |
+		((code_blocks & IMFILLRNG1_TAG_MASK) <<
+			IMFILLRNG1_TAG_HI_SHIFT);
+	csb_writel(tegra, value, XUSB_FALC_IMFILLRNG1);
+
+	csb_writel(tegra, 0, XUSB_FALC_DMACTL);
+
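+	/* Allow the L2IMEM load to complete; the 50 ms is presumably empirical. */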
+	msleep(50);
+
+	csb_writel(tegra, le32_to_cpu(header->boot_codetag),
+		   XUSB_FALC_BOOTVEC);
+
+	/* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
+	timeout = jiffies + msecs_to_jiffies(5);
+
+	csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
+
+	while (time_before(jiffies, timeout)) {
+		if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+			break;
+
+		usleep_range(100, 200);
+	}
+
+	if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
+		dev_err(dev, "Falcon failed to start, state: %#x\n",
+			csb_readl(tegra, XUSB_FALC_CPUCTL));
+		return -EIO;
+	}
+
+	timestamp = le32_to_cpu(header->fwimg_created_time);
+	time64_to_tm(timestamp, 0, &time);
+
+	dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
+		 time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
+		 time.tm_hour, time.tm_min, time.tm_sec);
+
+	return 0;
+}
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
+	struct tegra_xusb_mbox_msg msg;
+	struct resource *res, *regs;
+	struct tegra_xusb *tegra;
+	struct xhci_hcd *xhci;
+	unsigned int i, j, k;
+	struct phy *phy;
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct tegra_xusb_fw_header) != 256);
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	tegra->soc = of_device_get_match_data(&pdev->dev);
+	mutex_init(&tegra->lock);
+	tegra->dev = &pdev->dev;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(tegra->regs))
+		return PTR_ERR(tegra->regs);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->fpci_base))
+		return PTR_ERR(tegra->fpci_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->ipfs_base))
+		return PTR_ERR(tegra->ipfs_base);
+
+	tegra->xhci_irq = platform_get_irq(pdev, 0);
+	if (tegra->xhci_irq < 0)
+		return tegra->xhci_irq;
+
+	tegra->mbox_irq = platform_get_irq(pdev, 1);
+	if (tegra->mbox_irq < 0)
+		return tegra->mbox_irq;
+
+	tegra->padctl = tegra_xusb_padctl_get(&pdev->dev);
+	if (IS_ERR(tegra->padctl))
+		return PTR_ERR(tegra->padctl);
+
+	tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
+	if (IS_ERR(tegra->host_clk)) {
+		err = PTR_ERR(tegra->host_clk);
+		dev_err(&pdev->dev, "failed to get xusb_host: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->falcon_clk = devm_clk_get(&pdev->dev, "xusb_falcon_src");
+	if (IS_ERR(tegra->falcon_clk)) {
+		err = PTR_ERR(tegra->falcon_clk);
+		dev_err(&pdev->dev, "failed to get xusb_falcon_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->ss_clk = devm_clk_get(&pdev->dev, "xusb_ss");
+	if (IS_ERR(tegra->ss_clk)) {
+		err = PTR_ERR(tegra->ss_clk);
+		dev_err(&pdev->dev, "failed to get xusb_ss: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->ss_src_clk = devm_clk_get(&pdev->dev, "xusb_ss_src");
+	if (IS_ERR(tegra->ss_src_clk)) {
+		err = PTR_ERR(tegra->ss_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_ss_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->hs_src_clk = devm_clk_get(&pdev->dev, "xusb_hs_src");
+	if (IS_ERR(tegra->hs_src_clk)) {
+		err = PTR_ERR(tegra->hs_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_hs_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->fs_src_clk = devm_clk_get(&pdev->dev, "xusb_fs_src");
+	if (IS_ERR(tegra->fs_src_clk)) {
+		err = PTR_ERR(tegra->fs_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_fs_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->pll_u_480m = devm_clk_get(&pdev->dev, "pll_u_480m");
+	if (IS_ERR(tegra->pll_u_480m)) {
+		err = PTR_ERR(tegra->pll_u_480m);
+		dev_err(&pdev->dev, "failed to get pll_u_480m: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->clk_m = devm_clk_get(&pdev->dev, "clk_m");
+	if (IS_ERR(tegra->clk_m)) {
+		err = PTR_ERR(tegra->clk_m);
+		dev_err(&pdev->dev, "failed to get clk_m: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->pll_e = devm_clk_get(&pdev->dev, "pll_e");
+	if (IS_ERR(tegra->pll_e)) {
+		err = PTR_ERR(tegra->pll_e);
+		dev_err(&pdev->dev, "failed to get pll_e: %d\n", err);
+		goto put_padctl;
+	}
+
+	if (!pdev->dev.pm_domain) {
+		tegra->host_rst = devm_reset_control_get(&pdev->dev,
+							 "xusb_host");
+		if (IS_ERR(tegra->host_rst)) {
+			err = PTR_ERR(tegra->host_rst);
+			dev_err(&pdev->dev,
+				"failed to get xusb_host reset: %d\n", err);
+			goto put_padctl;
+		}
+
+		tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
+		if (IS_ERR(tegra->ss_rst)) {
+			err = PTR_ERR(tegra->ss_rst);
+			dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n",
+				err);
+			goto put_padctl;
+		}
+
+		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBA,
+							tegra->ss_clk,
+							tegra->ss_rst);
+		if (err) {
+			dev_err(&pdev->dev,
+				"failed to enable XUSBA domain: %d\n", err);
+			goto put_padctl;
+		}
+
+		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_XUSBC,
+							tegra->host_clk,
+							tegra->host_rst);
+		if (err) {
+			dev_err(&pdev->dev,
+				"failed to enable XUSBC domain: %d\n", err);
+			goto disable_xusba;
+		}
+	}
+
+	tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
+				       sizeof(*tegra->supplies), GFP_KERNEL);
+	if (!tegra->supplies) {
+		err = -ENOMEM;
+		goto disable_xusbc;
+	}
+
+	for (i = 0; i < tegra->soc->num_supplies; i++)
+		tegra->supplies[i].supply = tegra->soc->supply_names[i];
+
+	err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
+				      tegra->supplies);
+	if (err) {
+		dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
+		goto disable_xusbc;
+	}
+
+	for (i = 0; i < tegra->soc->num_types; i++)
+		tegra->num_phys += tegra->soc->phy_types[i].num;
+
+	tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
+				   sizeof(*tegra->phys), GFP_KERNEL);
+	if (!tegra->phys) {
+		err = -ENOMEM;
+		goto disable_xusbc;
+	}
+
+	for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
+		char prop[8];
+
+		for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
+			snprintf(prop, sizeof(prop), "%s-%d",
+				 tegra->soc->phy_types[i].name, j);
+
+			phy = devm_phy_optional_get(&pdev->dev, prop);
+			if (IS_ERR(phy)) {
+				dev_err(&pdev->dev,
+					"failed to get PHY %s: %ld\n", prop,
+					PTR_ERR(phy));
+				err = PTR_ERR(phy);
+				goto disable_xusbc;
+			}
+
+			tegra->phys[k++] = phy;
+		}
+	}
+
+	tegra->hcd = usb_create_hcd(&tegra_xhci_hc_driver, &pdev->dev,
+				    dev_name(&pdev->dev));
+	if (!tegra->hcd) {
+		err = -ENOMEM;
+		goto disable_xusbc;
+	}
+
+	/*
+	 * This must happen after usb_create_hcd(), because usb_create_hcd()
+	 * will overwrite the drvdata of the device with the hcd it creates.
+	 */
+	platform_set_drvdata(pdev, tegra);
+
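+	/*
+	 * Power up the controller through runtime PM where available; if
+	 * runtime PM is disabled in the kernel configuration, resume the
+	 * hardware directly instead.
+	 */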
+	pm_runtime_enable(&pdev->dev);
+	if (pm_runtime_enabled(&pdev->dev))
+		err = pm_runtime_get_sync(&pdev->dev);
+	else
+		err = tegra_xusb_runtime_resume(&pdev->dev);
+
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable device: %d\n", err);
+		goto disable_rpm;
+	}
+
+	tegra_xusb_ipfs_config(tegra, regs);
+
+	err = tegra_xusb_load_firmware(tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
+		goto put_rpm;
+	}
+
+	tegra->hcd->regs = tegra->regs;
+	tegra->hcd->rsrc_start = regs->start;
+	tegra->hcd->rsrc_len = resource_size(regs);
+
+	err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
+		goto put_rpm;
+	}
+
+	device_wakeup_enable(tegra->hcd->self.controller);
+
+	xhci = hcd_to_xhci(tegra->hcd);
+
+	xhci->shared_hcd = usb_create_shared_hcd(&tegra_xhci_hc_driver,
+						 &pdev->dev,
+						 dev_name(&pdev->dev),
+						 tegra->hcd);
+	if (!xhci->shared_hcd) {
+		dev_err(&pdev->dev, "failed to create shared HCD\n");
+		err = -ENOMEM;
+		goto remove_usb2;
+	}
+
+	err = usb_add_hcd(xhci->shared_hcd, tegra->xhci_irq, IRQF_SHARED);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to add shared HCD: %d\n", err);
+		goto put_usb3;
+	}
+
+	mutex_lock(&tegra->lock);
+
+	/* Enable firmware messages from controller. */
+	msg.cmd = MBOX_CMD_MSG_ENABLED;
+	msg.data = 0;
+
+	err = tegra_xusb_mbox_send(tegra, &msg);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
+		mutex_unlock(&tegra->lock);
+		goto remove_usb3;
+	}
+
+	mutex_unlock(&tegra->lock);
+
+	err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
+					tegra_xusb_mbox_irq,
+					tegra_xusb_mbox_thread, 0,
+					dev_name(&pdev->dev), tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		goto remove_usb3;
+	}
+
+	return 0;
+
+remove_usb3:
+	usb_remove_hcd(xhci->shared_hcd);
+put_usb3:
+	usb_put_hcd(xhci->shared_hcd);
+remove_usb2:
+	usb_remove_hcd(tegra->hcd);
+put_rpm:
+	if (!pm_runtime_status_suspended(&pdev->dev))
+		tegra_xusb_runtime_suspend(&pdev->dev);
+disable_rpm:
+	pm_runtime_disable(&pdev->dev);
+	usb_put_hcd(tegra->hcd);
+disable_xusbc:
+	if (!pdev->dev.pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
+disable_xusba:
+	if (!pdev->dev.pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+put_padctl:
+	tegra_xusb_padctl_put(tegra->padctl);
+	return err;
+}
+
+static int tegra_xusb_remove(struct platform_device *pdev)
+{
+	struct tegra_xusb *tegra = platform_get_drvdata(pdev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+	usb_remove_hcd(xhci->shared_hcd);
+	usb_put_hcd(xhci->shared_hcd);
+	xhci->shared_hcd = NULL;
+	usb_remove_hcd(tegra->hcd);
+	usb_put_hcd(tegra->hcd);
+
+	dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+			  tegra->fw.phys);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	tegra_xusb_padctl_put(tegra->padctl);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_suspend(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	bool wakeup = device_may_wakeup(dev);
+
+	/* TODO: Powergate controller across suspend/resume. */
+	return xhci_suspend(xhci, wakeup);
+}
+
+static int tegra_xusb_resume(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+	return xhci_resume(xhci, 0);
+}
+#endif
+
+static const struct dev_pm_ops tegra_xusb_pm_ops = {
+	SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
+			   tegra_xusb_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(tegra_xusb_suspend, tegra_xusb_resume)
+};
+
+static const char * const tegra124_supply_names[] = {
+	"avddio-pex",
+	"dvddio-pex",
+	"avdd-usb",
+	"avdd-pll-utmip",
+	"avdd-pll-erefe",
+	"avdd-usb-ss-pll",
+	"hvdd-usb-ss",
+	"hvdd-usb-ss-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
+	{ .name = "usb3", .num = 2, },
+	{ .name = "usb2", .num = 3, },
+	{ .name = "hsic", .num = 2, },
+};
+
+static const struct tegra_xusb_soc tegra124_soc = {
+	.firmware = "nvidia/tegra124/xusb.bin",
+	.supply_names = tegra124_supply_names,
+	.num_supplies = ARRAY_SIZE(tegra124_supply_names),
+	.phy_types = tegra124_phy_types,
+	.num_types = ARRAY_SIZE(tegra124_phy_types),
+	.ports = {
+		.usb2 = { .offset = 4, .count = 4, },
+		.hsic = { .offset = 6, .count = 2, },
+		.usb3 = { .offset = 0, .count = 2, },
+	},
+	.scale_ss_clock = true,
+};
+MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
+
+static const char * const tegra210_supply_names[] = {
+	"dvddio-pex",
+	"hvddio-pex",
+	"avdd-usb",
+	"avdd-pll-utmip",
+	"avdd-pll-uerefe",
+	"dvdd-pex-pll",
+	"hvdd-pex-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra210_phy_types[] = {
+	{ .name = "usb3", .num = 4, },
+	{ .name = "usb2", .num = 4, },
+	{ .name = "hsic", .num = 1, },
+};
+
+static const struct tegra_xusb_soc tegra210_soc = {
+	.firmware = "nvidia/tegra210/xusb.bin",
+	.supply_names = tegra210_supply_names,
+	.num_supplies = ARRAY_SIZE(tegra210_supply_names),
+	.phy_types = tegra210_phy_types,
+	.num_types = ARRAY_SIZE(tegra210_phy_types),
+	.ports = {
+		.usb2 = { .offset = 4, .count = 4, },
+		.hsic = { .offset = 8, .count = 1, },
+		.usb3 = { .offset = 0, .count = 4, },
+	},
+	.scale_ss_clock = false,
+};
+MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
+
+static const struct of_device_id tegra_xusb_of_match[] = {
+	{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
+	{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
+
+static struct platform_driver tegra_xusb_driver = {
+	.probe = tegra_xusb_probe,
+	.remove = tegra_xusb_remove,
+	.driver = {
+		.name = "tegra-xusb",
+		.pm = &tegra_xusb_pm_ops,
+		.of_match_table = tegra_xusb_of_match,
+	},
+};
+
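+/*
+ * XHCI_PLAT marks this as a platform (non-PCI) xHC, so the core xHCI driver
+ * skips PCI-specific paths such as MSI/MSI-X setup.
+ */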
+static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	xhci->quirks |= XHCI_PLAT;
+}
+
+static int tegra_xhci_setup(struct usb_hcd *hcd)
+{
+	return xhci_gen_setup(hcd, tegra_xhci_quirks);
+}
+
+static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
+	.reset = tegra_xhci_setup,
+};
+
+static int __init tegra_xusb_init(void)
+{
+	xhci_init_driver(&tegra_xhci_hc_driver, &tegra_xhci_overrides);
+
+	return platform_driver_register(&tegra_xusb_driver);
+}
+module_init(tegra_xusb_init);
+
+static void __exit tegra_xusb_exit(void)
+{
+	platform_driver_unregister(&tegra_xusb_driver);
+}
+module_exit(tegra_xusb_exit);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB xHCI host-controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
new file mode 100644
index 0000000..d007081
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ */
+
+#define CREATE_TRACE_POINTS
+#include "xhci-trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(xhci_dbg_quirks);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
new file mode 100644
index 0000000..88b4274
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.h
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xhci-hcd
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR xhci_hcd
+
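+/*
+ * <trace/define_trace.h> re-includes this header with
+ * TRACE_HEADER_MULTI_READ defined in order to expand the event definitions
+ * a second time, so the guard below must permit that second pass.
+ */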
+#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __XHCI_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "xhci.h"
+#include "xhci-dbgcap.h"
+
+#define XHCI_MSG_MAX	500
+
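+/*
+ * DECLARE_EVENT_CLASS() defines the record layout and print format shared
+ * by a family of tracepoints; each DEFINE_EVENT() then instantiates one
+ * named tracepoint from its class.
+ */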
+DECLARE_EVENT_CLASS(xhci_log_msg,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf),
+	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+	TP_fast_assign(
+		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
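+/*
+ * The ctx_data dynamic array snapshots the raw context bytes: 8 dwords
+ * (32 bytes) per context entry, doubled on hosts with 64-byte contexts,
+ * covering the slot context plus ep_num endpoint contexts (plus the input
+ * control context when this is an input context).
+ */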
+DECLARE_EVENT_CLASS(xhci_log_ctx,
+	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+		 unsigned int ep_num),
+	TP_ARGS(xhci, ctx, ep_num),
+	TP_STRUCT__entry(
+		__field(int, ctx_64)
+		__field(unsigned, ctx_type)
+		__field(dma_addr_t, ctx_dma)
+		__field(u8 *, ctx_va)
+		__field(unsigned, ctx_ep_num)
+		__field(int, slot_id)
+		__dynamic_array(u32, ctx_data,
+			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
+			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
+	),
+	TP_fast_assign(
+		struct usb_device *udev;
+
+		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
+		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+		__entry->ctx_type = ctx->type;
+		__entry->ctx_dma = ctx->dma;
+		__entry->ctx_va = ctx->bytes;
+		__entry->slot_id = udev->slot_id;
+		__entry->ctx_ep_num = ep_num;
+		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
+			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
+			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
+	),
+	TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+			__entry->ctx_64, __entry->ctx_type,
+			(unsigned long long) __entry->ctx_dma, __entry->ctx_va
+	)
+);
+
+DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
+	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+		 unsigned int ep_num),
+	TP_ARGS(xhci, ctx, ep_num)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_trb,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb),
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(u32, field0)
+		__field(u32, field1)
+		__field(u32, field2)
+		__field(u32, field3)
+	),
+	TP_fast_assign(
+		__entry->type = ring->type;
+		__entry->field0 = le32_to_cpu(trb->field[0]);
+		__entry->field1 = le32_to_cpu(trb->field[1]);
+		__entry->field2 = le32_to_cpu(trb->field[2]);
+		__entry->field3 = le32_to_cpu(trb->field[3]);
+	),
+	TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
+			xhci_decode_trb(__entry->field0, __entry->field1,
+					__entry->field2, __entry->field3)
+	)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
+	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
+	TP_ARGS(ring, trb)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev),
+	TP_STRUCT__entry(
+		__field(void *, vdev)
+		__field(unsigned long long, out_ctx)
+		__field(unsigned long long, in_ctx)
+		__field(u8, fake_port)
+		__field(u8, real_port)
+		__field(u16, current_mel)
+	),
+	TP_fast_assign(
+		__entry->vdev = vdev;
+		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+		__entry->fake_port = (u8) vdev->fake_port;
+		__entry->real_port = (u8) vdev->real_port;
+		__entry->current_mel = (u16) vdev->current_mel;
+	),
+	TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
+		__entry->fake_port, __entry->real_port, __entry->current_mel
+	)
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_virt_dev,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev),
+	TP_STRUCT__entry(
+		__field(void *, vdev)
+		__field(unsigned long long, out_ctx)
+		__field(unsigned long long, in_ctx)
+		__field(int, devnum)
+		__field(int, state)
+		__field(int, speed)
+		__field(u8, portnum)
+		__field(u8, level)
+		__field(int, slot_id)
+	),
+	TP_fast_assign(
+		__entry->vdev = vdev;
+		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+		__entry->devnum = vdev->udev->devnum;
+		__entry->state = vdev->udev->state;
+		__entry->speed = vdev->udev->speed;
+		__entry->portnum = vdev->udev->portnum;
+		__entry->level = vdev->udev->level;
+		__entry->slot_id = vdev->udev->slot_id;
+	),
+	TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
+		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
+		__entry->devnum, __entry->state, __entry->speed,
+		__entry->portnum, __entry->level, __entry->slot_id
+	)
+);
+
+DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_urb,
+	TP_PROTO(struct urb *urb),
+	TP_ARGS(urb),
+	TP_STRUCT__entry(
+		__field(void *, urb)
+		__field(unsigned int, pipe)
+		__field(unsigned int, stream)
+		__field(int, status)
+		__field(unsigned int, flags)
+		__field(int, num_mapped_sgs)
+		__field(int, num_sgs)
+		__field(int, length)
+		__field(int, actual)
+		__field(int, epnum)
+		__field(int, dir_in)
+		__field(int, type)
+		__field(int, slot_id)
+	),
+	TP_fast_assign(
+		__entry->urb = urb;
+		__entry->pipe = urb->pipe;
+		__entry->stream = urb->stream_id;
+		__entry->status = urb->status;
+		__entry->flags = urb->transfer_flags;
+		__entry->num_mapped_sgs = urb->num_mapped_sgs;
+		__entry->num_sgs = urb->num_sgs;
+		__entry->length = urb->transfer_buffer_length;
+		__entry->actual = urb->actual_length;
+		__entry->epnum = usb_endpoint_num(&urb->ep->desc);
+		__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
+		__entry->type = usb_endpoint_type(&urb->ep->desc);
+		__entry->slot_id = urb->dev->slot_id;
+	),
+	TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
+			__entry->epnum, __entry->dir_in ? "in" : "out",
+			({ char *s;
+			switch (__entry->type) {
+			case USB_ENDPOINT_XFER_INT:
+				s = "intr";
+				break;
+			case USB_ENDPOINT_XFER_CONTROL:
+				s = "control";
+				break;
+			case USB_ENDPOINT_XFER_BULK:
+				s = "bulk";
+				break;
+			case USB_ENDPOINT_XFER_ISOC:
+				s = "isoc";
+				break;
+			default:
+				s = "UNKNOWN";
+			} s; }), __entry->urb, __entry->pipe, __entry->slot_id,
+			__entry->actual, __entry->length, __entry->num_mapped_sgs,
+			__entry->num_sgs, __entry->stream, __entry->flags
+		)
+);
+
+DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
+	TP_PROTO(struct urb *urb),
+	TP_ARGS(urb)
+);
+
+DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
+	TP_PROTO(struct urb *urb),
+	TP_ARGS(urb)
+);
+
+DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
+	TP_PROTO(struct urb *urb),
+	TP_ARGS(urb)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
+	TP_PROTO(struct xhci_ep_ctx *ctx),
+	TP_ARGS(ctx),
+	TP_STRUCT__entry(
+		__field(u32, info)
+		__field(u32, info2)
+		__field(u64, deq)
+		__field(u32, tx_info)
+	),
+	TP_fast_assign(
+		__entry->info = le32_to_cpu(ctx->ep_info);
+		__entry->info2 = le32_to_cpu(ctx->ep_info2);
+		__entry->deq = le64_to_cpu(ctx->deq);
+		__entry->tx_info = le32_to_cpu(ctx->tx_info);
+	),
+	TP_printk("%s", xhci_decode_ep_context(__entry->info,
+		__entry->info2, __entry->deq, __entry->tx_info)
+	)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
+	TP_PROTO(struct xhci_ep_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
+	TP_PROTO(struct xhci_ep_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
+	TP_PROTO(struct xhci_ep_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
+	TP_PROTO(struct xhci_ep_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx),
+	TP_STRUCT__entry(
+		__field(u32, info)
+		__field(u32, info2)
+		__field(u32, tt_info)
+		__field(u32, state)
+	),
+	TP_fast_assign(
+		__entry->info = le32_to_cpu(ctx->dev_info);
+		__entry->info2 = le32_to_cpu(ctx->dev_info2);
+		__entry->tt_info = le32_to_cpu(ctx->tt_info);
+		__entry->state = le32_to_cpu(ctx->dev_state);
+	),
+	TP_printk("%s", xhci_decode_slot_context(__entry->info,
+			__entry->info2, __entry->tt_info,
+			__entry->state)
+	)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
+	TP_PROTO(struct xhci_slot_ctx *ctx),
+	TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ring,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring),
+	TP_STRUCT__entry(
+		__field(u32, type)
+		__field(void *, ring)
+		__field(dma_addr_t, enq)
+		__field(dma_addr_t, deq)
+		__field(dma_addr_t, enq_seg)
+		__field(dma_addr_t, deq_seg)
+		__field(unsigned int, num_segs)
+		__field(unsigned int, stream_id)
+		__field(unsigned int, cycle_state)
+		__field(unsigned int, num_trbs_free)
+		__field(unsigned int, bounce_buf_len)
+	),
+	TP_fast_assign(
+		__entry->ring = ring;
+		__entry->type = ring->type;
+		__entry->num_segs = ring->num_segs;
+		__entry->stream_id = ring->stream_id;
+		__entry->enq_seg = ring->enq_seg->dma;
+		__entry->deq_seg = ring->deq_seg->dma;
+		__entry->cycle_state = ring->cycle_state;
+		__entry->num_trbs_free = ring->num_trbs_free;
+		__entry->bounce_buf_len = ring->bounce_buf_len;
+		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+	),
+	TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
+			xhci_ring_type_string(__entry->type), __entry->ring,
+			&__entry->enq, &__entry->enq_seg,
+			&__entry->deq, &__entry->deq_seg,
+			__entry->num_segs,
+			__entry->stream_id,
+			__entry->num_trbs_free,
+			__entry->bounce_buf_len,
+			__entry->cycle_state
+		)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
+	TP_PROTO(struct xhci_ring *ring),
+	TP_ARGS(ring)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_portsc,
+		    TP_PROTO(u32 portnum, u32 portsc),
+		    TP_ARGS(portnum, portsc),
+		    TP_STRUCT__entry(
+				     __field(u32, portnum)
+				     __field(u32, portsc)
+				     ),
+		    TP_fast_assign(
+				   __entry->portnum = portnum;
+				   __entry->portsc = portsc;
+				   ),
+		    TP_printk("port-%d: %s",
+			      __entry->portnum,
+			      xhci_decode_portsc(__entry->portsc)
+			      )
+);
+
+DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
+	     TP_PROTO(u32 portnum, u32 portsc),
+	     TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
+	     TP_PROTO(u32 portnum, u32 portsc),
+	     TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
+	     TP_PROTO(u32 portnum, u32 portsc),
+	     TP_ARGS(portnum, portsc)
+);
+
+DECLARE_EVENT_CLASS(xhci_dbc_log_request,
+	TP_PROTO(struct dbc_request *req),
+	TP_ARGS(req),
+	TP_STRUCT__entry(
+		__field(struct dbc_request *, req)
+		__field(bool, dir)
+		__field(unsigned int, actual)
+		__field(unsigned int, length)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__entry->req = req;
+		__entry->dir = req->direction;
+		__entry->actual = req->actual;
+		__entry->length = req->length;
+		__entry->status = req->status;
+	),
+	TP_printk("%s: req %p length %u/%u ==> %d",
+		__entry->dir ? "bulk-in" : "bulk-out",
+		__entry->req, __entry->actual,
+		__entry->length, __entry->status
+	)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
+	TP_PROTO(struct dbc_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
+	TP_PROTO(struct dbc_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
+	TP_PROTO(struct dbc_request *req),
+	TP_ARGS(req)
+);
+
+DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
+	TP_PROTO(struct dbc_request *req),
+	TP_ARGS(req)
+);
+#endif /* __XHCI_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE xhci-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
new file mode 100644
index 0000000..dae3be1
--- /dev/null
+++ b/drivers/usb/host/xhci.c
@@ -0,0 +1,5258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-mtk.h"
+#include "xhci-debugfs.h"
+#include "xhci-dbgcap.h"
+
+#define DRIVER_AUTHOR "Sarah Sharp"
+#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+
+#define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
+
+/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+static int link_quirk;
+module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+
+static unsigned long long quirks;
+module_param(quirks, ullong, S_IRUGO);
+MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+
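+/* Return true if the TD's start segment belongs to this ring's segment list. */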
+static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+
+	if (!td || !td->start_seg)
+		return false;
+	do {
+		if (seg == td->start_seg)
+			return true;
+		seg = seg->next;
+	} while (seg && seg != ring->first_seg);
+
+	return false;
+}
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * xhci_handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes: the "usec" timeout has
+ * elapsed (major hardware flakeout), or the register reads as all-ones
+ * (hardware removed).
+ */
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = readl(ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Disable interrupts and begin the xHCI halting process.
+ */
+void xhci_quiesce(struct xhci_hcd *xhci)
+{
+	u32 halted;
+	u32 cmd;
+	u32 mask;
+
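+	/*
+	 * Mask off the interrupt-enable bits; if the controller is still
+	 * running, also clear the run/stop bit so that it begins to halt.
+	 */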
+	mask = ~(XHCI_IRQS);
+	halted = readl(&xhci->op_regs->status) & STS_HALT;
+	if (!halted)
+		mask &= ~CMD_RUN;
+
+	cmd = readl(&xhci->op_regs->command);
+	cmd &= mask;
+	writel(cmd, &xhci->op_regs->command);
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 ms of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+	int ret;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
+	xhci_quiesce(xhci);
+
+	ret = xhci_handshake(&xhci->op_regs->status,
+			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+	if (ret) {
+		xhci_warn(xhci, "Host halt failed, %d\n", ret);
+		return ret;
+	}
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+	return ret;
+}
+
+/*
+ * Set the run bit and wait for the host to be running.
+ */
+int xhci_start(struct xhci_hcd *xhci)
+{
+	u32 temp;
+	int ret;
+
+	temp = readl(&xhci->op_regs->command);
+	temp |= (CMD_RUN);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
+			temp);
+	writel(temp, &xhci->op_regs->command);
+
+	/*
+	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
+	 * running.
+	 */
+	ret = xhci_handshake(&xhci->op_regs->status,
+			STS_HALT, 0, XHCI_MAX_HALT_USEC);
+	if (ret == -ETIMEDOUT)
+		xhci_err(xhci,
+			 "Host took too long to start, waited %u microseconds.\n",
+			 XHCI_MAX_HALT_USEC);
+	if (!ret)
+		/* clear state flags. Including dying, halted or removing */
+		xhci->xhc_state = 0;
+
+	return ret;
+}
+
+/*
+ * Reset a halted HC.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+	u32 command;
+	u32 state;
+	int ret, i;
+
+	state = readl(&xhci->op_regs->status);
+
+	if (state == ~(u32)0) {
+		xhci_warn(xhci, "Host not accessible, reset failed.\n");
+		return -ENODEV;
+	}
+
+	if ((state & STS_HALT) == 0) {
+		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+		return 0;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
+	command = readl(&xhci->op_regs->command);
+	command |= CMD_RESET;
+	writel(command, &xhci->op_regs->command);
+
+	/*
+	 * Existing Intel xHCI controllers require a delay of 1 ms after
+	 * setting the CMD_RESET bit and before accessing any HC registers.
+	 * This allows the HC to complete the reset operation and be ready
+	 * for HC register access. Without this delay, the subsequent HC
+	 * register access may very rarely result in a system hang.
+	 */
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		udelay(1000);
+
+	ret = xhci_handshake(&xhci->op_regs->command,
+			CMD_RESET, 0, 10 * 1000 * 1000);
+	if (ret)
+		return ret;
+
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			 "Wait for controller to be ready for doorbell rings");
+	/*
+	 * xHCI cannot write to any doorbells or operational registers other
+	 * than status until the "Controller Not Ready" flag is cleared.
+	 */
+	ret = xhci_handshake(&xhci->op_regs->status,
+			STS_CNR, 0, 10 * 1000 * 1000);
+
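+	/*
+	 * The reset wiped the port state, so clear the suspend/resume
+	 * bookkeeping for both roothub buses.
+	 */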
+	for (i = 0; i < 2; i++) {
+		xhci->bus_state[i].port_c_suspend = 0;
+		xhci->bus_state[i].suspended_ports = 0;
+		xhci->bus_state[i].resuming_ports = 0;
+	}
+
+	return ret;
+}
+
+static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+	int err, i;
+	u64 val;
+
+	/*
+	 * Some Renesas controllers get into a weird state if they are
+	 * reset while programmed with 64-bit addresses (they will preserve
+	 * the top half of the address in internal, non-visible
+	 * registers). You end up with half the address coming from the
+	 * kernel, and the other half coming from the firmware. Also,
+	 * changing the programming leads to extra accesses even if the
+	 * controller is supposed to be halted. The controller ends up with
+	 * a fatal fault, and is then ripe for being properly reset.
+	 *
+	 * Special care is taken to only apply this if the device is behind
+	 * an IOMMU. Doing anything when there is no IOMMU is definitely
+	 * unsafe...
+	 */
+	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
+		return;
+
+	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
+
+	/* Clear HSEIE so that faults do not get signaled */
+	val = readl(&xhci->op_regs->command);
+	val &= ~CMD_HSEIE;
+	writel(val, &xhci->op_regs->command);
+
+	/* Clear HSE (aka FATAL) */
+	val = readl(&xhci->op_regs->status);
+	val |= STS_FATAL;
+	writel(val, &xhci->op_regs->status);
+
+	/* Now zero the registers, and brace for impact */
+	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	if (upper_32_bits(val))
+		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if (upper_32_bits(val))
+		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+
+	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
+		struct xhci_intr_reg __iomem *ir;
+
+		ir = &xhci->run_regs->ir_set[i];
+		val = xhci_read_64(xhci, &ir->erst_base);
+		if (upper_32_bits(val))
+			xhci_write_64(xhci, 0, &ir->erst_base);
+		val = xhci_read_64(xhci, &ir->erst_dequeue);
+		if (upper_32_bits(val))
+			xhci_write_64(xhci, 0, &ir->erst_dequeue);
+	}
+
+	/* Wait for the fault to appear. It will be cleared on reset */
+	err = xhci_handshake(&xhci->op_regs->status,
+			     STS_FATAL, STS_FATAL,
+			     XHCI_MAX_HALT_USEC);
+	if (!err)
+		xhci_info(xhci, "Fault detected\n");
+}
+
+#ifdef CONFIG_USB_PCI
+/*
+ * Set up MSI
+ */
+static int xhci_setup_msi(struct xhci_hcd *xhci)
+{
+	int ret;
+	/*
+	 * TODO: check with MSI SoC for sysdev
+	 */
+	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret < 0) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"failed to allocate MSI entry");
+		return ret;
+	}
+
+	ret = request_irq(pdev->irq, xhci_msi_irq,
+				0, "xhci_hcd", xhci_to_hcd(xhci));
+	if (ret) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"disable MSI interrupt");
+		pci_free_irq_vectors(pdev);
+	}
+
+	return ret;
+}
+
+/*
+ * Set up MSI-X
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+	int i, ret = 0;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+	/*
+	 * Calculate the number of MSI-X vectors to use:
+	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
+	 *   supports, read from the xHCI HCSPARAMS1 register.
+	 * - num_online_cpus() + 1: one vector per CPU core, plus one extra
+	 *   to ensure an interrupt is always available.
+	 */
+	xhci->msix_count = min(num_online_cpus() + 1,
+				HCS_MAX_INTRS(xhci->hcs_params1));
+
+	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
+			PCI_IRQ_MSIX);
+	if (ret < 0) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Failed to enable MSI-X");
+		return ret;
+	}
+
+	for (i = 0; i < xhci->msix_count; i++) {
+		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
+				"xhci_hcd", xhci_to_hcd(xhci));
+		if (ret)
+			goto disable_msix;
+	}
+
+	hcd->msix_enabled = 1;
+	return ret;
+
+disable_msix:
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
+	while (--i >= 0)
+		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
+	pci_free_irq_vectors(pdev);
+	return ret;
+}
+
+/* Free any IRQs and disable MSI-X */
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+	if (xhci->quirks & XHCI_PLAT)
+		return;
+
+	/* return if using legacy interrupt */
+	if (hcd->irq > 0)
+		return;
+
+	if (hcd->msix_enabled) {
+		int i;
+
+		for (i = 0; i < xhci->msix_count; i++)
+			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
+	} else {
+		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
+	}
+
+	pci_free_irq_vectors(pdev);
+	hcd->msix_enabled = 0;
+}
+
+static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+	if (hcd->msix_enabled) {
+		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+		int i;
+
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(pci_irq_vector(pdev, i));
+	}
+}
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev  *pdev;
+	int ret;
+
+	/* The xhci platform device has set up IRQs through usb_add_hcd. */
+	if (xhci->quirks & XHCI_PLAT)
+		return 0;
+
+	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	/*
+	 * Some Fresco Logic host controllers advertise MSI, but fail to
+	 * generate interrupts.  Don't even try to enable MSI.
+	 */
+	if (xhci->quirks & XHCI_BROKEN_MSI)
+		goto legacy_irq;
+
+	/* unregister the legacy interrupt */
+	if (hcd->irq)
+		free_irq(hcd->irq, hcd);
+	hcd->irq = 0;
+
+	ret = xhci_setup_msix(xhci);
+	if (ret)
+		/* fall back to msi*/
+		/* fall back to MSI */
+
+	if (!ret) {
+		hcd->msi_enabled = 1;
+		return 0;
+	}
+
+	if (!pdev->irq) {
+		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
+		return -EINVAL;
+	}
+
+ legacy_irq:
+	if (!strlen(hcd->irq_descr))
+		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
+			 hcd->driver->description, hcd->self.busnum);
+
+	/* fall back to legacy interrupt */
+	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+			hcd->irq_descr, hcd);
+	if (ret) {
+		xhci_err(xhci, "request interrupt %d failed\n",
+				pdev->irq);
+		return ret;
+	}
+	hcd->irq = pdev->irq;
+	return 0;
+}
+
+#else
+
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+}
+
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+}
+
+#endif
+
+static void compliance_mode_recovery(struct timer_list *t)
+{
+	struct xhci_hcd *xhci;
+	struct usb_hcd *hcd;
+	struct xhci_hub *rhub;
+	u32 temp;
+	int i;
+
+	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
+	rhub = &xhci->usb3_rhub;
+
+	for (i = 0; i < rhub->num_ports; i++) {
+		temp = readl(rhub->ports[i]->addr);
+		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
+			/*
+			 * Compliance Mode Detected. Letting USB Core
+			 * handle the Warm Reset
+			 */
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Compliance mode detected->port %d",
+					i + 1);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Attempting compliance mode recovery");
+			hcd = xhci->shared_hcd;
+
+			if (hcd->state == HC_STATE_SUSPENDED)
+				usb_hcd_resume_root_hub(hcd);
+
+			usb_hcd_poll_rh_status(hcd);
+		}
+	}
+
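+	/*
+	 * Re-arm the polling timer unless every USB 3.0 port has already
+	 * been seen in U0 (port_status_u0 keeps one bit per port).
+	 */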
+	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
+		mod_timer(&xhci->comp_mode_recovery_timer,
+			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+}
+
+/*
+ * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
+ * that causes ports behind that hardware to enter compliance mode sometimes.
+ * The quirk creates a timer that polls the link state of each host
+ * controller's port every 2 seconds and recovers a port by issuing a Warm
+ * reset if Compliance mode is detected; otherwise the port becomes "dead" (no
+ * device connections or disconnections will be detected anymore). Because no
+ * status event is generated when entering compliance mode (per the xHCI spec),
+ * this quirk is needed on systems that have the failing hardware installed.
+ */
+static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
+{
+	xhci->port_status_u0 = 0;
+	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+		    0);
+	xhci->comp_mode_recovery_timer.expires = jiffies +
+			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
+
+	add_timer(&xhci->comp_mode_recovery_timer);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Compliance mode recovery timer initialized");
+}
+
+/*
+ * This function identifies the systems that have installed the SN65LVPE502CP
+ * USB3.0 re-driver and that need the Compliance Mode Quirk.
+ * Systems:
+ * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
+ */
+static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
+{
+	const char *dmi_product_name, *dmi_sys_vendor;
+
+	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+	if (!dmi_product_name || !dmi_sys_vendor)
+		return false;
+
+	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+		return false;
+
+	if (strstr(dmi_product_name, "Z420") ||
+			strstr(dmi_product_name, "Z620") ||
+			strstr(dmi_product_name, "Z820") ||
+			strstr(dmi_product_name, "Z1 Workstation"))
+		return true;
+
+	return false;
+}
+
+static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
+{
+	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
+}
+
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+static int xhci_init(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int retval = 0;
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
+	spin_lock_init(&xhci->lock);
+	if (xhci->hci_version == 0x95 && link_quirk) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Not clearing Link TRB chain bits.");
+		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+	} else {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI doesn't need link TRB QUIRK");
+	}
+	retval = xhci_mem_init(xhci, GFP_KERNEL);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
+
+	/* Initialize compliance mode recovery data if needed */
+	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
+		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
+		compliance_mode_recovery_timer_init(xhci);
+	}
+
+	return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+static int xhci_run_finished(struct xhci_hcd *xhci)
+{
+	if (xhci_start(xhci)) {
+		xhci_halt(xhci);
+		return -ENODEV;
+	}
+	xhci->shared_hcd->state = HC_STATE_RUNNING;
+	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+	if (xhci->quirks & XHCI_NEC_HOST)
+		xhci_ring_cmd_db(xhci);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB3 roothub");
+	return 0;
+}
+
+/*
+ * Start the HC after it was halted.
+ *
+ * This function is called by the USB core when the HC driver is added.
+ * Its opposite is xhci_stop().
+ *
+ * xhci_init() must be called once before this function can be called.
+ * Reset the HC, enable device slot contexts, program DCBAAP, and
+ * set command ring pointer and event ring pointer.
+ *
+ * Set up MSI-X vectors and enable interrupts.
+ */
+int xhci_run(struct usb_hcd *hcd)
+{
+	u32 temp;
+	u64 temp_64;
+	int ret;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	/* Start the xHCI host controller running only after the USB 2.0 roothub
+	 * is set up.
+	 */
+
+	hcd->uses_new_polling = 1;
+	if (!usb_hcd_is_primary_hcd(hcd))
+		return xhci_run_finished(xhci);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
+
+	ret = xhci_try_enable_msi(hcd);
+	if (ret)
+		return ret;
+
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"ERST deq = 64'h%0llx", (unsigned long long) temp_64);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set the interrupt modulation register");
+	temp = readl(&xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+	writel(temp, &xhci->ir_set->irq_control);
+
+	/* Set the HCD state before we enable the irqs */
+	temp = readl(&xhci->op_regs->command);
+	temp |= (CMD_EIE);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enable interrupts, cmd = 0x%x.", temp);
+	writel(temp, &xhci->op_regs->command);
+
+	temp = readl(&xhci->ir_set->irq_pending);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
+			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+
+	if (xhci->quirks & XHCI_NEC_HOST) {
+		struct xhci_command *command;
+
+		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+		if (!command)
+			return -ENOMEM;
+
+		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
+				TRB_TYPE(TRB_NEC_GET_FW));
+		if (ret)
+			xhci_free_command(xhci, command);
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB2 roothub");
+
+	xhci_dbc_init(xhci);
+
+	xhci_debugfs_init(xhci);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_run);
+
+/*
+ * Stop xHCI driver.
+ *
+ * This function is called by the USB core when the HC driver is removed.
+ * Its opposite is xhci_run().
+ *
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+static void xhci_stop(struct usb_hcd *hcd)
+{
+	u32 temp;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	mutex_lock(&xhci->mutex);
+
+	/* Only halt host and free memory after both hcds are removed */
+	if (!usb_hcd_is_primary_hcd(hcd)) {
+		mutex_unlock(&xhci->mutex);
+		return;
+	}
+
+	xhci_dbc_exit(xhci);
+
+	spin_lock_irq(&xhci->lock);
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+	xhci_halt(xhci);
+	xhci_reset(xhci);
+	spin_unlock_irq(&xhci->lock);
+
+	xhci_cleanup_msix(xhci);
+
+	/* Deleting Compliance Mode Recovery Timer */
+	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+			(!(xhci_all_ports_seen_u0(xhci)))) {
+		del_timer_sync(&xhci->comp_mode_recovery_timer);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"%s: compliance mode recovery timer deleted",
+				__func__);
+	}
+
+	if (xhci->quirks & XHCI_AMD_PLL_FIX)
+		usb_amd_dev_put();
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Disabling event ring interrupts");
+	temp = readl(&xhci->op_regs->status);
+	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
+	temp = readl(&xhci->ir_set->irq_pending);
+	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
+	xhci_mem_cleanup(xhci);
+	xhci_debugfs_exit(xhci);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_stop completed - status = %x",
+			readl(&xhci->op_regs->status));
+	mutex_unlock(&xhci->mutex);
+}
+
+/*
+ * Shutdown HC (not bus-specific)
+ *
+ * This is called when the machine is rebooting or halting.  We assume that the
+ * machine will be powered off, and the HC's internal state will be reset.
+ * Don't bother to free memory.
+ *
+ * This will only ever be called with the main usb_hcd (the USB3 roothub).
+ */
+static void xhci_shutdown(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
+
+	spin_lock_irq(&xhci->lock);
+	xhci_halt(xhci);
+	/* Workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		xhci_reset(xhci);
+	spin_unlock_irq(&xhci->lock);
+
+	xhci_cleanup_msix(xhci);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_shutdown completed - status = %x",
+			readl(&xhci->op_regs->status));
+
+	/* Yet another workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
+}
+
+#ifdef CONFIG_PM
+static void xhci_save_registers(struct xhci_hcd *xhci)
+{
+	xhci->s3.command = readl(&xhci->op_regs->command);
+	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
+	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
+	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
+}
+
+static void xhci_restore_registers(struct xhci_hcd *xhci)
+{
+	writel(xhci->s3.command, &xhci->op_regs->command);
+	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
+	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+	u64	val_64;
+
+	/* step 2: initialize command ring buffer */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+				      xhci->cmd_ring->dequeue) &
+		 (u64) ~CMD_RING_RSVD_BITS) |
+		xhci->cmd_ring->cycle_state;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%llx",
+			(unsigned long long) val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+}
+
+/*
+ * The whole command ring must be cleared to zero when we suspend the host.
+ *
+ * The host doesn't save the command ring pointer in the suspend well, so we
+ * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
+ * aligned, because of the reserved bits in the command ring dequeue pointer
+ * register.  Therefore, we can't just set the dequeue pointer back in the
+ * middle of the ring (TRBs are 16-byte aligned).
+ */
+static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+{
+	struct xhci_ring *ring;
+	struct xhci_segment *seg;
+
+	ring = xhci->cmd_ring;
+	seg = ring->deq_seg;
+	do {
+		memset(seg->trbs, 0,
+			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+			cpu_to_le32(~TRB_CYCLE);
+		seg = seg->next;
+	} while (seg != ring->deq_seg);
+
+	/* Reset the software enqueue and dequeue pointers */
+	ring->deq_seg = ring->first_seg;
+	ring->dequeue = ring->first_seg->trbs;
+	ring->enq_seg = ring->deq_seg;
+	ring->enqueue = ring->dequeue;
+
+	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+	/*
+	 * Ring is now zeroed, so the HW should look for change of ownership
+	 * when the cycle bit is set to 1.
+	 */
+	ring->cycle_state = 1;
+
+	/*
+	 * Reset the hardware dequeue pointer.
+	 * Yes, this will need to be re-written after resume, but we're paranoid
+	 * and want to make sure the hardware doesn't access bogus memory
+	 * because, say, the BIOS or an SMI started the host without changing
+	 * the command ring pointers.
+	 */
+	xhci_set_cmd_ring_deq(xhci);
+}
+
+static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+{
+	struct xhci_port **ports;
+	int port_index;
+	unsigned long flags;
+	u32 t1, t2;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* disable wake bits on all USB3 ports */
+	port_index = xhci->usb3_rhub.num_ports;
+	ports = xhci->usb3_rhub.ports;
+	while (port_index--) {
+		t1 = readl(ports[port_index]->addr);
+		t1 = xhci_port_state_to_neutral(t1);
+		t2 = t1 & ~PORT_WAKE_BITS;
+		if (t1 != t2)
+			writel(t2, ports[port_index]->addr);
+	}
+
+	/* disable wake bits on all USB2 ports */
+	port_index = xhci->usb2_rhub.num_ports;
+	ports = xhci->usb2_rhub.ports;
+	while (port_index--) {
+		t1 = readl(ports[port_index]->addr);
+		t1 = xhci_port_state_to_neutral(t1);
+		t2 = t1 & ~PORT_WAKE_BITS;
+		if (t1 != t2)
+			writel(t2, ports[port_index]->addr);
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+	struct xhci_port	**ports;
+	int			port_index;
+	u32			status;
+	u32			portsc;
+
+	status = readl(&xhci->op_regs->status);
+	if (status & STS_EINT)
+		return true;
+	/*
+	 * Checking STS_EINT is not enough as there is a lag between a change
+	 * bit being set and the Port Status Change Event that it generated
+	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+	 */
+
+	port_index = xhci->usb2_rhub.num_ports;
+	ports = xhci->usb2_rhub.ports;
+	while (port_index--) {
+		portsc = readl(ports[port_index]->addr);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	port_index = xhci->usb3_rhub.num_ports;
+	ports = xhci->usb3_rhub.ports;
+	while (port_index--) {
+		portsc = readl(ports[port_index]->addr);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * Stop HC (not bus-specific)
+ *
+ * This is called when the machine transitions into S3/S4 mode.
+ */
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+{
+	int			rc = 0;
+	unsigned int		delay = XHCI_MAX_HALT_USEC;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+	u32			command;
+	u32			res;
+
+	if (!hcd->state)
+		return 0;
+
+	if (hcd->state != HC_STATE_SUSPENDED ||
+			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+		return -EINVAL;
+
+	xhci_dbc_suspend(xhci);
+
+	/* Clear root port wake on bits if wakeup not allowed. */
+	if (!do_wakeup)
+		xhci_disable_port_wake_on_bits(xhci);
+
+	/* Don't poll the roothubs on bus suspend. */
+	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	del_timer_sync(&hcd->rh_timer);
+	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	del_timer_sync(&xhci->shared_hcd->rh_timer);
+
+	if (xhci->quirks & XHCI_SUSPEND_DELAY)
+		usleep_range(1000, 1500);
+
+	spin_lock_irq(&xhci->lock);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+	/* step 1: stop endpoint */
+	/* skipped, assuming that port suspend has already been done */
+
+	/* step 2: clear Run/Stop bit */
+	command = readl(&xhci->op_regs->command);
+	command &= ~CMD_RUN;
+	writel(command, &xhci->op_regs->command);
+
+	/* Some chips from Fresco Logic need an extraordinary delay */
+	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
+	if (xhci_handshake(&xhci->op_regs->status,
+		      STS_HALT, STS_HALT, delay)) {
+		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+	xhci_clear_command_ring(xhci);
+
+	/* step 3: save registers */
+	xhci_save_registers(xhci);
+
+	/* step 4: set CSS flag */
+	command = readl(&xhci->op_regs->command);
+	command |= CMD_CSS;
+	writel(command, &xhci->op_regs->command);
+	xhci->broken_suspend = 0;
+	if (xhci_handshake(&xhci->op_regs->status,
+				STS_SAVE, 0, 10 * 1000)) {
+		/*
+		 * The AMD SNPS xHC 3.0 occasionally does not clear the SSS
+		 * bit (BIT(8)) of USBSTS, so the driver's poll for it never
+		 * succeeds and the driver wrongly assumes the controller is
+		 * not responding and times out.  To work around this, check
+		 * that the SRE and HCE bits are not set (per xHCI section
+		 * 5.4.2) and bypass the timeout.
+		 */
+		res = readl(&xhci->op_regs->status);
+		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
+		    (((res & STS_SRE) == 0) &&
+				((res & STS_HCE) == 0))) {
+			xhci->broken_suspend = 1;
+		} else {
+			xhci_warn(xhci, "WARN: xHC save state timeout\n");
+			spin_unlock_irq(&xhci->lock);
+			return -ETIMEDOUT;
+		}
+	}
+	spin_unlock_irq(&xhci->lock);
+
+	/*
+	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
+	 * is about to be suspended.
+	 */
+	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+			(!(xhci_all_ports_seen_u0(xhci)))) {
+		del_timer_sync(&xhci->comp_mode_recovery_timer);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"%s: compliance mode recovery timer deleted",
+				__func__);
+	}
+
+	/* step 5: remove core well power */
+	/* synchronize irq when using MSI-X */
+	xhci_msix_sync_irqs(xhci);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(xhci_suspend);
+
+/*
+ * Start xHC (not bus-specific)
+ *
+ * This is called when the machine transitions out of S3/S4 mode.
+ */
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+{
+	u32			command, temp = 0;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+	struct usb_hcd		*secondary_hcd;
+	int			retval = 0;
+	bool			comp_timer_running = false;
+
+	if (!hcd->state)
+		return 0;
+
+	/* Wait a bit if either of the roothubs needs to settle from the
+	 * transition into bus suspend.
+	 */
+	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
+			time_before(jiffies,
+				xhci->bus_state[1].next_statechange))
+		msleep(100);
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
+	spin_lock_irq(&xhci->lock);
+	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
+		hibernated = true;
+
+	if (!hibernated) {
+		/* step 1: restore register */
+		xhci_restore_registers(xhci);
+		/* step 2: initialize command ring buffer */
+		xhci_set_cmd_ring_deq(xhci);
+		/* step 3: restore state and start state*/
+		/* step 3: set CRS flag */
+		command = readl(&xhci->op_regs->command);
+		command |= CMD_CRS;
+		writel(command, &xhci->op_regs->command);
+		/*
+		 * Some controllers take up to 55+ ms to complete the controller
+		 * restore, so set the timeout to 100 ms.  The xHCI specification
+		 * doesn't mention any timeout value.
+		 */
+		if (xhci_handshake(&xhci->op_regs->status,
+			      STS_RESTORE, 0, 100 * 1000)) {
+			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
+			spin_unlock_irq(&xhci->lock);
+			return -ETIMEDOUT;
+		}
+		temp = readl(&xhci->op_regs->status);
+	}
+
+	/* If restore operation fails, re-initialize the HC during resume */
+	if ((temp & STS_SRE) || hibernated) {
+
+		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+				!(xhci_all_ports_seen_u0(xhci))) {
+			del_timer_sync(&xhci->comp_mode_recovery_timer);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Compliance Mode Recovery Timer deleted!");
+		}
+
+		/* Let the USB core know _both_ roothubs lost power. */
+		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+
+		xhci_dbg(xhci, "Stop HCD\n");
+		xhci_halt(xhci);
+		xhci_zero_64b_regs(xhci);
+		xhci_reset(xhci);
+		spin_unlock_irq(&xhci->lock);
+		xhci_cleanup_msix(xhci);
+
+		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+		temp = readl(&xhci->op_regs->status);
+		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
+		temp = readl(&xhci->ir_set->irq_pending);
+		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+
+		xhci_dbg(xhci, "cleaning up memory\n");
+		xhci_mem_cleanup(xhci);
+		xhci_debugfs_exit(xhci);
+		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+			    readl(&xhci->op_regs->status));
+
+		/* USB core calls the PCI reinit and start functions twice:
+		 * first with the primary HCD, and then with the secondary HCD.
+		 * If we don't do the same, the host will never be started.
+		 */
+		if (!usb_hcd_is_primary_hcd(hcd))
+			secondary_hcd = hcd;
+		else
+			secondary_hcd = xhci->shared_hcd;
+
+		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
+		retval = xhci_init(hcd->primary_hcd);
+		if (retval)
+			return retval;
+		comp_timer_running = true;
+
+		xhci_dbg(xhci, "Start the primary HCD\n");
+		retval = xhci_run(hcd->primary_hcd);
+		if (!retval) {
+			xhci_dbg(xhci, "Start the secondary HCD\n");
+			retval = xhci_run(secondary_hcd);
+		}
+		hcd->state = HC_STATE_SUSPENDED;
+		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+		goto done;
+	}
+
+	/* step 4: set Run/Stop bit */
+	command = readl(&xhci->op_regs->command);
+	command |= CMD_RUN;
+	writel(command, &xhci->op_regs->command);
+	xhci_handshake(&xhci->op_regs->status, STS_HALT,
+		  0, 250 * 1000);
+
+	/* step 5: walk topology and initialize portsc,
+	 * portpmsc and portli
+	 */
+	/* this is done in bus_resume */
+
+	/* step 6: restart each of the previously
+	 * Running endpoints by ringing their doorbells
+	 */
+
+	spin_unlock_irq(&xhci->lock);
+
+	xhci_dbc_resume(xhci);
+
+ done:
+	if (retval == 0) {
+		/* Resume root hubs only when there are pending events. */
+		if (xhci_pending_portevent(xhci)) {
+			usb_hcd_resume_root_hub(xhci->shared_hcd);
+			usb_hcd_resume_root_hub(hcd);
+		}
+	}
+
+	/*
+	 * If the system is subject to the quirk, the compliance mode recovery
+	 * timer always needs to be re-initialized after a system resume, since
+	 * the ports can suffer the compliance mode issue again regardless of
+	 * whether they entered U0 before the system was suspended.
+	 */
+	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
+		compliance_mode_recovery_timer_init(xhci);
+
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
+	/* Re-enable port polling. */
+	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+	usb_hcd_poll_rh_status(xhci->shared_hcd);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(xhci_resume);
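+
+/*
+ * A platform glue driver would typically wire xhci_suspend()/xhci_resume()
+ * into its dev_pm_ops.  A minimal sketch (illustrative only; the function
+ * names below are hypothetical and not part of this file):
+ *
+ *	static int example_xhci_suspend(struct device *dev)
+ *	{
+ *		struct usb_hcd *hcd = dev_get_drvdata(dev);
+ *		struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ *
+ *		return xhci_suspend(xhci, device_may_wakeup(dev));
+ *	}
+ *
+ *	static int example_xhci_resume(struct device *dev)
+ *	{
+ *		struct usb_hcd *hcd = dev_get_drvdata(dev);
+ *		struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ *
+ *		return xhci_resume(xhci, false);
+ *	}
+ */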
+#endif	/* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs.  Find the index for an endpoint given its descriptor.  The bitmask is
+ * formed by left-shifting 1 by one more than the returned index (see
+ * xhci_get_endpoint_flag()).
+ *
+ * Index  = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
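+ *
+ * For example, ep 1 IN (bEndpointAddress 0x81) gives (1 * 2) + 1 - 1 = 2,
+ * and ep 2 OUT (0x02) gives (2 * 2) + 0 - 1 = 3.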
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+	unsigned int index;
+	if (usb_endpoint_xfer_control(desc))
+		index = (unsigned int) (usb_endpoint_num(desc)*2);
+	else
+		index = (unsigned int) (usb_endpoint_num(desc)*2) +
+			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+	return index;
+}
+
+/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
+ * address from the XHCI endpoint index.
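+ *
+ * For example, index 2 maps back to number DIV_ROUND_UP(2, 2) = 1 with
+ * USB_DIR_IN (even index), i.e. 0x81, and index 3 maps to
+ * DIV_ROUND_UP(3, 2) = 2 with USB_DIR_OUT, i.e. 0x02.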
+ */
+unsigned int xhci_get_endpoint_address(unsigned int ep_index)
+{
+	unsigned int number = DIV_ROUND_UP(ep_index, 2);
+	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
+	return direction | number;
+}
+
+/* Find the flag for this endpoint (for use in the control context).  Use the
+ * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+	return 1 << (xhci_get_endpoint_index(desc) + 1);
+}
+
+/* Find the flag for this endpoint (for use in the control context).  Use the
+ * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
+{
+	return 1 << (ep_index + 1);
+}
+
+/* Compute the last valid endpoint context index.  Basically, this is the
+ * endpoint index plus one.  For slot contexts with more than one valid endpoint,
+ * we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+	return fls(added_ctxs) - 1;
+}
+
+/* Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
+		const char *func)
+{
+	struct xhci_hcd	*xhci;
+	struct xhci_virt_device	*virt_dev;
+
+	if (!hcd || (check_ep && !ep) || !udev) {
+		pr_debug("xHCI %s called with invalid args\n", func);
+		return -EINVAL;
+	}
+	if (!udev->parent) {
+		pr_debug("xHCI %s called for root hub\n", func);
+		return 0;
+	}
+
+	xhci = hcd_to_xhci(hcd);
+	if (check_virt_dev) {
+		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
+			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
+					func);
+			return -EINVAL;
+		}
+
+		virt_dev = xhci->devs[udev->slot_id];
+		if (virt_dev->udev != udev) {
+			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
+					func);
+			return -EINVAL;
+		}
+	}
+
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
+		return -ENODEV;
+
+	return 1;
+}
+
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct xhci_command *command,
+		bool ctx_change, bool must_succeed);
+
+/*
+ * Full speed devices may have a max packet size greater than 8 bytes, but the
+ * USB core doesn't know that until it reads the first 8 bytes of the
+ * descriptor.  If the usb_device's max packet size changes after that point,
+ * we need to issue an evaluate context command and wait on it.
+ */
+static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+		unsigned int ep_index, struct urb *urb)
+{
+	struct xhci_container_ctx *out_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_command *command;
+	int max_packet_size;
+	int hw_max_packet_size;
+	int ret = 0;
+
+	out_ctx = xhci->devs[slot_id]->out_ctx;
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
+	if (hw_max_packet_size != max_packet_size) {
+		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+				"Max Packet Size for ep 0 changed.");
+		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+				"Max packet size in usb_device = %d",
+				max_packet_size);
+		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+				"Max packet size in xHCI HW = %d",
+				hw_max_packet_size);
+		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+				"Issuing evaluate context command.");
+
+		/* Set up the input context flags for the command */
+		/* FIXME: This won't work if a non-default control endpoint
+		 * changes max packet sizes.
+		 */
+
+		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+		if (!command)
+			return -ENOMEM;
+
+		command->in_ctx = xhci->devs[slot_id]->in_ctx;
+		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+		if (!ctrl_ctx) {
+			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+					__func__);
+			ret = -ENOMEM;
+			goto command_cleanup;
+		}
+		/* Set up the modified control endpoint 0 */
+		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+				xhci->devs[slot_id]->out_ctx, ep_index);
+
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
+
+		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
+		ctrl_ctx->drop_flags = 0;
+
+		ret = xhci_configure_endpoint(xhci, urb->dev, command,
+				true, false);
+
+		/* Clean up the input context for later use by bandwidth
+		 * functions.
+		 */
+		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
+command_cleanup:
+		kfree(command->completion);
+		kfree(command);
+	}
+	return ret;
+}
+
+/*
+ * Non-error returns are a promise to giveback() the urb later;
+ * we drop ownership so the next owner (or urb unlink) can get it.
+ */
+static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	unsigned long flags;
+	int ret = 0;
+	unsigned int slot_id, ep_index;
+	unsigned int *ep_state;
+	struct urb_priv	*urb_priv;
+	int num_tds;
+
+	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
+					true, true, __func__) <= 0)
+		return -EINVAL;
+
+	slot_id = urb->dev->slot_id;
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
+
+	if (!HCD_HW_ACCESSIBLE(hcd)) {
+		if (!in_interrupt())
+			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+		return -ESHUTDOWN;
+	}
+
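+	/*
+	 * Isochronous URBs need one TD per packet.  A bulk OUT URB with
+	 * URB_ZERO_PACKET whose length is an exact multiple of the endpoint's
+	 * maxpacket needs a second TD for the trailing zero-length packet;
+	 * everything else fits in a single TD.
+	 */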
+	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+		num_tds = urb->number_of_packets;
+	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
+	    urb->transfer_buffer_length > 0 &&
+	    urb->transfer_flags & URB_ZERO_PACKET &&
+	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
+		num_tds = 2;
+	else
+		num_tds = 1;
+
+	urb_priv = kzalloc(sizeof(struct urb_priv) +
+			   num_tds * sizeof(struct xhci_td), mem_flags);
+	if (!urb_priv)
+		return -ENOMEM;
+
+	urb_priv->num_tds = num_tds;
+	urb_priv->num_tds_done = 0;
+	urb->hcpriv = urb_priv;
+
+	trace_xhci_urb_enqueue(urb);
+
+	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
+		/* Check to see if the max packet size for the default control
+		 * endpoint changed during FS device enumeration
+		 */
+		if (urb->dev->speed == USB_SPEED_FULL) {
+			ret = xhci_check_maxpacket(xhci, slot_id,
+					ep_index, urb);
+			if (ret < 0) {
+				xhci_urb_free_priv(urb_priv);
+				urb->hcpriv = NULL;
+				return ret;
+			}
+		}
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
+			 urb->ep->desc.bEndpointAddress, urb);
+		ret = -ESHUTDOWN;
+		goto free_priv;
+	}
+	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
+		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
+			  *ep_state);
+		ret = -EINVAL;
+		goto free_priv;
+	}
+	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
+		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
+		ret = -EINVAL;
+		goto free_priv;
+	}
+
+	switch (usb_endpoint_type(&urb->ep->desc)) {
+
+	case USB_ENDPOINT_XFER_CONTROL:
+		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
+					 slot_id, ep_index);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+					 slot_id, ep_index);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
+				slot_id, ep_index);
+		break;
+	}
+
+	if (ret) {
+free_priv:
+		xhci_urb_free_priv(urb_priv);
+		urb->hcpriv = NULL;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+/*
+ * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring.  Since the ring is a contiguous structure, they can't be physically
+ * removed.  Instead, there are a few cases to handle:
+ *
+ *  1) If the HC is in the middle of processing the URB to be canceled, we
+ *     simply move the ring's dequeue pointer past those TRBs using the Set
+ *     Transfer Ring Dequeue Pointer command.  This will be the common case,
+ *     when drivers timeout on the last submitted URB and attempt to cancel.
+ *
+ *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
+ *     HC will need to invalidate any TRBs it has cached after the stop
+ *     endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ *  3) The TD may have completed by the time the Stop Endpoint Command
+ *     completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb()
+ */
+static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	unsigned long flags;
+	int ret, i;
+	u32 temp;
+	struct xhci_hcd *xhci;
+	struct urb_priv	*urb_priv;
+	struct xhci_td *td;
+	unsigned int ep_index;
+	struct xhci_ring *ep_ring;
+	struct xhci_virt_ep *ep;
+	struct xhci_command *command;
+	struct xhci_virt_device *vdev;
+
+	xhci = hcd_to_xhci(hcd);
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	trace_xhci_urb_dequeue(urb);
+
+	/* Make sure the URB hasn't completed or been unlinked already */
+	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+	if (ret)
+		goto done;
+
+	/* give back URB now if we can't queue it for cancel */
+	vdev = xhci->devs[urb->dev->slot_id];
+	urb_priv = urb->hcpriv;
+	if (!vdev || !urb_priv)
+		goto err_giveback;
+
+	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+	ep = &vdev->eps[ep_index];
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep || !ep_ring)
+		goto err_giveback;
+
+	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
+	temp = readl(&xhci->op_regs->status);
+	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_hc_died(xhci);
+		goto done;
+	}
+
+	/*
+	 * Check that the ring has not been re-allocated since the URB was
+	 * enqueued.  If it has, make sure none of the ring-related pointers in
+	 * this URB's private data, such as td_list, are touched; otherwise we
+	 * would overwrite freed data.
+	 */
+	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
+		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
+		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
+			td = &urb_priv->td[i];
+			if (!list_empty(&td->cancelled_td_list))
+				list_del_init(&td->cancelled_td_list);
+		}
+		goto err_giveback;
+	}
+
+	if (xhci->xhc_state & XHCI_STATE_HALTED) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"HC halted, freeing TD manually.");
+		for (i = urb_priv->num_tds_done;
+		     i < urb_priv->num_tds;
+		     i++) {
+			td = &urb_priv->td[i];
+			if (!list_empty(&td->td_list))
+				list_del_init(&td->td_list);
+			if (!list_empty(&td->cancelled_td_list))
+				list_del_init(&td->cancelled_td_list);
+		}
+		goto err_giveback;
+	}
+
+	i = urb_priv->num_tds_done;
+	if (i < urb_priv->num_tds)
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Cancel URB %p, dev %s, ep 0x%x, "
+				"starting at offset 0x%llx",
+				urb, urb->dev->devpath,
+				urb->ep->desc.bEndpointAddress,
+				(unsigned long long) xhci_trb_virt_to_dma(
+					urb_priv->td[i].start_seg,
+					urb_priv->td[i].first_trb));
+
+	for (; i < urb_priv->num_tds; i++) {
+		td = &urb_priv->td[i];
+		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+	}
+
+	/* Queue a stop endpoint command, but only if this is
+	 * the first cancellation to be handled.
+	 */
+	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+		if (!command) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		ep->ep_state |= EP_STOP_CMD_PENDING;
+		ep->stop_cmd_timer.expires = jiffies +
+			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+		add_timer(&ep->stop_cmd_timer);
+		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+					 ep_index, 0);
+		xhci_ring_cmd_db(xhci);
+	}
+done:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+
+err_giveback:
+	if (urb_priv)
+		xhci_urb_free_priv(urb_priv);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+	return ret;
+}
+
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
+ */
+static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	unsigned int ep_index;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 drop_flag;
+	u32 new_add_flags, new_drop_flags;
+	int ret;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
+
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	drop_flag = xhci_get_endpoint_flag(&ep->desc);
+	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+				__func__, drop_flag);
+		return 0;
+	}
+
+	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return 0;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+	/* If the HC already knows the endpoint is disabled,
+	 * or the HCD has noted it is disabled, ignore this request
+	 */
+	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
+	    le32_to_cpu(ctrl_ctx->drop_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
+		/* Do not warn when called after a usb_device_reset */
+		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
+			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+				  __func__, ep);
+		return 0;
+	}
+
+	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
+	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+
+	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
+
+	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+	if (xhci->quirks & XHCI_MTK_HOST)
+		xhci_mtk_drop_ep_quirk(hcd, udev, ep);
+
+	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags);
+	return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
+ */
+static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_container_ctx *in_ctx;
+	unsigned int ep_index;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 added_ctxs;
+	u32 new_add_flags, new_drop_flags;
+	struct xhci_virt_device *virt_dev;
+	int ret = 0;
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0) {
+		/* So we won't queue a reset ep command for a root hub */
+		ep->hcpriv = NULL;
+		return ret;
+	}
+	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
+
+	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+		/* FIXME when we have to issue an evaluate endpoint command to
+		 * deal with ep0 max packet size changing once we get the
+		 * descriptors
+		 */
+		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+				__func__, added_ctxs);
+		return 0;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+	in_ctx = virt_dev->in_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return 0;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	/* If this endpoint is already in use, and the upper layers are trying
+	 * to add it again without dropping it, reject the addition.
+	 */
+	if (virt_dev->eps[ep_index].ring &&
+			!(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
+		xhci_warn(xhci, "Trying to add endpoint 0x%x "
+				"without dropping it.\n",
+				(unsigned int) ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+
+	/* If the HCD has already noted the endpoint is enabled,
+	 * ignore this request.
+	 */
+	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
+		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+				__func__, ep);
+		return 0;
+	}
+
+	/*
+	 * Configuration and alternate setting changes must be done in
+	 * process context, not interrupt context (or so documentation
+	 * for usb_set_interface() and usb_set_configuration() claim).
+	 */
+	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
+		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+				__func__, ep->desc.bEndpointAddress);
+		return -ENOMEM;
+	}
+
+	if (xhci->quirks & XHCI_MTK_HOST) {
+		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
+		if (ret < 0) {
+			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
+			virt_dev->eps[ep_index].new_ring = NULL;
+			return ret;
+		}
+	}
+
+	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+
+	/* If xhci_endpoint_disable() was called for this endpoint, but the
+	 * xHC hasn't been notified yet through the check_bandwidth() call,
+	 * this re-adds a new state for the endpoint from the new endpoint
+	 * descriptors.  We must drop and re-add this endpoint, so we leave the
+	 * drop flags alone.
+	 */
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+
+	/* Store the usb_device pointer for later use */
+	ep->hcpriv = udev;
+
+	xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
+
+	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
+			(unsigned int) ep->desc.bEndpointAddress,
+			udev->slot_id,
+			(unsigned int) new_drop_flags,
+			(unsigned int) new_add_flags);
+	return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	int i;
+
+	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return;
+	}
+
+	/* When a device's add flag and drop flag are zero, any subsequent
+	 * configure endpoint command will leave that endpoint's state
+	 * untouched.  Make sure we don't leave any old state in the input
+	 * endpoint contexts.
+	 */
+	ctrl_ctx->drop_flags = 0;
+	ctrl_ctx->add_flags = 0;
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+	/* Endpoint 0 is always valid */
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+	for (i = 1; i < 31; i++) {
+		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
+		ep_ctx->ep_info = 0;
+		ep_ctx->ep_info2 = 0;
+		ep_ctx->deq = 0;
+		ep_ctx->tx_info = 0;
+	}
+}
+
+static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
+		struct usb_device *udev, u32 *cmd_status)
+{
+	int ret;
+
+	switch (*cmd_status) {
+	case COMP_COMMAND_ABORTED:
+	case COMP_COMMAND_RING_STOPPED:
+		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
+		ret = -ETIME;
+		break;
+	case COMP_RESOURCE_ERROR:
+		dev_warn(&udev->dev,
+			 "Not enough host controller resources for new device state.\n");
+		ret = -ENOMEM;
+		/* FIXME: can we allocate more resources for the HC? */
+		break;
+	case COMP_BANDWIDTH_ERROR:
+	case COMP_SECONDARY_BANDWIDTH_ERROR:
+		dev_warn(&udev->dev,
+			 "Not enough bandwidth for new device state.\n");
+		ret = -ENOSPC;
+		/* FIXME: can we go back to the old state? */
+		break;
+	case COMP_TRB_ERROR:
+		/* the HCD set up something wrong */
+		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
+				"add flag = 1, "
+				"and endpoint is not disabled.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		dev_warn(&udev->dev,
+			 "ERROR: Incompatible device for endpoint configure command.\n");
+		ret = -ENODEV;
+		break;
+	case COMP_SUCCESS:
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Successful Endpoint Configure command");
+		ret = 0;
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
+				*cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
+		struct usb_device *udev, u32 *cmd_status)
+{
+	int ret;
+
+	switch (*cmd_status) {
+	case COMP_COMMAND_ABORTED:
+	case COMP_COMMAND_RING_STOPPED:
+		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
+		ret = -ETIME;
+		break;
+	case COMP_PARAMETER_ERROR:
+		dev_warn(&udev->dev,
+			 "WARN: xHCI driver setup invalid evaluate context command.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_SLOT_NOT_ENABLED_ERROR:
+		dev_warn(&udev->dev,
+			"WARN: slot not enabled for evaluate context command.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_CONTEXT_STATE_ERROR:
+		dev_warn(&udev->dev,
+			"WARN: invalid context state for evaluate context command.\n");
+		ret = -EINVAL;
+		break;
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		dev_warn(&udev->dev,
+			"ERROR: Incompatible device for evaluate context command.\n");
+		ret = -ENODEV;
+		break;
+	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+		/* Max Exit Latency too large error */
+		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
+		ret = -EINVAL;
+		break;
+	case COMP_SUCCESS:
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Successful evaluate context command");
+		ret = 0;
+		break;
+	default:
+		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
+			*cmd_status);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+		struct xhci_input_control_ctx *ctrl_ctx)
+{
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	/* Ignore the slot flag (bit 0), and the default control endpoint flag
+	 * (bit 1).  The default control endpoint is added during the Address
+	 * Device command and is never removed until the slot is disabled.
+	 */
+	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+
+	/* Use hweight32 to count the number of ones in the add flags, or
+	 * number of endpoints added.  Don't count endpoints that are changed
+	 * (both added and dropped).
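+	 *
+	 * For example, valid_add_flags = 0b0110 and valid_drop_flags = 0b0010
+	 * give hweight32(0b0110) - hweight32(0b0110 & 0b0010) = 2 - 1 = 1
+	 * newly added endpoint.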
+	 */
+	return hweight32(valid_add_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+		struct xhci_input_control_ctx *ctrl_ctx)
+{
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
+	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
+
+	return hweight32(valid_drop_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes.  We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ *  - the first configure endpoint command drops more endpoints than it adds
+ *  - a second configure endpoint command that adds more endpoints is queued
+ *  - the first configure endpoint command fails, so the config is unchanged
+ *  - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+		struct xhci_input_control_ctx *ctrl_ctx)
+{
+	u32 added_eps;
+
+	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
+	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.",
+				xhci->num_active_eps, added_eps,
+				xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += added_eps;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Adding %u ep ctxs, %u now active.", added_eps,
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * The configure endpoint command was failed by the xHC for some other reason,
+ * so we need to revert the resources that the failed configuration would have
+ * used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+		struct xhci_input_control_ctx *ctrl_ctx)
+{
+	u32 num_failed_eps;
+
+	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
+	xhci->num_active_eps -= num_failed_eps;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Removing %u failed ep ctxs, %u now active.",
+			num_failed_eps,
+			xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+		struct xhci_input_control_ctx *ctrl_ctx)
+{
+	u32 num_dropped_eps;
+
+	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Removing %u dropped ep ctxs, %u now active.",
+				num_dropped_eps,
+				xhci->num_active_eps);
+}
+
+static unsigned int xhci_get_block_size(struct usb_device *udev)
+{
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+		return FS_BLOCK;
+	case USB_SPEED_HIGH:
+		return HS_BLOCK;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+		return SS_BLOCK;
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+	default:
+		/* Should never happen */
+		return 1;
+	}
+}
+
+static unsigned int
+xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+{
+	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
+		return LS_OVERHEAD;
+	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
+		return FS_OVERHEAD;
+	return HS_OVERHEAD;
+}
+
+/* If we are changing a LS/FS device under a HS hub,
+ * make sure (if we are activating a new TT) that the HS bus has enough
+ * bandwidth for this new TT.
+ */
+static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_interval_bw_table *bw_table;
+	struct xhci_tt_bw_info *tt_info;
+
+	/* Find the bandwidth table for the root port this TT is attached to. */
+	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+	tt_info = virt_dev->tt_info;
+	/* If this TT already had active endpoints, the bandwidth for this TT
+	 * has already been added.  Removing all periodic endpoints (and thus
+	 * making the TT inactive) will only decrease the bandwidth used.
+	 */
+	if (old_active_eps)
+		return 0;
+	if (old_active_eps == 0 && tt_info->active_eps != 0) {
+		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
+			return -ENOMEM;
+		return 0;
+	}
+	/* Not sure why we would have no new active endpoints...
+	 *
+	 * Maybe because of an Evaluate Context change for a hub update or a
+	 * control endpoint 0 max packet size change?
+	 * FIXME: skip the bandwidth calculation in that case.
+	 */
+	return 0;
+}
+
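+/*
+ * SuperSpeed bandwidth is tracked per direction instead of per interval:
+ * the totals already consumed on the IN and OUT aggregates are checked
+ * against each direction's limit, less the SS_BW_RESERVED percentage held
+ * back for non-periodic transfers.
+ */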
+static int xhci_check_ss_bw(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev)
+{
+	unsigned int bw_reserved;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
+	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
+		return -ENOMEM;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
+	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * This algorithm is a very conservative estimate of the worst-case scheduling
+ * scenario for any one interval.  The hardware dynamically schedules the
+ * packets, so we can't tell which microframe could be the limiting factor in
+ * the bandwidth scheduling.  This only takes into account periodic endpoints.
+ *
+ * Obviously, we can't solve an NP-complete problem to find the minimum worst
+ * case scenario.  Instead, we come up with an estimate that is no less than
+ * the worst case bandwidth used for any one microframe, but may be an
+ * over-estimate.
+ *
+ * We walk the requirements for each endpoint by interval, starting with the
+ * smallest interval, and place packets in the schedule where there is only one
+ * possible way to schedule packets for that interval.  In order to simplify
+ * this algorithm, we record the largest max packet size for each interval, and
+ * assume all packets will be that size.
+ *
+ * For interval 0, we obviously must schedule all packets in every microframe.
+ * The bandwidth for interval 0 is just the amount of data to be transmitted
+ * (the sum of all max ESIT payload sizes, plus any overhead per packet times
+ * the number of packets).
+ *
+ * For interval 1, we have two possible microframes to schedule those packets
+ * in.  For this algorithm, if we can schedule the same number of packets for
+ * each possible scheduling opportunity (each microframe), we will do so.  The
+ * remaining number of packets will be saved to be transmitted in the gaps in
+ * the next interval's scheduling sequence.
+ *
+ * As we move those remaining packets to be scheduled with interval 2 packets,
+ * we have to double the number of remaining packets to transmit.  This is
+ * because the intervals are actually powers of 2, and we would be transmitting
+ * the previous interval's packets twice in this interval.  We also have to be
+ * sure that when we look at the largest max packet size for this interval, we
+ * also look at the largest max packet size for the remaining packets and take
+ * the greater of the two.
+ *
+ * The algorithm continues to evenly distribute packets in each scheduling
+ * opportunity, and push the remaining packets out, until we get to the last
+ * interval.  Then those packets and their associated overhead are just added
+ * to the bandwidth used.
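+ *
+ * For example (illustrative numbers only): suppose interval 1 contributes
+ * 5 packets and interval 2 contributes 6.  The code treats interval 1 as
+ * having 1 << 2 = 4 scheduling opportunities, so one packet lands in each
+ * (5 >> 2 = 1) and one packet's cost (overhead plus largest packet size)
+ * is charged to the worst-case microframe, with 5 % 4 = 1 packet carried
+ * over.  At interval 2 the carry doubles to 2 and joins the 6 new packets;
+ * the 8 packets exactly fill the 1 << 3 = 8 opportunities (8 >> 3 = 1),
+ * so another packet's cost is charged and nothing is pushed further out.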
+ */
+static int xhci_check_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	unsigned int bw_reserved;
+	unsigned int max_bandwidth;
+	unsigned int bw_used;
+	unsigned int block_size;
+	struct xhci_interval_bw_table *bw_table;
+	unsigned int packet_size = 0;
+	unsigned int overhead = 0;
+	unsigned int packets_transmitted = 0;
+	unsigned int packets_remaining = 0;
+	unsigned int i;
+
+	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
+		return xhci_check_ss_bw(xhci, virt_dev);
+
+	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+		max_bandwidth = HS_BW_LIMIT;
+		/* Convert percent of bus BW reserved to blocks reserved */
+		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
+	} else {
+		max_bandwidth = FS_BW_LIMIT;
+		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
+	}
+
+	bw_table = virt_dev->bw_table;
+	/* We need to translate the max packet size and max ESIT payloads into
+	 * the units the hardware uses.
+	 */
+	block_size = xhci_get_block_size(virt_dev->udev);
+
+	/* If we are manipulating an LS/FS device under a HS hub, double check
+	 * that the HS bus has enough bandwidth if we are activating a new TT.
+	 */
+	if (virt_dev->tt_info) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for rootport %u",
+				virt_dev->real_port);
+		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
+			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
+					"newly activated TT.\n");
+			return -ENOMEM;
+		}
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for TT slot %u port %u",
+				virt_dev->tt_info->slot_id,
+				virt_dev->tt_info->ttport);
+	} else {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Recalculating BW for rootport %u",
+				virt_dev->real_port);
+	}
+
+	/* Add in how much bandwidth will be used for interval zero, or the
+	 * rounded max ESIT payload + number of packets * largest overhead.
+	 */
+	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
+		bw_table->interval_bw[0].num_packets *
+		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
+
+	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
+		unsigned int bw_added;
+		unsigned int largest_mps;
+		unsigned int interval_overhead;
+
+		/*
+		 * How many packets could we transmit in this interval?
+		 * If packets didn't fit in the previous interval, we will need
+		 * to transmit that many packets twice within this interval.
+		 */
+		packets_remaining = 2 * packets_remaining +
+			bw_table->interval_bw[i].num_packets;
+
+		/* Find the largest max packet size of this or the previous
+		 * interval.
+		 */
+		if (list_empty(&bw_table->interval_bw[i].endpoints)) {
+			largest_mps = 0;
+		} else {
+			struct xhci_virt_ep *virt_ep;
+			struct list_head *ep_entry;
+
+			ep_entry = bw_table->interval_bw[i].endpoints.next;
+			virt_ep = list_entry(ep_entry,
+					struct xhci_virt_ep, bw_endpoint_list);
+			/* Convert to blocks, rounding up */
+			largest_mps = DIV_ROUND_UP(
+					virt_ep->bw_info.max_packet_size,
+					block_size);
+		}
+		if (largest_mps > packet_size)
+			packet_size = largest_mps;
+
+		/* Use the larger overhead of this or the previous interval. */
+		interval_overhead = xhci_get_largest_overhead(
+				&bw_table->interval_bw[i]);
+		if (interval_overhead > overhead)
+			overhead = interval_overhead;
+
+		/* How many packets can we evenly distribute across
+		 * (1 << (i + 1)) possible scheduling opportunities?
+		 */
+		packets_transmitted = packets_remaining >> (i + 1);
+
+		/* Add in the bandwidth used for those scheduled packets */
+		bw_added = packets_transmitted * (overhead + packet_size);
+
+		/* How many packets do we have remaining to transmit? */
+		packets_remaining = packets_remaining % (1 << (i + 1));
+
+		/* What largest max packet size should those packets have?
+		 * If we've transmitted all packets, don't carry over the
+		 * largest packet size.
+		 */
+		if (packets_remaining == 0) {
+			packet_size = 0;
+			overhead = 0;
+		} else if (packets_transmitted > 0) {
+			/* Otherwise if we do have remaining packets, and we've
+			 * scheduled some packets in this interval, take the
+			 * largest max packet size from endpoints with this
+			 * interval.
+			 */
+			packet_size = largest_mps;
+			overhead = interval_overhead;
+		}
+		/* Otherwise carry over packet_size and overhead from the last
+		 * time we had a remainder.
+		 */
+		bw_used += bw_added;
+		if (bw_used > max_bandwidth) {
+			xhci_warn(xhci, "Not enough bandwidth. "
+					"Proposed: %u, Max: %u\n",
+				bw_used, max_bandwidth);
+			return -ENOMEM;
+		}
+	}
+	/*
+	 * Ok, we know we have some packets left over after even-handedly
+	 * scheduling interval 15.  We don't know which microframes they will
+	 * fit into, so we over-schedule and say they will be scheduled every
+	 * microframe.
+	 */
+	if (packets_remaining > 0)
+		bw_used += overhead + packet_size;
+
+	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
+		unsigned int port_index = virt_dev->real_port - 1;
+
+		/* OK, we're manipulating a HS device attached to a
+		 * root port bandwidth domain.  Include the number of active TTs
+		 * in the bandwidth used.
+		 */
+		bw_used += TT_HS_OVERHEAD *
+			xhci->rh_bw[port_index].num_active_tts;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
+		"Available: %u percent",
+		bw_used, max_bandwidth, bw_reserved,
+		(max_bandwidth - bw_used - bw_reserved) * 100 /
+		max_bandwidth);
+
+	bw_used += bw_reserved;
+	if (bw_used > max_bandwidth) {
+		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
+				bw_used, max_bandwidth);
+		return -ENOMEM;
+	}
+
+	bw_table->bw_used = bw_used;
+	return 0;
+}
+
+static bool xhci_is_async_ep(unsigned int ep_type)
+{
+	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+					ep_type != ISOC_IN_EP &&
+					ep_type != INT_IN_EP);
+}
+
+static bool xhci_is_sync_in_ep(unsigned int ep_type)
+{
+	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+}
+
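+/*
+ * Estimate the SuperSpeed bandwidth an endpoint consumes, in blocks.  An
+ * endpoint serviced every bus interval (ep_interval == 0) is charged its
+ * full per-service cost; for longer intervals the cost is averaged over
+ * the 1 << ep_interval service opportunities.
+ */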
+static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+{
+	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
+
+	if (ep_bw->ep_interval == 0)
+		return SS_OVERHEAD_BURST +
+			(ep_bw->mult * ep_bw->num_packets *
+					(SS_OVERHEAD + mps));
+	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
+				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
+				1 << ep_bw->ep_interval);
+
+}
+
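+/*
+ * Note on the interval normalization used below: ep_interval is the log2
+ * of the service interval in microframes, while LS/FS table entries are
+ * kept in frames (8 microframes), hence the "- 3".  For example, an FS
+ * interrupt endpoint with ep_interval 3 (2^3 microframes = 1 frame) lands
+ * in interval-table slot 0.
+ */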
+static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw	*interval_bw;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type))
+		return;
+
+	if (udev->speed >= USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
+	/* SuperSpeed endpoints never get added to intervals in the table, so
+	 * this check is only valid for HS/FS/LS devices.
+	 */
+	if (list_empty(&virt_ep->bw_endpoint_list))
+		return;
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets -= ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+	if (tt_info)
+		tt_info->active_eps -= 1;
+	list_del_init(&virt_ep->bw_endpoint_list);
+}
+
+static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw	*interval_bw;
+	struct xhci_virt_ep *smaller_ep;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type))
+		return;
+
+	if (udev->speed >= USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets += ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+
+	if (tt_info)
+		tt_info->active_eps += 1;
+	/* Insert the endpoint into the list, largest max packet size first. */
+	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
+			bw_endpoint_list) {
+		if (ep_bw->max_packet_size >=
+				smaller_ep->bw_info.max_packet_size) {
+			/* Add the new ep before the smaller endpoint */
+			list_add_tail(&virt_ep->bw_endpoint_list,
+					&smaller_ep->bw_endpoint_list);
+			return;
+		}
+	}
+	/* Add the new endpoint at the end of the list. */
+	list_add_tail(&virt_ep->bw_endpoint_list,
+			&interval_bw->endpoints);
+}
+
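+/*
+ * Account for a TT being switched on or off: its fixed high-speed overhead
+ * (TT_HS_OVERHEAD) is charged to the root port once, when the first
+ * periodic endpoint behind the TT becomes active, and refunded when the
+ * last one goes away.
+ */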
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_root_port_bw_info *rh_bw_info;
+	if (!virt_dev->tt_info)
+		return;
+
+	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+	if (old_active_eps == 0 &&
+				virt_dev->tt_info->active_eps != 0) {
+		rh_bw_info->num_active_tts += 1;
+		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
+	} else if (old_active_eps != 0 &&
+				virt_dev->tt_info->active_eps == 0) {
+		rh_bw_info->num_active_tts -= 1;
+		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
+	}
+}
+
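+/*
+ * Tentatively apply the endpoint changes described by the input context to
+ * the interval tables and run the bandwidth check.  Each touched endpoint's
+ * bw_info is snapshotted first so that, if the check fails, every add can
+ * be backed out and every drop re-added, leaving the tables as they were.
+ */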
+static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_bw_info ep_bw_info[31];
+	int i;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	int old_active_eps = 0;
+
+	if (virt_dev->tt_info)
+		old_active_eps = virt_dev->tt_info->active_eps;
+
+	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Make a copy of the BW info in case we need to revert this */
+		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
+				sizeof(ep_bw_info[i]));
+		/* Drop the endpoint from the interval table if the endpoint is
+		 * being dropped or changed.
+		 */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	/* Overwrite the information stored in the endpoints' bw_info */
+	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
+	for (i = 0; i < 31; i++) {
+		/* Add any changed or added endpoints to the interval table */
+		if (EP_IS_ADDED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+
+	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
+		/* Ok, this fits in the bandwidth we have.
+		 * Update the number of active TTs.
+		 */
+		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+		return 0;
+	}
+
+	/* We don't have enough bandwidth for this, revert the stored info. */
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Drop the new copies of any added or changed endpoints from
+		 * the interval table.
+		 */
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+		}
+		/* Revert the endpoint back to its old information */
+		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
+				sizeof(ep_bw_info[i]));
+		/* Add any changed or dropped endpoints back into the table */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	return -ENOMEM;
+}
+
+/* Issue a configure endpoint command or evaluate context command
+ * and wait for it to finish.
+ */
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct xhci_command *command,
+		bool ctx_change, bool must_succeed)
+{
+	int ret;
+	unsigned long flags;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_slot_ctx *slot_ctx;
+
+	if (!command)
+		return -EINVAL;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+
+	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+	if (!ctrl_ctx) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "Not enough host resources, "
+				"active endpoint contexts = %u\n",
+				xhci->num_active_eps);
+		return -ENOMEM;
+	}
+	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, ctrl_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "Not enough bandwidth\n");
+		return -ENOMEM;
+	}
+
+	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+	trace_xhci_configure_endpoint(slot_ctx);
+
+	if (!ctx_change)
+		ret = xhci_queue_configure_endpoint(xhci, command,
+				command->in_ctx->dma,
+				udev->slot_id, must_succeed);
+	else
+		ret = xhci_queue_evaluate_context(xhci, command,
+				command->in_ctx->dma,
+				udev->slot_id, must_succeed);
+	if (ret < 0) {
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, ctrl_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
+				"FIXME allocate a new ring segment");
+		return -ENOMEM;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the configure endpoint command to complete */
+	wait_for_completion(command->completion);
+
+	if (!ctx_change)
+		ret = xhci_configure_endpoint_result(xhci, udev,
+						     &command->status);
+	else
+		ret = xhci_evaluate_context_result(xhci, udev,
+						   &command->status);
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* If the command failed, remove the reserved resources.
+		 * Otherwise, clean up the estimate to include dropped eps.
+		 */
+		if (ret)
+			xhci_free_host_resources(xhci, ctrl_ctx);
+		else
+			xhci_finish_resource_reservation(xhci, ctrl_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
+}
+
+static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
+	struct xhci_virt_device *vdev, int i)
+{
+	struct xhci_virt_ep *ep = &vdev->eps[i];
+
+	if (ep->ep_state & EP_HAS_STREAMS) {
+		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
+				xhci_get_endpoint_address(i));
+		xhci_free_stream_info(xhci, ep->stream_info);
+		ep->stream_info = NULL;
+		ep->ep_state &= ~EP_HAS_STREAMS;
+	}
+}
+
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint().  If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface.  Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ */
+static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	int i;
+	int ret = 0;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device	*virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_command *command;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+		(xhci->xhc_state & XHCI_STATE_REMOVING))
+		return -ENODEV;
+
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	command->in_ctx = virt_dev->in_ctx;
+
+	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		ret = -ENOMEM;
+		goto command_cleanup;
+	}
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+	/* Don't issue the command if there are no endpoints to update. */
+	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
+	    ctrl_ctx->drop_flags == 0) {
+		ret = 0;
+		goto command_cleanup;
+	}
+	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	for (i = 31; i >= 1; i--) {
+		__le32 le32 = cpu_to_le32(BIT(i));
+
+		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
+		    || (ctrl_ctx->add_flags & le32) || i == 1) {
+			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
+			break;
+		}
+	}
+
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			false, false);
+	if (ret)
+		/* Caller should call reset_bandwidth() */
+		goto command_cleanup;
+
+	/* Free any rings that were dropped, but not changed. */
+	for (i = 1; i < 31; i++) {
+		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
+		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
+			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+		}
+	}
+	xhci_zero_in_ctx(xhci, virt_dev);
+	/*
+	 * Install any rings for completely new endpoints or changed endpoints,
+	 * and free any old rings from changed endpoints.
+	 */
+	for (i = 1; i < 31; i++) {
+		if (!virt_dev->eps[i].new_ring)
+			continue;
+		/* Only free the old ring if it exists.
+		 * It may not if this is the first add of an endpoint.
+		 */
+		if (virt_dev->eps[i].ring)
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
+		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+		virt_dev->eps[i].new_ring = NULL;
+	}
+command_cleanup:
+	kfree(command->completion);
+	kfree(command);
+
+	return ret;
+}
+
+static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device	*virt_dev;
+	int i, ret;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+	if (ret <= 0)
+		return;
+	xhci = hcd_to_xhci(hcd);
+
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	virt_dev = xhci->devs[udev->slot_id];
+	/* Free any rings allocated for added endpoints */
+	for (i = 0; i < 31; i++) {
+		if (virt_dev->eps[i].new_ring) {
+			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
+			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+			virt_dev->eps[i].new_ring = NULL;
+		}
+	}
+	xhci_zero_in_ctx(xhci, virt_dev);
+}
+
+static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		struct xhci_input_control_ctx *ctrl_ctx,
+		u32 add_flags, u32 drop_flags)
+{
+	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
+	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
+	xhci_slot_copy(xhci, in_ctx, out_ctx);
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+}
+
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_container_ctx *in_ctx;
+	struct xhci_ep_ctx *ep_ctx;
+	u32 added_ctxs;
+	dma_addr_t addr;
+
+	in_ctx = xhci->devs[slot_id]->in_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return;
+	}
+
+	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ep_index);
+	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
+			deq_state->new_deq_ptr);
+	if (addr == 0) {
+		xhci_warn(xhci, "WARN Cannot submit config ep after "
+				"reset ep command\n");
+		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+				deq_state->new_deq_seg,
+				deq_state->new_deq_ptr);
+		return;
+	}
+	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
+
+	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
+	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
+			added_ctxs, added_ctxs);
+}
+
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+			       unsigned int stream_id, struct xhci_td *td)
+{
+	struct xhci_dequeue_state deq_state;
+	struct usb_device *udev = td->urb->dev;
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+			"Cleaning up stalled endpoint ring");
+	/* We need to move the HW's dequeue pointer past this TD,
+	 * or it will attempt to resend it on the next doorbell ring.
+	 */
+	xhci_find_new_dequeue_state(xhci, udev->slot_id,
+			ep_index, stream_id, td, &deq_state);
+
+	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
+		return;
+
+	/* HW with the reset endpoint quirk will use the saved dequeue state to
+	 * issue a configure endpoint command later.
+	 */
+	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+				"Queueing new dequeue state");
+		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+				ep_index, &deq_state);
+	} else {
+		/* Better hope no one uses the input context between now and the
+		 * reset endpoint completion!
+		 * XXX: No idea how this hardware will react when stream rings
+		 * are enabled.
+		 */
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Setting up input context for "
+				"configure endpoint command");
+		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+				ep_index, &deq_state);
+	}
+}
+
+/*
+ * Called after usb core issues a clear halt control message.
+ * The host side of the halt should already be cleared by a reset endpoint
+ * command issued when the STALL event was received.
+ *
+ * The reset endpoint command may only be issued to endpoints in the halted
+ * state. For software that wishes to reset the data toggle or sequence number
+ * of an endpoint that isn't in the halted state, this function will issue a
+ * configure endpoint command with the Drop and Add bits set for the target
+ * endpoint. Refer to the additional note in xhci specification section 4.6.8.
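+ *
+ * The sequence implemented below is therefore: stop the endpoint (which
+ * forces the xHC to flush endpoint state to the output context), then
+ * issue a configure endpoint command with both the Drop and Add bits set
+ * for the endpoint, which re-initializes it and clears the data toggle
+ * and sequence number.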
+ */
+static void xhci_endpoint_reset(struct usb_hcd *hcd,
+		struct usb_host_endpoint *host_ep)
+{
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	struct xhci_virt_device *vdev;
+	struct xhci_virt_ep *ep;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_command *stop_cmd, *cfg_cmd;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 ep_flag;
+
+	xhci = hcd_to_xhci(hcd);
+	if (!host_ep->hcpriv)
+		return;
+	udev = (struct usb_device *) host_ep->hcpriv;
+	vdev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&host_ep->desc);
+	ep = &vdev->eps[ep_index];
+
+	/* Bail out if toggle is already being cleared by an endpoint reset */
+	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
+		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+		return;
+	}
+	/* Only interrupt and bulk endpoints use data toggle, USB 2.0 spec 5.5.4 onward */
+	if (usb_endpoint_xfer_control(&host_ep->desc) ||
+	    usb_endpoint_xfer_isoc(&host_ep->desc))
+		return;
+
+	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
+
+	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
+		return;
+
+	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
+	if (!stop_cmd)
+		return;
+
+	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
+	if (!cfg_cmd)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* block queuing new trbs and ringing ep doorbell */
+	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
+
+	/*
+	 * Make sure the endpoint ring is empty before resetting the toggle/seq.
+	 * The driver is required to synchronously cancel all transfer requests.
+	 * Stop the endpoint to force the xHC to update the output context.
+	 */
+
+	if (!list_empty(&ep->ring->td_list)) {
+		dev_err(&udev->dev, "EP not empty, refuse reset\n");
+		/* Don't leave EP_SOFT_CLEAR_TOGGLE set on the error path */
+		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		goto cleanup;
+	}
+	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(stop_cmd->completion);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* config ep command clears toggle if add and drop ep flags are set */
+	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
+	if (!ctrl_ctx) {
+		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		goto cleanup;
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
+					   ctrl_ctx, ep_flag, ep_flag);
+	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
+
+	xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+				      udev->slot_id, false);
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(cfg_cmd->completion);
+
+	ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+	xhci_free_command(xhci, cfg_cmd);
+cleanup:
+	xhci_free_command(xhci, stop_cmd);
+}
+
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		unsigned int slot_id)
+{
+	int ret;
+	unsigned int ep_index;
+	unsigned int ep_state;
+
+	if (!ep)
+		return -EINVAL;
+	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
+	if (ret <= 0)
+		return -EINVAL;
+	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
+		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x does not support streams\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+	if (ep_state & EP_HAS_STREAMS ||
+			ep_state & EP_GETTING_STREAMS) {
+		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+				"already has streams set up.\n",
+				ep->desc.bEndpointAddress);
+		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+				"dynamic stream context array reallocation.\n");
+		return -EINVAL;
+	}
+	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+				"endpoint 0x%x; URBs are pending.\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	return 0;
+}
+
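+/*
+ * Size the stream context array and clamp the request to what the
+ * controller advertises.  For example, a request for 5 stream IDs (4
+ * usable plus stream 0) needs an 8-entry context array, since the array
+ * size must be a power of two.
+ */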
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+		unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+	unsigned int max_streams;
+
+	/* The stream context array size must be a power of two */
+	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
+	/*
+	 * Find out how many primary stream array entries the host controller
+	 * supports.  Later we may use secondary stream arrays (similar to 2nd
+	 * level page entries), but that's an optional feature for xHCI host
+	 * controllers. xHCs must support at least 4 stream IDs.
+	 */
+	max_streams = HCC_MAX_PSA(xhci->hcc_params);
+	if (*num_stream_ctxs > max_streams) {
+		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+				max_streams);
+		*num_stream_ctxs = max_streams;
+		*num_streams = max_streams;
+	}
+}
+
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+	unsigned int max_streams;
+	unsigned int endpoint_flag;
+	int i;
+	int ret;
+
+	for (i = 0; i < num_eps; i++) {
+		ret = xhci_check_streams_endpoint(xhci, udev,
+				eps[i], udev->slot_id);
+		if (ret < 0)
+			return ret;
+
+		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
+		if (max_streams < (*num_streams - 1)) {
+			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+					eps[i]->desc.bEndpointAddress,
+					max_streams);
+			*num_streams = max_streams + 1;
+		}
+
+		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+		if (*changed_ep_bitmask & endpoint_flag)
+			return -EINVAL;
+		*changed_ep_bitmask |= endpoint_flag;
+	}
+	return 0;
+}
+
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+	u32 changed_ep_bitmask = 0;
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	int i;
+
+	slot_id = udev->slot_id;
+	if (!xhci->devs[slot_id])
+		return 0;
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+		/* Are streams already being freed for the endpoint? */
+		if (ep_state & EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are being disabled already\n",
+					eps[i]->desc.bEndpointAddress);
+			return 0;
+		}
+		/* Are there actually any streams to free? */
+		if (!(ep_state & EP_HAS_STREAMS) &&
+				!(ep_state & EP_GETTING_STREAMS)) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are already disabled!\n",
+					eps[i]->desc.bEndpointAddress);
+			xhci_warn(xhci, "WARN xhci_free_streams() called "
+					"with non-streams endpoint\n");
+			return 0;
+		}
+		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+	}
+	return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if one endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get fewer stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
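+ *
+ * Drivers normally reach this through the USB core rather than calling it
+ * directly; for instance (illustrative, error handling omitted):
+ *
+ *	ret = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
+ *
+ * where a non-negative return value is the number of usable stream IDs,
+ * which may be fewer than requested.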
+ */
+static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int num_streams, gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	unsigned int ep_index;
+	unsigned int num_stream_ctxs;
+	unsigned int max_packet;
+	unsigned long flags;
+	u32 changed_ep_bitmask = 0;
+
+	if (!eps)
+		return -EINVAL;
+
+	/* Add one to the number of streams requested to account for
+	 * stream 0 that is reserved for xHCI usage.
+	 */
+	num_streams += 1;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+			num_streams);
+
+	/* MaxPSASize value 0 (2 streams) means streams are not supported */
+	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
+			HCC_MAX_PSA(xhci->hcc_params) < 4) {
+		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
+		return -ENOSYS;
+	}
+
+	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
+	if (!config_cmd)
+		return -ENOMEM;
+
+	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		xhci_free_command(xhci, config_cmd);
+		return -ENOMEM;
+	}
+
+	/* Check to make sure all endpoints are not already configured for
+	 * streams.  While we're at it, find the maximum number of streams that
+	 * all the endpoints will support and check for duplicate endpoints.
+	 */
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+			num_eps, &num_streams, &changed_ep_bitmask);
+	if (ret < 0) {
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+	if (num_streams <= 1) {
+		xhci_warn(xhci, "WARN: endpoints can't handle "
+				"more than one stream.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+	vdev = xhci->devs[udev->slot_id];
+	/* Mark each endpoint as being in transition, so
+	 * xhci_urb_enqueue() will reject all URBs.
+	 */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Setup internal data structures and allocate HW data structures for
+	 * streams (but don't install the HW structures in the input context
+	 * until we're sure all memory allocation succeeded).
+	 */
+	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+			num_stream_ctxs, num_streams);
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		max_packet = usb_endpoint_maxp(&eps[i]->desc);
+		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+				num_stream_ctxs,
+				num_streams,
+				max_packet, mem_flags);
+		if (!vdev->eps[ep_index].stream_info)
+			goto cleanup;
+		/* Set maxPstreams in endpoint context and update deq ptr to
+		 * point to stream context array. FIXME
+		 */
+	}
+
+	/* Set up the input context for a configure endpoint command. */
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+				vdev->eps[ep_index].stream_info);
+	}
+	/* Tell the HW to drop its old copy of the endpoint context info
+	 * and add the updated copy from the input context.
+	 */
+	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+			vdev->out_ctx, ctrl_ctx,
+			changed_ep_bitmask, changed_ep_bitmask);
+
+	/* Issue and wait for the configure endpoint command */
+	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+			false, false);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the old ring intact and free our internal streams data
+	 * structure.
+	 */
+	if (ret < 0)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+			 udev->slot_id, ep_index);
+		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+	}
+	xhci_free_command(xhci, config_cmd);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Subtract 1 for stream 0, which drivers can't use */
+	return num_streams - 1;
+
+cleanup:
+	/* If it didn't work, free the streams! */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		vdev->eps[ep_index].stream_info = NULL;
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to the normal endpoint ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+		xhci_endpoint_zero(xhci, vdev, eps[i]);
+	}
+	xhci_free_command(xhci, config_cmd);
+	return -ENOMEM;
+}
+
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *command;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 changed_ep_bitmask;
+
+	xhci = hcd_to_xhci(hcd);
+	vdev = xhci->devs[udev->slot_id];
+
+	/* Set up a configure endpoint command to remove the streams rings */
+	spin_lock_irqsave(&xhci->lock, flags);
+	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+			udev, eps, num_eps);
+	if (changed_ep_bitmask == 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	/* Use the xhci_command structure from the first endpoint.  We may have
+	 * allocated too many, but the driver may call xhci_free_streams() for
+	 * each endpoint it grouped into one call to xhci_alloc_streams().
+	 */
+	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+	command = vdev->eps[ep_index].stream_info->free_streams_command;
+	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+	if (!ctrl_ctx) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+			EP_GETTING_NO_STREAMS;
+
+		xhci_endpoint_copy(xhci, command->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
+				&vdev->eps[ep_index]);
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+			vdev->out_ctx, ctrl_ctx,
+			changed_ep_bitmask, changed_ep_bitmask);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Issue and wait for the configure endpoint command,
+	 * which must succeed.
+	 */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			false, true);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the streams rings intact.
+	 */
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		vdev->eps[ep_index].stream_info = NULL;
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to the normal endpoint ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command.  The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+	struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+	int i;
+	unsigned int num_dropped_eps = 0;
+	unsigned int drop_flags = 0;
+
+	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+		if (virt_dev->eps[i].ring) {
+			drop_flags |= 1 << i;
+			num_dropped_eps++;
+		}
+	}
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Dropped %u ep ctxs, flags = 0x%x, "
+				"%u now active.",
+				num_dropped_eps, drop_flags,
+				xhci->num_active_eps);
+}
+
+/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint.  The USB core should come back and call
+ * xhci_address_device(), and then re-set up the configuration.  If this is
+ * called because of a usb_reset_and_verify_device(), then the old alternate
+ * settings will be re-installed through the normal bandwidth allocation
+ * functions.
+ *
+ * Wait for the Reset Device command to finish.  Remove all structures
+ * associated with the endpoints that were disabled.  Clear the input device
+ * structure? Reset the control endpoint 0 max packet size?
+ *
+ * If the virt_dev to be reset does not exist or does not match the udev,
+ * it means the device is lost, possibly due to an xHC restore error and
+ * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
+ * re-allocate the device.
+ */
+static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
+		struct usb_device *udev)
+{
+	int ret, i;
+	unsigned long flags;
+	struct xhci_hcd *xhci;
+	unsigned int slot_id;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *reset_device_cmd;
+	struct xhci_slot_ctx *slot_ctx;
+	int old_active_eps = 0;
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
+	if (ret <= 0)
+		return ret;
+	xhci = hcd_to_xhci(hcd);
+	slot_id = udev->slot_id;
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev) {
+		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
+				"not exist. Re-allocate the device\n", slot_id);
+		ret = xhci_alloc_dev(hcd, udev);
+		if (ret == 1)
+			return 0;
+		return -EINVAL;
+	}
+
+	if (virt_dev->tt_info)
+		old_active_eps = virt_dev->tt_info->active_eps;
+
+	if (virt_dev->udev != udev) {
+		/* If the virt_dev and the udev do not match, this virt_dev
+		 * may belong to another udev.
+		 * Re-allocate the device.
+		 */
+		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
+				"not match the udev. Re-allocate the device\n",
+				slot_id);
+		ret = xhci_alloc_dev(hcd, udev);
+		if (ret == 1)
+			return 0;
+		return -EINVAL;
+	}
+
+	/* If device is not setup, there is no point in resetting it */
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+						SLOT_STATE_DISABLED)
+		return 0;
+
+	trace_xhci_discover_or_reset_device(slot_ctx);
+
+	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+	/* Allocate the command structure that holds the struct completion.
+	 * Assume we're in process context, since the normal device reset
+	 * process has to wait for the device anyway.  Storage devices are
+	 * reset as part of error handling, so use GFP_NOIO instead of
+	 * GFP_KERNEL.
+	 */
+	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
+	if (!reset_device_cmd) {
+		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+		return -ENOMEM;
+	}
+
+	/* Attempt to submit the Reset Device command to the command ring */
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
+	if (ret) {
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto command_cleanup;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Wait for the Reset Device command to finish */
+	wait_for_completion(reset_device_cmd->completion);
+
+	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
+	 * unless we tried to reset a slot ID that wasn't enabled,
+	 * or the device wasn't in the addressed or configured state.
+	 */
+	ret = reset_device_cmd->status;
+	switch (ret) {
+	case COMP_COMMAND_ABORTED:
+	case COMP_COMMAND_RING_STOPPED:
+		xhci_warn(xhci, "Timeout waiting for reset device command\n");
+		ret = -ETIME;
+		goto command_cleanup;
+	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
+	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
+		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
+				slot_id,
+				xhci_get_slot_state(xhci, virt_dev->out_ctx));
+		xhci_dbg(xhci, "Not freeing device rings.\n");
+		/* Don't treat this as an error.  May change my mind later. */
+		ret = 0;
+		goto command_cleanup;
+	case COMP_SUCCESS:
+		xhci_dbg(xhci, "Successful reset device command.\n");
+		break;
+	default:
+		if (xhci_is_vendor_info_code(xhci, ret))
+			break;
+		xhci_warn(xhci, "Unknown completion code %u for "
+				"reset device command.\n", ret);
+		ret = -EINVAL;
+		goto command_cleanup;
+	}
+
+	/* Free up host controller endpoint resources */
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* Don't delete the default control endpoint resources */
+		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+
+	/* Everything but endpoint 0 is disabled, so free the rings. */
+	for (i = 1; i < 31; i++) {
+		struct xhci_virt_ep *ep = &virt_dev->eps[i];
+
+		if (ep->ep_state & EP_HAS_STREAMS) {
+			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
+					xhci_get_endpoint_address(i));
+			xhci_free_stream_info(xhci, ep->stream_info);
+			ep->stream_info = NULL;
+			ep->ep_state &= ~EP_HAS_STREAMS;
+		}
+
+		if (ep->ring) {
+			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
+		}
+		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
+	}
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+	ret = 0;
+
+command_cleanup:
+	xhci_free_command(xhci, reset_device_cmd);
+	return ret;
+}
+
+/*
+ * At this point, the struct usb_device is about to go away, the device has
+ * disconnected, and all traffic has been stopped and the endpoints have been
+ * disabled.  Free any HC data structures associated with that device.
+ */
+static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *virt_dev;
+	struct xhci_slot_ctx *slot_ctx;
+	int i, ret;
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+	/*
+	 * We called pm_runtime_get_noresume when the device was attached.
+	 * Decrement the counter here to allow controller to runtime suspend
+	 * if no devices remain.
+	 */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_put_noidle(hcd->self.controller);
+#endif
+
+	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+	/* If the host is halted due to driver unload, we still need to free the
+	 * device.
+	 */
+	if (ret <= 0 && ret != -ENODEV)
+		return;
+
+	virt_dev = xhci->devs[udev->slot_id];
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	trace_xhci_free_dev(slot_ctx);
+
+	/* Stop any wayward timer functions (which may grab the lock) */
+	for (i = 0; i < 31; i++) {
+		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
+		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+	}
+	xhci_debugfs_remove_slot(xhci, udev->slot_id);
+	virt_dev->udev = NULL;
+	ret = xhci_disable_slot(xhci, udev->slot_id);
+	if (ret)
+		xhci_free_virt_device(xhci, udev->slot_id);
+}
+
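+/*
+ * Queue a Disable Slot command for slot_id and ring the doorbell.  This
+ * only submits the command; it does not wait for completion.  If queuing
+ * fails, the caller is expected to free the virt device itself.
+ */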
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	u32 state;
+	int ret = 0;
+
+	command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	/* Don't disable the slot if the host controller is dead. */
+	state = readl(&xhci->op_regs->status);
+	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+			(xhci->xhc_state & XHCI_STATE_HALTED)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		kfree(command);
+		return -ENODEV;
+	}
+
+	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+				slot_id);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		kfree(command);
+		return ret;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Not enough ep ctxs: "
+				"%u active, need to add 1, limit is %u.",
+				xhci->num_active_eps, xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += 1;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"Adding 1 ep ctx, %u now active.",
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed.  Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *vdev;
+	struct xhci_slot_ctx *slot_ctx;
+	unsigned long flags;
+	int ret, slot_id;
+	struct xhci_command *command;
+
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	if (!command)
+		return 0;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+		xhci_free_command(xhci, command);
+		return 0;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(command->completion);
+	slot_id = command->slot_id;
+
+	if (!slot_id || command->status != COMP_SUCCESS) {
+		xhci_err(xhci, "Error while assigning device slot ID\n");
+		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
+				HCS_MAX_SLOTS(
+					readl(&xhci->cap_regs->hcs_params1)));
+		xhci_free_command(xhci, command);
+		return 0;
+	}
+
+	xhci_free_command(xhci, command);
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_reserve_host_control_ep_resources(xhci);
+		if (ret) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			goto disable_slot;
+		}
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	/* Use GFP_NOIO, since this function can be called from
+	 * xhci_discover_or_reset_device(), which may be called as part of
+	 * mass storage driver error handling.
+	 */
+	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
+		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+		goto disable_slot;
+	}
+	vdev = xhci->devs[slot_id];
+	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+	trace_xhci_alloc_dev(slot_ctx);
+
+	udev->slot_id = slot_id;
+
+	xhci_debugfs_create_slot(xhci, slot_id);
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+	/*
+	 * If resetting upon resume, we can't put the controller into runtime
+	 * suspend if there is a device attached.
+	 */
+	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+		pm_runtime_get_noresume(hcd->self.controller);
+#endif
+
+	/* Is this a LS or FS device under a HS hub? */
+	/* Hub or peripheral? */
+	return 1;
+
+disable_slot:
+	ret = xhci_disable_slot(xhci, udev->slot_id);
+	if (ret)
+		xhci_free_virt_device(xhci, udev->slot_id);
+
+	return 0;
+}
+
+/*
+ * Issue an Address Device command and optionally send a corresponding
+ * SetAddress request to the device.
+ */
+static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+			     enum xhci_setup_dev setup)
+{
+	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
+	unsigned long flags;
+	struct xhci_virt_device *virt_dev;
+	int ret = 0;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u64 temp_64;
+	struct xhci_command *command = NULL;
+
+	mutex_lock(&xhci->mutex);
+
+	if (xhci->xhc_state) {	/* dying, removing or halted */
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	if (!udev->slot_id) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+				"Bad Slot ID %d", udev->slot_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+
+	if (WARN_ON(!virt_dev)) {
+		/*
+		 * In a plug/unplug torture test with an NEC controller,
+		 * a NULL pointer dereference was observed once because
+		 * virt_dev was NULL. Print useful debug info rather than
+		 * crash if it is observed again!
+		 */
+		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
+			udev->slot_id);
+		ret = -EINVAL;
+		goto out;
+	}
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	trace_xhci_setup_device_slot(slot_ctx);
+
+	if (setup == SETUP_CONTEXT_ONLY) {
+		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+		    SLOT_STATE_DEFAULT) {
+			xhci_dbg(xhci, "Slot already in default state\n");
+			goto out;
+		}
+	}
+
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	if (!command) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	command->in_ctx = virt_dev->in_ctx;
+
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * If this is the first Set Address since device plug-in or
+	 * virt_device reallocation after a resume with an xHCI power loss,
+	 * then set up the slot context.
+	 */
+	if (!slot_ctx->dev_info)
+		xhci_setup_addressable_virt_dev(xhci, udev);
+	/* Otherwise, update the control endpoint ring enqueue pointer. */
+	else
+		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+	ctrl_ctx->drop_flags = 0;
+
+	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+				le32_to_cpu(slot_ctx->dev_info) >> 27);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	trace_xhci_setup_device(virt_dev);
+	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
+					udev->slot_id, setup);
+	if (ret) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+				"FIXME: allocate a command ring segment");
+		goto out;
+	}
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+	wait_for_completion(command->completion);
+
+	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
+	 * the SetAddress() "recovery interval" required by USB and aborting the
+	 * command on a timeout.
+	 */
+	switch (command->status) {
+	case COMP_COMMAND_ABORTED:
+	case COMP_COMMAND_RING_STOPPED:
+		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
+		ret = -ETIME;
+		break;
+	case COMP_CONTEXT_STATE_ERROR:
+	case COMP_SLOT_NOT_ENABLED_ERROR:
+		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
+			 act, udev->slot_id);
+		ret = -EINVAL;
+		break;
+	case COMP_USB_TRANSACTION_ERROR:
+		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
+
+		mutex_unlock(&xhci->mutex);
+		ret = xhci_disable_slot(xhci, udev->slot_id);
+		if (!ret)
+			xhci_alloc_dev(hcd, udev);
+		kfree(command->completion);
+		kfree(command);
+		return -EPROTO;
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		dev_warn(&udev->dev,
+			 "ERROR: Incompatible device for setup %s command\n", act);
+		ret = -ENODEV;
+		break;
+	case COMP_SUCCESS:
+		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			       "Successful setup %s command", act);
+		break;
+	default:
+		xhci_err(xhci,
+			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
+			 act, command->status);
+		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Op regs DCBAA ptr = %#016llx", temp_64);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+		"Slot ID %d dcbaa entry @%p = %#016llx",
+		udev->slot_id,
+		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+		(unsigned long long)
+		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+			"Output Context DMA address = %#08llx",
+			(unsigned long long)virt_dev->out_ctx->dma);
+	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+				le32_to_cpu(slot_ctx->dev_info) >> 27);
+	/*
+	 * USB core uses address 1 for the roothubs, so we add one to the
+	 * address given back to us by the HC.
+	 */
+	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
+				le32_to_cpu(slot_ctx->dev_info) >> 27);
+	/* Zero the input context control for later use */
+	ctrl_ctx->add_flags = 0;
+	ctrl_ctx->drop_flags = 0;
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+		       "Internal device address = %d",
+		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+out:
+	mutex_unlock(&xhci->mutex);
+	if (command) {
+		kfree(command->completion);
+		kfree(command);
+	}
+	return ret;
+}
+
+static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+}
+
+static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+}
+
+/*
+ * Translate a root-hub port index into the raw index used by the HW port
+ * status registers: compute the offset between the port's PORTSC register
+ * and the port status base, then divide by the number of registers per
+ * port to get the real index. Raw port numbers are 1-based.
+ */
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+	struct xhci_hub *rhub;
+
+	rhub = xhci_get_rhub(hcd);
+	return rhub->ports[port1 - 1]->hw_portnum + 1;
+}
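+
+/*
+ * Illustrative example for xhci_find_raw_port_number() above (hypothetical
+ * numbering, not from the spec): if the xHC's USB2-protocol ports occupy
+ * zero-based hardware slots 4..7, then port1 == 1 on the USB2 roothub has
+ * hw_portnum 4 and the raw port number returned is 5.
+ */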
+
+/*
+ * Issue an Evaluate Context command to change the Maximum Exit Latency in the
+ * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
+ */
+static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+			struct usb_device *udev, u16 max_exit_latency)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_command *command;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	virt_dev = xhci->devs[udev->slot_id];
+
+	/*
+	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
+	 * xHC was re-initialized. Exit latency will be set later after
+	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
+	 */
+
+	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return 0;
+	}
+
+	/* Attempt to issue an Evaluate Context command to change the MEL. */
+	command = xhci->lpm_command;
+	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+	if (!ctrl_ctx) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
+	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
+	slot_ctx->dev_state = 0;
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+			"Set up evaluate context for LPM MEL change.");
+
+	/* Issue and wait for the evaluate context command. */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			true, true);
+
+	if (!ret) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		virt_dev->current_mel = max_exit_latency;
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
+}
+
+#ifdef CONFIG_PM
+
+/* BESL to HIRD Encoding array for USB2 LPM */
+static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
+	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
+
+/* Calculate HIRD/BESL for USB2 PORTPMSC */
+static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
+					struct usb_device *udev)
+{
+	int u2del, besl, besl_host;
+	int besl_device = 0;
+	u32 field;
+
+	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+	if (field & USB_BESL_SUPPORT) {
+		for (besl_host = 0; besl_host < 16; besl_host++) {
+			if (xhci_besl_encoding[besl_host] >= u2del)
+				break;
+		}
+		/* Use baseline BESL value as default */
+		if (field & USB_BESL_BASELINE_VALID)
+			besl_device = USB_GET_BESL_BASELINE(field);
+		else if (field & USB_BESL_DEEP_VALID)
+			besl_device = USB_GET_BESL_DEEP(field);
+	} else {
+		if (u2del <= 50)
+			besl_host = 0;
+		else
+			besl_host = (u2del - 51) / 75 + 1;
+	}
+
+	besl = besl_host + besl_device;
+	if (besl > 15)
+		besl = 15;
+
+	return besl;
+}
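+
+/*
+ * Worked example for xhci_calculate_hird_besl() above (illustrative values,
+ * not from the spec): with a host U2 exit latency of u2del = 300 us and a
+ * device advertising a valid BESL baseline of 3, the loop picks
+ * besl_host = 3 (the first table entry >= 300), so besl = 3 + 3 = 6,
+ * well under the cap of 15.
+ */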
+
+/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
+static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
+{
+	u32 field;
+	int l1;
+	int besld = 0;
+	int hirdm = 0;
+
+	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
+	l1 = udev->l1_params.timeout / 256;
+
+	/* device has preferred BESLD */
+	if (field & USB_BESL_DEEP_VALID) {
+		besld = USB_GET_BESL_DEEP(field);
+		hirdm = 1;
+	}
+
+	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
+}
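+
+/*
+ * Worked example for xhci_calculate_usb2_hw_lpm_params() above (assumed
+ * values): with the default L1 timeout of 512 us, l1 = 512 / 256 = 2; a
+ * device reporting a valid deep BESL of 5 gives besld = 5 and hirdm = 1,
+ * so the function returns
+ * PORT_BESLD(5) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1) = 0x1409.
+ */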
+
+static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+			struct usb_device *udev, int enable)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct xhci_port **ports;
+	__le32 __iomem	*pm_addr, *hlpm_addr;
+	u32		pm_val, hlpm_val, field;
+	unsigned int	port_num;
+	unsigned long	flags;
+	int		hird, exit_latency;
+	int		ret;
+
+	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
+			!udev->lpm_capable)
+		return -EPERM;
+
+	if (!udev->parent || udev->parent->parent ||
+			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+		return -EPERM;
+
+	if (udev->usb2_hw_lpm_capable != 1)
+		return -EPERM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	ports = xhci->usb2_rhub.ports;
+	port_num = udev->portnum - 1;
+	pm_addr = ports[port_num]->addr + PORTPMSC;
+	pm_val = readl(pm_addr);
+	hlpm_addr = ports[port_num]->addr + PORTHLPMC;
+	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
+
+	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
+			enable ? "enable" : "disable", port_num + 1);
+
+	if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+		/* Host supports BESL timeout instead of HIRD */
+		if (udev->usb2_hw_lpm_besl_capable) {
+			/* if device doesn't have a preferred BESL value use a
+			 * default one which works with mixed HIRD and BESL
+			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
+			 */
+			if ((field & USB_BESL_SUPPORT) &&
+			    (field & USB_BESL_BASELINE_VALID))
+				hird = USB_GET_BESL_BASELINE(field);
+			else
+				hird = udev->l1_params.besl;
+
+			exit_latency = xhci_besl_encoding[hird];
+			spin_unlock_irqrestore(&xhci->lock, flags);
+
+			/* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx
+			 * input context for link power management evaluate
+			 * context commands. It is protected by the hcd->bandwidth
+			 * mutex and is shared by all devices. We need to set
+			 * the max exit latency for USB 2 BESL LPM as well, so
+			 * use the same mutex and xhci_change_max_exit_latency().
+			 */
+			mutex_lock(hcd->bandwidth_mutex);
+			ret = xhci_change_max_exit_latency(xhci, udev,
+							   exit_latency);
+			mutex_unlock(hcd->bandwidth_mutex);
+
+			if (ret < 0)
+				return ret;
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
+			writel(hlpm_val, hlpm_addr);
+			/* flush write */
+			readl(hlpm_addr);
+		} else {
+			hird = xhci_calculate_hird_besl(xhci, udev);
+		}
+
+		pm_val &= ~PORT_HIRD_MASK;
+		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
+		writel(pm_val, pm_addr);
+		pm_val = readl(pm_addr);
+		pm_val |= PORT_HLE;
+		writel(pm_val, pm_addr);
+		/* flush write */
+		readl(pm_addr);
+	} else {
+		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
+		writel(pm_val, pm_addr);
+		/* flush write */
+		readl(pm_addr);
+		if (udev->usb2_hw_lpm_besl_capable) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			mutex_lock(hcd->bandwidth_mutex);
+			xhci_change_max_exit_latency(xhci, udev, 0);
+			mutex_unlock(hcd->bandwidth_mutex);
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
+}
+
+/* Check if a USB2 port supports a given extended capability protocol.
+ * Only USB2 ports' extended protocol capability values are cached.
+ * Return 1 if the capability is supported.
+ */
+static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
+					   unsigned capability)
+{
+	u32 port_offset, port_count;
+	int i;
+
+	for (i = 0; i < xhci->num_ext_caps; i++) {
+		if (xhci->ext_caps[i] & capability) {
+			/* port offsets start at 1 */
+			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
+			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
+			if (port >= port_offset &&
+			    port < port_offset + port_count)
+				return 1;
+		}
+	}
+	return 0;
+}
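+
+/*
+ * Example for xhci_check_usb2_port_capability() above (hypothetical
+ * extended capability): an entry declaring a 1-based port offset of 1 and
+ * a port count of 4 covers zero-based ports 0..3, so a call with
+ * port == 2 returns 1 and a call with port == 4 returns 0.
+ */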
+
+static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int		portnum = udev->portnum - 1;
+
+	if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
+			!udev->lpm_capable)
+		return 0;
+
+	/* So far we only support LPM for non-hub devices connected directly to the root hub */
+	if (!udev->parent || udev->parent->parent ||
+			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+		return 0;
+
+	if (xhci->hw_lpm_support == 1 &&
+			xhci_check_usb2_port_capability(
+				xhci, portnum, XHCI_HLC)) {
+		udev->usb2_hw_lpm_capable = 1;
+		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
+		udev->l1_params.besl = XHCI_DEFAULT_BESL;
+		if (xhci_check_usb2_port_capability(xhci, portnum,
+					XHCI_BLC))
+			udev->usb2_hw_lpm_besl_capable = 1;
+	}
+
+	return 0;
+}
+
+/*---------------------- USB 3.0 Link PM functions ------------------------*/
+
+/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
+static unsigned long long xhci_service_interval_to_ns(
+		struct usb_endpoint_descriptor *desc)
+{
+	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
+}
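+
+/*
+ * Example for xhci_service_interval_to_ns() above: an endpoint with
+ * bInterval = 4 has a service interval of 2^3 * 125 us = 1 ms, so this
+ * returns 1,000,000 ns.
+ */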
+
+static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
+		enum usb3_link_state state)
+{
+	unsigned long long sel;
+	unsigned long long pel;
+	unsigned int max_sel_pel;
+	char *state_name;
+
+	switch (state) {
+	case USB3_LPM_U1:
+		/* Convert SEL and PEL stored in nanoseconds to microseconds */
+		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
+		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
+		state_name = "U1";
+		break;
+	case USB3_LPM_U2:
+		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
+		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
+		state_name = "U2";
+		break;
+	default:
+		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
+				__func__);
+		return USB3_LPM_DISABLED;
+	}
+
+	if (sel <= max_sel_pel && pel <= max_sel_pel)
+		return USB3_LPM_DEVICE_INITIATED;
+
+	if (sel > max_sel_pel)
+		dev_dbg(&udev->dev, "Device-initiated %s disabled "
+				"due to long SEL %llu ms\n",
+				state_name, sel);
+	else
+		dev_dbg(&udev->dev, "Device-initiated %s disabled "
+				"due to long PEL %llu ms\n",
+				state_name, pel);
+	return USB3_LPM_DISABLED;
+}
+
+/* The U1 timeout should be the maximum of the following values:
+ *  - For control endpoints, U1 system exit latency (SEL) * 3
+ *  - For bulk endpoints, U1 SEL * 5
+ *  - For interrupt endpoints:
+ *    - Notification EPs, U1 SEL * 3
+ *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
+ *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
+ */
+static unsigned long long xhci_calculate_intel_u1_timeout(
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc)
+{
+	unsigned long long timeout_ns;
+	int ep_type;
+	int intr_type;
+
+	ep_type = usb_endpoint_type(desc);
+	switch (ep_type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		timeout_ns = udev->u1_params.sel * 3;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		timeout_ns = udev->u1_params.sel * 5;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		intr_type = usb_endpoint_interrupt_type(desc);
+		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
+			timeout_ns = udev->u1_params.sel * 3;
+			break;
+		}
+		/* Otherwise the calculation is the same as isoc eps */
+		/* fall through */
+	case USB_ENDPOINT_XFER_ISOC:
+		timeout_ns = xhci_service_interval_to_ns(desc);
+		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
+		if (timeout_ns < udev->u1_params.sel * 2)
+			timeout_ns = udev->u1_params.sel * 2;
+		break;
+	default:
+		return 0;
+	}
+
+	return timeout_ns;
+}
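+
+/*
+ * Worked example for xhci_calculate_intel_u1_timeout() above (assumed
+ * values): a bulk endpoint on a device with u1_params.sel = 400 ns gets
+ * timeout_ns = 400 * 5 = 2000 ns; an isoc endpoint with bInterval = 1
+ * (125 us interval) gets max(105% of 125,000 ns, 2 * 400 ns) = 131,250 ns.
+ */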
+
+/* Returns the hub-encoded U1 timeout value. */
+static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc)
+{
+	unsigned long long timeout_ns;
+
+	/* Prevent U1 if service interval is shorter than U1 exit latency */
+	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
+			return USB3_LPM_DISABLED;
+		}
+	}
+
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+	else
+		timeout_ns = udev->u1_params.sel;
+
+	/* The U1 timeout is encoded in 1us intervals.
+	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
+	 */
+	if (timeout_ns == USB3_LPM_DISABLED)
+		timeout_ns = 1;
+	else
+		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
+
+	/* If the necessary timeout value is bigger than what we can set in the
+	 * USB 3.0 hub, we have to disable hub-initiated U1.
+	 */
+	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
+		return timeout_ns;
+	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
+			"due to long timeout %llu ms\n", timeout_ns);
+	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
+}
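+
+/*
+ * Illustrative encoding for xhci_calculate_u1_timeout() above (assumed
+ * non-Intel host): udev->u1_params.sel = 400 ns becomes
+ * DIV_ROUND_UP_ULL(400, 1000) = 1 us, well under USB3_LPM_U1_MAX_TIMEOUT,
+ * so 1 is returned as the hub-encoded U1 timeout.
+ */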
+
+/* The U2 timeout should be the maximum of:
+ *  - 10 ms (to avoid the bandwidth impact on the scheduler)
+ *  - largest bInterval of any active periodic endpoint (to avoid going
+ *    into lower power link states between intervals).
+ *  - the U2 Exit Latency of the device
+ */
+static unsigned long long xhci_calculate_intel_u2_timeout(
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc)
+{
+	unsigned long long timeout_ns;
+	unsigned long long u2_del_ns;
+
+	timeout_ns = 10 * 1000 * 1000;
+
+	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
+			(xhci_service_interval_to_ns(desc) > timeout_ns))
+		timeout_ns = xhci_service_interval_to_ns(desc);
+
+	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
+	if (u2_del_ns > timeout_ns)
+		timeout_ns = u2_del_ns;
+
+	return timeout_ns;
+}
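+
+/*
+ * Worked example for xhci_calculate_intel_u2_timeout() above (assumed
+ * values): for an isoc endpoint with bInterval = 8 (2^7 * 125 us = 16 ms
+ * service interval) and bU2DevExitLat = 2000 us, the 16,000,000 ns
+ * interval dominates both the 10 ms floor and the 2,000,000 ns exit
+ * latency, so timeout_ns = 16,000,000.
+ */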
+
+/* Returns the hub-encoded U2 timeout value. */
+static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc)
+{
+	unsigned long long timeout_ns;
+
+	/* Prevent U2 if service interval is shorter than U2 exit latency */
+	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
+			return USB3_LPM_DISABLED;
+		}
+	}
+
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+	else
+		timeout_ns = udev->u2_params.sel;
+
+	/* The U2 timeout is encoded in 256us intervals */
+	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
+	/* If the necessary timeout value is bigger than what we can set in the
+	 * USB 3.0 hub, we have to disable hub-initiated U2.
+	 */
+	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
+		return timeout_ns;
+	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
+			"due to long timeout %llu ms\n", timeout_ns);
+	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
+}
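+
+/*
+ * Continuing the illustrative example above: 16,000,000 ns encodes as
+ * DIV_ROUND_UP_ULL(16000000, 256 * 1000) = 63, which is within
+ * USB3_LPM_U2_MAX_TIMEOUT, so 63 is returned as the hub-encoded U2 timeout.
+ */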
+
+static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc,
+		enum usb3_link_state state,
+		u16 *timeout)
+{
+	if (state == USB3_LPM_U1)
+		return xhci_calculate_u1_timeout(xhci, udev, desc);
+	else if (state == USB3_LPM_U2)
+		return xhci_calculate_u2_timeout(xhci, udev, desc);
+
+	return USB3_LPM_DISABLED;
+}
+
+static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_endpoint_descriptor *desc,
+		enum usb3_link_state state,
+		u16 *timeout)
+{
+	u16 alt_timeout;
+
+	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
+		desc, state, timeout);
+
+	/* If we found we can't enable hub-initiated LPM, or
+	 * the U1 or U2 exit latency was too high to allow
+	 * device-initiated LPM as well, just stop searching.
+	 */
+	if (alt_timeout == USB3_LPM_DISABLED ||
+			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+		*timeout = alt_timeout;
+		return -E2BIG;
+	}
+	if (alt_timeout > *timeout)
+		*timeout = alt_timeout;
+	return 0;
+}
+
+static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_interface *alt,
+		enum usb3_link_state state,
+		u16 *timeout)
+{
+	int j;
+
+	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
+		if (xhci_update_timeout_for_endpoint(xhci, udev,
+					&alt->endpoint[j].desc, state, timeout))
+			return -E2BIG;
+	}
+	return 0;
+}
+
+static int xhci_check_intel_tier_policy(struct usb_device *udev,
+		enum usb3_link_state state)
+{
+	struct usb_device *parent;
+	unsigned int num_hubs;
+
+	if (state == USB3_LPM_U2)
+		return 0;
+
+	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
+	for (parent = udev->parent, num_hubs = 0; parent->parent;
+			parent = parent->parent)
+		num_hubs++;
+
+	if (num_hubs < 2)
+		return 0;
+
+	dev_dbg(&udev->dev, "Disabling U1 link state for device"
+			" below second-tier hub.\n");
+	dev_dbg(&udev->dev, "Plug device into first-tier hub "
+			"to decrease power consumption.\n");
+	return -E2BIG;
+}
+
+static int xhci_check_tier_policy(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		enum usb3_link_state state)
+{
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		return xhci_check_intel_tier_policy(udev, state);
+	else
+		return 0;
+}
+
+/* Returns the U1 or U2 timeout that should be enabled.
+ * If the tier check or timeout setting functions return with a non-zero exit
+ * code, that means the timeout value has been finalized and we shouldn't look
+ * at any more endpoints.
+ */
+static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
+			struct usb_device *udev, enum usb3_link_state state)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct usb_host_config *config;
+	char *state_name;
+	int i;
+	u16 timeout = USB3_LPM_DISABLED;
+
+	if (state == USB3_LPM_U1)
+		state_name = "U1";
+	else if (state == USB3_LPM_U2)
+		state_name = "U2";
+	else {
+		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
+				state);
+		return timeout;
+	}
+
+	if (xhci_check_tier_policy(xhci, udev, state) < 0)
+		return timeout;
+
+	/* Gather some information about the currently installed configuration
+	 * and alternate interface settings.
+	 */
+	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
+			state, &timeout))
+		return timeout;
+
+	config = udev->actconfig;
+	if (!config)
+		return timeout;
+
+	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+		struct usb_driver *driver;
+		struct usb_interface *intf = config->interface[i];
+
+		if (!intf)
+			continue;
+
+		/* Check if any currently bound drivers want hub-initiated LPM
+		 * disabled.
+		 */
+		if (intf->dev.driver) {
+			driver = to_usb_driver(intf->dev.driver);
+			if (driver && driver->disable_hub_initiated_lpm) {
+				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
+						"at request of driver %s\n",
+						state_name, driver->name);
+				return xhci_get_timeout_no_hub_lpm(udev, state);
+			}
+		}
+
+		/* Not sure how this could happen... */
+		if (!intf->cur_altsetting)
+			continue;
+
+		if (xhci_update_timeout_for_interface(xhci, udev,
+					intf->cur_altsetting,
+					state, &timeout))
+			return timeout;
+	}
+	return timeout;
+}
+
+static int calculate_max_exit_latency(struct usb_device *udev,
+		enum usb3_link_state state_changed,
+		u16 hub_encoded_timeout)
+{
+	unsigned long long u1_mel_us = 0;
+	unsigned long long u2_mel_us = 0;
+	unsigned long long mel_us = 0;
+	bool disabling_u1;
+	bool disabling_u2;
+	bool enabling_u1;
+	bool enabling_u2;
+
+	disabling_u1 = (state_changed == USB3_LPM_U1 &&
+			hub_encoded_timeout == USB3_LPM_DISABLED);
+	disabling_u2 = (state_changed == USB3_LPM_U2 &&
+			hub_encoded_timeout == USB3_LPM_DISABLED);
+
+	enabling_u1 = (state_changed == USB3_LPM_U1 &&
+			hub_encoded_timeout != USB3_LPM_DISABLED);
+	enabling_u2 = (state_changed == USB3_LPM_U2 &&
+			hub_encoded_timeout != USB3_LPM_DISABLED);
+
+	/* If U1 was already enabled and we're not disabling it,
+	 * or we're going to enable U1, account for the U1 max exit latency.
+	 */
+	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
+			enabling_u1)
+		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
+	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
+			enabling_u2)
+		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
+
+	if (u1_mel_us > u2_mel_us)
+		mel_us = u1_mel_us;
+	else
+		mel_us = u2_mel_us;
+	/* xHCI host controller max exit latency field is only 16 bits wide. */
+	if (mel_us > MAX_EXIT) {
+		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
+				"is too big.\n", mel_us);
+		return -E2BIG;
+	}
+	return mel_us;
+}
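+
+/*
+ * Example for calculate_max_exit_latency() above (assumed values):
+ * enabling U2 on a device with u2_params.mel = 2300 ns while U1 stays
+ * disabled gives u2_mel_us = DIV_ROUND_UP(2300, 1000) = 3, so the
+ * function returns a max exit latency of 3 us.
+ */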
+
+/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
+static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+			struct usb_device *udev, enum usb3_link_state state)
+{
+	struct xhci_hcd	*xhci;
+	u16 hub_encoded_timeout;
+	int mel;
+	int ret;
+
+	xhci = hcd_to_xhci(hcd);
+	/* The LPM timeout values are pretty host-controller specific, so don't
+	 * enable hub-initiated timeouts unless the vendor has provided
+	 * information about their timeout algorithm.
+	 */
+	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
+			!xhci->devs[udev->slot_id])
+		return USB3_LPM_DISABLED;
+
+	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
+	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
+	if (mel < 0) {
+		/* Max Exit Latency is too big, disable LPM. */
+		hub_encoded_timeout = USB3_LPM_DISABLED;
+		mel = 0;
+	}
+
+	ret = xhci_change_max_exit_latency(xhci, udev, mel);
+	if (ret)
+		return ret;
+	return hub_encoded_timeout;
+}
+
+static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+			struct usb_device *udev, enum usb3_link_state state)
+{
+	struct xhci_hcd	*xhci;
+	u16 mel;
+
+	xhci = hcd_to_xhci(hcd);
+	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
+			!xhci->devs[udev->slot_id])
+		return 0;
+
+	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
+	return xhci_change_max_exit_latency(xhci, udev, mel);
+}
+#else /* CONFIG_PM */
+
+static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+				struct usb_device *udev, int enable)
+{
+	return 0;
+}
+
+static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	return 0;
+}
+
+static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+			struct usb_device *udev, enum usb3_link_state state)
+{
+	return USB3_LPM_DISABLED;
+}
+
+static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+			struct usb_device *udev, enum usb3_link_state state)
+{
+	return 0;
+}
+#endif	/* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+/* Once a hub descriptor is fetched for a device, we need to update the xHC's
+ * internal data structures for the device.
+ */
+static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+			struct usb_tt *tt, gfp_t mem_flags)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	unsigned long flags;
+	unsigned think_time;
+	int ret;
+
+	/* Ignore root hubs */
+	if (!hdev->parent)
+		return 0;
+
+	vdev = xhci->devs[hdev->slot_id];
+	if (!vdev) {
+		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
+		return -EINVAL;
+	}
+
+	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
+	if (!config_cmd)
+		return -ENOMEM;
+
+	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		xhci_free_command(xhci, config_cmd);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (hdev->speed == USB_SPEED_HIGH &&
+			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
+		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ENOMEM;
+	}
+
+	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+	/*
+	 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
+	 * but it may already be set to 1 when setting up an xHCI virtual
+	 * device, so clear it anyway.
+	 */
+	if (tt->multi)
+		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	else if (hdev->speed == USB_SPEED_FULL)
+		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
+	if (xhci->hci_version > 0x95) {
+		xhci_dbg(xhci, "xHCI version %x needs hub "
+				"TT think time and number of ports\n",
+				(unsigned int) xhci->hci_version);
+		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
+		/* Set TT think time - convert from ns to FS bit times.
+		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
+		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
+		 *
+		 * xHCI 1.0: this field shall be 0 if the device is not a
+		 * High-speed hub.
+		 */
+		think_time = tt->think_time;
+		if (think_time != 0)
+			think_time = (think_time / 666) - 1;
+		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
+			slot_ctx->tt_info |=
+				cpu_to_le32(TT_THINK_TIME(think_time));
+	} else {
+		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
+				"TT think time or number of ports\n",
+				(unsigned int) xhci->hci_version);
+	}
+	slot_ctx->dev_state = 0;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	xhci_dbg(xhci, "Set up %s for hub device.\n",
+			(xhci->hci_version > 0x95) ?
+			"configure endpoint" : "evaluate context");
+
+	/* Issue and wait for the configure endpoint or
+	 * evaluate context command.
+	 */
+	if (xhci->hci_version > 0x95)
+		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+				false, false);
+	else
+		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+				true, false);
+
+	xhci_free_command(xhci, config_cmd);
+	return ret;
+}
+
+static int xhci_get_frame(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	/* EHCI mods by the periodic size.  Why? */
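+	/* MFINDEX counts 125 us microframes; shifting right by 3
+	 * converts the count to 1 ms frames.
+	 */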
+	return readl(&xhci->run_regs->microframe_index) >> 3;
+}
+
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+{
+	struct xhci_hcd		*xhci;
+	/*
+	 * TODO: Check with DWC3 clients for sysdev according to
+	 * quirks
+	 */
+	struct device		*dev = hcd->self.sysdev;
+	unsigned int		minor_rev;
+	int			retval;
+
+	/* Accept arbitrarily long scatter-gather lists */
+	hcd->self.sg_tablesize = ~0;
+
+	/* support to build packet from discontinuous buffers */
+	hcd->self.no_sg_constraint = 1;
+
+	/* XHCI controllers don't stop the ep queue on short packets :| */
+	hcd->self.no_stop_on_short = 1;
+
+	xhci = hcd_to_xhci(hcd);
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		xhci->main_hcd = hcd;
+		xhci->usb2_rhub.hcd = hcd;
+		/* Mark the first roothub as being USB 2.0.
+		 * The xHCI driver will register the USB 3.0 roothub.
+		 */
+		hcd->speed = HCD_USB2;
+		hcd->self.root_hub->speed = USB_SPEED_HIGH;
+		/*
+		 * The USB 2.0 roothub under xHCI has an integrated TT
+		 * (rate matching hub), as opposed to having an OHCI/UHCI
+		 * companion controller.
+		 */
+		hcd->has_tt = 1;
+	} else {
+		/*
+		 * Some USB 3.1 hosts return an sbrn of 0x30, so use the xHCI
+		 * supported protocol minor revision instead of sbrn.
+		 */
+		minor_rev = xhci->usb3_rhub.min_rev;
+		if (minor_rev) {
+			hcd->speed = HCD_USB31;
+			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+		}
+		xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
+			  minor_rev,
+			  minor_rev ? "Enhanced" : "");
+
+		xhci->usb3_rhub.hcd = hcd;
+		/* xHCI private pointer was set in xhci_pci_probe for the second
+		 * registered roothub.
+		 */
+		return 0;
+	}
+
+	mutex_init(&xhci->mutex);
+	xhci->cap_regs = hcd->regs;
+	xhci->op_regs = hcd->regs +
+		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
+	xhci->run_regs = hcd->regs +
+		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+	/* Cache read-only capability registers */
+	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
+	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
+	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
+	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
+	xhci->hci_version = HC_VERSION(xhci->hcc_params);
+	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
+	if (xhci->hci_version > 0x100)
+		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
+
+	xhci->quirks |= quirks;
+
+	get_quirks(dev, xhci);
+
+	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
+	 * success event after a short transfer. This quirk will ignore such
+	 * spurious events.
+	 */
+	if (xhci->hci_version > 0x96)
+		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
+	/* Make sure the HC is halted. */
+	retval = xhci_halt(xhci);
+	if (retval)
+		return retval;
+
+	xhci_zero_64b_regs(xhci);
+
+	xhci_dbg(xhci, "Resetting HCD\n");
+	/* Reset the internal HC memory state and registers. */
+	retval = xhci_reset(xhci);
+	if (retval)
+		return retval;
+	xhci_dbg(xhci, "Reset complete\n");
+
+	/*
+	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+	 * of HCCPARAMS1 is set to 1. However, the xHCs don't actually support
+	 * 64-bit memory address pointers. So, this driver clears the AC64
+	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
+	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
+	 */
+	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+		xhci->hcc_params &= ~BIT(0);
+
+	/* Set dma_mask and coherent_dma_mask to 64-bits,
+	 * if xHC supports 64-bit addressing */
+	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+	} else {
+		/*
+		 * This is to avoid error in cases where a 32-bit USB
+		 * controller is used on a 64-bit capable system.
+		 */
+		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
+		if (retval)
+			return retval;
+		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	}
+
+	xhci_dbg(xhci, "Calling HCD init\n");
+	/* Initialize HCD and host controller data structures. */
+	retval = xhci_init(hcd);
+	if (retval)
+		return retval;
+	xhci_dbg(xhci, "Called HCD init\n");
+
+	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
+		  xhci->hcc_params, xhci->hci_version, xhci->quirks);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_gen_setup);
+
+static const struct hc_driver xhci_hc_driver = {
+	.description =		"xhci-hcd",
+	.product_desc =		"xHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct xhci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			xhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		NULL, /* set in xhci_init_driver() */
+	.start =		xhci_run,
+	.stop =			xhci_stop,
+	.shutdown =		xhci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.alloc_streams =	xhci_alloc_streams,
+	.free_streams =		xhci_free_streams,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.endpoint_reset =	xhci_endpoint_reset,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
+	.address_device =	xhci_address_device,
+	.enable_device =	xhci_enable_device,
+	.update_hub_device =	xhci_update_hub_device,
+	.reset_device =		xhci_discover_or_reset_device,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	xhci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_control =		xhci_hub_control,
+	.hub_status_data =	xhci_hub_status_data,
+	.bus_suspend =		xhci_bus_suspend,
+	.bus_resume =		xhci_bus_resume,
+	.get_resuming_ports =	xhci_get_resuming_ports,
+
+	/*
+	 * call back when device connected and addressed
+	 */
+	.update_device =        xhci_update_device,
+	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
+	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
+	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
+	.find_raw_port_number =	xhci_find_raw_port_number,
+};
+
+void xhci_init_driver(struct hc_driver *drv,
+		      const struct xhci_driver_overrides *over)
+{
+	BUG_ON(!over);
+
+	/* Copy the generic table to drv then apply the overrides */
+	*drv = xhci_hc_driver;
+
+	if (over) {
+		drv->hcd_priv_size += over->extra_priv_size;
+		if (over->reset)
+			drv->reset = over->reset;
+		if (over->start)
+			drv->start = over->start;
+	}
+}
+EXPORT_SYMBOL_GPL(xhci_init_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static int __init xhci_hcd_init(void)
+{
+	/*
+	 * Check the compiler generated sizes of structures that must be laid
+	 * out in specific ways for hardware access.
+	 */
+	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+	/* xhci_device_control has eight fields, and also
+	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+	 */
+	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
+	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	xhci_debugfs_create_root();
+
+	return 0;
+}
+
+/*
+ * If an init function is provided, an exit function must also be provided
+ * to allow module unload.
+ */
+static void __exit xhci_hcd_fini(void)
+{
+	xhci_debugfs_remove_root();
+}
+
+module_init(xhci_hcd_init);
+module_exit(xhci_hcd_fini);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 0000000..e88060e
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,2612 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/usb/hcd.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/* Code sharing between pci-quirks and xhci hcd */
+#include	"xhci-ext-caps.h"
+#include "pci-quirks.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET	(0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS		256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS		127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ */
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase:		length of the capabilities register and HC version number
+ * @hcs_params1:	HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2:	HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3:	HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params:		HCCPARAMS - Capability Parameters
+ * @db_off:		DBOFF - Doorbell array offset
+ * @run_regs_off:	RTSOFF - Runtime register space offset
+ * @hcc_params2:	HCCPARAMS2 Capability Parameters 2, xhci 1.1 only
+ */
+struct xhci_cap_regs {
+	__le32	hc_capbase;
+	__le32	hcs_params1;
+	__le32	hcs_params2;
+	__le32	hcs_params3;
+	__le32	hcc_params;
+	__le32	db_off;
+	__le32	run_regs_off;
+	__le32	hcc_params2; /* xhci 1.1 */
+	/* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p)		XHCI_HC_LENGTH(p)
+/* bits 31:16	*/
+#define HC_VERSION(p)		(((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p)	(((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK		0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p)	(((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p)	(((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p)		(((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p)	(((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p)	(((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p)		((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p)	((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p)		((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p)		((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p)		((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p)		((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc)		(HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define	DBOFF_MASK	(~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define	RTSOFF_MASK	(~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define	HCC2_U3C(p)		((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define	HCC2_CMC(p)		((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define	HCC2_FSC(p)		((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define	HCC2_CTC(p)		((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define	HCC2_LEC(p)		((p) & (1 << 4))
+/* true: HC support Configuration Information Capability */
+#define	HCC2_CIC(p)		((p) & (1 << 5))
+/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+#define	HCC2_ETC(p)		((p) & (1 << 6))
+
+/* Number of registers per port */
+#define	NUM_PORT_REGS	4
+
+#define PORTSC		0
+#define PORTPMSC	1
+#define PORTLI		2
+#define PORTHLPMC	3
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command:		USBCMD - xHC command register
+ * @status:		USBSTS - xHC status register
+ * @page_size:		This indicates the page size that the host controller
+ * 			supports.  If bit n is set, the HC supports a page size
+ * 			of 2^(n+12), up to a 128MB page size.
+ * 			4K is the minimum page size.
+ * @cmd_ring:		CRP - 64-bit Command Ring Pointer
+ * @dcbaa_ptr:		DCBAAP - 64-bit Device Context Base Address Array Pointer
+ * @config_reg:		CONFIG - Configure Register
+ * @port_status_base:	PORTSCn - base address for Port Status and Control
+ * 			Each port has a Port Status and Control register,
+ * 			followed by a Port Power Management Status and Control
+ * 			register, a Port Link Info register, and a reserved
+ * 			register.
+ * @port_power_base:	PORTPMSCn - base address for
+ * 			Port Power Management Status and Control
+ * @port_link_base:	PORTLIn - base address for Port Link Info (current
+ * 			Link PM state and control) for USB 2.1 and USB 3.0
+ * 			devices.
+ */
+struct xhci_op_regs {
+	__le32	command;
+	__le32	status;
+	__le32	page_size;
+	__le32	reserved1;
+	__le32	reserved2;
+	__le32	dev_notification;
+	__le64	cmd_ring;
+	/* rsvd: offset 0x20-2F */
+	__le32	reserved3[4];
+	__le64	dcbaa_ptr;
+	__le32	config_reg;
+	/* rsvd: offset 0x3C-3FF */
+	__le32	reserved4[241];
+	/* port 1 registers, which serve as a base address for other ports */
+	__le32	port_status_base;
+	__le32	port_power_base;
+	__le32	port_link_base;
+	__le32	reserved5;
+	/* registers for ports 2-255 */
+	__le32	reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN		XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET	(1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE		XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE	XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET	(1 << 7)
+/* host controller save/restore state. */
+#define CMD_CSS		(1 << 8)
+#define CMD_CRS		(1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE		XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX	(1 << 11)
+/* bit 14 Extended TBC Enable, changes Isoc TRB fields to support larger TBC */
+#define CMD_ETE		(1 << 14)
+/* bits 15:31 are reserved (and should be preserved on writes). */
+
+/* IMAN - Interrupt Management Register */
+#define IMAN_IE		(1 << 1)
+#define IMAN_IP		(1 << 0)
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT	XHCI_STS_HALT
+/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
+#define STS_FATAL	(1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT	(1 << 3)
+/* port change detect */
+#define STS_PORT	(1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE	(1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE	(1 << 9)
+/* true: save or restore error */
+#define STS_SRE		(1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR		XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE		(1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define	DEV_NOTE_MASK		(0xffff)
+#define ENABLE_DEV_NOTE(x)	(1 << (x))
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define	DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE		(1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT		(1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING	(1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS	(0x3f)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p)	((p) & 0xff)
+/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */
+#define CONFIG_U3E		(1 << 8)
+/* bit 9: Configuration Information Enable, xhci 1.1 */
+#define CONFIG_CIE		(1 << 9)
+/* bits 10:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT	(1 << 0)
+/* true: port enabled */
+#define PORT_PE		(1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC		(1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET	(1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK	(0xf << 5)
+#define XDEV_U0		(0x0 << 5)
+#define XDEV_U1		(0x1 << 5)
+#define XDEV_U2		(0x2 << 5)
+#define XDEV_U3		(0x3 << 5)
+#define XDEV_DISABLED	(0x4 << 5)
+#define XDEV_RXDETECT	(0x5 << 5)
+#define XDEV_INACTIVE	(0x6 << 5)
+#define XDEV_POLLING	(0x7 << 5)
+#define XDEV_RECOVERY	(0x8 << 5)
+#define XDEV_HOT_RESET	(0x9 << 5)
+#define XDEV_COMP_MODE	(0xa << 5)
+#define XDEV_TEST_MODE	(0xb << 5)
+#define XDEV_RESUME	(0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER	(1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK		(0xf << 10)
+#define	XDEV_FS			(0x1 << 10)
+#define	XDEV_LS			(0x2 << 10)
+#define	XDEV_HS			(0x3 << 10)
+#define	XDEV_SS			(0x4 << 10)
+#define	XDEV_SSP		(0x5 << 10)
+#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p)	(((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p)	(((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p)	(((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define	SLOT_SPEED_FS		(XDEV_FS << 10)
+#define	SLOT_SPEED_LS		(XDEV_LS << 10)
+#define	SLOT_SPEED_HS		(XDEV_HS << 10)
+#define	SLOT_SPEED_SS		(XDEV_SS << 10)
+#define	SLOT_SPEED_SSP		(XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF	(0 << 14)
+#define PORT_LED_AMBER	(1 << 14)
+#define PORT_LED_GREEN	(2 << 14)
+#define PORT_LED_MASK	(3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE	(1 << 16)
+/* true: connect status change */
+#define PORT_CSC	(1 << 17)
+/* true: port enable change */
+#define PORT_PEC	(1 << 18)
+/* true: warm reset for a USB 3.0 device is done.  A "hot" reset puts the port
+ * into an enabled state, and the device into the default state.  A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC	(1 << 19)
+/* true: over-current change */
+#define PORT_OCC	(1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC		(1 << 21)
+/* port link status change - set on some port link state transitions:
+ *  Transition				Reason
+ *  ------------------------------------------------------------------------------
+ *  - U3 to Resume			Wakeup signaling from a device
+ *  - Resume to Recovery to U0		USB 3.0 device resume
+ *  - Resume to U0			USB 2.0 device resume
+ *  - U3 to Recovery to U0		Software resume of USB 3.0 device complete
+ *  - U3 to U0				Software resume of USB 2.0 device complete
+ *  - U2 to U0				L1 resume of USB 2.1 device complete
+ *  - U0 to U0 (???)			L1 entry rejection by USB 2.1 device
+ *  - U0 to disabled			L1 entry error with USB 2.1 device
+ *  - Any state to inactive		Error on USB 3.0 port
+ */
+#define PORT_PLC	(1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC	(1 << 23)
+#define PORT_CHANGE_MASK	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+				 PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report a device attached during
+ * an Sx state. A warm port reset should be performed to clear this bit and move
+ * the port to the connected state.
+ */
+#define PORT_CAS	(1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E	(1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E	(1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E	(1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE	(1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR		(1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us.  0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p)	((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK	0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK	(0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define	PORT_L1S_MASK		7
+#define	PORT_L1S_SUCCESS	1
+#define	PORT_RWE		(1 << 3)
+#define	PORT_HIRD(p)		(((p) & 0xf) << 4)
+#define	PORT_HIRD_MASK		(0xf << 4)
+#define	PORT_L1DS_MASK		(0xff << 8)
+#define	PORT_L1DS(p)		(((p) & 0xff) << 8)
+#define	PORT_HLE		(1 << 16)
+#define PORT_TEST_MODE_SHIFT	28
+
+/* USB3 Protocol PORTLI  Port Link Information */
+#define PORT_RX_LANES(p)	(((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p)	(((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)		((p) & 3)
+#define PORT_L1_TIMEOUT(p)	(((p) & 0xff) << 2)
+#define PORT_BESLD(p)		(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT		512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached."
+ */
+#define XHCI_DEFAULT_BESL	4
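+
+/*
+ * Illustrative sketch (not part of the original header): composing a USB2
+ * PORTPMSC value for hardware LPM from the bitmasks above, mirroring how a
+ * driver might program HIRD/BESL, remote wake, the device slot, and the
+ * hardware LPM enable.  The helper name and parameters are hypothetical.
+ */
+static inline u32 xhci_example_usb2_portpmsc(u32 old, u8 besl, u8 slot_id)
+{
+	u32 pmsc = old & ~(PORT_HIRD_MASK | PORT_L1DS_MASK);
+
+	pmsc |= PORT_HIRD(besl);	/* host-initiated resume duration/BESL */
+	pmsc |= PORT_RWE;		/* remote wake enable */
+	pmsc |= PORT_L1DS(slot_id);	/* device slot for L1 entry */
+	pmsc |= PORT_HLE;		/* hardware LPM enable */
+	return pmsc;
+}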
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
+ *			interrupts and check for pending interrupts.
+ * @irq_control:	IMOD - Interrupt Moderation Register.
+ * 			Used to throttle interrupts.
+ * @erst_size:		Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base:		ERST base address.
+ * @erst_dequeue:	Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by an MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it.  The event ring is comprised of
+ * multiple segments of the same size.  The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+	__le32	irq_pending;
+	__le32	irq_control;
+	__le32	erst_size;
+	__le32	rsvd;
+	__le64	erst_base;
+	__le64	erst_dequeue;
+};
+
+/* irq_pending bitmasks */
+#define	ER_IRQ_PENDING(p)	((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
+#define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
+#define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
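+
+/*
+ * Illustrative sketch: enabling an interrupter with the IMAN helpers above.
+ * ER_IRQ_CLEAR() masks off the IP bit (write-1-to-clear) so this write does
+ * not accidentally acknowledge a pending interrupt.  Hypothetical helper,
+ * not part of the original header.
+ */
+static inline void xhci_example_enable_intr(struct xhci_intr_reg __iomem *ir_set)
+{
+	u32 iman = readl(&ir_set->irq_pending);
+
+	writel(ER_IRQ_ENABLE(iman), &ir_set->irq_pending);
+}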
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals).  The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK	(0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK	(0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define	ERST_SIZE_MASK		(0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies.  This is an optional HW hint.
+ */
+#define ERST_DESI_MASK		(0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB		(1 << 3)
+#define ERST_PTR_MASK		(0xf)
+
+/**
+ * struct xhci_run_regs
+ * @microframe_index:
+ * 		MFINDEX - current microframe number
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+	__le32			microframe_index;
+	__le32			rsvd[7];
+	struct xhci_intr_reg	ir_set[128];
+};
+
+/**
+ * struct xhci_doorbell_array
+ *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+	__le32	doorbell[256];
+};
+
+#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST		0x00000000
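+
+/*
+ * Illustrative sketch: ringing a device slot doorbell with the encoding
+ * above.  DB_VALUE() converts the 0-based ep_index into the 1-based
+ * endpoint target; doorbell[0] (DB_VALUE_HOST) targets the command ring.
+ * Hypothetical helper, not part of the original header.
+ */
+static inline void xhci_example_ring_doorbell(struct xhci_doorbell_array __iomem *dba,
+					      unsigned int slot_id,
+					      unsigned int ep_index,
+					      unsigned int stream_id)
+{
+	writel(DB_VALUE(ep_index, stream_id), &dba->doorbell[slot_id]);
+}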
+
+/**
+ * struct xhci_protocol_caps
+ * @revision:		major revision, minor revision, capability ID,
+ *			and next capability pointer.
+ * @name_string:	Four ASCII characters to say which spec this xHC
+ *			follows, typically "USB ".
+ * @port_info:		Port offset, count, and protocol-defined information.
+ */
+struct xhci_protocol_caps {
+	u32	revision;
+	u32	name_string;
+	u32	port_info;
+};
+
+#define	XHCI_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
+#define	XHCI_EXT_PORT_MINOR(x)	(((x) >> 16) & 0xff)
+#define	XHCI_EXT_PORT_PSIC(x)	(((x) >> 28) & 0x0f)
+#define	XHCI_EXT_PORT_OFF(x)	((x) & 0xff)
+#define	XHCI_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)
+
+#define	XHCI_EXT_PORT_PSIV(x)	(((x) >> 0) & 0x0f)
+#define	XHCI_EXT_PORT_PSIE(x)	(((x) >> 4) & 0x03)
+#define	XHCI_EXT_PORT_PLT(x)	(((x) >> 6) & 0x03)
+#define	XHCI_EXT_PORT_PFD(x)	(((x) >> 8) & 0x01)
+#define	XHCI_EXT_PORT_LP(x)	(((x) >> 14) & 0x03)
+#define	XHCI_EXT_PORT_PSIM(x)	(((x) >> 16) & 0xffff)
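+
+/*
+ * Illustrative sketch: decoding which root hub ports a Supported Protocol
+ * capability covers from its port_info dword, using the field accessors
+ * above.  The Compatible Port Offset is 1-based.  Hypothetical helper, not
+ * part of the original header.
+ */
+static inline void xhci_example_decode_port_range(u32 port_info,
+						  unsigned int *first_port,
+						  unsigned int *num_ports)
+{
+	*first_port = XHCI_EXT_PORT_OFF(port_info);
+	*num_ports = XHCI_EXT_PORT_COUNT(port_info);
+}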
+
+#define PLT_MASK        (0x03 << 6)
+#define PLT_SYM         (0x00 << 6)
+#define PLT_ASYM_RX     (0x02 << 6)
+#define PLT_ASYM_TX     (0x03 << 6)
+
+/**
+ * struct xhci_container_ctx
+ * @type: Type of context.  Used to calculate offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context.  Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+	unsigned type;
+#define XHCI_CTX_TYPE_DEVICE  0x1
+#define XHCI_CTX_TYPE_INPUT   0x2
+
+	int size;
+
+	u8 *bytes;
+	dma_addr_t dma;
+};
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info:	Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2:	Max exit latency for device number, root hub port number
+ * @tt_info:	tt_info is used to construct split transaction tokens
+ * @dev_state:	slot state and device address
+ *
+ * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+	__le32	dev_info;
+	__le32	dev_info2;
+	__le32	tt_info;
+	__le32	dev_state;
+	/* offset 0x10 to 0x1f reserved for HC internal use */
+	__le32	reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK	(0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED	(0xf << 20)
+#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT		(0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB		(0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK	(0x1f << 27)
+#define LAST_CTX(p)	((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
+#define SLOT_FLAG	(1 << 0)
+#define EP0_FLAG	(1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT	(0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p)	(((p) & 0xff) << 16)
+#define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
+/* Maximum number of ports under a hub device */
+#define XHCI_MAX_PORTS(p)	(((p) & 0xff) << 24)
+#define DEVINFO_TO_MAX_PORTS(p)	(((p) & (0xff << 24)) >> 24)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device.  '0' if attached to root hub port.
+ */
+#define TT_SLOT		(0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT		(0xff << 8)
+#define TT_THINK_TIME(p)	(((p) & 0x3) << 16)
+#define GET_TT_THINK_TIME(p)	(((p) & (0x3 << 16)) >> 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK	(0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE	(0x1f << 27)
+#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)
+
+#define SLOT_STATE_DISABLED	0
+#define SLOT_STATE_ENABLED	SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT	1
+#define SLOT_STATE_ADDRESSED	2
+#define SLOT_STATE_CONFIGURED	3
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info:	endpoint state, streams, mult, and interval information.
+ * @ep_info2:	information on endpoint type, max packet size, max burst size,
+ * 		error count, and whether the HC will force an event for all
+ * 		transactions.
+ * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
+ * 		defines one stream, this points to the endpoint transfer ring.
+ * 		Otherwise, it points to a stream context array, which has a
+ * 		ring pointer for each flow.
+ * @tx_info:
+ * 		Average TRB lengths for the endpoint ring and
+ * 		max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+	__le32	ep_info;
+	__le32	ep_info2;
+	__le64	deq;
+	__le32	tx_info;
+	/* offset 0x14 - 0x1f reserved for HC internal use */
+	__le32	reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK		(0xf)
+#define EP_STATE_DISABLED	0
+#define EP_STATE_RUNNING	1
+#define EP_STATE_HALTED		2
+#define EP_STATE_STOPPED	3
+#define EP_STATE_ERROR		4
+#define GET_EP_CTX_STATE(ctx)	(le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p)		(((p) & 0x3) << 8)
+#define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p)			(((p) & 0xff) << 16)
+#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
+#define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
+#define EP_MAXPSTREAMS_MASK		(0x1f << 10)
+#define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p)	(((p) & EP_MAXPSTREAMS_MASK) >> 10)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define	EP_HAS_LSA		(1 << 15)
+/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)	(((p) >> 24) & 0xff)
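+
+/*
+ * Illustrative sketch: the Interval field stores log2 of the service period
+ * in 125us frames, so the period in microseconds is (1 << interval) * 125.
+ * Valid Interval values are 0-15.  Hypothetical helper, not part of the
+ * original header.
+ */
+static inline unsigned int xhci_example_ctx_interval_us(u32 ep_info)
+{
+	return EP_INTERVAL_TO_UFRAMES(ep_info) * 125;
+}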
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define	FORCE_EVENT	(0x1)
+#define ERROR_COUNT(p)	(((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p)	(((p) >> 3) & 0x7)
+#define EP_TYPE(p)	((p) << 3)
+#define ISOC_OUT_EP	1
+#define BULK_OUT_EP	2
+#define INT_OUT_EP	3
+#define CTRL_EP		4
+#define ISOC_IN_EP	5
+#define BULK_IN_EP	6
+#define INT_IN_EP	7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p)	(((p)&0xff) << 8)
+#define CTX_TO_MAX_BURST(p)	(((p) >> 8) & 0xff)
+#define MAX_PACKET(p)	(((p)&0xffff) << 16)
+#define MAX_PACKET_MASK		(0xffff << 16)
+#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
+
+/* tx_info bitmasks */
+#define EP_AVG_TRB_LENGTH(p)		((p) & 0xffff)
+#define EP_MAX_ESIT_PAYLOAD_LO(p)	(((p) & 0xffff) << 16)
+#define EP_MAX_ESIT_PAYLOAD_HI(p)	((((p) >> 16) & 0xff) << 24)
+#define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)
+
+/* deq bitmasks */
+#define EP_CTX_CYCLE_MASK		(1 << 0)
+#define SCTX_DEQ_MASK			(~0xfL)
+
+
+/**
+ * struct xhci_input_control_ctx
+ * Input control context; see section 6.2.5.
+ *
+ * @drop_flags:	set the bit of the endpoint context you want to disable
+ * @add_flags:	set the bit of the endpoint context you want to enable
+ */
+struct xhci_input_control_ctx {
+	__le32	drop_flags;
+	__le32	add_flags;
+	__le32	rsvd2[6];
+};
+
+#define	EP_IS_ADDED(ctrl_ctx, i) \
+	(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
+#define	EP_IS_DROPPED(ctrl_ctx, i)       \
+	(le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
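+
+/*
+ * Illustrative sketch: counting the endpoints a Configure Endpoint command
+ * would add.  Endpoint contexts start at flag bit 1 (bit 0 is the slot
+ * flag), which is why EP_IS_ADDED() shifts by (i + 1).  Hypothetical
+ * helper, not part of the original header.
+ */
+static inline unsigned int xhci_example_count_added_eps(struct xhci_input_control_ctx *ctrl_ctx)
+{
+	unsigned int i, count = 0;
+
+	for (i = 0; i < 31; i++)
+		if (EP_IS_ADDED(ctrl_ctx, i))
+			count++;
+	return count;
+}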
+
+/* Represents everything that is needed to issue a command on the command ring.
+ * It's useful to pre-allocate these for commands that cannot fail due to
+ * out-of-memory errors, like freeing streams.
+ */
+struct xhci_command {
+	/* Input context for changing device state */
+	struct xhci_container_ctx	*in_ctx;
+	u32				status;
+	int				slot_id;
+	/* If completion is null, no one is waiting on this command
+	 * and the structure can be freed after the command completes.
+	 */
+	struct completion		*completion;
+	union xhci_trb			*command_trb;
+	struct list_head		cmd_list;
+};
+
+/* drop context bitmasks */
+#define	DROP_EP(x)	(0x1 << x)
+/* add context bitmasks */
+#define	ADD_EP(x)	(0x1 << x)
+
+struct xhci_stream_ctx {
+	/* 64-bit stream ring address, cycle state, and stream type */
+	__le64	stream_ring;
+	/* offset 0x8 - 0xf reserved for HC internal use */
+	__le32	reserved[2];
+};
+
+/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
+#define	SCT_FOR_CTX(p)		(((p) & 0x7) << 1)
+/* Secondary stream array type, dequeue pointer is to a transfer ring */
+#define	SCT_SEC_TR		0
+/* Primary stream array type, dequeue pointer is to a transfer ring */
+#define	SCT_PRI_TR		1
+/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
+#define SCT_SSA_8		2
+#define SCT_SSA_16		3
+#define SCT_SSA_32		4
+#define SCT_SSA_64		5
+#define SCT_SSA_128		6
+#define SCT_SSA_256		7
+
+/* Assume no secondary streams for now */
+struct xhci_stream_info {
+	struct xhci_ring		**stream_rings;
+	/* Number of streams, including stream 0 (which drivers can't use) */
+	unsigned int			num_streams;
+	/* The stream context array may be bigger than
+	 * the number of streams the driver asked for
+	 */
+	struct xhci_stream_ctx		*stream_ctx_array;
+	unsigned int			num_stream_ctxs;
+	dma_addr_t			ctx_array_dma;
+	/* For mapping physical TRB addresses to segments in stream rings */
+	struct radix_tree_root		trb_address_map;
+	struct xhci_command		*free_streams_command;
+};
+
+#define	SMALL_STREAM_ARRAY_SIZE		256
+#define	MEDIUM_STREAM_ARRAY_SIZE	1024
+
+/* Some Intel xHCI host controllers need software to keep track of the bus
+ * bandwidth.  Keep track of endpoint info here.  Each root port is allocated
+ * the full bus bandwidth.  We must also treat TTs (including each port under a
+ * multi-TT hub) as a separate bandwidth domain.  The direct memory interface
+ * (DMI) also limits the total bandwidth (across all domains) that can be used.
+ */
+struct xhci_bw_info {
+	/* ep_interval is zero-based */
+	unsigned int		ep_interval;
+	/* mult and num_packets are one-based */
+	unsigned int		mult;
+	unsigned int		num_packets;
+	unsigned int		max_packet_size;
+	unsigned int		max_esit_payload;
+	unsigned int		type;
+};
+
+/* "Block" sizes in bytes the hardware uses for different device speeds.
+ * The logic in this part of the hardware limits the number of bits the hardware
+ * can use, so must represent bandwidth in a less precise manner to mimic what
+ * the scheduler hardware computes.
+ */
+#define	FS_BLOCK	1
+#define	HS_BLOCK	4
+#define	SS_BLOCK	16
+#define	DMI_BLOCK	32
+
+/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
+ * with each byte transferred.  SuperSpeed devices have an initial overhead to
+ * set up bursts.  These are in blocks, see above.  LS overhead has already been
+ * translated into FS blocks.
+ */
+#define DMI_OVERHEAD 8
+#define DMI_OVERHEAD_BURST 4
+#define SS_OVERHEAD 8
+#define SS_OVERHEAD_BURST 32
+#define HS_OVERHEAD 26
+#define FS_OVERHEAD 20
+#define LS_OVERHEAD 128
+/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
+ * microframe ~= 24Mbps) of the HS bus as the devices can actually use because
+ * of overhead associated with split transfers crossing microframe boundaries.
+ * 31 blocks is pure protocol overhead.
+ */
+#define TT_HS_OVERHEAD (31 + 94)
+#define TT_DMI_OVERHEAD (25 + 12)
+
+/* Bandwidth limits in blocks */
+#define FS_BW_LIMIT		1285
+#define TT_BW_LIMIT		1320
+#define HS_BW_LIMIT		1607
+#define SS_BW_LIMIT_IN		3906
+#define DMI_BW_LIMIT_IN		3906
+#define SS_BW_LIMIT_OUT		3906
+#define DMI_BW_LIMIT_OUT	3906
+
+/* Percentage of bus bandwidth reserved for non-periodic transfers */
+#define FS_BW_RESERVED		10
+#define HS_BW_RESERVED		20
+#define SS_BW_RESERVED		10
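+
+/*
+ * Illustrative arithmetic for the limits above: with HS_BW_LIMIT = 1607
+ * blocks per microframe and 20% reserved for non-periodic traffic,
+ * periodic transfers may claim at most 1607 - (1607 * 20 / 100) = 1286
+ * blocks (~5 KB per microframe at HS_BLOCK = 4 bytes).  Hypothetical
+ * helper, not part of the original header.
+ */
+static inline unsigned int xhci_example_periodic_bw_limit(unsigned int limit,
+							   unsigned int reserved_pct)
+{
+	return limit - (limit * reserved_pct / 100);
+}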
+
+struct xhci_virt_ep {
+	struct xhci_ring		*ring;
+	/* Related to endpoints that are configured to use stream IDs only */
+	struct xhci_stream_info		*stream_info;
+	/* Temporary storage in case the configure endpoint command fails and we
+	 * have to restore the device state to the previous state
+	 */
+	struct xhci_ring		*new_ring;
+	unsigned int			ep_state;
+#define SET_DEQ_PENDING		(1 << 0)
+#define EP_HALTED		(1 << 1)	/* For stall handling */
+#define EP_STOP_CMD_PENDING	(1 << 2)	/* For URB cancellation */
+/* Transitioning the endpoint to using streams, don't enqueue URBs */
+#define EP_GETTING_STREAMS	(1 << 3)
+#define EP_HAS_STREAMS		(1 << 4)
+/* Transitioning the endpoint to not using streams, don't enqueue URBs */
+#define EP_GETTING_NO_STREAMS	(1 << 5)
+#define EP_HARD_CLEAR_TOGGLE	(1 << 6)
+#define EP_SOFT_CLEAR_TOGGLE	(1 << 7)
+	/* ----  Related to URB cancellation ---- */
+	struct list_head	cancelled_td_list;
+	/* Watchdog timer for stop endpoint command to cancel URBs */
+	struct timer_list	stop_cmd_timer;
+	struct xhci_hcd		*xhci;
+	/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
+	 * command.  We'll need to update the ring's dequeue segment and dequeue
+	 * pointer after the command completes.
+	 */
+	struct xhci_segment	*queued_deq_seg;
+	union xhci_trb		*queued_deq_ptr;
+	/*
+	 * Sometimes the xHC cannot process an isochronous endpoint ring
+	 * quickly enough; it then misses some isoc TDs on the ring and
+	 * generates a Missed Service Error Event.
+	 * Set the skip flag on receiving a Missed Service Error Event and
+	 * process the missed TDs on the endpoint ring.
+	 */
+	bool			skip;
+	/* Bandwidth checking storage */
+	struct xhci_bw_info	bw_info;
+	struct list_head	bw_endpoint_list;
+	/* Isoch Frame ID checking storage */
+	int			next_frame_id;
+	/* Use new Isoch TRB layout needed for extended TBC support */
+	bool			use_extended_tbc;
+};
+
+enum xhci_overhead_type {
+	LS_OVERHEAD_TYPE = 0,
+	FS_OVERHEAD_TYPE,
+	HS_OVERHEAD_TYPE,
+};
+
+struct xhci_interval_bw {
+	unsigned int		num_packets;
+	/* Sorted by max packet size.
+	 * Head of the list is the greatest max packet size.
+	 */
+	struct list_head	endpoints;
+	/* How many endpoints of each speed are present. */
+	unsigned int		overhead[3];
+};
+
+#define	XHCI_MAX_INTERVAL	16
+
+struct xhci_interval_bw_table {
+	unsigned int		interval0_esit_payload;
+	struct xhci_interval_bw	interval_bw[XHCI_MAX_INTERVAL];
+	/* Includes reserved bandwidth for async endpoints */
+	unsigned int		bw_used;
+	unsigned int		ss_bw_in;
+	unsigned int		ss_bw_out;
+};
+
+
+struct xhci_virt_device {
+	struct usb_device		*udev;
+	/*
+	 * Commands to the hardware are passed an "input context" that
+	 * tells the hardware what to change in its data structures.
+	 * The hardware will return changes in an "output context" that
+	 * software must allocate for the hardware.  We need to keep
+	 * track of input and output contexts separately because
+	 * these commands might fail and we don't trust the hardware.
+	 */
+	struct xhci_container_ctx       *out_ctx;
+	/* Used for addressing devices and configuration changes */
+	struct xhci_container_ctx       *in_ctx;
+	struct xhci_virt_ep		eps[31];
+	u8				fake_port;
+	u8				real_port;
+	struct xhci_interval_bw_table	*bw_table;
+	struct xhci_tt_bw_info		*tt_info;
+	/* The current max exit latency for the enabled USB3 link states. */
+	u16				current_mel;
+	/* Used for the debugfs interfaces. */
+	void				*debugfs_private;
+};
+
+/*
+ * For each roothub, keep track of the bandwidth information for each periodic
+ * interval.
+ *
+ * If a high speed hub is attached to the roothub, each TT associated with that
+ * hub is a separate bandwidth domain.  The interval information for the
+ * endpoints on the devices under that TT will appear in the TT structure.
+ */
+struct xhci_root_port_bw_info {
+	struct list_head		tts;
+	unsigned int			num_active_tts;
+	struct xhci_interval_bw_table	bw_table;
+};
+
+struct xhci_tt_bw_info {
+	struct list_head		tt_list;
+	int				slot_id;
+	int				ttport;
+	struct xhci_interval_bw_table	bw_table;
+	int				active_eps;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs:	array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+	/* 64-bit device addresses; we only write 32-bit addresses */
+	__le64			dev_context_ptrs[MAX_HC_SLOTS];
+	/* private xHCD pointers */
+	dma_addr_t	dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_transfer_event {
+	/* 64-bit buffer address, or immediate data */
+	__le64	buffer;
+	__le32	transfer_len;
+	/* This field is interpreted differently based on the type of TRB */
+	__le32	flags;
+};
+
+/* Transfer event TRB length bit mask */
+/* bits 0:23 */
+#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
+
+/** Transfer Event bit fields **/
+#define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
+
+/* Completion Code - only applicable for some types of TRBs */
+#define	COMP_CODE_MASK		(0xff << 24)
+#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
+#define COMP_INVALID				0
+#define COMP_SUCCESS				1
+#define COMP_DATA_BUFFER_ERROR			2
+#define COMP_BABBLE_DETECTED_ERROR		3
+#define COMP_USB_TRANSACTION_ERROR		4
+#define COMP_TRB_ERROR				5
+#define COMP_STALL_ERROR			6
+#define COMP_RESOURCE_ERROR			7
+#define COMP_BANDWIDTH_ERROR			8
+#define COMP_NO_SLOTS_AVAILABLE_ERROR		9
+#define COMP_INVALID_STREAM_TYPE_ERROR		10
+#define COMP_SLOT_NOT_ENABLED_ERROR		11
+#define COMP_ENDPOINT_NOT_ENABLED_ERROR		12
+#define COMP_SHORT_PACKET			13
+#define COMP_RING_UNDERRUN			14
+#define COMP_RING_OVERRUN			15
+#define COMP_VF_EVENT_RING_FULL_ERROR		16
+#define COMP_PARAMETER_ERROR			17
+#define COMP_BANDWIDTH_OVERRUN_ERROR		18
+#define COMP_CONTEXT_STATE_ERROR		19
+#define COMP_NO_PING_RESPONSE_ERROR		20
+#define COMP_EVENT_RING_FULL_ERROR		21
+#define COMP_INCOMPATIBLE_DEVICE_ERROR		22
+#define COMP_MISSED_SERVICE_ERROR		23
+#define COMP_COMMAND_RING_STOPPED		24
+#define COMP_COMMAND_ABORTED			25
+#define COMP_STOPPED				26
+#define COMP_STOPPED_LENGTH_INVALID		27
+#define COMP_STOPPED_SHORT_PACKET		28
+#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR	29
+#define COMP_ISOCH_BUFFER_OVERRUN		31
+#define COMP_EVENT_LOST_ERROR			32
+#define COMP_UNDEFINED_ERROR			33
+#define COMP_INVALID_STREAM_ID_ERROR		34
+#define COMP_SECONDARY_BANDWIDTH_ERROR		35
+#define COMP_SPLIT_TRANSACTION_ERROR		36
+
+static inline const char *xhci_trb_comp_code_string(u8 status)
+{
+	switch (status) {
+	case COMP_INVALID:
+		return "Invalid";
+	case COMP_SUCCESS:
+		return "Success";
+	case COMP_DATA_BUFFER_ERROR:
+		return "Data Buffer Error";
+	case COMP_BABBLE_DETECTED_ERROR:
+		return "Babble Detected";
+	case COMP_USB_TRANSACTION_ERROR:
+		return "USB Transaction Error";
+	case COMP_TRB_ERROR:
+		return "TRB Error";
+	case COMP_STALL_ERROR:
+		return "Stall Error";
+	case COMP_RESOURCE_ERROR:
+		return "Resource Error";
+	case COMP_BANDWIDTH_ERROR:
+		return "Bandwidth Error";
+	case COMP_NO_SLOTS_AVAILABLE_ERROR:
+		return "No Slots Available Error";
+	case COMP_INVALID_STREAM_TYPE_ERROR:
+		return "Invalid Stream Type Error";
+	case COMP_SLOT_NOT_ENABLED_ERROR:
+		return "Slot Not Enabled Error";
+	case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+		return "Endpoint Not Enabled Error";
+	case COMP_SHORT_PACKET:
+		return "Short Packet";
+	case COMP_RING_UNDERRUN:
+		return "Ring Underrun";
+	case COMP_RING_OVERRUN:
+		return "Ring Overrun";
+	case COMP_VF_EVENT_RING_FULL_ERROR:
+		return "VF Event Ring Full Error";
+	case COMP_PARAMETER_ERROR:
+		return "Parameter Error";
+	case COMP_BANDWIDTH_OVERRUN_ERROR:
+		return "Bandwidth Overrun Error";
+	case COMP_CONTEXT_STATE_ERROR:
+		return "Context State Error";
+	case COMP_NO_PING_RESPONSE_ERROR:
+		return "No Ping Response Error";
+	case COMP_EVENT_RING_FULL_ERROR:
+		return "Event Ring Full Error";
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		return "Incompatible Device Error";
+	case COMP_MISSED_SERVICE_ERROR:
+		return "Missed Service Error";
+	case COMP_COMMAND_RING_STOPPED:
+		return "Command Ring Stopped";
+	case COMP_COMMAND_ABORTED:
+		return "Command Aborted";
+	case COMP_STOPPED:
+		return "Stopped";
+	case COMP_STOPPED_LENGTH_INVALID:
+		return "Stopped - Length Invalid";
+	case COMP_STOPPED_SHORT_PACKET:
+		return "Stopped - Short Packet";
+	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+		return "Max Exit Latency Too Large Error";
+	case COMP_ISOCH_BUFFER_OVERRUN:
+		return "Isoch Buffer Overrun";
+	case COMP_EVENT_LOST_ERROR:
+		return "Event Lost Error";
+	case COMP_UNDEFINED_ERROR:
+		return "Undefined Error";
+	case COMP_INVALID_STREAM_ID_ERROR:
+		return "Invalid Stream ID Error";
+	case COMP_SECONDARY_BANDWIDTH_ERROR:
+		return "Secondary Bandwidth Error";
+	case COMP_SPLIT_TRANSACTION_ERROR:
+		return "Split Transaction Error";
+	default:
+		return "Unknown!!";
+	}
+}
+
+struct xhci_link_trb {
+	/* 64-bit segment pointer*/
+	__le64 segment_ptr;
+	__le32 intr_target;
+	__le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE	(0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+	/* Pointer to command TRB, or the value passed by the event data trb */
+	__le64 cmd_trb;
+	__le32 status;
+	__le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress */
+#define TRB_BSR		(1<<9)
+
+/* Configure Endpoint - Deconfigure */
+#define TRB_DC		(1<<9)
+
+/* Stop Ring - Transfer State Preserve */
+#define TRB_TSP		(1<<9)
+
+enum xhci_ep_reset_type {
+	EP_HARD_RESET,
+	EP_SOFT_RESET,
+};
+
+/* Force Event */
+#define TRB_TO_VF_INTR_TARGET(p)	(((p) & (0x3ff << 22)) >> 22)
+#define TRB_TO_VF_ID(p)			(((p) & (0xff << 16)) >> 16)
+
+/* Set Latency Tolerance Value */
+#define TRB_TO_BELT(p)			(((p) & (0xfff << 16)) >> 16)
+
+/* Get Port Bandwidth */
+#define TRB_TO_DEV_SPEED(p)		(((p) & (0xf << 16)) >> 16)
+
+/* Force Header */
+#define TRB_TO_PACKET_TYPE(p)		((p) & 0x1f)
+#define TRB_TO_ROOTHUB_PORT(p)		(((p) & (0xff << 24)) >> 24)
+
+enum xhci_setup_dev {
+	SETUP_CONTEXT_ONLY,
+	SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
+#define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)
+
+#define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
+#define LAST_EP_INDEX			30
+
+/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
+#define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)
+#define SCT_FOR_TRB(p)			(((p) << 1) & 0x7)
+
+/* Link TRB specific fields */
+#define TRB_TC			(1<<1)
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
+
+#define EVENT_DATA		(1 << 2)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define	TRB_LEN(p)		((p) & 0x1ffff)
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
+#define TRB_TD_SIZE(p)          (min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p)		(((p) & 0x3e0000) >> 17)
+/* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */
+#define TRB_TD_SIZE_TBC(p)      (min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
+/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
+#define TRB_TBC(p)		(((p) & 0x3) << 7)
+#define TRB_TLBPC(p)		(((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE		(1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT			(1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP			(1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP		(1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN		(1<<4)
+/* Interrupt on completion */
+#define TRB_IOC			(1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT			(1<<6)
+
+/* Block Event Interrupt */
+#define	TRB_BEI			(1<<9)
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN		(1<<16)
+#define	TRB_TX_TYPE(p)		((p) << 16)
+#define	TRB_DATA_OUT		2
+#define	TRB_DATA_IN		3
+
+/* Isochronous TRB specific fields */
+#define TRB_SIA			(1<<31)
+#define TRB_FRAME_ID(p)		(((p) & 0x7ff) << 20)
+
+struct xhci_generic_trb {
+	__le32 field[4];
+};
+
+union xhci_trb {
+	struct xhci_link_trb		link;
+	struct xhci_transfer_event	trans_event;
+	struct xhci_event_cmd		event_cmd;
+	struct xhci_generic_trb		generic;
+};
+
+/* TRB bit mask */
+#define	TRB_TYPE_BITMASK	(0xfc00)
+#define TRB_TYPE(p)		((p) << 10)
+#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL		1
+/* setup stage for control transfers */
+#define TRB_SETUP		2
+/* data stage for control transfers */
+#define TRB_DATA		3
+/* status stage for control transfers */
+#define TRB_STATUS		4
+/* isoc transfers */
+#define TRB_ISOC		5
+/* TRB for linking ring segments */
+#define TRB_LINK		6
+#define TRB_EVENT_DATA		7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP		8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT		9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT	10
+/* Address Device Command */
+#define TRB_ADDR_DEV		11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP		12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT	13
+/* Reset Endpoint Command */
+#define TRB_RESET_EP		14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING		15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ		16
+/* Reset Device Command */
+#define TRB_RESET_DEV		17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT		18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH	19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT		20
+/* Get port bandwidth Command */
+#define TRB_GET_BW		21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER	22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP		23
+/* TRB IDs 24-31 reserved */
+/* Event TRBs */
+/* Transfer Event */
+#define TRB_TRANSFER		32
+/* Command Completion Event */
+#define TRB_COMPLETION		33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS		34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT	35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL		36
+/* Host Controller Event */
+#define TRB_HC_EVENT		37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE		38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP	39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/* Nec vendor-specific command completion event. */
+#define	TRB_NEC_CMD_COMP	48
+/* Get NEC firmware revision. */
+#define	TRB_NEC_GET_FW		49
+
+static inline const char *xhci_trb_type_string(u8 type)
+{
+	switch (type) {
+	case TRB_NORMAL:
+		return "Normal";
+	case TRB_SETUP:
+		return "Setup Stage";
+	case TRB_DATA:
+		return "Data Stage";
+	case TRB_STATUS:
+		return "Status Stage";
+	case TRB_ISOC:
+		return "Isoch";
+	case TRB_LINK:
+		return "Link";
+	case TRB_EVENT_DATA:
+		return "Event Data";
+	case TRB_TR_NOOP:
+		return "No-Op";
+	case TRB_ENABLE_SLOT:
+		return "Enable Slot Command";
+	case TRB_DISABLE_SLOT:
+		return "Disable Slot Command";
+	case TRB_ADDR_DEV:
+		return "Address Device Command";
+	case TRB_CONFIG_EP:
+		return "Configure Endpoint Command";
+	case TRB_EVAL_CONTEXT:
+		return "Evaluate Context Command";
+	case TRB_RESET_EP:
+		return "Reset Endpoint Command";
+	case TRB_STOP_RING:
+		return "Stop Ring Command";
+	case TRB_SET_DEQ:
+		return "Set TR Dequeue Pointer Command";
+	case TRB_RESET_DEV:
+		return "Reset Device Command";
+	case TRB_FORCE_EVENT:
+		return "Force Event Command";
+	case TRB_NEG_BANDWIDTH:
+		return "Negotiate Bandwidth Command";
+	case TRB_SET_LT:
+		return "Set Latency Tolerance Value Command";
+	case TRB_GET_BW:
+		return "Get Port Bandwidth Command";
+	case TRB_FORCE_HEADER:
+		return "Force Header Command";
+	case TRB_CMD_NOOP:
+		return "No-Op Command";
+	case TRB_TRANSFER:
+		return "Transfer Event";
+	case TRB_COMPLETION:
+		return "Command Completion Event";
+	case TRB_PORT_STATUS:
+		return "Port Status Change Event";
+	case TRB_BANDWIDTH_EVENT:
+		return "Bandwidth Request Event";
+	case TRB_DOORBELL:
+		return "Doorbell Event";
+	case TRB_HC_EVENT:
+		return "Host Controller Event";
+	case TRB_DEV_NOTE:
+		return "Device Notification Event";
+	case TRB_MFINDEX_WRAP:
+		return "MFINDEX Wrap Event";
+	case TRB_NEC_CMD_COMP:
+		return "NEC Command Completion Event";
+	case TRB_NEC_GET_FW:
+		return "NET Get Firmware Revision Command";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+/* Above, but for __le32 types -- can avoid work by swapping constants: */
+#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
+#define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
+#define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT	256
+/* Allow two commands + a link TRB, along with any reserved command TRBs */
+#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
+#define TRB_SEGMENT_SIZE	(TRBS_PER_SEGMENT*16)
+#define TRB_SEGMENT_SHIFT	(ilog2(TRB_SEGMENT_SIZE))
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT		16
+#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)	(TRB_MAX_BUFF_SIZE - \
+					(addr & (TRB_MAX_BUFF_SIZE - 1)))
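+
+/*
+ * Illustrative sketch: clamping the first TRB of a transfer so its buffer
+ * does not cross a 64KB boundary, as required above; the remaining bytes
+ * go into subsequent, boundary-aligned TRBs.  Hypothetical helper, not
+ * part of the original header.
+ */
+static inline u32 xhci_example_first_trb_len(u64 addr, u64 len)
+{
+	u64 max = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+
+	return (len < max) ? (u32)len : (u32)max;
+}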
+
+struct xhci_segment {
+	union xhci_trb		*trbs;
+	/* private to HCD */
+	struct xhci_segment	*next;
+	dma_addr_t		dma;
+	/* Max packet sized bounce buffer for TD fragment alignment */
+	dma_addr_t		bounce_dma;
+	void			*bounce_buf;
+	unsigned int		bounce_offs;
+	unsigned int		bounce_len;
+};
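+
+/*
+ * Illustrative sketch: stepping to the next TRB, hopping to the next
+ * segment when a link TRB is reached (cf. TRB_TYPE_LINK_LE32() above).
+ * Hypothetical helper, not part of the original header.
+ */
+static inline union xhci_trb *xhci_example_next_trb(struct xhci_segment **seg,
+						    union xhci_trb *trb)
+{
+	if (TRB_TYPE_LINK_LE32(trb->link.control)) {
+		*seg = (*seg)->next;
+		return (*seg)->trbs;	/* first TRB of the next segment */
+	}
+	return trb + 1;
+}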
+
+struct xhci_td {
+	struct list_head	td_list;
+	struct list_head	cancelled_td_list;
+	struct urb		*urb;
+	struct xhci_segment	*start_seg;
+	union xhci_trb		*first_trb;
+	union xhci_trb		*last_trb;
+	struct xhci_segment	*bounce_seg;
+	/* actual_length of the URB has already been set */
+	bool			urb_length_set;
+};
+
+/* xHCI command default timeout value */
+#define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)
+
+/* command descriptor */
+struct xhci_cd {
+	struct xhci_command	*command;
+	union xhci_trb		*cmd_trb;
+};
+
+struct xhci_dequeue_state {
+	struct xhci_segment *new_deq_seg;
+	union xhci_trb *new_deq_ptr;
+	int new_cycle_state;
+	unsigned int stream_id;
+};
+
+enum xhci_ring_type {
+	TYPE_CTRL = 0,
+	TYPE_ISOC,
+	TYPE_BULK,
+	TYPE_INTR,
+	TYPE_STREAM,
+	TYPE_COMMAND,
+	TYPE_EVENT,
+};
+
+static inline const char *xhci_ring_type_string(enum xhci_ring_type type)
+{
+	switch (type) {
+	case TYPE_CTRL:
+		return "CTRL";
+	case TYPE_ISOC:
+		return "ISOC";
+	case TYPE_BULK:
+		return "BULK";
+	case TYPE_INTR:
+		return "INTR";
+	case TYPE_STREAM:
+		return "STREAM";
+	case TYPE_COMMAND:
+		return "CMD";
+	case TYPE_EVENT:
+		return "EVENT";
+	}
+
+	return "UNKNOWN";
+}
+
+struct xhci_ring {
+	struct xhci_segment	*first_seg;
+	struct xhci_segment	*last_seg;
+	union  xhci_trb		*enqueue;
+	struct xhci_segment	*enq_seg;
+	union  xhci_trb		*dequeue;
+	struct xhci_segment	*deq_seg;
+	struct list_head	td_list;
+	/*
+	 * Write the cycle state into the TRB cycle field to give ownership of
+	 * the TRB to the host controller (if we are the producer), or to check
+	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
+	 */
+	u32			cycle_state;
+	unsigned int		stream_id;
+	unsigned int		num_segs;
+	unsigned int		num_trbs_free;
+	unsigned int		num_trbs_free_temp;
+	unsigned int		bounce_buf_len;
+	enum xhci_ring_type	type;
+	bool			last_td_was_short;
+	struct radix_tree_root	*trb_address_map;
+};
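+
+/*
+ * Illustrative sketch of the section 4.9.1 ownership rule described above:
+ * a consumer owns a TRB only while its cycle bit matches the ring's cycle
+ * state.  Hypothetical helper, not part of the original header.
+ */
+static inline bool xhci_example_trb_is_ours(struct xhci_ring *ring,
+					    union xhci_trb *trb)
+{
+	return (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
+		ring->cycle_state;
+}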
+
+struct xhci_erst_entry {
+	/* 64-bit event ring segment address */
+	__le64	seg_addr;
+	__le32	seg_size;
+	/* Set to zero */
+	__le32	rsvd;
+};
+
+struct xhci_erst {
+	struct xhci_erst_entry	*entries;
+	unsigned int		num_entries;
+	/* xhci->event_ring keeps track of segment dma addresses */
+	dma_addr_t		erst_dma_addr;
+	/* Num entries the ERST can contain */
+	unsigned int		erst_size;
+};
+
+struct xhci_scratchpad {
+	u64 *sp_array;
+	dma_addr_t sp_dma;
+	void **sp_buffers;
+};
+
+struct urb_priv {
+	int	num_tds;
+	int	num_tds_done;
+	struct	xhci_td	td[0];
+};
+
+/*
+ * Each segment table entry is 4*32bits long.  1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits per entry) = 64 segment entries in
+ * the table, meaning 64 ring segments.
+ */
+/* Initial number of event ring segments allocated */
+#define	ERST_NUM_SEGS	1
+/* Initial allocated size of the ERST, in number of entries */
+#define	ERST_SIZE	64
+/* Initial number of ERST entries allocated */
+#define	ERST_ENTRIES	1
+/* Poll every 60 seconds */
+#define	POLL_TIMEOUT	60
+/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
+#define XHCI_STOP_EP_CMD_TIMEOUT	5
+/* XXX: Make these module parameters */
+
+struct s3_save {
+	u32	command;
+	u32	dev_nt;
+	u64	dcbaa_ptr;
+	u32	config_reg;
+	u32	irq_pending;
+	u32	irq_control;
+	u32	erst_size;
+	u64	erst_base;
+	u64	erst_dequeue;
+};
+
+/* Use for lpm */
+struct dev_info {
+	u32			dev_id;
+	struct	list_head	list;
+};
+
+struct xhci_bus_state {
+	unsigned long		bus_suspended;
+	unsigned long		next_statechange;
+
+	/* Port suspend arrays are indexed by the portnum of the fake roothub */
+	/* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
+	u32			port_c_suspend;
+	u32			suspended_ports;
+	u32			port_remote_wakeup;
+	unsigned long		resume_done[USB_MAXCHILDREN];
+	/* which ports have started to resume */
+	unsigned long		resuming_ports;
+	/* Which ports are waiting on RExit to U0 transition. */
+	unsigned long		rexit_ports;
+	struct completion	rexit_done[USB_MAXCHILDREN];
+};
+
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define	XHCI_MAX_REXIT_TIMEOUT_MS	20
+
+static inline unsigned int hcd_index(struct usb_hcd *hcd)
+{
+	if (hcd->speed >= HCD_USB3)
+		return 0;
+	else
+		return 1;
+}
+struct xhci_port {
+	__le32 __iomem		*addr;
+	int			hw_portnum;
+	int			hcd_portnum;
+	struct xhci_hub		*rhub;
+};
+
+struct xhci_hub {
+	struct xhci_port	**ports;
+	unsigned int		num_ports;
+	struct usb_hcd		*hcd;
+	/* supported protocol extended capability values */
+	u8			maj_rev;
+	u8			min_rev;
+	u32			*psi;	/* array of protocol speed ID entries */
+	u8			psi_count;
+	u8			psi_uid_count;
+};
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+	struct usb_hcd *main_hcd;
+	struct usb_hcd *shared_hcd;
+	/* glue to PCI and HCD framework */
+	struct xhci_cap_regs __iomem *cap_regs;
+	struct xhci_op_regs __iomem *op_regs;
+	struct xhci_run_regs __iomem *run_regs;
+	struct xhci_doorbell_array __iomem *dba;
+	/* Our HCD's current interrupter register set */
+	struct	xhci_intr_reg __iomem *ir_set;
+
+	/* Cached register copies of read-only HC data */
+	__u32		hcs_params1;
+	__u32		hcs_params2;
+	__u32		hcs_params3;
+	__u32		hcc_params;
+	__u32		hcc_params2;
+
+	spinlock_t	lock;
+
+	/* packed release number */
+	u8		sbrn;
+	u16		hci_version;
+	u8		max_slots;
+	u8		max_interrupters;
+	u8		max_ports;
+	u8		isoc_threshold;
+	/* imod_interval in ns (I * 250ns) */
+	u32		imod_interval;
+	int		event_ring_max;
+	/* 4KB min, 128MB max */
+	int		page_size;
+	/* Valid values are 12 to 20, inclusive */
+	int		page_shift;
+	/* msi-x vectors */
+	int		msix_count;
+	/* optional clocks */
+	struct clk		*clk;
+	struct clk		*reg_clk;
+	/* data structures */
+	struct xhci_device_context_array *dcbaa;
+	struct xhci_ring	*cmd_ring;
+	unsigned int            cmd_ring_state;
+#define CMD_RING_STATE_RUNNING         (1 << 0)
+#define CMD_RING_STATE_ABORTED         (1 << 1)
+#define CMD_RING_STATE_STOPPED         (1 << 2)
+	struct list_head        cmd_list;
+	unsigned int		cmd_ring_reserved_trbs;
+	struct delayed_work	cmd_timer;
+	struct completion	cmd_ring_stop_completion;
+	struct xhci_command	*current_cmd;
+	struct xhci_ring	*event_ring;
+	struct xhci_erst	erst;
+	/* Scratchpad */
+	struct xhci_scratchpad  *scratchpad;
+	/* Store LPM test failed devices' information */
+	struct list_head	lpm_failed_devs;
+
+	/* slot enabling and address device helpers */
+	/* these are not thread-safe, so use the mutex */
+	struct mutex mutex;
+	/* For USB 3.0 LPM enable/disable. */
+	struct xhci_command		*lpm_command;
+	/* Internal mirror of the HW's dcbaa */
+	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
+	/* For keeping track of bandwidth domains per roothub. */
+	struct xhci_root_port_bw_info	*rh_bw;
+
+	/* DMA pools */
+	struct dma_pool	*device_pool;
+	struct dma_pool	*segment_pool;
+	struct dma_pool	*small_streams_pool;
+	struct dma_pool	*medium_streams_pool;
+
+	/* Host controller watchdog timer structures */
+	unsigned int		xhc_state;
+
+	u32			command;
+	struct s3_save		s3;
+/* Host controller is dying - not responding to commands. "I'm not dead yet!"
+ *
+ * xHC interrupts have been disabled and a watchdog timer will halt (or has
+ * already halted) the xHCI host and complete all URBs with an -ESHUTDOWN
+ * code.  Any code that sees this status (other than the timer that set it)
+ * should stop touching hardware immediately.  Interrupt handlers should
+ * return immediately when they see this status (any time they drop and
+ * re-acquire xhci->lock).
+ * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
+ * putting the TD on the canceled list, etc.
+ *
+ * There are no reports of xHCI host controllers that display this issue.
+ */
+#define XHCI_STATE_DYING	(1 << 0)
+#define XHCI_STATE_HALTED	(1 << 1)
+#define XHCI_STATE_REMOVING	(1 << 2)
+	unsigned long long	quirks;
+#define	XHCI_LINK_TRB_QUIRK	BIT_ULL(0)
+#define XHCI_RESET_EP_QUIRK	BIT_ULL(1)
+#define XHCI_NEC_HOST		BIT_ULL(2)
+#define XHCI_AMD_PLL_FIX	BIT_ULL(3)
+#define XHCI_SPURIOUS_SUCCESS	BIT_ULL(4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle.  Ideally, they would signal that they can't handle
+ * anymore endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't.  Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK	BIT_ULL(5)
+#define XHCI_BROKEN_MSI		BIT_ULL(6)
+#define XHCI_RESET_ON_RESUME	BIT_ULL(7)
+#define	XHCI_SW_BW_CHECKING	BIT_ULL(8)
+#define XHCI_AMD_0x96_HOST	BIT_ULL(9)
+#define XHCI_TRUST_TX_LENGTH	BIT_ULL(10)
+#define XHCI_LPM_SUPPORT	BIT_ULL(11)
+#define XHCI_INTEL_HOST		BIT_ULL(12)
+#define XHCI_SPURIOUS_REBOOT	BIT_ULL(13)
+#define XHCI_COMP_MODE_QUIRK	BIT_ULL(14)
+#define XHCI_AVOID_BEI		BIT_ULL(15)
+#define XHCI_PLAT		BIT_ULL(16)
+#define XHCI_SLOW_SUSPEND	BIT_ULL(17)
+#define XHCI_SPURIOUS_WAKEUP	BIT_ULL(18)
+/* For controllers with a broken beyond repair streams implementation */
+#define XHCI_BROKEN_STREAMS	BIT_ULL(19)
+#define XHCI_PME_STUCK_QUIRK	BIT_ULL(20)
+#define XHCI_MTK_HOST		BIT_ULL(21)
+#define XHCI_SSIC_PORT_UNUSED	BIT_ULL(22)
+#define XHCI_NO_64BIT_SUPPORT	BIT_ULL(23)
+#define XHCI_MISSING_CAS	BIT_ULL(24)
+/* For controller with a broken Port Disable implementation */
+#define XHCI_BROKEN_PORT_PED	BIT_ULL(25)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_7	BIT_ULL(26)
+#define XHCI_U2_DISABLE_WAKE	BIT_ULL(27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	BIT_ULL(28)
+#define XHCI_HW_LPM_DISABLE	BIT_ULL(29)
+#define XHCI_SUSPEND_DELAY	BIT_ULL(30)
+#define XHCI_INTEL_USB_ROLE_SW	BIT_ULL(31)
+#define XHCI_ZERO_64B_REGS	BIT_ULL(32)
+#define XHCI_RESET_PLL_ON_DISCONNECT	BIT_ULL(34)
+#define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
+
+	unsigned int		num_active_eps;
+	unsigned int		limit_active_eps;
+	/* There are two roothubs to keep track of bus suspend info for */
+	struct xhci_bus_state   bus_state[2];
+	struct xhci_port	*hw_ports;
+	struct xhci_hub		usb2_rhub;
+	struct xhci_hub		usb3_rhub;
+	/* support xHCI 0.96 spec USB2 software LPM */
+	unsigned		sw_lpm_support:1;
+	/* support xHCI 1.0 spec USB2 hardware LPM */
+	unsigned		hw_lpm_support:1;
+	/* Broken Suspend flag for SNPS Suspend resume issue */
+	unsigned		broken_suspend:1;
+	/* cached usb2 extended protocol capabilities */
+	u32                     *ext_caps;
+	unsigned int            num_ext_caps;
+	/* Compliance Mode Recovery Data */
+	struct timer_list	comp_mode_recovery_timer;
+	u32			port_status_u0;
+	u16			test_mode;
+/* Compliance Mode Timer Triggered every 2 seconds */
+#define COMP_MODE_RCVRY_MSECS 2000
+
+	struct dentry		*debugfs_root;
+	struct dentry		*debugfs_slots;
+	struct list_head	regset_list;
+
+	void			*dbc;
+	/* platform-specific data -- must come last */
+	unsigned long		priv[0] __aligned(sizeof(s64));
+};
+
+/* Platform specific overrides to generic XHCI hc_driver ops */
+struct xhci_driver_overrides {
+	size_t extra_priv_size;
+	int (*reset)(struct usb_hcd *hcd);
+	int (*start)(struct usb_hcd *hcd);
+};
+
+#define	XHCI_CFC_DELAY		10
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+	struct usb_hcd *primary_hcd;
+
+	if (usb_hcd_is_primary_hcd(hcd))
+		primary_hcd = hcd;
+	else
+		primary_hcd = hcd->primary_hcd;
+
+	return (struct xhci_hcd *) (primary_hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+	return xhci->main_hcd;
+}
+
+#define xhci_dbg(xhci, fmt, args...) \
+	dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_err(xhci, fmt, args...) \
+	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn_ratelimited(xhci, fmt, args...) \
+	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_info(xhci, fmt, args...) \
+	dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+		__le64 __iomem *regs)
+{
+	return lo_hi_readq(regs);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+				 const u64 val, __le64 __iomem *regs)
+{
+	lo_hi_writeq(val, regs);
+}
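+
+/*
+ * Illustrative sketch: programming an interrupter's 64-bit ERST base with
+ * the helper above, preserving the reserved low bits (ERST_PTR_MASK).
+ * Hypothetical helper, not part of the original header.
+ */
+static inline void xhci_example_set_erst_base(struct xhci_hcd *xhci,
+					      dma_addr_t erst_dma)
+{
+	u64 val = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+
+	val &= ERST_PTR_MASK;
+	val |= (erst_dma & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val, &xhci->ir_set->erst_base);
+}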
+
+static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
+{
+	return xhci->quirks & XHCI_LINK_TRB_QUIRK;
+}
+
+/* xHCI debugging */
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx);
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+			const char *fmt, ...);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+		struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_address(unsigned int ep_index);
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps);
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_input_control_ctx *ctrl_ctx,
+		struct xhci_virt_device *virt_dev);
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx,
+		unsigned int ep_index);
+void xhci_slot_copy(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_container_ctx *out_ctx);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		gfp_t mem_flags);
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		unsigned int num_trbs, gfp_t flags);
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+		struct xhci_ring *evt_ring,
+		struct xhci_erst *erst,
+		gfp_t flags);
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index);
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+		unsigned int num_stream_ctxs,
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t flags);
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+		struct xhci_stream_info *stream_info);
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		struct xhci_stream_info *stream_info);
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
+		struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+	struct xhci_virt_device *virt_dev, bool drop_control_ep);
+struct xhci_ring *xhci_dma_to_transfer_ring(
+		struct xhci_virt_ep *ep,
+		u64 address);
+struct xhci_ring *xhci_stream_id_to_ring(
+		struct xhci_virt_device *dev,
+		unsigned int ep_index,
+		unsigned int stream_id);
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags);
+struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
+		bool allocate_completion, gfp_t mem_flags);
+void xhci_urb_free_priv(struct urb_priv *urb_priv);
+void xhci_free_command(struct xhci_hcd *xhci,
+		struct xhci_command *command);
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+		int type, gfp_t flags);
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx);
+
+/* xHCI host controller glue */
+typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+void xhci_quiesce(struct xhci_hcd *xhci);
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_start(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_run(struct usb_hcd *hcd);
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+void xhci_init_driver(struct hc_driver *drv,
+		      const struct xhci_driver_overrides *over);
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
+int xhci_ext_cap_init(struct xhci_hcd *xhci);
+
+int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
+
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+irqreturn_t xhci_msi_irq(int irq, void *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
+		struct xhci_segment *start_seg, union xhci_trb *start_trb,
+		union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 field1, u32 field2, u32 field3, u32 field4);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index, int suspend);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
+		bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index,
+		enum xhci_ep_reset_type reset_type);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 slot_id);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id, struct xhci_td *cur_td,
+		struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_dequeue_state *deq_state);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+		unsigned int stream_id, struct xhci_td *td);
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
+void xhci_handle_command_timeout(struct work_struct *work);
+
+void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
+		unsigned int ep_index, unsigned int stream_id);
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
+unsigned int count_trbs(u64 addr, u64 len);
+
+/* xHCI roothub code */
+void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
+				u32 link_state);
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
+				u32 port_bit);
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+		char *buf, u16 wLength);
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
+
+void xhci_hc_died(struct xhci_hcd *xhci);
+
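+/*
+ * When CONFIG_PM is disabled the names below evaluate to NULL in the
+ * hc_driver initializers, so the bus PM callbacks are compiled out.
+ */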
+#ifdef CONFIG_PM
+int xhci_bus_suspend(struct usb_hcd *hcd);
+int xhci_bus_resume(struct usb_hcd *hcd);
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
+#else
+#define	xhci_bus_suspend	NULL
+#define	xhci_bus_resume		NULL
+#define	xhci_get_resuming_ports	NULL
+#endif	/* CONFIG_PM */
+
+u32 xhci_port_state_to_neutral(u32 state);
+int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+		u16 port);
+void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
+
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id);
+
+static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+								struct urb *urb)
+{
+	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+					xhci_get_endpoint_index(&urb->ep->desc),
+					urb->stream_id);
+}
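+
+/*
+ * Usage sketch (illustrative; xhci and urb are caller-provided): resolve
+ * the ring backing an URB before queueing onto it.  A NULL return means
+ * the slot/endpoint/stream triad does not map to a valid ring:
+ *
+ *	struct xhci_ring *ring = xhci_urb_to_transfer_ring(xhci, urb);
+ *
+ *	if (!ring)
+ *		return -EINVAL;
+ */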
+
+static inline const char *xhci_slot_state_string(u32 state)
+{
+	switch (state) {
+	case SLOT_STATE_ENABLED:
+		return "enabled/disabled";
+	case SLOT_STATE_DEFAULT:
+		return "default";
+	case SLOT_STATE_ADDRESSED:
+		return "addressed";
+	case SLOT_STATE_CONFIGURED:
+		return "configured";
+	default:
+		return "reserved";
+	}
+}
+
+static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
+		u32 field3)
+{
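+	/*
+	 * Debug-only helper: the returned pointer refers to a shared static
+	 * buffer, so the string must be consumed (printed) right away and
+	 * concurrent callers may interleave.
+	 */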
+	static char str[256];
+	int type = TRB_FIELD_TO_TYPE(field3);
+
+	switch (type) {
+	case TRB_LINK:
+		sprintf(str,
+			"LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c",
+			field1, field0, GET_INTR_TARGET(field2),
+			xhci_trb_type_string(type),
+			field3 & TRB_IOC ? 'I' : 'i',
+			field3 & TRB_CHAIN ? 'C' : 'c',
+			field3 & TRB_TC ? 'T' : 't',
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_TRANSFER:
+	case TRB_COMPLETION:
+	case TRB_PORT_STATUS:
+	case TRB_BANDWIDTH_EVENT:
+	case TRB_DOORBELL:
+	case TRB_HC_EVENT:
+	case TRB_DEV_NOTE:
+	case TRB_MFINDEX_WRAP:
+		sprintf(str,
+			"TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c",
+			field1, field0,
+			xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
+			EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+			/* TRB_TO_EP_INDEX() is 0-based; +1 gives the endpoint ID */
+			TRB_TO_EP_INDEX(field3) + 1,
+			xhci_trb_type_string(type),
+			field3 & EVENT_DATA ? 'E' : 'e',
+			field3 & TRB_CYCLE ? 'C' : 'c');
+
+		break;
+	case TRB_SETUP:
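+		/*
+		 * A Setup Stage TRB carries the 8-byte SETUP packet in its
+		 * first two dwords: field0 holds bmRequestType (bits 7:0),
+		 * bRequest (15:8) and wValue (31:16); field1 holds wIndex
+		 * (15:0) and wLength (31:16).
+		 */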
+		sprintf(str, "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
+				field0 & 0xff,
+				(field0 & 0xff00) >> 8,
+				(field0 & 0xff000000) >> 24,
+				(field0 & 0xff0000) >> 16,
+				(field1 & 0xff00) >> 8,
+				field1 & 0xff,
+				(field1 & 0xffff0000) >> 16,
+				TRB_LEN(field2), GET_TD_SIZE(field2),
+				GET_INTR_TARGET(field2),
+				xhci_trb_type_string(type),
+				field3 & TRB_IDT ? 'I' : 'i',
+				field3 & TRB_IOC ? 'I' : 'i',
+				field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_DATA:
+		sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
+				field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+				GET_INTR_TARGET(field2),
+				xhci_trb_type_string(type),
+				field3 & TRB_IDT ? 'I' : 'i',
+				field3 & TRB_IOC ? 'I' : 'i',
+				field3 & TRB_CHAIN ? 'C' : 'c',
+				field3 & TRB_NO_SNOOP ? 'S' : 's',
+				field3 & TRB_ISP ? 'I' : 'i',
+				field3 & TRB_ENT ? 'E' : 'e',
+				field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_STATUS:
+		sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
+				field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+				GET_INTR_TARGET(field2),
+				xhci_trb_type_string(type),
+				field3 & TRB_IOC ? 'I' : 'i',
+				field3 & TRB_CHAIN ? 'C' : 'c',
+				field3 & TRB_ENT ? 'E' : 'e',
+				field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_NORMAL:
+	case TRB_ISOC:
+	case TRB_EVENT_DATA:
+	case TRB_TR_NOOP:
+		sprintf(str,
+			"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
+			field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+			GET_INTR_TARGET(field2),
+			xhci_trb_type_string(type),
+			field3 & TRB_BEI ? 'B' : 'b',
+			field3 & TRB_IDT ? 'I' : 'i',
+			field3 & TRB_IOC ? 'I' : 'i',
+			field3 & TRB_CHAIN ? 'C' : 'c',
+			field3 & TRB_NO_SNOOP ? 'S' : 's',
+			field3 & TRB_ISP ? 'I' : 'i',
+			field3 & TRB_ENT ? 'E' : 'e',
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+
+	case TRB_CMD_NOOP:
+	case TRB_ENABLE_SLOT:
+		sprintf(str,
+			"%s: flags %c",
+			xhci_trb_type_string(type),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_DISABLE_SLOT:
+	case TRB_NEG_BANDWIDTH:
+		sprintf(str,
+			"%s: slot %d flags %c",
+			xhci_trb_type_string(type),
+			TRB_TO_SLOT_ID(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_ADDR_DEV:
+		sprintf(str,
+			"%s: ctx %08x%08x slot %d flags %c:%c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_SLOT_ID(field3),
+			field3 & TRB_BSR ? 'B' : 'b',
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_CONFIG_EP:
+		sprintf(str,
+			"%s: ctx %08x%08x slot %d flags %c:%c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_SLOT_ID(field3),
+			field3 & TRB_DC ? 'D' : 'd',
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_EVAL_CONTEXT:
+		sprintf(str,
+			"%s: ctx %08x%08x slot %d flags %c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_SLOT_ID(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_RESET_EP:
+		sprintf(str,
+			"%s: ctx %08x%08x slot %d ep %d flags %c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_SLOT_ID(field3),
+			/* TRB_TO_EP_INDEX() is 0-based; +1 gives the endpoint ID */
+			TRB_TO_EP_INDEX(field3) + 1,
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_STOP_RING:
+		sprintf(str,
+			"%s: slot %d sp %d ep %d flags %c",
+			xhci_trb_type_string(type),
+			TRB_TO_SLOT_ID(field3),
+			TRB_TO_SUSPEND_PORT(field3),
+			/* TRB_TO_EP_INDEX() is 0-based; +1 gives the endpoint ID */
+			TRB_TO_EP_INDEX(field3) + 1,
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_SET_DEQ:
+		sprintf(str,
+			"%s: deq %08x%08x stream %d slot %d ep %d flags %c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_STREAM_ID(field2),
+			TRB_TO_SLOT_ID(field3),
+			/* TRB_TO_EP_INDEX() is 0-based; +1 gives the endpoint ID */
+			TRB_TO_EP_INDEX(field3) + 1,
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_RESET_DEV:
+		sprintf(str,
+			"%s: slot %d flags %c",
+			xhci_trb_type_string(type),
+			TRB_TO_SLOT_ID(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_FORCE_EVENT:
+		sprintf(str,
+			"%s: event %08x%08x vf intr %d vf id %d flags %c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_VF_INTR_TARGET(field2),
+			TRB_TO_VF_ID(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_SET_LT:
+		sprintf(str,
+			"%s: belt %d flags %c",
+			xhci_trb_type_string(type),
+			TRB_TO_BELT(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_GET_BW:
+		sprintf(str,
+			"%s: ctx %08x%08x slot %d speed %d flags %c",
+			xhci_trb_type_string(type),
+			field1, field0,
+			TRB_TO_SLOT_ID(field3),
+			TRB_TO_DEV_SPEED(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	case TRB_FORCE_HEADER:
+		sprintf(str,
+			"%s: info %08x%08x%08x pkt type %d roothub port %d flags %c",
+			xhci_trb_type_string(type),
+			field2, field1, field0 & 0xffffffe0,
+			TRB_TO_PACKET_TYPE(field0),
+			TRB_TO_ROOTHUB_PORT(field3),
+			field3 & TRB_CYCLE ? 'C' : 'c');
+		break;
+	default:
+		sprintf(str,
+			"type '%s' -> raw %08x %08x %08x %08x",
+			xhci_trb_type_string(type),
+			field0, field1, field2, field3);
+	}
+
+	return str;
+}
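+
+/*
+ * Usage sketch (illustrative; trb is a caller-provided union xhci_trb *):
+ * TRB words are little-endian in memory, so convert before decoding:
+ *
+ *	xhci_dbg(xhci, "%s\n",
+ *		 xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
+ *				 le32_to_cpu(trb->generic.field[1]),
+ *				 le32_to_cpu(trb->generic.field[2]),
+ *				 le32_to_cpu(trb->generic.field[3])));
+ */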
+
+static inline const char *xhci_decode_slot_context(u32 info, u32 info2,
+		u32 tt_info, u32 state)
+{
+	static char str[1024];
+	u32 speed;
+	u32 hub;
+	u32 mtt;
+	int ret = 0;
+
+	speed = info & DEV_SPEED;
+	hub = info & DEV_HUB;
+	mtt = info & DEV_MTT;
+
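+	/*
+	 * The ({ ... }) construct below is a GNU C statement expression:
+	 * it evaluates to the speed name consumed by the first %s.
+	 */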
+	ret = sprintf(str, "RS %05x %s%s%s Ctx Entries %d MEL %d us Port# %d/%d",
+			info & ROUTE_STRING_MASK,
+			({ char *s;
+			switch (speed) {
+			case SLOT_SPEED_FS:
+				s = "full-speed";
+				break;
+			case SLOT_SPEED_LS:
+				s = "low-speed";
+				break;
+			case SLOT_SPEED_HS:
+				s = "high-speed";
+				break;
+			case SLOT_SPEED_SS:
+				s = "super-speed";
+				break;
+			case SLOT_SPEED_SSP:
+				s = "super-speed plus";
+				break;
+			default:
+				s = "UNKNOWN speed";
+			} s; }),
+			mtt ? " multi-TT" : "",
+			hub ? " Hub" : "",
+			(info & LAST_CTX_MASK) >> 27,
+			info2 & MAX_EXIT,
+			DEVINFO_TO_ROOT_HUB_PORT(info2),
+			DEVINFO_TO_MAX_PORTS(info2));
+
+	ret += sprintf(str + ret, " [TT Slot %d Port# %d TTT %d Intr %d] Addr %d State %s",
+			tt_info & TT_SLOT, (tt_info & TT_PORT) >> 8,
+			GET_TT_THINK_TIME(tt_info), GET_INTR_TARGET(tt_info),
+			state & DEV_ADDR_MASK,
+			xhci_slot_state_string(GET_SLOT_STATE(state)));
+
+	return str;
+}
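+
+/*
+ * Usage sketch (illustrative; ctx is a caller-provided container context):
+ *
+ *	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ *
+ *	xhci_dbg(xhci, "%s\n",
+ *		 xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
+ *					  le32_to_cpu(slot_ctx->dev_info2),
+ *					  le32_to_cpu(slot_ctx->tt_info),
+ *					  le32_to_cpu(slot_ctx->dev_state)));
+ */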
+
+static inline const char *xhci_portsc_link_state_string(u32 portsc)
+{
+	switch (portsc & PORT_PLS_MASK) {
+	case XDEV_U0:
+		return "U0";
+	case XDEV_U1:
+		return "U1";
+	case XDEV_U2:
+		return "U2";
+	case XDEV_U3:
+		return "U3";
+	case XDEV_DISABLED:
+		return "Disabled";
+	case XDEV_RXDETECT:
+		return "RxDetect";
+	case XDEV_INACTIVE:
+		return "Inactive";
+	case XDEV_POLLING:
+		return "Polling";
+	case XDEV_RECOVERY:
+		return "Recovery";
+	case XDEV_HOT_RESET:
+		return "Hot Reset";
+	case XDEV_COMP_MODE:
+		return "Compliance mode";
+	case XDEV_TEST_MODE:
+		return "Test mode";
+	case XDEV_RESUME:
+		return "Resume";
+	default:
+		break;
+	}
+	return "Unknown";
+}
+
+static inline const char *xhci_decode_portsc(u32 portsc)
+{
+	static char str[256];
+	int ret;
+
+	ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
+		      portsc & PORT_POWER	? "Powered" : "Powered-off",
+		      portsc & PORT_CONNECT	? "Connected" : "Not-connected",
+		      portsc & PORT_PE		? "Enabled" : "Disabled",
+		      xhci_portsc_link_state_string(portsc),
+		      DEV_PORT_SPEED(portsc));
+
+	if (portsc & PORT_OC)
+		ret += sprintf(str + ret, "OverCurrent ");
+	if (portsc & PORT_RESET)
+		ret += sprintf(str + ret, "In-Reset ");
+
+	ret += sprintf(str + ret, "Change: ");
+	if (portsc & PORT_CSC)
+		ret += sprintf(str + ret, "CSC ");
+	if (portsc & PORT_PEC)
+		ret += sprintf(str + ret, "PEC ");
+	if (portsc & PORT_WRC)
+		ret += sprintf(str + ret, "WRC ");
+	if (portsc & PORT_OCC)
+		ret += sprintf(str + ret, "OCC ");
+	if (portsc & PORT_RC)
+		ret += sprintf(str + ret, "PRC ");
+	if (portsc & PORT_PLC)
+		ret += sprintf(str + ret, "PLC ");
+	if (portsc & PORT_CEC)
+		ret += sprintf(str + ret, "CEC ");
+	if (portsc & PORT_CAS)
+		ret += sprintf(str + ret, "CAS ");
+
+	ret += sprintf(str + ret, "Wake: ");
+	if (portsc & PORT_WKCONN_E)
+		ret += sprintf(str + ret, "WCE ");
+	if (portsc & PORT_WKDISC_E)
+		ret += sprintf(str + ret, "WDE ");
+	if (portsc & PORT_WKOC_E)
+		ret += sprintf(str + ret, "WOE ");
+
+	return str;
+}
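+
+/*
+ * Usage sketch (illustrative; port is a caller-provided struct xhci_port *):
+ *
+ *	u32 portsc = readl(port->addr);
+ *
+ *	xhci_dbg(xhci, "%s\n", xhci_decode_portsc(portsc));
+ */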
+
+static inline const char *xhci_ep_state_string(u8 state)
+{
+	switch (state) {
+	case EP_STATE_DISABLED:
+		return "disabled";
+	case EP_STATE_RUNNING:
+		return "running";
+	case EP_STATE_HALTED:
+		return "halted";
+	case EP_STATE_STOPPED:
+		return "stopped";
+	case EP_STATE_ERROR:
+		return "error";
+	default:
+		return "INVALID";
+	}
+}
+
+static inline const char *xhci_ep_type_string(u8 type)
+{
+	switch (type) {
+	case ISOC_OUT_EP:
+		return "Isoc OUT";
+	case BULK_OUT_EP:
+		return "Bulk OUT";
+	case INT_OUT_EP:
+		return "Int OUT";
+	case CTRL_EP:
+		return "Ctrl";
+	case ISOC_IN_EP:
+		return "Isoc IN";
+	case BULK_IN_EP:
+		return "Bulk IN";
+	case INT_IN_EP:
+		return "Int IN";
+	default:
+		return "INVALID";
+	}
+}
+
+static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
+		u32 tx_info)
+{
+	static char str[1024];
+	int ret;
+
+	u32 esit;
+	u16 maxp;
+	u16 avg;
+
+	u8 max_pstr;
+	u8 ep_state;
+	u8 interval;
+	u8 ep_type;
+	u8 burst;
+	u8 cerr;
+	u8 mult;
+
+	bool lsa;
+	bool hid;
+
+	esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+		CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
+
+	ep_state = info & EP_STATE_MASK;
+	max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
+	interval = CTX_TO_EP_INTERVAL(info);
+	mult = CTX_TO_EP_MULT(info) + 1;
+	lsa = !!(info & EP_HAS_LSA);
+
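+	/* CErr is bits 2:1 and Host Initiate Disable (HID) is bit 7 of info2 */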
+	cerr = (info2 & (3 << 1)) >> 1;
+	ep_type = CTX_TO_EP_TYPE(info2);
+	hid = !!(info2 & (1 << 7));
+	burst = CTX_TO_MAX_BURST(info2);
+	maxp = MAX_PACKET_DECODED(info2);
+
+	avg = EP_AVG_TRB_LENGTH(tx_info);
+
+	ret = sprintf(str, "State %s mult %d max P. Streams %d %s",
+			xhci_ep_state_string(ep_state), mult,
+			max_pstr, lsa ? "LSA " : "");
+
+	ret += sprintf(str + ret, "interval %d us max ESIT payload %d CErr %d ",
+			(1 << interval) * 125, esit, cerr);
+
+	ret += sprintf(str + ret, "Type %s %sburst %d maxp %d deq %016llx ",
+			xhci_ep_type_string(ep_type), hid ? "HID " : "",
+			burst, maxp, deq);
+
+	ret += sprintf(str + ret, "avg trb len %d", avg);
+
+	return str;
+}
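+
+/*
+ * Usage sketch (illustrative; ctx and ep_index are caller-provided): the
+ * dequeue pointer is 64 bits wide, hence le64_to_cpu():
+ *
+ *	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, ep_index);
+ *
+ *	xhci_dbg(xhci, "%s\n",
+ *		 xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
+ *					le32_to_cpu(ep_ctx->ep_info2),
+ *					le64_to_cpu(ep_ctx->deq),
+ *					le32_to_cpu(ep_ctx->tx_info)));
+ */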
+
+#endif /* __LINUX_XHCI_HCD_H */