v4.19.13 snapshot.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 0000000..39b181d
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,1121 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SATA/PATA driver configuration
+#
+
+config HAVE_PATA_PLATFORM
+	bool
+	help
+	  This is an internal configuration node for any machine that
+	  uses the pata-platform driver to enable the relevant driver in
+	  the configuration structure without having to submit endless
+	  patches to update the PATA_PLATFORM entry.
+
+menuconfig ATA
+	tristate "Serial ATA and Parallel ATA drivers (libata)"
+	depends on HAS_IOMEM
+	depends on BLOCK
+	select SCSI
+	select GLOB
+	---help---
+	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
+	  any other ATA device under Linux, say Y and make sure that you know
+	  the name of your ATA host adapter (the card inside your computer
+	  that "speaks" the ATA protocol, also called ATA controller),
+	  because you will be asked for it.
+
+	  NOTE: ATA enables basic SCSI support; *however*,
+	  'SCSI disk support', 'SCSI tape support', or
+	  'SCSI CDROM support' may also be needed,
+	  depending on your hardware configuration.
+
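+# Editorial note (not part of the upstream file): a typical desktop build
+# enables this stack as modules, e.g. CONFIG_ATA=m and CONFIG_SATA_AHCI=m,
+# plus CONFIG_BLK_DEV_SD from the SCSI menu for the /dev/sd* disk nodes.
+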
+if ATA
+
+config ATA_NONSTANDARD
+	bool
+	default n
+
+config ATA_VERBOSE_ERROR
+	bool "Verbose ATA error reporting"
+	default y
+	help
+	  This option adds parsing of ATA command descriptions and error bits
+	  in libata kernel output, making it easier to interpret.
+	  This option will enlarge the kernel by approx. 6KB. Disable it only
+	  if kernel size is more important than ease of debugging.
+
+	  If unsure, say Y.
+
+config ATA_ACPI
+	bool "ATA ACPI Support"
+	depends on ACPI
+	default y
+	help
+	  This option adds support for ATA-related ACPI objects.
+	  These ACPI objects add the ability to retrieve taskfiles
+	  from the ACPI BIOS and write them to the disk controller.
+	  These objects may be related to performance, security,
+	  power management, or other areas.
+	  You can disable this at kernel boot time by using the
+	  option libata.noacpi=1
+
+config SATA_ZPODD
+	bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
+	depends on ATA_ACPI && PM
+	default n
+	help
+	  This option adds support for SATA Zero Power Optical Disc
+	  Drive (ZPODD). It requires both the ODD and the platform
+	  support, and if enabled, will automatically power on/off the
+	  ODD when certain conditions are satisfied. This does not impact
+	  the end user's experience of the ODD; power is only saved when
+	  the ODD is not in use (i.e. no disc inside).
+
+	  If unsure, say N.
+
+config SATA_PMP
+	bool "SATA Port Multiplier support"
+	default y
+	help
+	  This option adds support for SATA Port Multipliers
+	  (the SATA version of an ethernet hub, or SAS expander).
+
+if HAS_DMA
+
+comment "Controllers with non-SFF native interface"
+
+config SATA_AHCI
+	tristate "AHCI SATA support"
+	depends on PCI
+	help
+	  This option enables support for AHCI Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_MOBILE_LPM_POLICY
+	int "Default SATA Link Power Management policy for mobile chipsets"
+	range 0 4
+	default 0
+	depends on SATA_AHCI
+	help
+	  Select the Default SATA Link Power Management (LPM) policy to use
+	  for mobile / laptop variants of chipsets / "South Bridges".
+
+	  The value set has the following meanings:
+		0 => Keep firmware settings
+		1 => Maximum performance
+		2 => Medium power
+		3 => Medium power with Device Initiated PM enabled
+		4 => Minimum power
+
+	  Note "Minimum power" is known to cause issues, including disk
+	  corruption, with some disks and should not be used.
+
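+# Editorial note (not part of the upstream file): the value above is only
+# the boot-time default.  It can be overridden with the
+# ahci.mobile_lpm_policy module parameter, or changed per host at runtime,
+# e.g.:
+#   echo med_power_with_dipm > /sys/class/scsi_host/host0/link_power_management_policy
+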
+config SATA_AHCI_PLATFORM
+	tristate "Platform AHCI SATA support"
+	help
+	  This option enables support for Platform AHCI Serial ATA
+	  controllers.
+
+	  If unsure, say N.
+
+config AHCI_BRCM
+	tristate "Broadcom AHCI SATA support"
+	depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
+	help
+	  This option enables support for the AHCI SATA3 controller found on
+	  Broadcom SoCs.
+
+	  If unsure, say N.
+
+config AHCI_DA850
+	tristate "DaVinci DA850 AHCI SATA support"
+	depends on ARCH_DAVINCI_DA850
+	help
+	  This option enables support for the DaVinci DA850 SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_DM816
+	tristate "DaVinci DM816 AHCI SATA support"
+	depends on ARCH_OMAP2PLUS
+	help
+	  This option enables support for the DaVinci DM816 SoC's
+	  onboard AHCI SATA controller.
+
+	  If unsure, say N.
+
+config AHCI_ST
+	tristate "ST AHCI SATA support"
+	depends on ARCH_STI
+	help
+	  This option enables support for ST AHCI SATA controller.
+
+	  If unsure, say N.
+
+config AHCI_IMX
+	tristate "Freescale i.MX AHCI SATA support"
+	depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
+	depends on (HWMON && (THERMAL || !THERMAL_OF)) || !HWMON
+	help
+	  This option enables support for the Freescale i.MX SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_CEVA
+	tristate "CEVA AHCI SATA support"
+	depends on OF
+	help
+	  This option enables support for the CEVA AHCI SATA.
+	  It can be found on the Xilinx Zynq UltraScale+ MPSoC.
+
+	  If unsure, say N.
+
+config AHCI_MTK
+	tristate "MediaTek AHCI SATA support"
+	depends on ARCH_MEDIATEK
+	select MFD_SYSCON
+	help
+	  This option enables support for the MediaTek SoC's
+	  onboard AHCI SATA controller.
+
+	  If unsure, say N.
+
+config AHCI_MVEBU
+	tristate "Marvell EBU AHCI SATA support"
+	depends on ARCH_MVEBU
+	help
+	  This option enables support for the Marvell EBU SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_OCTEON
+	tristate "Cavium Octeon Soc Serial ATA"
+	depends on SATA_AHCI_PLATFORM && CAVIUM_OCTEON_SOC
+	default y
+	help
+	  This option enables support for Cavium Octeon SoC Serial ATA.
+
+	  If unsure, say N.
+
+config AHCI_SUNXI
+	tristate "Allwinner sunxi AHCI SATA support"
+	depends on ARCH_SUNXI
+	help
+	  This option enables support for the Allwinner sunxi SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_TEGRA
+	tristate "NVIDIA Tegra AHCI SATA support"
+	depends on ARCH_TEGRA
+	help
+	  This option enables support for the NVIDIA Tegra SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config AHCI_XGENE
+	tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
+	depends on PHY_XGENE
+	help
+	  This option enables support for the APM X-Gene SoC SATA host controller.
+
+config AHCI_QORIQ
+	tristate "Freescale QorIQ AHCI SATA support"
+	depends on OF
+	help
+	  This option enables support for the Freescale QorIQ SoC's
+	  onboard AHCI SATA.
+
+	  If unsure, say N.
+
+config SATA_FSL
+	tristate "Freescale 3.0Gbps SATA support"
+	depends on FSL_SOC
+	help
+	  This option enables support for Freescale 3.0Gbps SATA controller.
+	  It can be found on MPC837x and MPC8315.
+
+	  If unsure, say N.
+
+config SATA_GEMINI
+	tristate "Gemini SATA bridge support"
+	depends on ARCH_GEMINI || COMPILE_TEST
+	default ARCH_GEMINI
+	help
+	  This enables support for the FTIDE010 to SATA bridge
+	  found in Cortina Systems Gemini platform.
+
+	  If unsure, say N.
+
+config SATA_AHCI_SEATTLE
+	tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+	depends on ARCH_SEATTLE
+	help
+	  This option enables support for the AMD Seattle SATA host controller.
+
+	  If unsure, say N.
+
+config SATA_INIC162X
+	tristate "Initio 162x SATA support (Very Experimental)"
+	depends on PCI
+	help
+	  This option enables support for Initio 162x Serial ATA.
+
+config SATA_ACARD_AHCI
+	tristate "ACard AHCI variant (ATP 8620)"
+	depends on PCI
+	help
+	  This option enables support for Acard.
+
+	  If unsure, say N.
+
+config SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
+endif # HAS_DMA
+
+config ATA_SFF
+	bool "ATA SFF support (for legacy IDE and PATA)"
+	default y
+	help
+	  This option adds support for ATA controllers with SFF
+	  compliant or similar programming interface.
+
+	  SFF is the legacy IDE interface that has been around since
+	  the dawn of time.  Almost all PATA controllers have an
+	  SFF interface.  Many SATA controllers have an SFF interface
+	  when configured into a legacy compatibility mode.
+
+	  If you use only modern controllers such as AHCI,
+	  Silicon Image 3124, or Marvell 6440, you may disable
+	  this unneeded SFF support.
+
+	  If unsure, say Y.
+
+if ATA_SFF
+
+comment "SFF controllers with custom DMA interface"
+
+config PDC_ADMA
+	tristate "Pacific Digital ADMA support"
+	depends on PCI
+	help
+	  This option enables support for Pacific Digital ADMA controllers
+
+	  If unsure, say N.
+
+config PATA_OCTEON_CF
+	tristate "OCTEON Boot Bus Compact Flash support"
+	depends on CAVIUM_OCTEON_SOC
+	help
+	  This option enables a polled compact flash driver for use with
+	  compact flash cards attached to the OCTEON boot bus.
+
+	  If unsure, say N.
+
+config SATA_QSTOR
+	tristate "Pacific Digital SATA QStor support"
+	depends on PCI
+	help
+	  This option enables support for Pacific Digital Serial ATA QStor.
+
+	  If unsure, say N.
+
+config SATA_SX4
+	tristate "Promise SATA SX4 support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for Promise Serial ATA SX4.
+
+	  If unsure, say N.
+
+config ATA_BMDMA
+	bool "ATA BMDMA support"
+	depends on HAS_DMA
+	default y
+	help
+	  This option adds support for SFF ATA controllers with BMDMA
+	  capability.  BMDMA stands for bus-master DMA and is the
+	  de facto DMA interface for SFF controllers.
+
+	  If unsure, say Y.
+
+if ATA_BMDMA
+
+comment "SATA SFF controllers with BMDMA"
+
+config ATA_PIIX
+	tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
+	depends on PCI
+	help
+	  This option enables support for ICH5/6/7/8 Serial ATA
+	  and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
+	  host controllers.
+
+	  If unsure, say N.
+
+config SATA_DWC
+	tristate "DesignWare Cores SATA support"
+	depends on DMADEVICES
+	select GENERIC_PHY
+	help
+	  This option enables support for the on-chip SATA controller of the
+	  AppliedMicro processor 460EX.
+
+	  If unsure, say N.
+
+config SATA_DWC_OLD_DMA
+	bool "Support old device trees"
+	depends on SATA_DWC
+	select DW_DMAC_CORE
+	default y if 460EX
+	help
+	  This option enables support for old device trees without the
+	  "dmas" property.
+
+config SATA_DWC_DEBUG
+	bool "Debugging driver version"
+	depends on SATA_DWC
+	help
+	  This option enables debugging output in the driver.
+
+config SATA_DWC_VDEBUG
+	bool "Verbose debug output"
+	depends on SATA_DWC_DEBUG
+	help
+	  This option enables the taskfile dumping and NCQ debugging.
+
+config SATA_HIGHBANK
+	tristate "Calxeda Highbank SATA support"
+	depends on ARCH_HIGHBANK || COMPILE_TEST
+	help
+	  This option enables support for the Calxeda Highbank SoC's
+	  onboard SATA.
+
+	  If unsure, say N.
+
+config SATA_MV
+	tristate "Marvell SATA support"
+	depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
+		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
+	select GENERIC_PHY
+	help
+	  This option enables support for the Marvell Serial ATA family.
+	  Currently supports 88SX[56]0[48][01] PCI(-X) chips,
+	  as well as the newer [67]042 PCI-X/PCIe and SOC devices.
+
+	  If unsure, say N.
+
+config SATA_NV
+	tristate "NVIDIA SATA support"
+	depends on PCI
+	help
+	  This option enables support for NVIDIA Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_PROMISE
+	tristate "Promise SATA TX2/TX4 support"
+	depends on PCI
+	help
+	  This option enables support for Promise Serial ATA TX2/TX4.
+
+	  If unsure, say N.
+
+config SATA_RCAR
+	tristate "Renesas R-Car SATA support"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	help
+	  This option enables support for Renesas R-Car Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_SIL
+	tristate "Silicon Image SATA support"
+	depends on PCI
+	help
+	  This option enables support for Silicon Image Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_SIS
+	tristate "SiS 964/965/966/180 SATA support"
+	depends on PCI
+	select PATA_SIS
+	help
+	  This option enables support for SiS Serial ATA on
+	  SiS 964/965/966/180 and Parallel ATA on SiS 180.
+	  The PATA support for the SiS 180 additionally requires
+	  enabling the PATA_SIS driver in the config.
+
+	  If unsure, say N.
+
+config SATA_SVW
+	tristate "ServerWorks Frodo / Apple K2 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Broadcom/Serverworks/Apple K2
+	  SATA controllers.
+
+	  If unsure, say N.
+
+config SATA_ULI
+	tristate "ULi Electronics SATA support"
+	depends on PCI
+	help
+	  This option enables support for ULi Electronics SATA.
+
+	  If unsure, say N.
+
+config SATA_VIA
+	tristate "VIA SATA support"
+	depends on PCI
+	help
+	  This option enables support for VIA Serial ATA.
+
+	  If unsure, say N.
+
+config SATA_VITESSE
+	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
+	depends on PCI
+	help
+	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
+
+	  If unsure, say N.
+
+comment "PATA SFF controllers with BMDMA"
+
+config PATA_ALI
+	tristate "ALi PATA support"
+	depends on PCI
+	help
+	  This option enables support for the ALi ATA interfaces
+	  found on the many ALi chipsets.
+
+	  If unsure, say N.
+
+config PATA_AMD
+	tristate "AMD/NVidia PATA support"
+	depends on PCI
+	help
+	  This option enables support for the AMD and NVidia PATA
+	  interfaces found on the chipsets for Athlon/Athlon64.
+
+	  If unsure, say N.
+
+config PATA_ARASAN_CF
+	tristate "ARASAN CompactFlash PATA Controller Support"
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
+	depends on DMADEVICES
+	select DMA_ENGINE
+	help
+	  Say Y here to support the ARASAN CompactFlash PATA controller
+
+config PATA_ARTOP
+	tristate "ARTOP 6210/6260 PATA support"
+	depends on PCI
+	help
+	  This option enables support for ARTOP PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_ATIIXP
+	tristate "ATI PATA support"
+	depends on PCI
+	help
+	  This option enables support for the ATI ATA interfaces
+	  found on the many ATI chipsets.
+
+	  If unsure, say N.
+
+config PATA_ATP867X
+	tristate "ARTOP/Acard ATP867X PATA support"
+	depends on PCI
+	help
+	  This option enables support for ARTOP/Acard ATP867X PATA
+	  controllers.
+
+	  If unsure, say N.
+
+config PATA_BK3710
+	tristate "Palmchip BK3710 PATA support"
+	depends on ARCH_DAVINCI
+	help
+	  This option enables support for the integrated IDE controller on
+	  the TI DaVinci SoC.
+
+	  If unsure, say N.
+
+config PATA_CMD64X
+	tristate "CMD64x PATA support"
+	depends on PCI
+	help
+	  This option enables support for the CMD64x series chips
+	  except for the CMD640.
+
+	  If unsure, say N.
+
+config PATA_CS5520
+	tristate "CS5510/5520 PATA support"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	help
+	  This option enables support for the Cyrix 5510/5520
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5530
+	tristate "CS5530 PATA support"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	help
+	  This option enables support for the Cyrix/NatSemi/AMD CS5530
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5535
+	tristate "CS5535 PATA support (Experimental)"
+	depends on PCI && X86_32
+	help
+	  This option enables support for the NatSemi/AMD CS5535
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5536
+	tristate "CS5536 PATA support"
+	depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
+	help
+	  This option enables support for the AMD CS5536
+	  companion chip used with the Geode LX processor family.
+
+	  If unsure, say N.
+
+config PATA_CYPRESS
+	tristate "Cypress CY82C693 PATA support (Very Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the Cypress/Contaq CY82C693
+	  chipset found in some Alpha systems.
+
+	  If unsure, say N.
+
+config PATA_EFAR
+	tristate "EFAR SLC90E66 support"
+	depends on PCI
+	help
+	  This option enables support for the EFAR SLC90E66
+	  IDE controller found on some older machines.
+
+	  If unsure, say N.
+
+config PATA_EP93XX
+	tristate "Cirrus Logic EP93xx PATA support"
+	depends on ARCH_EP93XX
+	help
+	  This option enables support for the PATA controller in
+	  the Cirrus Logic EP9312 and EP9315 ARM CPU.
+
+	  If unsure, say N.
+
+config PATA_FTIDE010
+	tristate "Faraday Technology FTIDE010 PATA support"
+	depends on OF
+	depends on ARM
+	depends on SATA_GEMINI
+	help
+	  This option enables support for the Faraday FTIDE010
+	  PATA controller found in the Cortina Gemini SoCs.
+
+	  If unsure, say N.
+
+config PATA_HPT366
+	tristate "HPT 366/368 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the HPT 366 and 368
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT37X
+	tristate "HPT 370/370A/371/372/374/302 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the majority of the later HPT
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X2N
+	tristate "HPT 371N/372N/302N PATA support"
+	depends on PCI
+	help
+	  This option enables support for the N variant HPT PATA
+	  controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X3
+	tristate "HPT 343/363 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the HPT 343/363
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X3_DMA
+	bool "HPT 343/363 DMA support"
+	depends on PATA_HPT3X3
+	help
+	  This option enables DMA support for the HPT343/363
+	  controllers. Enable with care as there are still some
+	  problems with DMA on this chipset.
+
+config PATA_ICSIDE
+	tristate "Acorn ICS PATA support"
+	depends on ARM && ARCH_ACORN
+	help
+	  On Acorn systems, say Y here if you wish to use the ICS PATA
+	  interface card.  This is not required for ICS partition support.
+	  If you are unsure, say N to this.
+
+config PATA_IMX
+	tristate "PATA support for Freescale iMX"
+	depends on ARCH_MXC
+	help
+	  This option enables support for the PATA host available on Freescale
+	  iMX SoCs.
+
+	  If unsure, say N.
+
+config PATA_IT8213
+	tristate "IT8213 PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the ITE 8213 PATA
+	  controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_IT821X
+	tristate "IT8211/2 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the ITE 8211 and 8212
+	  PATA controllers via the new ATA layer, including RAID
+	  mode.
+
+	  If unsure, say N.
+
+config PATA_JMICRON
+	tristate "JMicron PATA support"
+	depends on PCI
+	help
+	  Enable support for the JMicron IDE controller, via the new
+	  ATA layer.
+
+	  If unsure, say N.
+
+config PATA_MACIO
+	tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+	depends on PPC_PMAC
+	help
+	  Most IDE capable PowerMacs have IDE busses driven by a variant
+	  of this controller which is part of the Apple chipset used on
+	  most PowerMac models. Some models have multiple busses using
+	  different chipsets, though generally, MacIO is one of them.
+
+config PATA_MARVELL
+	tristate "Marvell PATA support via legacy mode"
+	depends on PCI
+	help
+	  This option enables limited support for the Marvell 88SE61xx ATA
+	  controllers. If you wish to use only the SATA ports then select
+	  the AHCI driver alone. If you wish to use the PATA port or
+	  both SATA and PATA include this driver.
+
+	  If unsure, say N.
+
+config PATA_MPC52xx
+	tristate "Freescale MPC52xx SoC internal IDE"
+	depends on PPC_MPC52xx && PPC_BESTCOMM
+	select PPC_BESTCOMM_ATA
+	help
+	  This option enables support for integrated IDE controller
+	  of the Freescale MPC52xx SoC.
+
+	  If unsure, say N.
+
+config PATA_NETCELL
+	tristate "NETCELL Revolution RAID support"
+	depends on PCI
+	help
+	  This option enables support for the Netcell Revolution RAID
+	  PATA controller.
+
+	  If unsure, say N.
+
+config PATA_NINJA32
+	tristate "Ninja32/Delkin Cardbus ATA support"
+	depends on PCI
+	help
+	  This option enables support for the Ninja32, Delkin and
+	  possibly other brands of Cardbus ATA adapter.
+
+	  If unsure, say N.
+
+config PATA_NS87415
+	tristate "Nat Semi NS87415 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the National Semiconductor
+	  NS87415 PCI-IDE controller.
+
+	  If unsure, say N.
+
+config PATA_OLDPIIX
+	tristate "Intel PATA old PIIX support"
+	depends on PCI
+	help
+	  This option enables support for early PIIX PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_OPTIDMA
+	tristate "OPTI FireStar PATA support (Very Experimental)"
+	depends on PCI
+	help
+	  This option enables DMA/PIO support for the later OPTi
+	  controllers found on some old motherboards and in some
+	  laptops.
+
+	  If unsure, say N.
+
+config PATA_PDC2027X
+	tristate "Promise PATA 2027x support"
+	depends on PCI
+	help
+	  This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
+
+	  If unsure, say N.
+
+config PATA_PDC_OLD
+	tristate "Older Promise PATA controller support"
+	depends on PCI
+	help
+	  This option enables support for the Promise 20246, 20262, 20263,
+	  20265 and 20267 adapters.
+
+	  If unsure, say N.
+
+config PATA_RADISYS
+	tristate "RADISYS 82600 PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the RADISYS 82600
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_RDC
+	tristate "RDC PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the later RDC PATA
+	  controllers via the new ATA layer. For the RDC 1010, you need to
+	  enable the IT821X driver instead.
+
+	  If unsure, say N.
+
+config PATA_SC1200
+	tristate "SC1200 PATA support"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	help
+	  This option enables support for the NatSemi/AMD SC1200 SoC
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_SCH
+	tristate "Intel SCH PATA support"
+	depends on PCI
+	help
+	  This option enables support for Intel SCH PATA on the Intel
+	  SCH (US15W, US15L, UL11L) series host controllers.
+
+	  If unsure, say N.
+
+config PATA_SERVERWORKS
+	tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the Serverworks OSB4/CSB5/CSB6 and
+	  HT1000 PATA controllers, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_SIL680
+	tristate "CMD / Silicon Image 680 PATA support"
+	depends on PCI
+	help
+	  This option enables support for CMD / Silicon Image 680 PATA.
+
+	  If unsure, say N.
+
+config PATA_SIS
+	tristate "SiS PATA support"
+	depends on PCI
+	help
+	  This option enables support for SiS PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_TOSHIBA
+	tristate "Toshiba Piccolo support (Experimental)"
+	depends on PCI
+	help
+	  Support for the Toshiba Piccolo controllers. Currently only the
+	  primary channel is supported by this driver.
+
+	  If unsure, say N.
+
+config PATA_TRIFLEX
+	tristate "Compaq Triflex PATA support"
+	depends on PCI
+	help
+	  Enable support for the Compaq 'Triflex' IDE controller as found
+	  on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_VIA
+	tristate "VIA PATA support"
+	depends on PCI
+	help
+	  This option enables support for the VIA PATA interfaces
+	  found on the many VIA chipsets.
+
+	  If unsure, say N.
+
+config PATA_PXA
+	tristate "PXA DMA-capable PATA support"
+	depends on ARCH_PXA
+	help
+	  This option enables support for hard drives attached to the PXA CPU's bus.
+
+	  NOTE: This driver utilizes the PXA DMA controller. If your hardware
+	        is not capable of doing MWDMA, use pata_platform instead.
+
+	  If unsure, say N.
+
+config PATA_WINBOND
+	tristate "Winbond SL82C105 PATA support"
+	depends on PCI
+	help
+	  This option enables support for SL82C105 PATA devices found in the
+	  Netwinder and some other systems.
+
+	  If unsure, say N.
+
+endif # ATA_BMDMA
+
+comment "PIO-only SFF controllers"
+
+config PATA_CMD640_PCI
+	tristate "CMD640 PCI PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the CMD640 PCI IDE
+	  interface chip. Only the primary channel is currently
+	  supported.
+
+	  If unsure, say N.
+
+config PATA_FALCON
+	tristate "Atari Falcon PATA support"
+	depends on M68K && ATARI
+	help
+	  This option enables support for the on-board IDE
+	  interface on the Atari Falcon.
+
+	  If unsure, say N.
+
+config PATA_GAYLE
+	tristate "Amiga Gayle PATA support"
+	depends on M68K && AMIGA
+	help
+	  This option enables support for the on-board IDE
+	  interfaces on some Amiga models (A600, A1200,
+	  A4000 and A4000T) and also for IDE interfaces on
+	  the Zorro expansion bus (M-Tech E-Matrix 530
+	  expansion card).
+
+	  If unsure, say N.
+
+config PATA_ISAPNP
+	tristate "ISA Plug and Play PATA support"
+	depends on ISAPNP
+	help
+	  This option enables support for ISA plug & play ATA
+	  controllers such as those found on old soundcards.
+
+	  If unsure, say N.
+
+config PATA_IXP4XX_CF
+	tristate "IXP4XX Compact Flash support"
+	depends on ARCH_IXP4XX
+	help
+	  This option enables support for a Compact Flash connected on
+	  the ixp4xx expansion bus. This driver was written with
+	  Loft/Avila boards in mind but can work with others.
+
+	  If unsure, say N.
+
+config PATA_MPIIX
+	tristate "Intel PATA MPIIX support"
+	depends on PCI
+	help
+	  This option enables support for the Intel MPIIX PATA controller.
+
+	  If unsure, say N.
+
+config PATA_NS87410
+	tristate "Nat Semi NS87410 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the National Semiconductor
+	  NS87410 PCI-IDE controller.
+
+	  If unsure, say N.
+
+config PATA_OPTI
+	tristate "OPTI621/6215 PATA support (Very Experimental)"
+	depends on PCI
+	help
+	  This option enables full PIO support for the early Opti ATA
+	  controllers found on some old motherboards.
+
+	  If unsure, say N.
+
+config PATA_PALMLD
+	tristate "Palm LifeDrive PATA support"
+	depends on MACH_PALMLD
+	help
+	  This option enables support for Palm LifeDrive's internal ATA
+	  port via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PCMCIA
+	tristate "PCMCIA PATA support"
+	depends on PCMCIA
+	help
+	  This option enables support for PCMCIA ATA interfaces, including
+	  compact flash card adapters via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PLATFORM
+	tristate "Generic platform device PATA support"
+	depends on EXPERT || PPC || HAVE_PATA_PLATFORM
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems.
+
+	  If unsure, say N.
+
+config PATA_OF_PLATFORM
+	tristate "OpenFirmware platform device PATA support"
+	depends on PATA_PLATFORM && OF
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems with OpenFirmware
+	  bindings.
+
+	  If unsure, say N.
+
+config PATA_QDI
+	tristate "QDI VLB PATA support"
+	depends on ISA
+	select PATA_LEGACY
+	help
+	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
+
+config PATA_RB532
+	tristate "RouterBoard 532 PATA CompactFlash support"
+	depends on MIKROTIK_RB532
+	help
+	  This option enables support for the RouterBoard 532
+	  PATA CompactFlash controller.
+
+	  If unsure, say N.
+
+config PATA_RZ1000
+	tristate "PC Tech RZ1000 PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the PC Tech RZ1000/1
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_SAMSUNG_CF
+	tristate "Samsung SoC PATA support"
+	depends on SAMSUNG_DEV_IDE
+	help
+	  This option enables basic support for Samsung's S3C/S5P board
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_WINBOND_VLB
+	tristate "Winbond W83759A VLB PATA support (Experimental)"
+	depends on ISA
+	select PATA_LEGACY
+	help
+	  Support for the Winbond W83759A controller on VESA local bus
+	  systems.
+
+comment "Generic fallback / legacy drivers"
+
+config PATA_ACPI
+	tristate "ACPI firmware driver for PATA"
+	depends on ATA_ACPI && ATA_BMDMA
+	help
+	  This option enables an ACPI method driver which drives
+	  motherboard PATA controller interfaces through the ACPI
+	  firmware in the BIOS. This driver can sometimes handle
+	  otherwise unsupported hardware.
+
+config ATA_GENERIC
+	tristate "Generic ATA support"
+	depends on PCI && ATA_BMDMA
+	help
+	  This option enables support for generic BIOS configured
+	  ATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_LEGACY
+	tristate "Legacy ISA PATA support (Experimental)"
+	depends on (ISA || PCI)
+	help
+	  This option enables support for ISA/VLB/PCI bus legacy PATA
+	  ports and allows them to be accessed via the new ATA layer.
+
+	  If unsure, say N.
+
+endif # ATA_SFF
+endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 0000000..d21cdd8
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_ATA)		+= libata.o
+
+# non-SFF interface
+obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
+obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)	+= ahci_seattle.o libahci.o libahci_platform.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
+obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
+obj-$(CONFIG_SATA_GEMINI)	+= sata_gemini.o
+obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
+obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
+obj-$(CONFIG_SATA_DWC)		+= sata_dwc_460ex.o
+obj-$(CONFIG_SATA_HIGHBANK)	+= sata_highbank.o libahci.o
+obj-$(CONFIG_AHCI_BRCM)		+= ahci_brcm.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_CEVA)		+= ahci_ceva.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_DA850)	+= ahci_da850.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_DM816)	+= ahci_dm816.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_IMX)		+= ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MTK)		+= ahci_mtk.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MVEBU)	+= ahci_mvebu.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_OCTEON)	+= ahci_octeon.o
+obj-$(CONFIG_AHCI_SUNXI)	+= ahci_sunxi.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_ST)		+= ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_TEGRA)	+= ahci_tegra.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_XGENE)	+= ahci_xgene.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_QORIQ)	+= ahci_qoriq.o libahci.o libahci_platform.o
+
+# SFF w/ custom DMA
+obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
+obj-$(CONFIG_PATA_ARASAN_CF)	+= pata_arasan_cf.o
+obj-$(CONFIG_PATA_OCTEON_CF)	+= pata_octeon_cf.o
+obj-$(CONFIG_SATA_QSTOR)	+= sata_qstor.o
+obj-$(CONFIG_SATA_SX4)		+= sata_sx4.o
+
+# SFF SATA w/ BMDMA
+obj-$(CONFIG_ATA_PIIX)		+= ata_piix.o
+obj-$(CONFIG_SATA_MV)		+= sata_mv.o
+obj-$(CONFIG_SATA_NV)		+= sata_nv.o
+obj-$(CONFIG_SATA_PROMISE)	+= sata_promise.o
+obj-$(CONFIG_SATA_RCAR)		+= sata_rcar.o
+obj-$(CONFIG_SATA_SIL)		+= sata_sil.o
+obj-$(CONFIG_SATA_SIS)		+= sata_sis.o
+obj-$(CONFIG_SATA_SVW)		+= sata_svw.o
+obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
+obj-$(CONFIG_SATA_VIA)		+= sata_via.o
+obj-$(CONFIG_SATA_VITESSE)	+= sata_vsc.o
+
+# SFF PATA w/ BMDMA
+obj-$(CONFIG_PATA_ALI)		+= pata_ali.o
+obj-$(CONFIG_PATA_AMD)		+= pata_amd.o
+obj-$(CONFIG_PATA_ARTOP)	+= pata_artop.o
+obj-$(CONFIG_PATA_ATIIXP)	+= pata_atiixp.o
+obj-$(CONFIG_PATA_ATP867X)	+= pata_atp867x.o
+obj-$(CONFIG_PATA_BK3710)	+= pata_bk3710.o
+obj-$(CONFIG_PATA_CMD64X)	+= pata_cmd64x.o
+obj-$(CONFIG_PATA_CS5520)	+= pata_cs5520.o
+obj-$(CONFIG_PATA_CS5530)	+= pata_cs5530.o
+obj-$(CONFIG_PATA_CS5535)	+= pata_cs5535.o
+obj-$(CONFIG_PATA_CS5536)	+= pata_cs5536.o
+obj-$(CONFIG_PATA_CYPRESS)	+= pata_cypress.o
+obj-$(CONFIG_PATA_EFAR)		+= pata_efar.o
+obj-$(CONFIG_PATA_EP93XX)	+= pata_ep93xx.o
+obj-$(CONFIG_PATA_FTIDE010)	+= pata_ftide010.o
+obj-$(CONFIG_PATA_HPT366)	+= pata_hpt366.o
+obj-$(CONFIG_PATA_HPT37X)	+= pata_hpt37x.o
+obj-$(CONFIG_PATA_HPT3X2N)	+= pata_hpt3x2n.o
+obj-$(CONFIG_PATA_HPT3X3)	+= pata_hpt3x3.o
+obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+obj-$(CONFIG_PATA_IMX)		+= pata_imx.o
+obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
+obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
+obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
+obj-$(CONFIG_PATA_MACIO)	+= pata_macio.o
+obj-$(CONFIG_PATA_MARVELL)	+= pata_marvell.o
+obj-$(CONFIG_PATA_MPC52xx)	+= pata_mpc52xx.o
+obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
+obj-$(CONFIG_PATA_NINJA32)	+= pata_ninja32.o
+obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
+obj-$(CONFIG_PATA_OLDPIIX)	+= pata_oldpiix.o
+obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
+obj-$(CONFIG_PATA_PDC2027X)	+= pata_pdc2027x.o
+obj-$(CONFIG_PATA_PDC_OLD)	+= pata_pdc202xx_old.o
+obj-$(CONFIG_PATA_RADISYS)	+= pata_radisys.o
+obj-$(CONFIG_PATA_RDC)		+= pata_rdc.o
+obj-$(CONFIG_PATA_SC1200)	+= pata_sc1200.o
+obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
+obj-$(CONFIG_PATA_SERVERWORKS)	+= pata_serverworks.o
+obj-$(CONFIG_PATA_SIL680)	+= pata_sil680.o
+obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
+obj-$(CONFIG_PATA_TOSHIBA)	+= pata_piccolo.o
+obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
+obj-$(CONFIG_PATA_VIA)		+= pata_via.o
+obj-$(CONFIG_PATA_WINBOND)	+= pata_sl82c105.o
+
+# SFF PIO only
+obj-$(CONFIG_PATA_CMD640_PCI)	+= pata_cmd640.o
+obj-$(CONFIG_PATA_FALCON)	+= pata_falcon.o
+obj-$(CONFIG_PATA_GAYLE)	+= pata_gayle.o
+obj-$(CONFIG_PATA_ISAPNP)	+= pata_isapnp.o
+obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
+obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
+obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
+obj-$(CONFIG_PATA_PALMLD)	+= pata_palmld.o
+obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
+obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
+obj-$(CONFIG_PATA_RB532)	+= pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
+obj-$(CONFIG_PATA_SAMSUNG_CF)	+= pata_samsung_cf.o
+
+obj-$(CONFIG_PATA_PXA)		+= pata_pxa.o
+
+# Should be last but two libata driver
+obj-$(CONFIG_PATA_ACPI)		+= pata_acpi.o
+# Should be last but one libata driver
+obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
+# Should be last libata driver
+obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
+
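+# Editorial note: for built-in drivers, link order determines probe order,
+# which is why the fallbacks above are listed after every specific driver --
+# a dedicated driver gets first claim on the hardware.
+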
+libata-y	:= libata-core.o libata-scsi.o libata-eh.o \
+	libata-transport.o libata-trace.o
+libata-$(CONFIG_ATA_SFF)	+= libata-sff.o
+libata-$(CONFIG_SATA_PMP)	+= libata-pmp.o
+libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
+libata-$(CONFIG_SATA_ZPODD)	+= libata-zpodd.o
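+
+# Editorial note: the libata-$(CONFIG_FOO) lines above append an object to
+# the libata composite only when the corresponding bool option is set; the
+# selected objects are all linked into the single libata module declared at
+# the top of this file.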
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
new file mode 100644
index 0000000..583e366
--- /dev/null
+++ b/drivers/ata/acard-ahci.c
@@ -0,0 +1,512 @@
+/*
+ *  acard-ahci.c - ACard AHCI SATA support
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2010 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/driver-api/libata.rst
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME	"acard-ahci"
+#define DRV_VERSION	"1.0"
+
+/*
+ * Received FIS structure limited to 80h (128 bytes).
+ */
+
+#define ACARD_AHCI_RX_FIS_SZ 128
+
+enum {
+	AHCI_PCI_BAR		= 5,
+};
+
+enum board_ids {
+	board_acard_ahci,
+};
+
+struct acard_sg {
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			reserved;
+	__le32			size;	 /* bit 31 (EOT) max==0x10000 (64k) */
+};
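+
+/*
+ * Editorial note: this differs from the standard AHCI PRD entry, where
+ * bit 31 of the final dword means "interrupt on completion" and the byte
+ * count occupies bits 21:0; the ACard variant repurposes bit 31 as an
+ * end-of-table (EOT) marker and carries a plain byte count of up to 64k.
+ */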
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int acard_ahci_port_start(struct ata_port *ap);
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef CONFIG_PM_SLEEP
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static struct scsi_host_template acard_ahci_sht = {
+	AHCI_SHT("acard-ahci"),
+};
+
+static struct ata_port_operations acard_ops = {
+	.inherits		= &ahci_ops,
+	.qc_prep		= acard_ahci_qc_prep,
+	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
+	.port_start             = acard_ahci_port_start,
+};
+
+#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
+
+static const struct ata_port_info acard_ahci_port_info[] = {
+	[board_acard_ahci] =
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &acard_ops,
+	},
+};
+
+static const struct pci_device_id acard_ahci_pci_tbl[] = {
+	/* ACard */
+	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
+
+	{ }    /* terminate list */
+};
+
+static struct pci_driver acard_ahci_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= acard_ahci_pci_tbl,
+	.probe			= acard_ahci_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= acard_ahci_pci_device_suspend,
+	.resume			= acard_ahci_pci_device_resume,
+#endif
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+
+	if (mesg.event & PM_EVENT_SUSPEND &&
+	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(&pdev->dev,
+			"BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	if (mesg.event & PM_EVENT_SLEEP) {
+		/* AHCI spec rev1.1 section 8.3.3:
+		 * Software must disable interrupts prior to requesting a
+		 * transition of the HBA to D3 state.
+		 */
+		ctl = readl(mmio + HOST_CTL);
+		ctl &= ~HOST_IRQ_EN;
+		writel(ctl, mmio + HOST_CTL);
+		readl(mmio + HOST_CTL); /* flush */
+	}
+
+	return ata_pci_device_suspend(pdev, mesg);
+}
+
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+	int rc;
+
+	if (using_dac &&
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void acard_ahci_pci_print_info(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u16 cc;
+	const char *scc_s;
+
+	pci_read_config_word(pdev, 0x0a, &cc);
+	if (cc == PCI_CLASS_STORAGE_IDE)
+		scc_s = "IDE";
+	else if (cc == PCI_CLASS_STORAGE_SATA)
+		scc_s = "SATA";
+	else if (cc == PCI_CLASS_STORAGE_RAID)
+		scc_s = "RAID";
+	else
+		scc_s = "unknown";
+
+	ahci_print_info(host, scc_s);
+}
+
+static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+	struct scatterlist *sg;
+	struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+	unsigned int si, last_si = 0;
+
+	VPRINTK("ENTER\n");
+
+	/*
+	 * Next, the S/G list.
+	 */
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		/*
+		 * ACard note:
+		 * We must set an end-of-table (EOT) bit,
+		 * and the segment cannot exceed 64k (0x10000)
+		 */
+		acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
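+		/* the split shift keeps this well-defined when dma_addr_t
+		 * is only 32 bits wide (a plain ">> 32" would not be) */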
+		acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		acard_sg[si].size = cpu_to_le32(sg_len);
+		last_si = si;
+	}
+
+	acard_sg[last_si].size |= cpu_to_le32(1 << 31);	/* set EOT */
+
+	return si;
+}
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = ata_is_atapi(qc->tf.protocol);
+	void *cmd_tbl;
+	u32 opts;
+	const u32 cmd_fis_len = 5; /* five dwords */
+	unsigned int n_elem;
+
+	/*
+	 * Fill in command table information.  First, the header,
+	 * a SATA Register - Host to Device command FIS.
+	 */
+	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+	if (is_atapi) {
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+	}
+
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+
+	/*
+	 * Fill in command slot information.
+	 *
+	 * ACard note: prd table length not filled in
+	 */
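+	/* Editorial note: in the options word, bits 4:0 carry the command
+	 * FIS length in dwords and bits 15:12 the PMP port number; the
+	 * write/ATAPI/prefetch flags are OR'd in below. */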
+	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+}
+
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *rx_fis = pp->rx_fis;
+
+	if (pp->fbs_enabled)
+		rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
+
+	/*
+	 * After a successful execution of an ATA PIO data-in command,
+	 * the device doesn't send D2H Reg FIS to update the TF and
+	 * the host should take TF and E_Status from the preceding PIO
+	 * Setup FIS.
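+	 * (E_Status is byte 15 of the PIO Setup FIS, hence the [15] below.)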
+	 */
+	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+	    !(qc->flags & ATA_QCFLAG_FAILED)) {
+		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
+		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+	} else
+		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+	return true;
+}
+
+static int acard_ahci_port_start(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct device *dev = ap->host->dev;
+	struct ahci_port_priv *pp;
+	void *mem;
+	dma_addr_t mem_dma;
+	size_t dma_sz, rx_fis_sz;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	/* check FBS capability */
+	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = ahci_port_base(ap);
+		u32 cmd = readl(port_mmio + PORT_CMD);
+		if (cmd & PORT_CMD_FBSCP)
+			pp->fbs_supported = true;
+		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+				 ap->port_no);
+			pp->fbs_supported = true;
+		} else
+			dev_warn(dev, "port %d is not capable of FBS\n",
+				 ap->port_no);
+	}
+
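+	/* with FBS each of the 16 possible PMP targets gets its own
+	 * received-FIS area, hence the larger sizing below */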
+	if (pp->fbs_supported) {
+		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
+	} else {
+		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
+	}
+
+	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, dma_sz);
+
+	/*
+	 * First item in chunk of DMA memory: 32-slot command table,
+	 * 32 bytes each in size
+	 */
+	pp->cmd_slot = mem;
+	pp->cmd_slot_dma = mem_dma;
+
+	mem += AHCI_CMD_SLOT_SZ;
+	mem_dma += AHCI_CMD_SLOT_SZ;
+
+	/*
+	 * Second item: Received-FIS area
+	 */
+	pp->rx_fis = mem;
+	pp->rx_fis_dma = mem_dma;
+
+	mem += rx_fis_sz;
+	mem_dma += rx_fis_sz;
+
+	/*
+	 * Third item: data area for storing a single command
+	 * and its scatter-gather table
+	 */
+	pp->cmd_tbl = mem;
+	pp->cmd_tbl_dma = mem_dma;
+
+	/*
+	 * Save off initial list of interrupts to be enabled.
+	 * This could be changed later
+	 */
+	pp->intr_mask = DEF_PORT_IRQ;
+
+	ap->private_data = pp;
+
+	/* engage engines, captain */
+	return ahci_port_resume(ap);
+}
+
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	unsigned int board_id = ent->driver_data;
+	struct ata_port_info pi = acard_ahci_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ata_host *host;
+	int n_ports, i, rc;
+
+	VPRINTK("ENTER\n");
+
+	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* AHCI controllers often implement SFF compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	hpriv->irq = pdev->irq;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
+		pci_enable_msi(pdev);
+
+	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
+	/* save initial config */
+	ahci_save_initial_config(&pdev->dev, hpriv);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicate the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+				   0x100 + ap->port_no * 0x80, "port");
+
+		/* set initial link pm policy */
+		/*
+		ap->pm_policy = NOT_AVAILABLE;
+		*/
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	/* initialize adapter */
+	rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+	if (rc)
+		return rc;
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	ahci_init_controller(host);
+	acard_ahci_pci_print_info(host);
+
+	pci_set_master(pdev);
+	return ahci_host_activate(host, &acard_ahci_sht);
+}
+
+module_pci_driver(acard_ahci_pci_driver);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 0000000..021ce46
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,1888 @@
+/*
+ *  ahci.c - AHCI SATA support
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/driver-api/libata.rst
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <linux/msi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include <linux/ahci-remap.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "ahci.h"
+
+#define DRV_NAME	"ahci"
+#define DRV_VERSION	"3.0"
+
+enum {
+	AHCI_PCI_BAR_STA2X11	= 0,
+	AHCI_PCI_BAR_CAVIUM	= 0,
+	AHCI_PCI_BAR_ENMOTUS	= 2,
+	AHCI_PCI_BAR_CAVIUM_GEN5	= 4,
+	AHCI_PCI_BAR_STANDARD	= 5,
+};
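+
+/* Editorial note: per the AHCI spec the HBA's memory-mapped registers sit
+ * behind PCI BAR 5 (the "ABAR"); the other entries above cover vendors
+ * that deviate from that. */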
+
+enum board_ids {
+	/* board IDs by feature in alphabetical order */
+	board_ahci,
+	board_ahci_ign_iferr,
+	board_ahci_mobile,
+	board_ahci_nomsi,
+	board_ahci_noncq,
+	board_ahci_nosntf,
+	board_ahci_yes_fbs,
+
+	/* board IDs for specific chipsets in alphabetical order */
+	board_ahci_avn,
+	board_ahci_mcp65,
+	board_ahci_mcp77,
+	board_ahci_mcp89,
+	board_ahci_mv,
+	board_ahci_sb600,
+	board_ahci_sb700,	/* for SB700 and SB800 */
+	board_ahci_vt8251,
+
+	/* aliases */
+	board_ahci_mcp_linux	= board_ahci_mcp65,
+	board_ahci_mcp67	= board_ahci_mcp65,
+	board_ahci_mcp73	= board_ahci_mcp65,
+	board_ahci_mcp79	= board_ahci_mcp77,
+};
+
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ahci_remove_one(struct pci_dev *dev);
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+				 unsigned long deadline);
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline);
+static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
+static bool is_mcp89_apple(struct pci_dev *pdev);
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+#ifdef CONFIG_PM
+static int ahci_pci_device_runtime_suspend(struct device *dev);
+static int ahci_pci_device_runtime_resume(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev);
+static int ahci_pci_device_resume(struct device *dev);
+#endif
+#endif /* CONFIG_PM */
+
+static struct scsi_host_template ahci_sht = {
+	AHCI_SHT("ahci"),
+};
+
+static struct ata_port_operations ahci_vt8251_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_vt8251_hardreset,
+};
+
+static struct ata_port_operations ahci_p5wdh_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_p5wdh_hardreset,
+};
+
+static struct ata_port_operations ahci_avn_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_avn_hardreset,
+};
+
+static const struct ata_port_info ahci_port_info[] = {
+	/* by features */
+	[board_ahci] = {
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_ign_iferr] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mobile] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_IS_MOBILE),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_nomsi] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_MSI),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_noncq] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_nosntf] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_SNTF),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_yes_fbs] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_YES_FBS),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	/* by chipsets */
+	[board_ahci_avn] = {
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_avn_ops,
+	},
+	[board_ahci_mcp65] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+				 AHCI_HFLAG_YES_NCQ),
+		.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mcp77] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mcp89] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_FPDMA_AA),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_mv] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	[board_ahci_sb600] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
+				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+				 AHCI_HFLAG_32BIT_ONLY),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_pmp_retry_srst_ops,
+	},
+	[board_ahci_sb700] = {	/* for SB700 and SB800 */
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_pmp_retry_srst_ops,
+	},
+	[board_ahci_vt8251] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_vt8251_ops,
+	},
+};
+
+static const struct pci_device_id ahci_pci_tbl[] = {
+	/* Intel */
+	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
+	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
+	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
+	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
+	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
+	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
+	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
+	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
+	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
+	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19be), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19bf), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19ce), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x19cf), board_ahci }, /* DNV AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+	{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
+	{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
+	{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+	{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
+	{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
+	{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
+	{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
+	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
+	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
+	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
+	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
+	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
+	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI */
+	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID */
+	{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+	{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
+	{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
+	{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
+	{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
+
+	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
+	/* JMicron 362B and 362C have an AHCI function with IDE class code */
+	{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
+	{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
+	/* May need to update quirk_jmicron_async_suspend() for additions */
+
+	/* ATI */
+	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
+	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+
+	/* AMD */
+	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
+	{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+	/* AMD uses the RAID class only for AHCI controllers */
+	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
+	/* VIA */
+	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
+	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
+
+	/* NVIDIA */
+	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 },	/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 },	/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 },	/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 },	/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 },	/* MCP89 */
+	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 },	/* MCP89 */
+
+	/* SiS */
+	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
+	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
+	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
+
+	/* ST Microelectronics */
+	{ PCI_VDEVICE(STMICRO, 0xCC06), board_ahci },		/* ST ConneXt */
+
+	/* Marvell */
+	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
+	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
+	  .class = PCI_CLASS_STORAGE_SATA_AHCI,
+	  .class_mask = 0xffffff,
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9125 */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+			 PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9170 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9182 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte boards */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a2), 	/* 88se91a2 */
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
+	  .driver_data = board_ahci_yes_fbs },
+
+	/* Promise */
+	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
+
+	/* Asmedia */
+	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },	/* ASM1060 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },	/* ASM1061 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci },   /* ASM1061R */
+	{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci },   /* ASM1062R */
+
+	/*
+	 * Samsung SSDs found on some MacBooks.  NCQ times out if MSI is
+	 * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
+	 */
+	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+	{ PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+
+	/* Enmotus */
+	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
+	/* Generic, matched by PCI class code for AHCI.  Must stay the
+	 * last real entry: the table is scanned in order, so the more
+	 * specific entries above take precedence over this catch-all.
+	 */
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+
+	{ }	/* terminate list */
+};
+
+static const struct dev_pm_ops ahci_pci_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume)
+	SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend,
+			   ahci_pci_device_runtime_resume, NULL)
+};
+
+static struct pci_driver ahci_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= ahci_pci_tbl,
+	.probe			= ahci_init_one,
+	.remove			= ahci_remove_one,
+	.driver = {
+		.pm		= &ahci_pci_pm_ops,
+	},
+};
+
+#if IS_ENABLED(CONFIG_PATA_MARVELL)
+static int marvell_enable;
+#else
+static int marvell_enable = 1;
+#endif
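+/* default to off when the pata_marvell driver is built, so that driver
+ * can claim the device (and its PATA port) instead; see the check in
+ * ahci_init_one()
+ */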
+module_param(marvell_enable, int, 0644);
+MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+
+static int mobile_lpm_policy = -1;
+module_param(mobile_lpm_policy, int, 0644);
+MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+					 struct ahci_host_priv *hpriv)
+{
+	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+		dev_info(&pdev->dev, "JMB361 has only one port\n");
+		hpriv->force_port_map = 1;
+	}
+
+	/*
+	 * Temporary Marvell 6145 hack: PATA port presence
+	 * is asserted through the standard AHCI port
+	 * presence register, as bit 4 (counting from 0)
+	 */
+	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+		if (pdev->device == 0x6121)
+			hpriv->mask_port_map = 0x3;
+		else
+			hpriv->mask_port_map = 0xf;
+		dev_info(&pdev->dev,
+			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
+	}
+
+	ahci_save_initial_config(&pdev->dev, hpriv);
+}
+
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int rc;
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+		struct ahci_host_priv *hpriv = host->private_data;
+		u16 tmp16;
+
+		/* Configure PCS (Port Control and Status, config offset
+		 * 0x92): make sure the enable bits are set for all
+		 * implemented ports.
+		 */
+		pci_read_config_word(pdev, 0x92, &tmp16);
+		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+			tmp16 |= hpriv->port_map;
+			pci_write_config_word(pdev, 0x92, tmp16);
+		}
+	}
+
+	return 0;
+}
+
+static void ahci_pci_init_controller(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *port_mmio;
+	u32 tmp;
+	int mv;
+
+	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+		if (pdev->device == 0x6121)
+			mv = 2;
+		else
+			mv = 4;
+		port_mmio = __ahci_port_base(host, mv);
+
+		writel(0, port_mmio + PORT_IRQ_MASK);
+
+		/* clear port IRQ */
+		tmp = readl(port_mmio + PORT_IRQ_STAT);
+		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+		if (tmp)
+			writel(tmp, port_mmio + PORT_IRQ_STAT);
+	}
+
+	ahci_init_controller(host);
+}
+
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+				 unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	bool online;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	hpriv->stop_engine(ap);
+
+	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+				 deadline, &online, NULL);
+
+	hpriv->start_engine(ap);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+	/* vt8251 doesn't clear BSY on signature FIS reception, so
+	 * request a follow-up softreset: returning -EAGAIN while the
+	 * link is online tells libata EH to issue one.
+	 */
+	return online ? -EAGAIN : rc;
+}
+
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	bool online;
+	int rc;
+
+	hpriv->stop_engine(ap);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = ATA_BUSY;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+				 deadline, &online, NULL);
+
+	hpriv->start_engine(ap);
+
+	/* The pseudo configuration device on SIMG4726 attached to
+	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
+	 * hardreset if no device is attached to the first downstream
+	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
+	 * work around this, wait for !BSY only briefly.  If BSY isn't
+	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
+	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
+	 *
+	 * Wait for two seconds.  Devices attached to downstream port
+	 * which can't process the following IDENTIFY after this will
+	 * have to be reset again.  For most cases, this should
+	 * suffice while making probing snappish enough.
+	 */
+	if (online) {
+		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
+					  ahci_check_ready);
+		if (rc)
+			ahci_kick_engine(ap);
+	}
+	return rc;
+}
+
+/*
+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
+ *
+ * It has been observed with some SSDs that the timing of events in the
+ * link synchronization phase can leave the port in a state that cannot
+ * be recovered by a SATA hard reset alone.  The failing signature is
+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
+ * communication not established").  It was found that unloading and
+ * reloading the driver when this problem occurs allows the drive
+ * connection to be recovered (DET advanced to 0x3).  The critical
+ * component of reloading the driver is that the port state machines are
+ * reset by bouncing "port enable" in the AHCI PCS configuration
+ * register.  So, reproduce that effect by bouncing a port whenever we
+ * see DET==1 after a reset.
+ */
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	unsigned long tmo = deadline - jiffies;
+	struct ata_taskfile tf;
+	bool online;
+	int rc, i;
+
+	DPRINTK("ENTER\n");
+
+	hpriv->stop_engine(ap);
+
+	for (i = 0; i < 2; i++) {
+		u16 val;
+		u32 sstatus;
+		int port = ap->port_no;
+		struct ata_host *host = ap->host;
+		struct pci_dev *pdev = to_pci_dev(host->dev);
+
+		/* clear D2H reception area to properly wait for D2H FIS */
+		ata_tf_init(link->device, &tf);
+		tf.command = ATA_BUSY;
+		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+		rc = sata_link_hardreset(link, timing, deadline, &online,
+				ahci_check_ready);
+
+		if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
+				(sstatus & 0xf) != 1)
+			break;
+
+		ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
+				port);
+
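+		/*
+		 * Bounce the port: clear its enable bit in the PCS
+		 * config register, wait a second, then set it again so
+		 * the port state machines are reset as described above.
+		 */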
+		pci_read_config_word(pdev, 0x92, &val);
+		val &= ~(1 << port);
+		pci_write_config_word(pdev, 0x92, val);
+		ata_msleep(ap, 1000);
+		val |= 1 << port;
+		pci_write_config_word(pdev, 0x92, val);
+		deadline += tmo;
+	}
+
+	hpriv->start_engine(ap);
+
+	if (online)
+		*class = ahci_dev_classify(ap);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+#ifdef CONFIG_PM
+static void ahci_pci_disable_interrupts(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+
+	/* AHCI spec rev1.1 section 8.3.3:
+	 * Software must disable interrupts prior to requesting a
+	 * transition of the HBA to D3 state.
+	 */
+	ctl = readl(mmio + HOST_CTL);
+	ctl &= ~HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+}
+
+static int ahci_pci_device_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
+
+	ahci_pci_disable_interrupts(host);
+	return 0;
+}
+
+static int ahci_pci_device_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ahci_pci_reset_controller(host);
+	if (rc)
+		return rc;
+	ahci_pci_init_controller(host);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(&pdev->dev,
+			"BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	ahci_pci_disable_interrupts(host);
+	return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int ahci_pci_device_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	/* Apple BIOS helpfully mangles the registers on resume */
+	if (is_mcp89_apple(pdev))
+		ahci_mcp89_apple_enable(pdev);
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_pci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_pci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+#endif /* CONFIG_PM */
+
+static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+	int rc;
+
+	/*
+	 * If the device fixup already set the dma_mask to some non-standard
+	 * value, don't extend it here. This happens on STA2X11, for example.
+	 */
+	if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+		return 0;
+
+	if (using_dac &&
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void ahci_pci_print_info(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u16 cc;
+	const char *scc_s;
+
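+	/* PCI class word (sub-class + base class) at config offset 0x0a */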
+	pci_read_config_word(pdev, 0x0a, &cc);
+	if (cc == PCI_CLASS_STORAGE_IDE)
+		scc_s = "IDE";
+	else if (cc == PCI_CLASS_STORAGE_SATA)
+		scc_s = "SATA";
+	else if (cc == PCI_CLASS_STORAGE_RAID)
+		scc_s = "RAID";
+	else
+		scc_s = "unknown";
+
+	ahci_print_info(host, scc_s);
+}
+
+/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
+ * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
+ * support PMP and the 4726 either directly exports the device
+ * attached to the first downstream port or acts as a hardware storage
+ * controller and emulate a single ATA device (can be RAID 0/1 or some
+ * other configuration).
+ *
+ * When there's no device attached to the first downstream port of the
+ * 4726, "Config Disk" appears, which is a pseudo ATA device to
+ * configure the 4726.  However, ATA emulation of the device is very
+ * lame.  It doesn't send signature D2H Reg FIS after the initial
+ * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
+ *
+ * The following function works around the problem by always using
+ * hardreset on the port and not depending on receiving signature FIS
+ * afterward.  If signature FIS isn't received soon, ATA class is
+ * assumed without follow-up softreset.
+ */
+static void ahci_p5wdh_workaround(struct ata_host *host)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "P5W DH Deluxe",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR,
+					  "ASUSTEK COMPUTER INC"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+			},
+		},
+		{ }
+	};
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+
+	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
+	    dmi_check_system(sysids)) {
+		struct ata_port *ap = host->ports[1];
+
+		dev_info(&pdev->dev,
+			 "enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n");
+
+		ap->ops = &ahci_p5wdh_ops;
+		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
+	}
+}
+
+/*
+ * Macbook7,1 firmware forcibly disables MCP89 AHCI and changes PCI ID when
+ * booting in BIOS compatibility mode.  We restore the registers but not ID.
+ */
+static void ahci_mcp89_apple_enable(struct pci_dev *pdev)
+{
+	u32 val;
+
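+	/*
+	 * The config writes below poke undocumented MCP89 registers:
+	 * the bits toggled in 0xf8 and 0x54c appear to act as a lock
+	 * around the change, and 0x4a4 is loaded with an AHCI class
+	 * code style value (0x010601xx).  The sequence restores what
+	 * the Apple BIOS disabled.
+	 */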
+	printk(KERN_INFO "ahci: enabling MCP89 AHCI mode\n");
+
+	pci_read_config_dword(pdev, 0xf8, &val);
+	val |= 1 << 0x1b;
+	/* the following changes the device ID, but appears not to affect function */
+	/* val = (val & ~0xf0000000) | 0x80000000; */
+	pci_write_config_dword(pdev, 0xf8, val);
+
+	pci_read_config_dword(pdev, 0x54c, &val);
+	val |= 1 << 0xc;
+	pci_write_config_dword(pdev, 0x54c, val);
+
+	pci_read_config_dword(pdev, 0x4a4, &val);
+	val &= 0xff;
+	val |= 0x01060100;
+	pci_write_config_dword(pdev, 0x4a4, val);
+
+	pci_read_config_dword(pdev, 0x54c, &val);
+	val &= ~(1 << 0xc);
+	pci_write_config_dword(pdev, 0x54c, val);
+
+	pci_read_config_dword(pdev, 0xf8, &val);
+	val &= ~(1 << 0x1b);
+	pci_write_config_dword(pdev, 0xf8, val);
+}
+
+static bool is_mcp89_apple(struct pci_dev *pdev)
+{
+	return pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
+		pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+		pdev->subsystem_device == 0xcb89;
+}
+
+/* only some SB600 ahci controllers can do 64bit DMA */
+static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * The oldest version known to be broken is 0901; the
+		 * oldest known to work is 1501, released on 2007-10-26.
+		 * Enable 64bit DMA on 1501 and anything newer.
+		 *
+		 * Please read bko#9412 for more info.
+		 */
+		{
+			.ident = "ASUS M2A-VM",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "ASUSTeK Computer INC."),
+				DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
+			},
+			.driver_data = "20071026",	/* yyyymmdd */
+		},
+		/*
+		 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
+		 * support 64bit DMA.
+		 *
+		 * BIOS versions earlier than 1.5 had the Manufacturer DMI
+		 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
+		 * This spelling mistake was fixed in BIOS version 1.5, so
+		 * 1.5 and later have the Manufacturer as
+		 * "MICRO-STAR INTERNATIONAL CO.,LTD".
+		 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
+		 *
+		 * BIOS versions earlier than 1.9 had a Board Product Name
+		 * DMI field of "MS-7376". This was changed to be
+		 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
+		 * match on DMI_BOARD_NAME of "MS-7376".
+		 */
+		{
+			.ident = "MSI K9A2 Platinum",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "MICRO-STAR INTER"),
+				DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
+			},
+		},
+		/*
+		 * All BIOS versions for the MSI K9AGM2 (MS-7327) support
+		 * 64bit DMA.
+		 *
+		 * This board also had the typo mentioned above in the
+		 * Manufacturer DMI field (fixed in BIOS version 1.5), so
+		 * match on DMI_BOARD_VENDOR of "MICRO-STAR INTER" again.
+		 */
+		{
+			.ident = "MSI K9AGM2",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "MICRO-STAR INTER"),
+				DMI_MATCH(DMI_BOARD_NAME, "MS-7327"),
+			},
+		},
+		/*
+		 * All BIOS versions for the Asus M3A support 64bit DMA.
+		 * (all release versions from 0301 to 1206 were tested)
+		 */
+		{
+			.ident = "ASUS M3A",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "ASUSTeK Computer INC."),
+				DMI_MATCH(DMI_BOARD_NAME, "M3A"),
+			},
+		},
+		{ }
+	};
+	const struct dmi_system_id *match;
+	int year, month, date;
+	char buf[9];
+
+	match = dmi_first_match(sysids);
+	if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
+	    !match)
+		return false;
+
+	if (!match->driver_data)
+		goto enable_64bit;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
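+	/* fixed-width "yyyymmdd" strings make strcmp() a valid date compare */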
+	if (strcmp(buf, match->driver_data) >= 0)
+		goto enable_64bit;
+	else {
+		dev_warn(&pdev->dev,
+			 "%s: BIOS too old, forcing 32bit DMA, update BIOS\n",
+			 match->ident);
+		return false;
+	}
+
+enable_64bit:
+	dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident);
+	return true;
+}
+
+static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq nx6310",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+		{
+			.ident = "HP Compaq 6720s",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
+static bool ahci_broken_suspend(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
+		 * to the hard disk doesn't come online after
+		 * resuming from STR.  Warn and fail suspend.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
+		 *
+		 * Use dates instead of versions to match as HP is
+		 * apparently recycling both product and version
+		 * strings.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
+		 */
+		{
+			.ident = "dv4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv4 Notebook PC"),
+			},
+			.driver_data = "20090105",	/* F.30 */
+		},
+		{
+			.ident = "dv5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv5 Notebook PC"),
+			},
+			.driver_data = "20090506",	/* F.16 */
+		},
+		{
+			.ident = "dv6",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv6 Notebook PC"),
+			},
+			.driver_data = "20090423",	/* F.21 */
+		},
+		{
+			.ident = "HDX18",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP HDX18 Notebook PC"),
+			},
+			.driver_data = "20090430",	/* F.23 */
+		},
+		/*
+		 * Acer eMachines G725 has the same problem.  BIOS
+		 * V1.03 is known to be broken.  V3.04 is known to
+		 * work.  In between are V1.06, V2.06 and V3.03, which
+		 * we don't know much about.  For now,
+		 * blacklist anything older than V3.04.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
+		 */
+		{
+			.ident = "G725",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+			},
+			.driver_data = "20091216",	/* V3.04 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+			},
+			.driver_data = "20180406", /* 1.31 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+			},
+			.driver_data = "20180420", /* 1.28 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+			},
+			.driver_data = "20180315", /* 1.33 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+			},
+			/*
+			 * Note date based on release notes, 2.35 has been
+			 * reported to be good, but I've been unable to get
+			 * a hold of the reporter to get the DMI BIOS date.
+			 * TODO: fix this.
+			 */
+			.driver_data = "20180310", /* 2.35 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi)
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
+static bool ahci_broken_online(struct pci_dev *pdev)
+{
+#define ENCODE_BUSDEVFN(bus, slot, func)			\
+	(void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
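+	/*
+	 * e.g. ENCODE_BUSDEVFN(0x0a, 0x00, 0) encodes to 0x0a00: bus in
+	 * the high byte, devfn in the low byte, matching the decode at
+	 * the end of this function.
+	 */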
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * There are several Gigabyte boards which use
+		 * SIMG5723s configured as hardware RAID.  Certain
+		 * 5723 firmware revisions shipped there keep the link
+		 * online but fail to answer properly to SRST or
+		 * IDENTIFY when no device is attached downstream,
+		 * causing libata to retry quite a few times and
+		 * leading to excessive detection delay.
+		 *
+		 * As these firmwares respond to the second reset
+		 * attempt with an invalid device signature, treating
+		 * an unknown signature as offline works around the
+		 * problem acceptably.
+		 */
+		{
+			.ident = "EP45-DQ6",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "Gigabyte Technology Co., Ltd."),
+				DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
+			},
+			.driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
+		},
+		{
+			.ident = "EP45-DS5",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "Gigabyte Technology Co., Ltd."),
+				DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
+			},
+			.driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
+		},
+		{ }	/* terminate list */
+	};
+#undef ENCODE_BUSDEVFN
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	unsigned int val;
+
+	if (!dmi)
+		return false;
+
+	val = (unsigned long)dmi->driver_data;
+
+	return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
+}
+
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+	/* device with broken DEVSLP but still showing SDS capability */
+	static const struct pci_device_id ids[] = {
+		{ PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
+		{}
+	};
+
+	return pci_match_id(ids, pdev);
+}
+
+#ifdef CONFIG_ATA_ACPI
+static void ahci_gtf_filter_workaround(struct ata_host *host)
+{
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * Aspire 3810T issues a bunch of SATA enable commands
+		 * via _GTF, including an invalid one and one which is
+		 * rejected by the device.  Among the successful ones
+		 * is FPDMA non-zero offset enable which, when enabled
+		 * only on the drive side, leads to NCQ command
+		 * failures.  Filter it out.
+		 */
+		{
+			.ident = "Aspire 3810T",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
+			},
+			.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
+		},
+		{ }
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	unsigned int filter;
+	int i;
+
+	if (!dmi)
+		return;
+
+	filter = (unsigned long)dmi->driver_data;
+	dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
+		 filter, dmi->ident);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_link *link;
+		struct ata_device *dev;
+
+		ata_for_each_link(link, ap, EDGE)
+			ata_for_each_dev(dev, link, ALL)
+				dev->gtf_filter |= filter;
+	}
+}
+#else
+static inline void ahci_gtf_filter_workaround(struct ata_host *host)
+{}
+#endif
+
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+				    struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "Acer Switch Alpha 12",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+			},
+		},
+		{ }
+	};
+
+	if (dmi_check_system(sysids)) {
+		dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
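+		/* only override CAP.NP (the low bits) when the upper
+		 * CAP bits read back as the expected pattern
+		 */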
+		if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+			hpriv->port_map = 0x7;
+			hpriv->cap = 0xC734FF02;
+		}
+	}
+}
+
+#ifdef CONFIG_ARM64
+/*
+ * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
+ * Workaround is to make sure all pending IRQs are served before leaving
+ * handler.
+ */
+static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ahci_host_priv *hpriv;
+	unsigned int rc = 0;
+	void __iomem *mmio;
+	u32 irq_stat, irq_masked;
+	unsigned int handled = 1;
+
+	VPRINTK("ENTER\n");
+	hpriv = host->private_data;
+	mmio = hpriv->mmio;
+	irq_stat = readl(mmio + HOST_IRQ_STAT);
+	if (!irq_stat)
+		return IRQ_NONE;
+
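+	/* per the erratum, keep serving ports until HOST_IRQ_STAT reads zero */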
+	do {
+		irq_masked = irq_stat & hpriv->port_map;
+		spin_lock(&host->lock);
+		rc = ahci_handle_port_intr(host, irq_masked);
+		if (!rc)
+			handled = 0;
+		writel(irq_stat, mmio + HOST_IRQ_STAT);
+		irq_stat = readl(mmio + HOST_IRQ_STAT);
+		spin_unlock(&host->lock);
+	} while (irq_stat);
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(handled);
+}
+#endif
+
+static void ahci_remap_check(struct pci_dev *pdev, int bar,
+		struct ahci_host_priv *hpriv)
+{
+	int i, count = 0;
+	u32 cap;
+
+	/*
+	 * Check if this device might have remapped NVMe devices.
+	 */
+	if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
+	    pci_resource_len(pdev, bar) < SZ_512K ||
+	    bar != AHCI_PCI_BAR_STANDARD ||
+	    !(readl(hpriv->mmio + AHCI_VSCAP) & 1))
+		return;
+
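+	/*
+	 * Each set bit in the remap capability register denotes a
+	 * possibly remapped device; it is counted as NVMe when its
+	 * device class reads back as PCI_CLASS_STORAGE_EXPRESS.
+	 */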
+	cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
+	for (i = 0; i < AHCI_MAX_REMAP; i++) {
+		if ((cap & (1 << i)) == 0)
+			continue;
+		if (readl(hpriv->mmio + ahci_remap_dcc(i))
+				!= PCI_CLASS_STORAGE_EXPRESS)
+			continue;
+
+		/* We've found a remapped device */
+		count++;
+	}
+
+	if (!count)
+		return;
+
+	dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+	dev_warn(&pdev->dev,
+		 "Switch your BIOS from RAID to AHCI mode to use them.\n");
+
+	/*
+	 * Don't rely on the msi-x capability in the remap case,
+	 * share the legacy interrupt across ahci and remapped devices.
+	 */
+	hpriv->flags |= AHCI_HFLAG_NO_MSI;
+}
+
+static int ahci_get_irq_vector(struct ata_host *host, int port)
+{
+	return pci_irq_vector(to_pci_dev(host->dev), port);
+}
+
+static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
+			struct ahci_host_priv *hpriv)
+{
+	int nvec;
+
+	if (hpriv->flags & AHCI_HFLAG_NO_MSI)
+		return -ENODEV;
+
+	/*
+	 * If the number of MSIs is less than the number of ports, then
+	 * Sharing Last Message mode could be enforced. In this case, assume
+	 * that the advantage of multiple MSIs is negated and use single
+	 * MSI mode instead.
+	 */
+	if (n_ports > 1) {
+		nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+				PCI_IRQ_MSIX | PCI_IRQ_MSI);
+		if (nvec > 0) {
+			if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+				hpriv->get_irq_vector = ahci_get_irq_vector;
+				hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+				return nvec;
+			}
+
+			/*
+			 * Fallback to single MSI mode if the controller
+			 * enforced MRSM mode.
+			 */
+			printk(KERN_INFO
+				"ahci: MRSM is on, fallback to single MSI\n");
+			pci_free_irq_vectors(pdev);
+		}
+	}
+
+	/*
+	 * If the host is not capable of supporting per-port vectors, fall
+	 * back to single MSI before finally attempting single MSI-X.
+	 */
+	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+	if (nvec == 1)
+		return nvec;
+	return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+}
+
+static void ahci_update_initial_lpm_policy(struct ata_port *ap,
+					   struct ahci_host_priv *hpriv)
+{
+	int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+
+	/* Ignore processing for non-mobile platforms */
+	if (!(hpriv->flags & AHCI_HFLAG_IS_MOBILE))
+		return;
+
+	/* user modified policy via module param */
+	if (mobile_lpm_policy != -1) {
+		policy = mobile_lpm_policy;
+		goto update_policy;
+	}
+
+#ifdef CONFIG_ACPI
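+	/*
+	 * On platforms advertising ACPI Low Power S0 Idle, raise the
+	 * policy to a minimum power variant the controller supports
+	 * (Partial first, then Slumber) so disks can reach their
+	 * lowest power states during suspend-to-idle.
+	 */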
+	if (policy > ATA_LPM_MED_POWER &&
+	    (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+		if (hpriv->cap & HOST_CAP_PART)
+			policy = ATA_LPM_MIN_POWER_WITH_PARTIAL;
+		else if (hpriv->cap & HOST_CAP_SSC)
+			policy = ATA_LPM_MIN_POWER;
+	}
+#endif
+
+update_policy:
+	if (policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER)
+		ap->target_lpm_policy = policy;
+}
+
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	unsigned int board_id = ent->driver_data;
+	struct ata_port_info pi = ahci_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ata_host *host;
+	int n_ports, i, rc;
+	int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
+
+	VPRINTK("ENTER\n");
+
+	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* The AHCI driver can only drive the SATA ports; the PATA driver
+	   can drive them all, so if both drivers are selected make sure
+	   AHCI stays out of the way */
+	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
+		return -ENODEV;
+
+	/* Apple BIOS on MCP89 prevents us using AHCI */
+	if (is_mcp89_apple(pdev))
+		ahci_mcp89_apple_enable(pdev);
+
+	/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
+	 * At the moment, we can only use the AHCI mode. Let the users know
+	 * that for SAS drives they're out of luck.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
+		dev_info(&pdev->dev,
+			 "PDC42819 can only drive SATA devices with this driver\n");
+
+	/* Some devices use non-standard BARs */
+	if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+		ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+	else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+		ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
+	else if (pdev->vendor == PCI_VENDOR_ID_CAVIUM) {
+		if (pdev->device == 0xa01c)
+			ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
+		if (pdev->device == 0xa084)
+			ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
+	}
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
+		u8 map;
+
+		/* ICH6s share the same PCI ID for both piix and ahci
+		 * modes.  Enabling ahci mode while MAP indicates
+		 * combined mode is a bad idea.  Yield to ata_piix.
+		 */
+		pci_read_config_byte(pdev, ICH_MAP, &map);
+		if (map & 0x3) {
+			dev_info(&pdev->dev,
+				 "controller is in combined mode, can't enable AHCI mode\n");
+			return -ENODEV;
+		}
+	}
+
+	/* AHCI controllers often implement an SFF-compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	/* MCP65 revision A1 and A2 can't do MSI */
+	if (board_id == board_ahci_mcp65 &&
+	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
+		hpriv->flags |= AHCI_HFLAG_NO_MSI;
+
+	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
+	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
+		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+
+	/* only some SB600s can do 64bit DMA */
+	if (ahci_sb600_enable_64bit(pdev))
+		hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
+
+	hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
+	/* detect remapped NVMe devices */
+	ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+
+	/* the flag must be set before saving the config for it to take effect */
+	if (ahci_broken_devslp(pdev))
+		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
+#ifdef CONFIG_ARM64
+	if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+		hpriv->irq_handler = ahci_thunderx_irq_handler;
+#endif
+
+	/* save initial config */
+	ahci_pci_save_initial_config(pdev, hpriv);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ) {
+		pi.flags |= ATA_FLAG_NCQ;
+		/*
+		 * Auto-activate optimization is supposed to be
+		 * supported on all AHCI controllers indicating NCQ
+		 * capability, but it seems to be broken on some
+		 * chipsets including NVIDIAs.
+		 */
+		if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
+			pi.flags |= ATA_FLAG_FPDMA_AA;
+
+		/*
+		 * All AHCI controllers should be forward-compatible
+		 * with the new auxiliary field. This code should be
+		 * conditionalized if any buggy AHCI controllers are
+		 * encountered.
+		 */
+		pi.flags |= ATA_FLAG_FPDMA_AUX;
+	}
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	if (ahci_broken_system_poweroff(pdev)) {
+		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
+		dev_info(&pdev->dev,
+			"quirky BIOS, skipping spindown on poweroff\n");
+	}
+
+	if (ahci_broken_lpm(pdev)) {
+		pi.flags |= ATA_FLAG_NO_LPM;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for Link Power Management support\n");
+	}
+
+	if (ahci_broken_suspend(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for suspend/resume\n");
+	}
+
+	if (ahci_broken_online(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
+		dev_info(&pdev->dev,
+			 "online status unreliable, applying workaround\n");
+	}
+
+	/* Acer SA5-271 workaround modifies private_data */
+	acer_sa5_271_workaround(hpriv, pdev);
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
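+	/* e.g. CAP.NP == 3 (4 ports) with port_map == 0x33 yields n_ports == 6 */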
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+		/* legacy intx interrupts */
+		pci_intx(pdev, 1);
+	}
+	hpriv->irq = pci_irq_vector(pdev, 0);
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+
+	if (pi.flags & ATA_FLAG_EM)
+		ahci_reset_em(host);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+		ata_port_pbar_desc(ap, ahci_pci_bar,
+				   0x100 + ap->port_no * 0x80, "port");
+
+		/* set enclosure management message type */
+		if (ap->flags & ATA_FLAG_EM)
+			ap->em_message_type = hpriv->em_msg_type;
+
+		ahci_update_initial_lpm_policy(ap, hpriv);
+
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	/* apply workaround for ASUS P5W DH Deluxe mainboard */
+	ahci_p5wdh_workaround(host);
+
+	/* apply gtf filter quirk */
+	ahci_gtf_filter_workaround(host);
+
+	/* initialize adapter */
+	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+	if (rc)
+		return rc;
+
+	rc = ahci_pci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	ahci_pci_init_controller(host);
+	ahci_pci_print_info(host);
+
+	pci_set_master(pdev);
+
+	rc = ahci_host_activate(host, &ahci_sht);
+	if (rc)
+		return rc;
+
+	pm_runtime_put_noidle(&pdev->dev);
+	return 0;
+}
+
+static void ahci_remove_one(struct pci_dev *pdev)
+{
+	pm_runtime_get_noresume(&pdev->dev);
+	ata_pci_remove_one(pdev);
+}
+
+module_pci_driver(ahci_pci_driver);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 0000000..6a1515f
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,453 @@
+/*
+ *  ahci.h - Common AHCI SATA definitions and declarations
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/driver-api/libata.rst
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#ifndef _AHCI_H
+#define _AHCI_H
+
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE              0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT           0x0000000f
+#define EM_MSG_LED_PMP_SLOT           0x0000ff00
+#define EM_MSG_LED_VALUE              0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY     0x00070000
+#define EM_MSG_LED_VALUE_OFF          0xfff80000
+#define EM_MSG_LED_VALUE_ON           0x00010000
+
+enum {
+	AHCI_MAX_PORTS		= 32,
+	AHCI_MAX_CLKS		= 5,
+	AHCI_MAX_SG		= 168, /* hardware max is 64K */
+	AHCI_DMA_BOUNDARY	= 0xffffffff,
+	AHCI_MAX_CMDS		= 32,
+	AHCI_CMD_SZ		= 32,
+	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
+	AHCI_RX_FIS_SZ		= 256,
+	AHCI_CMD_TBL_CDB	= 0x40,
+	AHCI_CMD_TBL_HDR_SZ	= 0x80,
+	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+				  AHCI_RX_FIS_SZ,
+	AHCI_PORT_PRIV_FBS_DMA_SZ	= AHCI_CMD_SLOT_SZ +
+					  AHCI_CMD_TBL_AR_SZ +
+					  (AHCI_RX_FIS_SZ * 16),
+	AHCI_IRQ_ON_SG		= (1 << 31),
+	AHCI_CMD_ATAPI		= (1 << 5),
+	AHCI_CMD_WRITE		= (1 << 6),
+	AHCI_CMD_PREFETCH	= (1 << 7),
+	AHCI_CMD_RESET		= (1 << 8),
+	AHCI_CMD_CLR_BUSY	= (1 << 10),
+
+	RX_FIS_PIO_SETUP	= 0x20,	/* offset of PIO Setup FIS data */
+	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
+	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
+
+	/* global controller registers */
+	HOST_CAP		= 0x00, /* host capabilities */
+	HOST_CTL		= 0x04, /* global host control */
+	HOST_IRQ_STAT		= 0x08, /* interrupt status */
+	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
+	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
+	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
+	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
+	HOST_CAP2		= 0x24, /* host capabilities, extended */
+
+	/* HOST_CTL bits */
+	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
+	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
+	HOST_MRSM		= (1 << 2),  /* MSI Revert to Single Message */
+	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
+
+	/* HOST_CAP bits */
+	HOST_CAP_SXS		= (1 << 5),  /* Supports External SATA */
+	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
+	HOST_CAP_CCC		= (1 << 7),  /* Command Completion Coalescing */
+	HOST_CAP_PART		= (1 << 13), /* Partial state capable */
+	HOST_CAP_SSC		= (1 << 14), /* Slumber state capable */
+	HOST_CAP_PIO_MULTI	= (1 << 15), /* PIO multiple DRQ support */
+	HOST_CAP_FBS		= (1 << 16), /* FIS-based switching support */
+	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
+	HOST_CAP_ONLY		= (1 << 18), /* Supports AHCI mode only */
+	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_LED		= (1 << 25), /* Supports activity LED */
+	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
+	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
+	HOST_CAP_MPS		= (1 << 28), /* Mechanical presence switch */
+	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
+	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
+	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+	/* HOST_CAP2 bits */
+	HOST_CAP2_BOH		= (1 << 0),  /* BIOS/OS handoff supported */
+	HOST_CAP2_NVMHCI	= (1 << 1),  /* NVMHCI supported */
+	HOST_CAP2_APST		= (1 << 2),  /* Automatic partial to slumber */
+	HOST_CAP2_SDS		= (1 << 3),  /* Support device sleep */
+	HOST_CAP2_SADM		= (1 << 4),  /* Support aggressive DevSlp */
+	HOST_CAP2_DESO		= (1 << 5),  /* DevSlp from slumber only */
+
+	/* registers for each SATA port */
+	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
+	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
+	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
+	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
+	PORT_IRQ_STAT		= 0x10, /* interrupt status */
+	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
+	PORT_CMD		= 0x18, /* port command */
+	PORT_TFDATA		= 0x20,	/* taskfile data */
+	PORT_SIG		= 0x24,	/* device TF signature */
+	PORT_CMD_ISSUE		= 0x38, /* command issue */
+	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
+	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
+	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
+	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
+	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
+	PORT_FBS		= 0x40, /* FIS-based Switching */
+	PORT_DEVSLP		= 0x44, /* device sleep */
+
+	/* PORT_IRQ_{STAT,MASK} bits */
+	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
+	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
+	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
+	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
+	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
+	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
+	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
+	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
+
+	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
+	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
+	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
+	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
+	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
+	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
+	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
+	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
+	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
+
+	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+				  PORT_IRQ_IF_ERR |
+				  PORT_IRQ_CONNECT |
+				  PORT_IRQ_PHYRDY |
+				  PORT_IRQ_UNK_FIS |
+				  PORT_IRQ_BAD_PMP,
+	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
+				  PORT_IRQ_TF_ERR |
+				  PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+	/* PORT_CMD bits */
+	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
+	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
+	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
+	PORT_CMD_FBSCP		= (1 << 22), /* FBS Capable Port */
+	PORT_CMD_ESP		= (1 << 21), /* External Sata Port */
+	PORT_CMD_HPCP		= (1 << 18), /* HotPlug Capable Port */
+	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
+	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
+	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
+	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
+	PORT_CMD_CLO		= (1 << 3), /* Command list override */
+	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
+	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
+	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
+
+	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
+	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
+	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
+	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
+
+	/* PORT_FBS bits */
+	PORT_FBS_DWE_OFFSET	= 16, /* FBS device with error offset */
+	PORT_FBS_ADO_OFFSET	= 12, /* FBS active dev optimization offset */
+	PORT_FBS_DEV_OFFSET	= 8,  /* FBS device to issue offset */
+	PORT_FBS_DEV_MASK	= (0xf << PORT_FBS_DEV_OFFSET),  /* FBS.DEV */
+	PORT_FBS_SDE		= (1 << 2), /* FBS single device error */
+	PORT_FBS_DEC		= (1 << 1), /* FBS device error clear */
+	PORT_FBS_EN		= (1 << 0), /* Enable FBS */
+
+	/* PORT_DEVSLP bits */
+	PORT_DEVSLP_DM_OFFSET	= 25,             /* DITO multiplier offset */
+	PORT_DEVSLP_DM_MASK	= (0xf << 25),    /* DITO multiplier mask */
+	PORT_DEVSLP_DITO_OFFSET	= 15,             /* DITO offset */
+	PORT_DEVSLP_MDAT_OFFSET	= 10,             /* Minimum assertion time */
+	PORT_DEVSLP_DETO_OFFSET	= 2,              /* DevSlp exit timeout */
+	PORT_DEVSLP_DSP		= (1 << 1),       /* DevSlp present */
+	PORT_DEVSLP_ADSE	= (1 << 0),       /* Aggressive DevSlp enable */
+
+	/* hpriv->flags bits */
+
+#define AHCI_HFLAGS(flags)		.private_data	= (void *)(flags)
+
+	AHCI_HFLAG_NO_NCQ		= (1 << 0),
+	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
+	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
+	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
+	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
+	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
+	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
+	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
+	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
+	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
+	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
+							link offline */
+	AHCI_HFLAG_NO_SNTF		= (1 << 12), /* no sntf */
+	AHCI_HFLAG_NO_FPDMA_AA		= (1 << 13), /* no FPDMA AA */
+	AHCI_HFLAG_YES_FBS		= (1 << 14), /* force FBS cap on */
+	AHCI_HFLAG_DELAY_ENGINE		= (1 << 15), /* do not start engine on
+						        port start (wait until
+						        error-handling stage) */
+	AHCI_HFLAG_NO_DEVSLP		= (1 << 17), /* no device sleep */
+	AHCI_HFLAG_NO_FBS		= (1 << 18), /* no FBS */
+
+#ifdef CONFIG_PCI_MSI
+	AHCI_HFLAG_MULTI_MSI		= (1 << 20), /* per-port MSI(-X) */
+#else
+	/* compile out MSI infrastructure */
+	AHCI_HFLAG_MULTI_MSI		= 0,
+#endif
+	AHCI_HFLAG_WAKE_BEFORE_STOP	= (1 << 22), /* wake before DMA stop */
+	AHCI_HFLAG_YES_ALPM		= (1 << 23), /* force ALPM cap on */
+	AHCI_HFLAG_NO_WRITE_TO_RO	= (1 << 24), /* don't write to read
+							only registers */
+	AHCI_HFLAG_IS_MOBILE		= (1 << 25), /* mobile chipset, use
+							SATA_MOBILE_LPM_POLICY
+							as default lpm_policy */
+
+	/* ap->flags bits */
+
+	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+
+	ICH_MAP				= 0x90, /* ICH MAP register */
+
+	/* em constants */
+	EM_MAX_SLOTS			= 8,
+	EM_MAX_RETRY			= 5,
+
+	/* em_ctl bits */
+	EM_CTL_RST		= (1 << 9), /* Reset */
+	EM_CTL_TM		= (1 << 8), /* Transmit Message */
+	EM_CTL_MR		= (1 << 0), /* Message Received */
+	EM_CTL_ALHD		= (1 << 26), /* Activity LED */
+	EM_CTL_XMT		= (1 << 25), /* Transmit Only */
+	EM_CTL_SMB		= (1 << 24), /* Single Message Buffer */
+	EM_CTL_SGPIO		= (1 << 19), /* SGPIO messages supported */
+	EM_CTL_SES		= (1 << 18), /* SES-2 messages supported */
+	EM_CTL_SAFTE		= (1 << 17), /* SAF-TE messages supported */
+	EM_CTL_LED		= (1 << 16), /* LED messages supported */
+
+	/* em message type */
+	EM_MSG_TYPE_LED		= (1 << 0), /* LED */
+	EM_MSG_TYPE_SAFTE	= (1 << 1), /* SAF-TE */
+	EM_MSG_TYPE_SES2	= (1 << 2), /* SES-2 */
+	EM_MSG_TYPE_SGPIO	= (1 << 3), /* SGPIO */
+};
+
+struct ahci_cmd_hdr {
+	__le32			opts;
+	__le32			status;
+	__le32			tbl_addr;
+	__le32			tbl_addr_hi;
+	__le32			reserved[4];
+};
+
+struct ahci_sg {
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			reserved;
+	__le32			flags_size;
+};
+
+struct ahci_em_priv {
+	enum sw_activity blink_policy;
+	struct timer_list timer;
+	unsigned long saved_activity;
+	unsigned long activity;
+	unsigned long led_state;
+	struct ata_link *link;
+};
+
+struct ahci_port_priv {
+	struct ata_link		*active_link;
+	struct ahci_cmd_hdr	*cmd_slot;
+	dma_addr_t		cmd_slot_dma;
+	void			*cmd_tbl;
+	dma_addr_t		cmd_tbl_dma;
+	void			*rx_fis;
+	dma_addr_t		rx_fis_dma;
+	/* for NCQ spurious interrupt analysis */
+	unsigned int		ncq_saw_d2h:1;
+	unsigned int		ncq_saw_dmas:1;
+	unsigned int		ncq_saw_sdb:1;
+	spinlock_t		lock;		/* protects parent ata_port */
+	u32 			intr_mask;	/* interrupts to enable */
+	bool			fbs_supported;	/* set iff FBS is supported */
+	bool			fbs_enabled;	/* set iff FBS is enabled */
+	int			fbs_last_dev;	/* save FBS.DEV of last FIS */
+	/* enclosure management info per PM slot */
+	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
+	char			*irq_desc;	/* desc in /proc/interrupts */
+};
+
+struct ahci_host_priv {
+	/* Input fields */
+	unsigned int		flags;		/* AHCI_HFLAG_* */
+	u32			force_port_map;	/* force port map */
+	u32			mask_port_map;	/* mask out particular bits */
+
+	void __iomem *		mmio;		/* bus-independent mem map */
+	u32			cap;		/* cap to use */
+	u32			cap2;		/* cap2 to use */
+	u32			version;	/* cached version */
+	u32			port_map;	/* port map to use */
+	u32			saved_cap;	/* saved initial cap */
+	u32			saved_cap2;	/* saved initial cap2 */
+	u32			saved_port_map;	/* saved initial port_map */
+	u32 			em_loc; /* enclosure management location */
+	u32			em_buf_sz;	/* EM buffer size in byte */
+	u32			em_msg_type;	/* EM message type */
+	bool			got_runtime_pm; /* Did we do pm_runtime_get? */
+	struct clk		*clks[AHCI_MAX_CLKS]; /* Optional */
+	struct reset_control	*rsts;		/* Optional */
+	struct regulator	**target_pwrs;	/* Optional */
+	/*
+	 * Optional PHYs, if the platform uses them. There is a 1:1 relation
+	 * between the port number and the PHY position in this array.
+	 */
+	struct phy		**phys;
+	unsigned		nports;		/* Number of ports */
+	void			*plat_data;	/* Other platform data */
+	unsigned int		irq;		/* interrupt line */
+	/*
+	 * Optional ahci_start_engine override. If not set, this is set to the
+	 * default ahci_start_engine during ahci_save_initial_config(). It can
+	 * be overridden any time before the host is activated.
+	 */
+	void			(*start_engine)(struct ata_port *ap);
+	/*
+	 * Optional ahci_stop_engine override. If not set, this is set to the
+	 * default ahci_stop_engine during ahci_save_initial_config(). It can
+	 * be overridden any time before the host is activated.
+	 */
+	int			(*stop_engine)(struct ata_port *ap);
+
+	irqreturn_t 		(*irq_handler)(int irq, void *dev_instance);
+
+	/* only required for per-port MSI(-X) support */
+	int			(*get_irq_vector)(struct ata_host *host,
+						  int port);
+};
+
+extern int ahci_ignore_sss;
+
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+/*
+ * This must be instantiated by the edge drivers.  Read the comments
+ * for ATA_BASE_SHT.
+ */
+#define AHCI_SHT(drv_name)						\
+	ATA_NCQ_SHT(drv_name),						\
+	.can_queue		= AHCI_MAX_CMDS,			\
+	.sg_tablesize		= AHCI_MAX_SG,				\
+	.dma_boundary		= AHCI_DMA_BOUNDARY,			\
+	.shost_attrs		= ahci_shost_attrs,			\
+	.sdev_attrs		= ahci_sdev_attrs
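+
+/*
+ * Illustrative use by an edge driver (mirrors what the platform drivers
+ * in this patch do); "foo" is a placeholder name, not a real driver:
+ *
+ *	static struct scsi_host_template foo_sht = {
+ *		AHCI_SHT("foo"),
+ *	};
+ */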
+
+extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
+extern struct ata_port_operations ahci_pmp_retry_srst_ops;
+
+unsigned int ahci_dev_classify(struct ata_port *ap);
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			u32 opts);
+void ahci_save_initial_config(struct device *dev,
+			      struct ahci_host_priv *hpriv);
+void ahci_init_controller(struct ata_host *host);
+int ahci_reset_controller(struct ata_host *host);
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+		      int pmp, unsigned long deadline,
+		      int (*check_ready)(struct ata_link *link));
+
+int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
+		      unsigned long deadline, bool *online);
+
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_fis_rx(struct ata_port *ap);
+void ahci_start_engine(struct ata_port *ap);
+int ahci_check_ready(struct ata_link *link);
+int ahci_kick_engine(struct ata_port *ap);
+int ahci_port_resume(struct ata_port *ap);
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+			  struct ata_port_info *pi);
+int ahci_reset_em(struct ata_host *host);
+void ahci_print_info(struct ata_host *host, const char *scc_s);
+int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
+void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+					     unsigned int port_no)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+
+	return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+	return __ahci_port_base(ap->host, ap->port_no);
+}
+
+static inline int ahci_nr_ports(u32 cap)
+{
+	return (cap & 0x1f) + 1;
+}
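+
+/*
+ * Worked example (illustrative): with CAP[4:0] = 5, ahci_nr_ports()
+ * reports 6 ports, and port 2's registers live at ABAR + 0x100 +
+ * 2 * 0x80 = ABAR + 0x200 per __ahci_port_base() above.
+ */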
+
+#endif /* _AHCI_H */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
new file mode 100644
index 0000000..f3d5577
--- /dev/null
+++ b/drivers/ata/ahci_brcm.c
@@ -0,0 +1,486 @@
+/*
+ * Broadcom SATA3 AHCI Controller Driver
+ *
+ * Copyright © 2009-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+#include "ahci.h"
+
+#define DRV_NAME					"brcm-ahci"
+
+#define SATA_TOP_CTRL_VERSION				0x0
+#define SATA_TOP_CTRL_BUS_CTRL				0x4
+ #define MMIO_ENDIAN_SHIFT				0 /* CPU->AHCI */
+ #define DMADESC_ENDIAN_SHIFT				2 /* AHCI->DDR */
+ #define DMADATA_ENDIAN_SHIFT				4 /* AHCI->DDR */
+ #define PIODATA_ENDIAN_SHIFT				6
+  #define ENDIAN_SWAP_NONE				0
+  #define ENDIAN_SWAP_FULL				2
+#define SATA_TOP_CTRL_TP_CTRL				0x8
+#define SATA_TOP_CTRL_PHY_CTRL				0xc
+ #define SATA_TOP_CTRL_PHY_CTRL_1			0x0
+  #define SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE	BIT(14)
+ #define SATA_TOP_CTRL_PHY_CTRL_2			0x4
+  #define SATA_TOP_CTRL_2_SW_RST_MDIOREG		BIT(0)
+  #define SATA_TOP_CTRL_2_SW_RST_OOB			BIT(1)
+  #define SATA_TOP_CTRL_2_SW_RST_RX			BIT(2)
+  #define SATA_TOP_CTRL_2_SW_RST_TX			BIT(3)
+  #define SATA_TOP_CTRL_2_PHY_GLOBAL_RESET		BIT(14)
+ #define SATA_TOP_CTRL_PHY_OFFS				0x8
+ #define SATA_TOP_MAX_PHYS				2
+
+#define SATA_FIRST_PORT_CTRL				0x700
+#define SATA_NEXT_PORT_CTRL_OFFSET			0x80
+#define SATA_PORT_PCTRL6(reg_base)			(reg_base + 0x18)
+
+/* On big-endian MIPS, buses are reversed to big endian, so switch them back */
+#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
+#define DATA_ENDIAN			 2 /* AHCI->DDR inbound accesses */
+#define MMIO_ENDIAN			 2 /* CPU->AHCI outbound accesses */
+#else
+#define DATA_ENDIAN			 0
+#define MMIO_ENDIAN			 0
+#endif
+
+#define BUS_CTRL_ENDIAN_CONF				\
+	((DATA_ENDIAN << DMADATA_ENDIAN_SHIFT) |	\
+	(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) |		\
+	(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+
+#define BUS_CTRL_ENDIAN_NSP_CONF			\
+	(0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
+
+#define BUS_CTRL_ENDIAN_CONF_MASK			\
+	(0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT |	\
+	 0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
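+
+/*
+ * Worked example (illustrative): on a big-endian MIPS build, DATA_ENDIAN
+ * and MMIO_ENDIAN are both 2 (full swap), so BUS_CTRL_ENDIAN_CONF
+ * evaluates to (2 << 4) | (2 << 2) | (2 << 0) = 0x2a; on other builds
+ * it is 0 (no swapping).
+ */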
+
+enum brcm_ahci_version {
+	BRCM_SATA_BCM7425 = 1,
+	BRCM_SATA_BCM7445,
+	BRCM_SATA_NSP,
+};
+
+enum brcm_ahci_quirks {
+	BRCM_AHCI_QUIRK_NO_NCQ		= BIT(0),
+	BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE	= BIT(1),
+};
+
+struct brcm_ahci_priv {
+	struct device *dev;
+	void __iomem *top_ctrl;
+	u32 port_mask;
+	u32 quirks;
+	enum brcm_ahci_version version;
+};
+
+static inline u32 brcm_sata_readreg(void __iomem *addr)
+{
+	/*
+	 * MIPS endianness is configured by boot strap, which also reverses all
+	 * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+	 * endian I/O).
+	 *
+	 * Other architectures (e.g., ARM) either do not support big endian, or
+	 * else leave I/O in little endian mode.
+	 */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		return __raw_readl(addr);
+	else
+		return readl_relaxed(addr);
+}
+
+static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
+{
+	/* See brcm_sata_readreg() comments */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		__raw_writel(val, addr);
+	else
+		writel_relaxed(val, addr);
+}
+
+static void brcm_sata_alpm_init(struct ahci_host_priv *hpriv)
+{
+	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	u32 port_ctrl, host_caps;
+	int i;
+
+	/* Enable support for ALPM */
+	host_caps = readl(hpriv->mmio + HOST_CAP);
+	if (!(host_caps & HOST_CAP_ALPM))
+		hpriv->flags |= AHCI_HFLAG_YES_ALPM;
+
+	/*
+	 * Adjust timeout to allow PLL sufficient time to lock while waking
+	 * up from slumber mode.
+	 */
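+	/* e.g. (illustrative): port 1's PCTRL6 register sits at
+	 * 0x700 + 0x80 + 0x18 = 0x798 from the AHCI base.
+	 */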
+	for (i = 0, port_ctrl = SATA_FIRST_PORT_CTRL;
+	     i < SATA_TOP_MAX_PHYS;
+	     i++, port_ctrl += SATA_NEXT_PORT_CTRL_OFFSET) {
+		if (priv->port_mask & BIT(i))
+			writel(0xff1003fc,
+			       hpriv->mmio + SATA_PORT_PCTRL6(port_ctrl));
+	}
+}
+
+static void brcm_sata_phy_enable(struct brcm_ahci_priv *priv, int port)
+{
+	void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
+				(port * SATA_TOP_CTRL_PHY_OFFS);
+	void __iomem *p;
+	u32 reg;
+
+	if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
+		return;
+
+	/* clear PHY_DEFAULT_POWER_STATE */
+	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
+	reg = brcm_sata_readreg(p);
+	reg &= ~SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
+	brcm_sata_writereg(reg, p);
+
+	/* reset the PHY digital logic */
+	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
+	reg = brcm_sata_readreg(p);
+	reg &= ~(SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
+		 SATA_TOP_CTRL_2_SW_RST_RX);
+	reg |= SATA_TOP_CTRL_2_SW_RST_TX;
+	brcm_sata_writereg(reg, p);
+	reg = brcm_sata_readreg(p);
+	reg |= SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
+	brcm_sata_writereg(reg, p);
+	reg = brcm_sata_readreg(p);
+	reg &= ~SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
+	brcm_sata_writereg(reg, p);
+	(void)brcm_sata_readreg(p);
+}
+
+static void brcm_sata_phy_disable(struct brcm_ahci_priv *priv, int port)
+{
+	void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
+				(port * SATA_TOP_CTRL_PHY_OFFS);
+	void __iomem *p;
+	u32 reg;
+
+	if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
+		return;
+
+	/* power-off the PHY digital logic */
+	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
+	reg = brcm_sata_readreg(p);
+	reg |= (SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
+		SATA_TOP_CTRL_2_SW_RST_RX | SATA_TOP_CTRL_2_SW_RST_TX |
+		SATA_TOP_CTRL_2_PHY_GLOBAL_RESET);
+	brcm_sata_writereg(reg, p);
+
+	/* set PHY_DEFAULT_POWER_STATE */
+	p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
+	reg = brcm_sata_readreg(p);
+	reg |= SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
+	brcm_sata_writereg(reg, p);
+}
+
+static void brcm_sata_phys_enable(struct brcm_ahci_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
+		if (priv->port_mask & BIT(i))
+			brcm_sata_phy_enable(priv, i);
+}
+
+static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
+		if (priv->port_mask & BIT(i))
+			brcm_sata_phy_disable(priv, i);
+}
+
+static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+				  struct brcm_ahci_priv *priv)
+{
+	void __iomem *ahci;
+	struct resource *res;
+	u32 impl;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
+	ahci = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ahci))
+		return 0;
+
+	impl = readl(ahci + HOST_PORTS_IMPL);
+
+	if (fls(impl) > SATA_TOP_MAX_PHYS)
+		dev_warn(priv->dev, "more ports than PHYs (%#x)\n",
+			 impl);
+	else if (!impl)
+		dev_info(priv->dev, "no ports found\n");
+
+	devm_iounmap(&pdev->dev, ahci);
+	devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+
+	return impl;
+}
+
+static void brcm_sata_init(struct brcm_ahci_priv *priv)
+{
+	void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+	u32 data;
+
+	/* Configure endianness */
+	data = brcm_sata_readreg(ctrl);
+	data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
+	if (priv->version == BRCM_SATA_NSP)
+		data |= BUS_CTRL_ENDIAN_NSP_CONF;
+	else
+		data |= BUS_CTRL_ENDIAN_CONF;
+	brcm_sata_writereg(data, ctrl);
+}
+
+static unsigned int brcm_ahci_read_id(struct ata_device *dev,
+				      struct ata_taskfile *tf, u16 *id)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_host *host = ap->host;
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	void __iomem *mmio = hpriv->mmio;
+	unsigned int err_mask;
+	unsigned long flags;
+	int i, rc;
+	u32 ctl;
+
+	/* Try to read the device ID and, if this fails, proceed with the
+	 * recovery sequence below
+	 */
+	err_mask = ata_do_dev_read_id(dev, tf, id);
+	if (likely(!err_mask))
+		return err_mask;
+
+	/* Disable host interrupts */
+	spin_lock_irqsave(&host->lock, flags);
+	ctl = readl(mmio + HOST_CTL);
+	ctl &= ~HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/* Perform the SATA PHY reset sequence */
+	brcm_sata_phy_disable(priv, ap->port_no);
+
+	/* Bring the PHY back on */
+	brcm_sata_phy_enable(priv, ap->port_no);
+
+	/* Re-initialize and calibrate the PHY */
+	for (i = 0; i < hpriv->nports; i++) {
+		rc = phy_init(hpriv->phys[i]);
+		if (rc)
+			goto disable_phys;
+
+		rc = phy_calibrate(hpriv->phys[i]);
+		if (rc) {
+			phy_exit(hpriv->phys[i]);
+			goto disable_phys;
+		}
+	}
+
+	/* Re-enable host interrupts */
+	spin_lock_irqsave(&host->lock, flags);
+	ctl = readl(mmio + HOST_CTL);
+	ctl |= HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return ata_do_dev_read_id(dev, tf, id);
+
+disable_phys:
+	while (--i >= 0) {
+		phy_power_off(hpriv->phys[i]);
+		phy_exit(hpriv->phys[i]);
+	}
+
+	return AC_ERR_OTHER;
+}
+
+static void brcm_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	ahci_platform_disable_resources(hpriv);
+}
+
+static struct ata_port_operations ahci_brcm_platform_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= brcm_ahci_host_stop,
+	.read_id	= brcm_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+	.link_flags	= ATA_LFLAG_NO_DB_DELAY,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_brcm_platform_ops,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int brcm_ahci_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	int ret;
+
+	ret = ahci_platform_suspend(dev);
+	brcm_sata_phys_disable(priv);
+	return ret;
+}
+
+static int brcm_ahci_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct brcm_ahci_priv *priv = hpriv->plat_data;
+
+	brcm_sata_init(priv);
+	brcm_sata_phys_enable(priv);
+	brcm_sata_alpm_init(hpriv);
+	return ahci_platform_resume(dev);
+}
+#endif
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static const struct of_device_id ahci_of_match[] = {
+	{.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
+	{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
+	{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static int brcm_ahci_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	struct device *dev = &pdev->dev;
+	struct brcm_ahci_priv *priv;
+	struct ahci_host_priv *hpriv;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	of_id = of_match_node(ahci_of_match, pdev->dev.of_node);
+	if (!of_id)
+		return -ENODEV;
+
+	priv->version = (enum brcm_ahci_version)of_id->data;
+	priv->dev = dev;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
+	priv->top_ctrl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->top_ctrl))
+		return PTR_ERR(priv->top_ctrl);
+
+	if ((priv->version == BRCM_SATA_BCM7425) ||
+		(priv->version == BRCM_SATA_NSP)) {
+		priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+		priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+	}
+
+	brcm_sata_init(priv);
+
+	priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
+	if (!priv->port_mask)
+		return -ENODEV;
+
+	brcm_sata_phys_enable(priv);
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
+
+	brcm_sata_alpm_init(hpriv);
+
+	ret = ahci_platform_enable_resources(hpriv);
+	if (ret)
+		return ret;
+
+	if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
+		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+	hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+
+	ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
+				      &ahci_platform_sht);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "Broadcom AHCI SATA3 registered\n");
+
+	return 0;
+}
+
+static int brcm_ahci_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	int ret;
+
+	ret = ata_platform_remove_one(pdev);
+	if (ret)
+		return ret;
+
+	brcm_sata_phys_disable(priv);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
+
+static struct platform_driver brcm_ahci_driver = {
+	.probe = brcm_ahci_probe,
+	.remove = brcm_ahci_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_of_match,
+		.pm = &ahci_brcm_pm_ops,
+	},
+};
+module_platform_driver(brcm_ahci_driver);
+
+MODULE_DESCRIPTION("Broadcom SATA3 AHCI Controller Driver");
+MODULE_AUTHOR("Brian Norris");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sata-brcmstb");
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
new file mode 100644
index 0000000..dc78c98
--- /dev/null
+++ b/drivers/ata/ahci_ceva.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2015 Xilinx, Inc.
+ * CEVA AHCI SATA platform driver
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "ahci.h"
+
+/* Vendor Specific Register Offsets */
+#define AHCI_VEND_PCFG  0xA4
+#define AHCI_VEND_PPCFG 0xA8
+#define AHCI_VEND_PP2C  0xAC
+#define AHCI_VEND_PP3C  0xB0
+#define AHCI_VEND_PP4C  0xB4
+#define AHCI_VEND_PP5C  0xB8
+#define AHCI_VEND_AXICC 0xBC
+#define AHCI_VEND_PAXIC 0xC0
+#define AHCI_VEND_PTC   0xC8
+
+/* Vendor Specific Register bit definitions */
+#define PAXIC_ADBW_BW64 0x1
+#define PAXIC_MAWID(i)	(((i) * 2) << 4)
+#define PAXIC_MARID(i)	(((i) * 2) << 12)
+#define PAXIC_MARIDD(i)	((((i) * 2) + 1) << 16)
+#define PAXIC_MAWIDD(i)	((((i) * 2) + 1) << 8)
+#define PAXIC_OTL	(0x4 << 20)
+
+/* Register bit definitions for cache control */
+#define AXICC_ARCA_VAL  (0xF << 0)
+#define AXICC_ARCF_VAL  (0xF << 4)
+#define AXICC_ARCH_VAL  (0xF << 8)
+#define AXICC_ARCP_VAL  (0xF << 12)
+#define AXICC_AWCFD_VAL (0xF << 16)
+#define AXICC_AWCD_VAL  (0xF << 20)
+#define AXICC_AWCF_VAL  (0xF << 24)
+
+#define PCFG_TPSS_VAL	(0x32 << 16)
+#define PCFG_TPRS_VAL	(0x2 << 12)
+#define PCFG_PAD_VAL	0x2
+
+#define PPCFG_TTA	0x1FFFE
+#define PPCFG_PSSO_EN	(1 << 28)
+#define PPCFG_PSS_EN	(1 << 29)
+#define PPCFG_ESDF_EN	(1 << 31)
+
+#define PP5C_RIT	0x60216
+#define PP5C_RCT	(0x7f0 << 20)
+
+#define PTC_RX_WM_VAL	0x40
+#define PTC_RSVD	(1 << 27)
+
+#define PORT0_BASE	0x100
+#define PORT1_BASE	0x180
+
+/* Port Control Register Bit Definitions */
+#define PORT_SCTL_SPD_GEN3	(0x3 << 4)
+#define PORT_SCTL_SPD_GEN2	(0x2 << 4)
+#define PORT_SCTL_SPD_GEN1	(0x1 << 4)
+#define PORT_SCTL_IPM		(0x3 << 8)
+
+#define PORT_BASE	0x100
+#define PORT_OFFSET	0x80
+#define NR_PORTS	2
+#define DRV_NAME	"ahci-ceva"
+#define CEVA_FLAG_BROKEN_GEN2	1
+
+static unsigned int rx_watermark = PTC_RX_WM_VAL;
+module_param(rx_watermark, uint, 0644);
+MODULE_PARM_DESC(rx_watermark, "RxWaterMark value (0 - 0x80)");
+
+struct ceva_ahci_priv {
+	struct platform_device *ahci_pdev;
+	/* Port Phy2Cfg Register */
+	u32 pp2c[NR_PORTS];
+	u32 pp3c[NR_PORTS];
+	u32 pp4c[NR_PORTS];
+	u32 pp5c[NR_PORTS];
+	/* Axi Cache Control Register */
+	u32 axicc;
+	bool is_cci_enabled;
+	int flags;
+};
+
+static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+					struct ata_taskfile *tf, u16 *id)
+{
+	u32 err_mask;
+
+	err_mask = ata_do_dev_read_id(dev, tf, id);
+	if (err_mask)
+		return err_mask;
+	/*
+	 * Since the CEVA controller does not support the device sleep
+	 * feature, we need to clear DEVSLP (bit 8) in word 78 of the
+	 * IDENTIFY DEVICE data.
+	 */
+	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+
+	return 0;
+}
+
+static struct ata_port_operations ahci_ceva_ops = {
+	.inherits = &ahci_platform_ops,
+	.read_id = ceva_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_ceva_port_info = {
+	.flags          = AHCI_FLAG_COMMON,
+	.pio_mask       = ATA_PIO4,
+	.udma_mask      = ATA_UDMA6,
+	.port_ops	= &ahci_ceva_ops,
+};
+
+static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
+{
+	void __iomem *mmio = hpriv->mmio;
+	struct ceva_ahci_priv *cevapriv = hpriv->plat_data;
+	u32 tmp;
+	int i;
+
+	/* Set AHCI Enable */
+	tmp = readl(mmio + HOST_CTL);
+	tmp |= HOST_AHCI_EN;
+	writel(tmp, mmio + HOST_CTL);
+
+	for (i = 0; i < NR_PORTS; i++) {
+		/* TPSS TPRS scalars, CISE and Port Addr */
+		tmp = PCFG_TPSS_VAL | PCFG_TPRS_VAL | (PCFG_PAD_VAL + i);
+		writel(tmp, mmio + AHCI_VEND_PCFG);
+
+		/*
+		 * AXI Data bus width to 64
+		 * Set Mem Addr Read, Write ID for data transfers
+		 * Set Mem Addr Read ID, Write ID for non-data transfers
+		 * Transfer limit to 72 DWord
+		 */
+		tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD(i) | PAXIC_MARIDD(i) |
+			PAXIC_MAWID(i) | PAXIC_MARID(i) | PAXIC_OTL;
+		writel(tmp, mmio + AHCI_VEND_PAXIC);
+
+		/* Set AXI cache control register if CCi is enabled */
+		if (cevapriv->is_cci_enabled) {
+			tmp = readl(mmio + AHCI_VEND_AXICC);
+			tmp |= AXICC_ARCA_VAL | AXICC_ARCF_VAL |
+				AXICC_ARCH_VAL | AXICC_ARCP_VAL |
+				AXICC_AWCFD_VAL | AXICC_AWCD_VAL |
+				AXICC_AWCF_VAL;
+			writel(tmp, mmio + AHCI_VEND_AXICC);
+		}
+
+		/* Port Phy Cfg register enables */
+		tmp = PPCFG_TTA | PPCFG_PSS_EN | PPCFG_ESDF_EN;
+		writel(tmp, mmio + AHCI_VEND_PPCFG);
+
+		/* Phy Control OOB timing parameters COMINIT */
+		writel(cevapriv->pp2c[i], mmio + AHCI_VEND_PP2C);
+
+		/* Phy Control OOB timing parameters COMWAKE */
+		writel(cevapriv->pp3c[i], mmio + AHCI_VEND_PP3C);
+
+		/* Phy Control Burst timing setting */
+		writel(cevapriv->pp4c[i], mmio + AHCI_VEND_PP4C);
+
+		/* Rate Change Timer and Retry Interval Timer setting */
+		writel(cevapriv->pp5c[i], mmio + AHCI_VEND_PP5C);
+
+		/* Rx watermark setting */
+		tmp = rx_watermark | PTC_RSVD;
+		writel(tmp, mmio + AHCI_VEND_PTC);
+
+		/* Default to Gen 3 speed; fall back to Gen 1 if Gen 2 is broken */
+		tmp = PORT_SCTL_SPD_GEN3 | PORT_SCTL_IPM;
+		if (cevapriv->flags & CEVA_FLAG_BROKEN_GEN2)
+			tmp = PORT_SCTL_SPD_GEN1 | PORT_SCTL_IPM;
+		writel(tmp, mmio + PORT_SCR_CTL + PORT_BASE + PORT_OFFSET * i);
+	}
+}
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ceva_ahci_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ceva_ahci_priv *cevapriv;
+	enum dev_dma_attr attr;
+	int rc;
+
+	cevapriv = devm_kzalloc(dev, sizeof(*cevapriv), GFP_KERNEL);
+	if (!cevapriv)
+		return -ENOMEM;
+
+	cevapriv->ahci_pdev = pdev;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	if (of_property_read_bool(np, "ceva,broken-gen2"))
+		cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+
+	/* Read OOB timing value for COMINIT from device-tree */
+	if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+					(u8 *)&cevapriv->pp2c[0], 4) < 0) {
+		dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+					(u8 *)&cevapriv->pp2c[1], 4) < 0) {
+		dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+		return -EINVAL;
+	}
+
+	/* Read OOB timing value for COMWAKE from device-tree */
+	if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+					(u8 *)&cevapriv->pp3c[0], 4) < 0) {
+		dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+					(u8 *)&cevapriv->pp3c[1], 4) < 0) {
+		dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+		return -EINVAL;
+	}
+
+	/* Read phy BURST timing value from device-tree */
+	if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+					(u8 *)&cevapriv->pp4c[0], 4) < 0) {
+		dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+					(u8 *)&cevapriv->pp4c[1], 4) < 0) {
+		dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+		return -EINVAL;
+	}
+
+	/* Read phy RETRY interval timing value from device-tree */
+	if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+					(u16 *)&cevapriv->pp5c[0], 2) < 0) {
+		dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+					(u16 *)&cevapriv->pp5c[1], 2) < 0) {
+		dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if CCI is enabled for SATA. device_get_dma_attr() returns
+	 * DEV_DMA_COHERENT if CCI is enabled, so check for that value.
+	 */
+	attr = device_get_dma_attr(dev);
+	cevapriv->is_cci_enabled = (attr == DEV_DMA_COHERENT);
+
+	hpriv->plat_data = cevapriv;
+
+	/* CEVA specific initialization */
+	ahci_ceva_setup(hpriv);
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_ceva_port_info,
+					&ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static int __maybe_unused ceva_ahci_suspend(struct device *dev)
+{
+	return ahci_platform_suspend(dev);
+}
+
+static int __maybe_unused ceva_ahci_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	/* Configure CEVA specific config before resuming HBA */
+	ahci_ceva_setup(hpriv);
+
+	rc = ahci_platform_resume_host(dev);
+	if (rc)
+		goto disable_resources;
+
+	/* We resumed so update PM runtime state */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
+
+static const struct of_device_id ceva_ahci_of_match[] = {
+	{ .compatible = "ceva,ahci-1v84" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
+
+static struct platform_driver ceva_ahci_driver = {
+	.probe = ceva_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ceva_ahci_of_match,
+		.pm = &ahci_ceva_pm_ops,
+	},
+};
+module_platform_driver(ceva_ahci_driver);
+
+MODULE_DESCRIPTION("CEVA AHCI SATA platform driver");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
new file mode 100644
index 0000000..ebaa657
--- /dev/null
+++ b/drivers/ata/ahci_da850.c
@@ -0,0 +1,265 @@
+/*
+ * DaVinci DA850 AHCI SATA platform driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+#define DRV_NAME		"ahci_da850"
+#define HARDRESET_RETRIES	5
+
+/* SATA PHY Control Register offset from AHCI base */
+#define SATA_P0PHYCR_REG	0x178
+
+#define SATA_PHY_MPY(x)		((x) << 0)
+#define SATA_PHY_LOS(x)		((x) << 6)
+#define SATA_PHY_RXCDR(x)	((x) << 10)
+#define SATA_PHY_RXEQ(x)	((x) << 13)
+#define SATA_PHY_TXSWING(x)	((x) << 19)
+#define SATA_PHY_ENPLL(x)	((x) << 31)
+
+static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+			    void __iomem *ahci_base, u32 mpy)
+{
+	unsigned int val;
+
+	/* Enable SATA clock receiver */
+	val = readl(pwrdn_reg);
+	val &= ~BIT(0);
+	writel(val, pwrdn_reg);
+
+	val = SATA_PHY_MPY(mpy) | SATA_PHY_LOS(1) | SATA_PHY_RXCDR(4) |
+	      SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) | SATA_PHY_ENPLL(1);
+
+	writel(val, ahci_base + SATA_P0PHYCR_REG);
+}
+
+static u32 ahci_da850_calculate_mpy(unsigned long refclk_rate)
+{
+	u32 pll_output = 1500000000, needed;
+
+	/*
+	 * We need to determine the value of the multiplier (MPY) bits.
+	 * In order to include the 12.5 multiplier we need to first divide
+	 * the refclk rate by ten.
+	 *
+	 * __div64_32() turned out to be unreliable, sometimes returning
+	 * incorrect results.
+	 */
+	WARN((refclk_rate % 10) != 0, "refclk must be divisible by 10");
+	needed = pll_output / (refclk_rate / 10);
+
+	/*
+	 * What we have now is (multiplier * 10).
+	 *
+	 * Let's determine the actual register value we need to write.
+	 */
+
+	switch (needed) {
+	case 50:
+		return 0x1;
+	case 60:
+		return 0x2;
+	case 80:
+		return 0x4;
+	case 100:
+		return 0x5;
+	case 120:
+		return 0x6;
+	case 125:
+		return 0x7;
+	case 150:
+		return 0x8;
+	case 200:
+		return 0x9;
+	case 250:
+		return 0xa;
+	default:
+		/*
+		 * We should have divided evenly - if not, return an invalid
+		 * value.
+		 */
+		return 0;
+	}
+}
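+
+/*
+ * Worked example (illustrative): for a 100 MHz refclk,
+ * needed = 1500000000 / (100000000 / 10) = 150, i.e. a 15.0 multiplier,
+ * so ahci_da850_calculate_mpy() returns 0x8.
+ */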
+
+static int ahci_da850_softreset(struct ata_link *link,
+				unsigned int *class, unsigned long deadline)
+{
+	int pmp, ret;
+
+	pmp = sata_srst_pmp(link);
+
+	/*
+	 * There's an issue with the SATA controller on da850 SoCs: if we
+	 * enable Port Multiplier support, but the drive is connected directly
+	 * to the board, it can't be detected. As a workaround: if PMP is
+	 * enabled, we first call ahci_do_softreset() and pass it the result of
+	 * sata_srst_pmp(). If this call fails, we retry with pmp = 0.
+	 */
+	ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+	if (pmp && ret == -EBUSY)
+		return ahci_do_softreset(link, class, 0,
+					 deadline, ahci_check_ready);
+
+	return ret;
+}
+
+static int ahci_da850_hardreset(struct ata_link *link,
+				unsigned int *class, unsigned long deadline)
+{
+	int ret, retry = HARDRESET_RETRIES;
+	bool online;
+
+	/*
+	 * In order to correctly service the LCD controller of the da850 SoC,
+	 * we increased the PLL0 frequency to 456MHz from the default 300MHz.
+	 *
+	 * This made the SATA controller unstable, and the hardreset operation
+	 * does not always succeed the first time. Before giving up on bringing
+	 * up the link, retry the reset a couple of times.
+	 */
+	do {
+		ret = ahci_do_hardreset(link, class, deadline, &online);
+		if (online)
+			return ret;
+	} while (retry--);
+
+	return ret;
+}
+
+static struct ata_port_operations ahci_da850_port_ops = {
+	.inherits = &ahci_platform_ops,
+	.softreset = ahci_da850_softreset,
+	/*
+	 * No need to override .pmp_softreset - it's only used for actual
+	 * PMP-enabled ports.
+	 */
+	.hardreset = ahci_da850_hardreset,
+	.pmp_hardreset = ahci_da850_hardreset,
+};
+
+static const struct ata_port_info ahci_da850_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_da850_port_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_da850_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	void __iomem *pwrdn_reg;
+	struct resource *res;
+	struct clk *clk;
+	u32 mpy;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	/*
+	 * Internally ahci_platform_get_resources() calls clk_get(dev, NULL)
+	 * when trying to obtain the functional clock. This SATA controller
+	 * uses two clocks for which we specify two connection ids. If we don't
+	 * have the functional clock at this point, call clk_get() again with
+	 * con_id = "fck".
+	 */
+	if (!hpriv->clks[0]) {
+		clk = clk_get(dev, "fck");
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		hpriv->clks[0] = clk;
+	}
+
+	/*
+	 * The second clock used by ahci-da850 is the external REFCLK. If we
+	 * didn't get it from ahci_platform_get_resources(), let's try to
+	 * specify the con_id in clk_get().
+	 */
+	if (!hpriv->clks[1]) {
+		clk = clk_get(dev, "refclk");
+		if (IS_ERR(clk)) {
+			dev_err(dev, "unable to obtain the reference clock\n");
+			return -ENODEV;
+		}
+
+		hpriv->clks[1] = clk;
+	}
+
+	mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1]));
+	if (mpy == 0) {
+		dev_err(dev, "invalid REFCLK multiplier value: 0x%x\n", mpy);
+		return -EINVAL;
+	}
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		rc = -ENODEV;
+		goto disable_resources;
+	}
+
+	pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pwrdn_reg) {
+		rc = -ENOMEM;
+		goto disable_resources;
+	}
+
+	da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct of_device_id ahci_da850_of_match[] = {
+	{ .compatible = "ti,da850-ahci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
+
+static struct platform_driver ahci_da850_driver = {
+	.probe = ahci_da850_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_da850_of_match,
+		.pm = &ahci_da850_pm_ops,
+	},
+};
+module_platform_driver(ahci_da850_driver);
+
+MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
new file mode 100644
index 0000000..89509c3
--- /dev/null
+++ b/drivers/ata/ahci_dm816.c
@@ -0,0 +1,200 @@
+/*
+ * DaVinci DM816 AHCI SATA platform driver
+ *
+ * Copyright (C) 2017 BayLibre SAS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+
+#include "ahci.h"
+
+#define AHCI_DM816_DRV_NAME		"ahci-dm816"
+
+#define AHCI_DM816_PHY_ENPLL(x)		((x) << 0)
+#define AHCI_DM816_PHY_MPY(x)		((x) << 1)
+#define AHCI_DM816_PHY_LOS(x)		((x) << 12)
+#define AHCI_DM816_PHY_RXCDR(x)		((x) << 13)
+#define AHCI_DM816_PHY_RXEQ(x)		((x) << 16)
+#define AHCI_DM816_PHY_TXSWING(x)	((x) << 23)
+
+#define AHCI_DM816_P0PHYCR_REG		0x178
+#define AHCI_DM816_P1PHYCR_REG		0x1f8
+
+#define AHCI_DM816_PLL_OUT		1500000000LU
+
+static const unsigned long pll_mpy_table[] = {
+	  400,  500,  600,  800,  825, 1000, 1200,
+	 1250, 1500, 1600, 1650, 2000, 2200, 2500
+};
+
+static int ahci_dm816_get_mpy_bits(unsigned long refclk_rate)
+{
+	unsigned long pll_multiplier;
+	int i;
+
+	/*
+	 * We need to determine the value of the multiplier (MPY) bits.
+	 * In order to include the 8.25 multiplier we need to first divide
+	 * the refclk rate by 100.
+	 */
+	pll_multiplier = AHCI_DM816_PLL_OUT / (refclk_rate / 100);
+
+	for (i = 0; i < ARRAY_SIZE(pll_mpy_table); i++) {
+		if (pll_mpy_table[i] == pll_multiplier)
+			return i;
+	}
+
+	/*
+	 * We should have divided evenly - if not, return an invalid
+	 * value.
+	 */
+	return -1;
+}
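+
+/*
+ * Worked example (illustrative): for a 100 MHz refclk,
+ * pll_multiplier = 1500000000 / (100000000 / 100) = 1500, which matches
+ * pll_mpy_table[8] (a 15.00 multiplier), so 8 is returned.
+ */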
+
+static int ahci_dm816_phy_init(struct ahci_host_priv *hpriv, struct device *dev)
+{
+	unsigned long refclk_rate;
+	int mpy;
+	u32 val;
+
+	/*
+	 * We should have been supplied two clocks: the functional and
+	 * keep-alive clock and the external reference clock. We need the
+	 * rate of the latter to calculate the correct value of MPY bits.
+	 */
+	if (!hpriv->clks[1]) {
+		dev_err(dev, "reference clock not supplied\n");
+		return -EINVAL;
+	}
+
+	refclk_rate = clk_get_rate(hpriv->clks[1]);
+	if ((refclk_rate % 100) != 0) {
+		dev_err(dev, "reference clock rate must be divisible by 100\n");
+		return -EINVAL;
+	}
+
+	mpy = ahci_dm816_get_mpy_bits(refclk_rate);
+	if (mpy < 0) {
+		dev_err(dev, "can't calculate the MPY bits value\n");
+		return -EINVAL;
+	}
+
+	/* Enable the PHY and configure the first HBA port. */
+	val = AHCI_DM816_PHY_MPY(mpy) | AHCI_DM816_PHY_LOS(1) |
+	      AHCI_DM816_PHY_RXCDR(4) | AHCI_DM816_PHY_RXEQ(1) |
+	      AHCI_DM816_PHY_TXSWING(3) | AHCI_DM816_PHY_ENPLL(1);
+	writel(val, hpriv->mmio + AHCI_DM816_P0PHYCR_REG);
+
+	/* Configure the second HBA port. */
+	val = AHCI_DM816_PHY_LOS(1) | AHCI_DM816_PHY_RXCDR(4) |
+	      AHCI_DM816_PHY_RXEQ(1) | AHCI_DM816_PHY_TXSWING(3);
+	writel(val, hpriv->mmio + AHCI_DM816_P1PHYCR_REG);
+
+	return 0;
+}
+
+static int ahci_dm816_softreset(struct ata_link *link,
+				unsigned int *class, unsigned long deadline)
+{
+	int pmp, ret;
+
+	pmp = sata_srst_pmp(link);
+
+	/*
+	 * There's an issue with the SATA controller on DM816 SoC: if we
+	 * enable Port Multiplier support, but the drive is connected directly
+	 * to the board, it can't be detected. As a workaround: if PMP is
+	 * enabled, we first call ahci_do_softreset() and pass it the result of
+	 * sata_srst_pmp(). If this call fails, we retry with pmp = 0.
+	 */
+	ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+	if (pmp && ret == -EBUSY)
+		return ahci_do_softreset(link, class, 0,
+					 deadline, ahci_check_ready);
+
+	return ret;
+}
+
+static struct ata_port_operations ahci_dm816_port_ops = {
+	.inherits = &ahci_platform_ops,
+	.softreset = ahci_dm816_softreset,
+};
+
+static const struct ata_port_info ahci_dm816_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_dm816_port_ops,
+};
+
+static struct scsi_host_template ahci_dm816_platform_sht = {
+	AHCI_SHT(AHCI_DM816_DRV_NAME),
+};
+
+static int ahci_dm816_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_dm816_phy_init(hpriv, dev);
+	if (rc)
+		goto disable_resources;
+
+	rc = ahci_platform_init_host(pdev, hpriv,
+				     &ahci_dm816_port_info,
+				     &ahci_dm816_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_dm816_pm_ops,
+			 ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct of_device_id ahci_dm816_of_match[] = {
+	{ .compatible = "ti,dm816-ahci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
+
+static struct platform_driver ahci_dm816_driver = {
+	.probe = ahci_dm816_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = AHCI_DM816_DRV_NAME,
+		.of_match_table = ahci_dm816_of_match,
+		.pm = &ahci_dm816_pm_ops,
+	},
+};
+module_platform_driver(ahci_dm816_driver);
+
+MODULE_DESCRIPTION("DaVinci DM816 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 0000000..b00799d
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,1253 @@
+/*
+ * copyright (c) 2013 Freescale Semiconductor, Inc.
+ * Freescale IMX AHCI SATA platform driver
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci-imx"
+
+enum {
+	/* Timer 1-ms Register */
+	IMX_TIMER1MS				= 0x00e0,
+	/* Port0 PHY Control Register */
+	IMX_P0PHYCR				= 0x0178,
+	IMX_P0PHYCR_TEST_PDDQ			= 1 << 20,
+	IMX_P0PHYCR_CR_READ			= 1 << 19,
+	IMX_P0PHYCR_CR_WRITE			= 1 << 18,
+	IMX_P0PHYCR_CR_CAP_DATA			= 1 << 17,
+	IMX_P0PHYCR_CR_CAP_ADDR			= 1 << 16,
+	/* Port0 PHY Status Register */
+	IMX_P0PHYSR				= 0x017c,
+	IMX_P0PHYSR_CR_ACK			= 1 << 18,
+	IMX_P0PHYSR_CR_DATA_OUT			= 0xffff << 0,
+	/* Lane0 Output Status Register */
+	IMX_LANE0_OUT_STAT			= 0x2003,
+	IMX_LANE0_OUT_STAT_RX_PLL_STATE		= 1 << 1,
+	/* Clock Reset Register */
+	IMX_CLOCK_RESET				= 0x7f3f,
+	IMX_CLOCK_RESET_RESET			= 1 << 0,
+	/* IMX8QM HSIO AHCI definitions */
+	IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET	= 0x03,
+	IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET	= 0x09,
+	IMX8QM_SATA_PHY_IMPED_RATIO_85OHM	= 0x6c,
+	IMX8QM_LPCG_PHYX2_OFFSET		= 0x00000,
+	IMX8QM_CSR_PHYX2_OFFSET			= 0x90000,
+	IMX8QM_CSR_PHYX1_OFFSET			= 0xa0000,
+	IMX8QM_CSR_PHYX_STTS0_OFFSET		= 0x4,
+	IMX8QM_CSR_PCIEA_OFFSET			= 0xb0000,
+	IMX8QM_CSR_PCIEB_OFFSET			= 0xc0000,
+	IMX8QM_CSR_SATA_OFFSET			= 0xd0000,
+	IMX8QM_CSR_PCIE_CTRL2_OFFSET		= 0x8,
+	IMX8QM_CSR_MISC_OFFSET			= 0xe0000,
+
+	IMX8QM_LPCG_PHYX2_PCLK0_MASK		= (0x3 << 16),
+	IMX8QM_LPCG_PHYX2_PCLK1_MASK		= (0x3 << 20),
+	IMX8QM_PHY_APB_RSTN_0			= BIT(0),
+	IMX8QM_PHY_MODE_SATA			= BIT(19),
+	IMX8QM_PHY_MODE_MASK			= (0xf << 17),
+	IMX8QM_PHY_PIPE_RSTN_0			= BIT(24),
+	IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0		= BIT(25),
+	IMX8QM_PHY_PIPE_RSTN_1			= BIT(26),
+	IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1		= BIT(27),
+	IMX8QM_STTS0_LANE0_TX_PLL_LOCK		= BIT(4),
+	IMX8QM_MISC_IOB_RXENA			= BIT(0),
+	IMX8QM_MISC_IOB_TXENA			= BIT(1),
+	IMX8QM_MISC_PHYX1_EPCS_SEL		= BIT(12),
+	IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1	= BIT(24),
+	IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0	= BIT(25),
+	IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1	= BIT(28),
+	IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0	= BIT(29),
+	IMX8QM_SATA_CTRL_RESET_N		= BIT(12),
+	IMX8QM_SATA_CTRL_EPCS_PHYRESET_N	= BIT(7),
+	IMX8QM_CTRL_BUTTON_RST_N		= BIT(21),
+	IMX8QM_CTRL_POWER_UP_RST_N		= BIT(23),
+	IMX8QM_CTRL_LTSSM_ENABLE		= BIT(4),
+};
+
+enum ahci_imx_type {
+	AHCI_IMX53,
+	AHCI_IMX6Q,
+	AHCI_IMX6QP,
+	AHCI_IMX8QM,
+};
+
+struct imx_ahci_priv {
+	struct platform_device *ahci_pdev;
+	enum ahci_imx_type type;
+	struct clk *sata_clk;
+	struct clk *sata_ref_clk;
+	struct clk *ahb_clk;
+	struct clk *epcs_tx_clk;
+	struct clk *epcs_rx_clk;
+	struct clk *phy_apbclk;
+	struct clk *phy_pclk0;
+	struct clk *phy_pclk1;
+	void __iomem *phy_base;
+	int clkreq_gpio;
+	struct regmap *gpr;
+	bool no_device;
+	bool first_time;
+	u32 phy_params;
+	u32 imped_ratio;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+static void ahci_imx_host_stop(struct ata_host *host);
+
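+/*
+ * The i.MX SATA PHY exposes its internal registers through a serial
+ * control-register (CR) port: an address or data word is written to
+ * P0PHYCR, then one of the CR_CAP_ADDR/CR_CAP_DATA/CR_READ/CR_WRITE
+ * strobes is asserted and deasserted, each edge being acknowledged by
+ * the PHY through CR_ACK in P0PHYSR.  The helpers below implement this
+ * handshake.
+ */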
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+	int timeout = 10;
+	u32 crval;
+	u32 srval;
+
+	/* Assert or deassert the bit */
+	crval = readl(mmio + IMX_P0PHYCR);
+	if (assert)
+		crval |= bit;
+	else
+		crval &= ~bit;
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Wait for the cr_ack signal */
+	do {
+		srval = readl(mmio + IMX_P0PHYSR);
+		if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+			break;
+		usleep_range(100, 200);
+	} while (--timeout);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+	u32 crval = addr;
+	int ret;
+
+	/* Supply the address on cr_data_in */
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Assert the cr_cap_addr signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_cap_addr */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+	u32 crval = val;
+	int ret;
+
+	/* Supply the data on cr_data_in */
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Assert the cr_cap_data signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_cap_data */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+	if (ret)
+		return ret;
+
+	if (val & IMX_CLOCK_RESET_RESET) {
+		/*
+		 * In case we're resetting the phy, it's unable to acknowledge,
+		 * so we return immediately here.
+		 */
+		crval |= IMX_P0PHYCR_CR_WRITE;
+		writel(crval, mmio + IMX_P0PHYCR);
+		goto out;
+	}
+
+	/* Assert the cr_write signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_write */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+	if (ret)
+		return ret;
+
+out:
+	return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+	int ret;
+
+	/* Assert the cr_read signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+	if (ret)
+		return ret;
+
+	/* Capture the data from cr_data_out[] */
+	*val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+	/* Deassert cr_read */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	void __iomem *mmio = hpriv->mmio;
+	int timeout = 10;
+	u16 val;
+	int ret;
+
+	if (imxpriv->type == AHCI_IMX6QP) {
+		/* The i.MX6QP adds a SATA reset mechanism; use it here */
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
+				   IMX6Q_GPR5_SATA_SW_PD, 0);
+
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
+				   IMX6Q_GPR5_SATA_SW_RST, 0);
+		udelay(50);
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
+				   IMX6Q_GPR5_SATA_SW_RST,
+				   IMX6Q_GPR5_SATA_SW_RST);
+		return 0;
+	}
+
+	/* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+	ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+	if (ret)
+		return ret;
+	ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+	if (ret)
+		return ret;
+
+	/* Wait for PHY RX_PLL to be stable */
+	do {
+		usleep_range(100, 200);
+		ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+		if (ret)
+			return ret;
+		ret = imx_phy_reg_read(&val, mmio);
+		if (ret)
+			return ret;
+		if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+			break;
+	} while (--timeout);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
+enum {
+	/* SATA PHY Register */
+	SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT = 0x0001,
+	SATA_PHY_CR_CLOCK_DAC_CTL = 0x0008,
+	SATA_PHY_CR_CLOCK_RTUNE_CTL = 0x0009,
+	SATA_PHY_CR_CLOCK_ADC_OUT = 0x000A,
+	SATA_PHY_CR_CLOCK_MPLL_TST = 0x0017,
+};
+
+static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem *mmio)
+{
+	u16 adc_out_reg;
+	/* the sum of 80 10-bit samples can exceed a u16 */
+	u32 index, read_attempt, read_sum;
+	const u32 attempt_limit = 200;
+
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+	imx_phy_reg_write(rtune_ctl_reg, mmio);
+
+	/* Discard two dummy reads first */
+	index = 0;
+	read_attempt = 0;
+	adc_out_reg = 0;
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_ADC_OUT, mmio);
+	while (index < 2) {
+		imx_phy_reg_read(&adc_out_reg, mmio);
+		/* check if valid */
+		if (adc_out_reg & 0x400)
+			index++;
+
+		read_attempt++;
+		if (read_attempt > attempt_limit) {
+			dev_err(dev, "Read REG more than %u times!\n",
+				attempt_limit);
+			break;
+		}
+	}
+
+	index = 0;
+	read_attempt = 0;
+	read_sum = 0;
+	while (index < 80) {
+		imx_phy_reg_read(&adc_out_reg, mmio);
+		if (adc_out_reg & 0x400) {
+			read_sum = read_sum + (adc_out_reg & 0x3FF);
+			index++;
+		}
+		read_attempt++;
+		if (read_attempt > attempt_limit) {
+			dev_err(dev, "Read REG more than %u times!\n",
+				attempt_limit);
+			break;
+		}
+	}
+
+	/* Scale by 1000 before dividing so the average keeps three extra digits of precision */
+	return (read_sum * 1000) / 80;
+}
+
+/* SATA AHCI temperature monitor */
+static int sata_ahci_read_temperature(void *dev, int *temp)
+{
+	u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum;
+	u32 str1, str2, str3, str4;
+	int m1, m2, a;
+	struct ahci_host_priv *hpriv = dev_get_drvdata(dev);
+	void __iomem *mmio = hpriv->mmio;
+
+	/* check rd-wr to reg */
+	read_sum = 0;
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_CRCMP_LT_LIMIT, mmio);
+	imx_phy_reg_write(read_sum, mmio);
+	imx_phy_reg_read(&read_sum, mmio);
+	if ((read_sum & 0xffff) != 0)
+		dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+	imx_phy_reg_write(0x5A5A, mmio);
+	imx_phy_reg_read(&read_sum, mmio);
+	if ((read_sum & 0xffff) != 0x5A5A)
+		dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+	imx_phy_reg_write(0x1234, mmio);
+	imx_phy_reg_read(&read_sum, mmio);
+	if ((read_sum & 0xffff) != 0x1234)
+		dev_err(dev, "Read/Write REG error, 0x%x!\n", read_sum);
+
+	/* start temperature test */
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+	imx_phy_reg_read(&mpll_test_reg, mmio);
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+	imx_phy_reg_read(&rtune_ctl_reg, mmio);
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+	imx_phy_reg_read(&dac_ctl_reg, mmio);
+
+	/* mpll_tst.meas_iv   ([12:2]) */
+	str1 = (mpll_test_reg >> 2) & 0x7FF;
+	/* rtune_ctl.mode     ([1:0]) */
+	str2 = (rtune_ctl_reg) & 0x3;
+	/* dac_ctl.dac_mode   ([14:12]) */
+	str3 = (dac_ctl_reg >> 12)  & 0x7;
+	/* rtune_ctl.sel_atbp ([4]) */
+	str4 = (rtune_ctl_reg >> 4);
+
+	/* Calculate the m1 */
+	/* mpll_tst.meas_iv */
+	mpll_test_reg = (mpll_test_reg & 0xE03) | (512) << 2;
+	/* rtune_ctl.mode */
+	rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (1);
+	/* dac_ctl.dac_mode */
+	dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (4) << 12;
+	/* rtune_ctl.sel_atbp */
+	rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (0) << 4;
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+	imx_phy_reg_write(mpll_test_reg, mmio);
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+	imx_phy_reg_write(dac_ctl_reg, mmio);
+	m1 = read_adc_sum(dev, rtune_ctl_reg, mmio);
+
+	/* Calculate the m2 */
+	/* rtune_ctl.sel_atbp */
+	rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (1) << 4;
+	m2 = read_adc_sum(dev, rtune_ctl_reg, mmio);
+
+	/* restore the status  */
+	/* mpll_tst.meas_iv */
+	mpll_test_reg = (mpll_test_reg & 0xE03) | (str1) << 2;
+	/* rtune_ctl.mode */
+	rtune_ctl_reg = (rtune_ctl_reg & 0xFFC) | (str2);
+	/* dac_ctl.dac_mode */
+	dac_ctl_reg = (dac_ctl_reg & 0x8FF) | (str3) << 12;
+	/* rtune_ctl.sel_atbp */
+	rtune_ctl_reg = (rtune_ctl_reg & 0xFEF) | (str4) << 4;
+
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_MPLL_TST, mmio);
+	imx_phy_reg_write(mpll_test_reg, mmio);
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_DAC_CTL, mmio);
+	imx_phy_reg_write(dac_ctl_reg, mmio);
+	imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
+	imx_phy_reg_write(rtune_ctl_reg, mmio);
+
+	/* Compute temperature */
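+	/*
+	 * 'a' is the difference between the two ADC sums in per-mille
+	 * of m2 (the m2/1000 guard avoids a division by zero); the
+	 * quadratic below converts it to millidegrees Celsius, as
+	 * expected by hwmon and the thermal framework.
+	 */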
+	if (!(m2 / 1000))
+		m2 = 1000;
+	a = (m2 - m1) / (m2/1000);
+	*temp = ((-559) * a * a) / 1000 + (1379) * a + (-458000);
+
+	return 0;
+}
+
+static ssize_t sata_ahci_show_temp(struct device *dev,
+				   struct device_attribute *da,
+				   char *buf)
+{
+	int temp = 0;
+	int err;
+
+	err = sata_ahci_read_temperature(dev, &temp);
+	if (err < 0)
+		return err;
+
+	return sprintf(buf, "%d\n", temp);
+}
+
+static const struct thermal_zone_of_device_ops fsl_sata_ahci_of_thermal_ops = {
+	.get_temp = sata_ahci_read_temperature,
+};
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sata_ahci_show_temp, NULL, 0);
+
+static struct attribute *fsl_sata_ahci_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(fsl_sata_ahci);
+
+static int imx8_sata_enable(struct ahci_host_priv *hpriv)
+{
+	u32 val, reg;
+	int i, ret;
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	struct device *dev = &imxpriv->ahci_pdev->dev;
+
+	/* configure the hsio for sata */
+	ret = clk_prepare_enable(imxpriv->phy_pclk0);
+	if (ret < 0) {
+		dev_err(dev, "can't enable phy_pclk0.\n");
+		return ret;
+	}
+	ret = clk_prepare_enable(imxpriv->phy_pclk1);
+	if (ret < 0) {
+		dev_err(dev, "can't enable phy_pclk1.\n");
+		goto disable_phy_pclk0;
+	}
+	ret = clk_prepare_enable(imxpriv->epcs_tx_clk);
+	if (ret < 0) {
+		dev_err(dev, "can't enable epcs_tx_clk.\n");
+		goto disable_phy_pclk1;
+	}
+	ret = clk_prepare_enable(imxpriv->epcs_rx_clk);
+	if (ret < 0) {
+		dev_err(dev, "can't enable epcs_rx_clk.\n");
+		goto disable_epcs_tx_clk;
+	}
+	ret = clk_prepare_enable(imxpriv->phy_apbclk);
+	if (ret < 0) {
+		dev_err(dev, "can't enable phy_apbclk.\n");
+		goto disable_epcs_rx_clk;
+	}
+	/* Configure PHYx2 PIPE_RSTN */
+	regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET +
+			IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val);
+	if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
+		/* The link of the PCIEA of HSIO is down */
+		regmap_update_bits(imxpriv->gpr,
+				IMX8QM_CSR_PHYX2_OFFSET,
+				IMX8QM_PHY_PIPE_RSTN_0 |
+				IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0,
+				IMX8QM_PHY_PIPE_RSTN_0 |
+				IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0);
+	}
+	regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET +
+			IMX8QM_CSR_PCIE_CTRL2_OFFSET, &reg);
+	if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
+		/* The link of the PCIEB of HSIO is down */
+		regmap_update_bits(imxpriv->gpr,
+				IMX8QM_CSR_PHYX2_OFFSET,
+				IMX8QM_PHY_PIPE_RSTN_1 |
+				IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1,
+				IMX8QM_PHY_PIPE_RSTN_1 |
+				IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1);
+	}
+	if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
+		/* The links of both PCIEA and PCIEB of HSIO are down */
+		regmap_update_bits(imxpriv->gpr,
+				IMX8QM_LPCG_PHYX2_OFFSET,
+				IMX8QM_LPCG_PHYX2_PCLK0_MASK |
+				IMX8QM_LPCG_PHYX2_PCLK1_MASK,
+				0);
+	}
+
+	/* set PWR_RST and BT_RST of csr_pciea */
+	val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET;
+	regmap_update_bits(imxpriv->gpr,
+			val,
+			IMX8QM_CTRL_BUTTON_RST_N,
+			IMX8QM_CTRL_BUTTON_RST_N);
+	regmap_update_bits(imxpriv->gpr,
+			val,
+			IMX8QM_CTRL_POWER_UP_RST_N,
+			IMX8QM_CTRL_POWER_UP_RST_N);
+
+	/* PHYX1_MODE to SATA */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_PHYX1_OFFSET,
+			IMX8QM_PHY_MODE_MASK,
+			IMX8QM_PHY_MODE_SATA);
+
+	/*
+	 * BIT0 RXENA 1, BIT1 TXENA 0
+	 * BIT12 PHY_X1_EPCS_SEL 1.
+	 */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_MISC_OFFSET,
+			IMX8QM_MISC_IOB_RXENA,
+			IMX8QM_MISC_IOB_RXENA);
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_MISC_OFFSET,
+			IMX8QM_MISC_IOB_TXENA,
+			0);
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_MISC_OFFSET,
+			IMX8QM_MISC_PHYX1_EPCS_SEL,
+			IMX8QM_MISC_PHYX1_EPCS_SEL);
+	/*
+	 * PCIe and SATA may share the same clock source (HPLL or an
+	 * external oscillator).  When PCIe enters a low power mode
+	 * (L1.x, L2, etc.), that clock source can be turned off even
+	 * while SATA still needs it, leaving SATA in a broken state.
+	 * Set the CLKREQ overrides here to avoid this.
+	 */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_MISC_OFFSET,
+			IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
+			IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
+			IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
+			IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0,
+			IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
+			IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
+			IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
+			IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0);
+
+	/* clear PHY RST, then set it */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_SATA_OFFSET,
+			IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
+			0);
+
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_SATA_OFFSET,
+			IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
+			IMX8QM_SATA_CTRL_EPCS_PHYRESET_N);
+
+	/* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_SATA_OFFSET,
+			IMX8QM_SATA_CTRL_RESET_N,
+			IMX8QM_SATA_CTRL_RESET_N);
+	udelay(1);
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_SATA_OFFSET,
+			IMX8QM_SATA_CTRL_RESET_N,
+			0);
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_SATA_OFFSET,
+			IMX8QM_SATA_CTRL_RESET_N,
+			IMX8QM_SATA_CTRL_RESET_N);
+
+	/* APB reset */
+	regmap_update_bits(imxpriv->gpr,
+			IMX8QM_CSR_PHYX1_OFFSET,
+			IMX8QM_PHY_APB_RSTN_0,
+			IMX8QM_PHY_APB_RSTN_0);
+
+	for (i = 0; i < 100; i++) {
+		reg = IMX8QM_CSR_PHYX1_OFFSET +
+			IMX8QM_CSR_PHYX_STTS0_OFFSET;
+		regmap_read(imxpriv->gpr, reg, &val);
+		val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
+		if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK)
+			break;
+		udelay(1);
+	}
+
+	if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) {
+		dev_err(dev, "TX PLL of the PHY is not locked\n");
+		ret = -ENODEV;
+	} else {
+		writeb(imxpriv->imped_ratio, imxpriv->phy_base +
+				IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
+		writeb(imxpriv->imped_ratio, imxpriv->phy_base +
+				IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
+		reg = readb(imxpriv->phy_base +
+				IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
+		if (unlikely(reg != imxpriv->imped_ratio))
+			dev_info(dev, "Can't set PHY RX impedance ratio.\n");
+		reg = readb(imxpriv->phy_base +
+				IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
+		if (unlikely(reg != imxpriv->imped_ratio))
+			dev_info(dev, "Can't set PHY TX impedance ratio.\n");
+		usleep_range(50, 100);
+
+		/*
+		 * To reduce the power consumption, gate off
+		 * the PHY clks
+		 */
+		clk_disable_unprepare(imxpriv->phy_apbclk);
+		clk_disable_unprepare(imxpriv->phy_pclk1);
+		clk_disable_unprepare(imxpriv->phy_pclk0);
+		return ret;
+	}
+
+	clk_disable_unprepare(imxpriv->phy_apbclk);
+disable_epcs_rx_clk:
+	clk_disable_unprepare(imxpriv->epcs_rx_clk);
+disable_epcs_tx_clk:
+	clk_disable_unprepare(imxpriv->epcs_tx_clk);
+disable_phy_pclk1:
+	clk_disable_unprepare(imxpriv->phy_pclk1);
+disable_phy_pclk0:
+	clk_disable_unprepare(imxpriv->phy_pclk0);
+
+	return ret;
+}
+
+static int imx_sata_enable(struct ahci_host_priv *hpriv)
+{
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	struct device *dev = &imxpriv->ahci_pdev->dev;
+	int ret;
+
+	if (imxpriv->no_device)
+		return 0;
+
+	ret = ahci_platform_enable_regulators(hpriv);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+	if (ret < 0)
+		goto disable_regulator;
+
+	if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
+		/*
+		 * Set the PHY parameters.  GPR13 is configured in two
+		 * steps: the first write (mask 0x07ffffff) programs all
+		 * of the parameters, the second write then sets
+		 * mpll_clk_en.
+		 */
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				   IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+				   IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+				   IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+				   IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+				   IMX6Q_GPR13_SATA_MPLL_SS_EN |
+				   IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+				   IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+				   IMX6Q_GPR13_SATA_TX_LVL_MASK |
+				   IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+				   IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+				   imxpriv->phy_params);
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+				   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+		usleep_range(100, 200);
+
+		ret = imx_sata_phy_reset(hpriv);
+		if (ret) {
+			dev_err(dev, "failed to reset phy: %d\n", ret);
+			goto disable_clk;
+		}
+	} else if (imxpriv->type == AHCI_IMX8QM) {
+		ret = imx8_sata_enable(hpriv);
+		if (ret)
+			goto disable_clk;
+	}
+
+	usleep_range(1000, 2000);
+
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(imxpriv->sata_ref_clk);
+disable_regulator:
+	ahci_platform_disable_regulators(hpriv);
+
+	return ret;
+}
+
+static void imx_sata_disable(struct ahci_host_priv *hpriv)
+{
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+	if (imxpriv->no_device)
+		return;
+
+	switch (imxpriv->type) {
+	case AHCI_IMX6QP:
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR5,
+				   IMX6Q_GPR5_SATA_SW_PD,
+				   IMX6Q_GPR5_SATA_SW_PD);
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+				   !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+		break;
+
+	case AHCI_IMX6Q:
+		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+				   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+				   !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+		break;
+
+	case AHCI_IMX8QM:
+		clk_disable_unprepare(imxpriv->epcs_rx_clk);
+		clk_disable_unprepare(imxpriv->epcs_tx_clk);
+		break;
+
+	default:
+		break;
+	}
+
+	clk_disable_unprepare(imxpriv->sata_ref_clk);
+
+	ahci_platform_disable_regulators(hpriv);
+}
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+	u32 reg_val;
+	struct ata_device *dev;
+	struct ata_host *host = dev_get_drvdata(ap->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+	ahci_error_handler(ap);
+
+	if (!(imxpriv->first_time) || ahci_imx_hotplug)
+		return;
+
+	imxpriv->first_time = false;
+
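+	/* If any device on the link is still enabled, keep the link up */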
+	ata_for_each_dev(dev, &ap->link, ENABLED)
+		return;
+	/*
+	 * Disable the link to save power.  An i.MX AHCI port can't be
+	 * recovered without a full reset once PDDQ mode is enabled,
+	 * making it impossible to use as part of libata LPM.
+	 */
+	reg_val = readl(mmio + IMX_P0PHYCR);
+	writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
+	imx_sata_disable(hpriv);
+	imxpriv->no_device = true;
+
+	dev_info(ap->dev, "no device found, disabling link.\n");
+	dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
+}
+
+static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
+		       unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_host *host = dev_get_drvdata(ap->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	int ret = -EIO;
+
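+	/* The i.MX53 host needs the PMP-retry variant of softreset */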
+	if (imxpriv->type == AHCI_IMX53)
+		ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+	else
+		ret = ahci_ops.softreset(link, class, deadline);
+
+	return ret;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= ahci_imx_host_stop,
+	.error_handler	= ahci_imx_error_handler,
+	.softreset	= ahci_imx_softreset,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_imx_ops,
+};
+
+static const struct of_device_id imx_ahci_of_match[] = {
+	{ .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
+	{ .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
+	{ .compatible = "fsl,imx6qp-ahci", .data = (void *)AHCI_IMX6QP },
+	{ .compatible = "fsl,imx8qm-ahci", .data = (void *)AHCI_IMX8QM },
+	{},
+};
+MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
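+/*
+ * Tables mapping device tree property values (in physical units such
+ * as mV or mdB) to the corresponding IOMUXC GPR13 bit-field encodings.
+ */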
+struct reg_value {
+	u32 of_value;
+	u32 reg_value;
+};
+
+struct reg_property {
+	const char *name;
+	const struct reg_value *values;
+	size_t num_values;
+	u32 def_value;
+	u32 set_value;
+};
+
+static const struct reg_value gpr13_tx_level[] = {
+	{  937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
+	{  947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
+	{  957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
+	{  966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
+	{  976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
+	{  986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
+	{  996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
+	{ 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
+	{ 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
+	{ 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
+	{ 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
+	{ 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
+	{ 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
+	{ 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
+	{ 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
+	{ 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
+	{ 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
+	{ 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
+	{ 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
+	{ 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
+	{ 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
+	{ 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
+	{ 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
+	{ 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
+	{ 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
+	{ 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
+	{ 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
+	{ 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
+	{ 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
+	{ 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
+	{ 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
+	{ 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
+};
+
+static const struct reg_value gpr13_tx_boost[] = {
+	{    0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
+	{  370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
+	{  740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
+	{ 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
+	{ 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
+	{ 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
+	{ 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
+	{ 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
+	{ 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
+	{ 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
+	{ 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
+	{ 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
+	{ 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
+	{ 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
+	{ 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
+	{ 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
+};
+
+static const struct reg_value gpr13_tx_atten[] = {
+	{  8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
+	{  9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
+	{ 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
+	{ 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
+	{ 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
+	{ 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
+};
+
+static const struct reg_value gpr13_rx_eq[] = {
+	{  500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
+	{ 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
+	{ 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
+	{ 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
+	{ 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
+	{ 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
+	{ 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
+	{ 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
+};
+
+static const struct reg_property gpr13_props[] = {
+	{
+		.name = "fsl,transmit-level-mV",
+		.values = gpr13_tx_level,
+		.num_values = ARRAY_SIZE(gpr13_tx_level),
+		.def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
+	}, {
+		.name = "fsl,transmit-boost-mdB",
+		.values = gpr13_tx_boost,
+		.num_values = ARRAY_SIZE(gpr13_tx_boost),
+		.def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
+	}, {
+		.name = "fsl,transmit-atten-16ths",
+		.values = gpr13_tx_atten,
+		.num_values = ARRAY_SIZE(gpr13_tx_atten),
+		.def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
+	}, {
+		.name = "fsl,receive-eq-mdB",
+		.values = gpr13_rx_eq,
+		.num_values = ARRAY_SIZE(gpr13_rx_eq),
+		.def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
+	}, {
+		.name = "fsl,no-spread-spectrum",
+		.def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
+		.set_value = 0,
+	},
+};
+
+static u32 imx_ahci_parse_props(struct device *dev,
+				const struct reg_property *prop, size_t num)
+{
+	struct device_node *np = dev->of_node;
+	u32 reg_value = 0;
+	int i, j;
+
+	for (i = 0; i < num; i++, prop++) {
+		u32 of_val;
+
+		if (prop->num_values == 0) {
+			if (of_property_read_bool(np, prop->name))
+				reg_value |= prop->set_value;
+			else
+				reg_value |= prop->def_value;
+			continue;
+		}
+
+		if (of_property_read_u32(np, prop->name, &of_val)) {
+			dev_info(dev, "%s not specified, using %08x\n",
+				prop->name, prop->def_value);
+			reg_value |= prop->def_value;
+			continue;
+		}
+
+		for (j = 0; j < prop->num_values; j++) {
+			if (prop->values[j].of_value == of_val) {
+				dev_info(dev, "%s value %u, using %08x\n",
+					prop->name, of_val, prop->values[j].reg_value);
+				reg_value |= prop->values[j].reg_value;
+				break;
+			}
+		}
+
+		if (j == prop->num_values) {
+			dev_err(dev, "DT property %s has an invalid value, using default\n",
+				prop->name);
+			reg_value |= prop->def_value;
+		}
+	}
+
+	return reg_value;
+}
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
+{
+	int ret;
+	struct resource *phy_res;
+	struct platform_device *pdev = imxpriv->ahci_pdev;
+	struct device_node *np = dev->of_node;
+
+	if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio))
+		imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM;
+	phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	if (phy_res) {
+		imxpriv->phy_base = devm_ioremap(dev, phy_res->start,
+					resource_size(phy_res));
+		if (!imxpriv->phy_base) {
+			dev_err(dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
+	} else {
+		dev_err(dev, "missing *phy* reg region.\n");
+		return -ENOMEM;
+	}
+	imxpriv->gpr = syscon_regmap_lookup_by_phandle(np, "hsio");
+	if (IS_ERR(imxpriv->gpr)) {
+		dev_err(dev, "unable to find gpr registers\n");
+		return PTR_ERR(imxpriv->gpr);
+	}
+
+	imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx");
+	if (IS_ERR(imxpriv->epcs_tx_clk)) {
+		dev_err(dev, "can't get epcs_tx_clk clock.\n");
+		return PTR_ERR(imxpriv->epcs_tx_clk);
+	}
+	imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx");
+	if (IS_ERR(imxpriv->epcs_rx_clk)) {
+		dev_err(dev, "can't get epcs_rx_clk clock.\n");
+		return PTR_ERR(imxpriv->epcs_rx_clk);
+	}
+	imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0");
+	if (IS_ERR(imxpriv->phy_pclk0)) {
+		dev_err(dev, "can't get phy_pclk0 clock.\n");
+		return PTR_ERR(imxpriv->phy_pclk0);
+	}
+	imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1");
+	if (IS_ERR(imxpriv->phy_pclk1)) {
+		dev_err(dev, "can't get phy_pclk1 clock.\n");
+		return PTR_ERR(imxpriv->phy_pclk1);
+	}
+	imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk");
+	if (IS_ERR(imxpriv->phy_apbclk)) {
+		dev_err(dev, "can't get phy_apbclk clock.\n");
+		return PTR_ERR(imxpriv->phy_apbclk);
+	}
+
+	/* Fetch GPIO, then enable the external OSC */
+	imxpriv->clkreq_gpio = of_get_named_gpio(np, "clkreq-gpio", 0);
+	if (gpio_is_valid(imxpriv->clkreq_gpio)) {
+		ret = devm_gpio_request_one(dev, imxpriv->clkreq_gpio,
+					    GPIOF_OUT_INIT_LOW,
+					    "SATA CLKREQ");
+		if (ret == -EBUSY) {
+			dev_info(dev, "clkreq GPIO is already initialized.\n");
+		} else if (ret) {
+			dev_err(dev, "unable to get clkreq GPIO: %d\n", ret);
+			return ret;
+		}
+	} else if (imxpriv->clkreq_gpio == -EPROBE_DEFER) {
+		return imxpriv->clkreq_gpio;
+	}
+
+	return 0;
+}
+
+static int imx_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id;
+	struct ahci_host_priv *hpriv;
+	struct imx_ahci_priv *imxpriv;
+	unsigned int reg_val;
+	int ret;
+
+	of_id = of_match_device(imx_ahci_of_match, dev);
+	if (!of_id)
+		return -EINVAL;
+
+	imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+	if (!imxpriv)
+		return -ENOMEM;
+
+	imxpriv->ahci_pdev = pdev;
+	imxpriv->no_device = false;
+	imxpriv->first_time = true;
+	imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+	imxpriv->sata_clk = devm_clk_get(dev, "sata");
+	if (IS_ERR(imxpriv->sata_clk)) {
+		dev_err(dev, "can't get sata clock.\n");
+		return PTR_ERR(imxpriv->sata_clk);
+	}
+
+	imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+	if (IS_ERR(imxpriv->sata_ref_clk)) {
+		dev_err(dev, "can't get sata_ref clock.\n");
+		return PTR_ERR(imxpriv->sata_ref_clk);
+	}
+
+	imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(imxpriv->ahb_clk)) {
+		dev_err(dev, "can't get ahb clock.\n");
+		return PTR_ERR(imxpriv->ahb_clk);
+	}
+
+	if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
+		u32 reg_value;
+
+		imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+							"fsl,imx6q-iomuxc-gpr");
+		if (IS_ERR(imxpriv->gpr)) {
+			dev_err(dev,
+				"failed to find fsl,imx6q-iomuxc-gpr regmap\n");
+			return PTR_ERR(imxpriv->gpr);
+		}
+
+		reg_value = imx_ahci_parse_props(dev, gpr13_props,
+						 ARRAY_SIZE(gpr13_props));
+
+		imxpriv->phy_params =
+				   IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+				   IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+				   IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+				   reg_value;
+	} else if (imxpriv->type == AHCI_IMX8QM) {
+		ret = imx8_sata_probe(dev, imxpriv);
+		if (ret)
+			return ret;
+	}
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	hpriv->plat_data = imxpriv;
+
+	ret = clk_prepare_enable(imxpriv->sata_clk);
+	if (ret)
+		return ret;
+
+	if (imxpriv->type == AHCI_IMX53 &&
+	    IS_ENABLED(CONFIG_HWMON)) {
+		/* Add the temperature monitor */
+		struct device *hwmon_dev;
+
+		hwmon_dev =
+			devm_hwmon_device_register_with_groups(dev,
+							"sata_ahci",
+							hpriv,
+							fsl_sata_ahci_groups);
+		if (IS_ERR(hwmon_dev)) {
+			ret = PTR_ERR(hwmon_dev);
+			goto disable_clk;
+		}
+		devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
+					     &fsl_sata_ahci_of_thermal_ops);
+		dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev));
+	}
+
+	ret = imx_sata_enable(hpriv);
+	if (ret)
+		goto disable_clk;
+
+	/*
+	 * Configure the HWINIT bits of HOST_CAP and HOST_PORTS_IMPL,
+	 * plus the IP-vendor-specific register IMX_TIMER1MS:
+	 * set CAP_SSS (staggered spin-up support), mark port 0 as
+	 * implemented, and program TIMER1MS from the AHB clock rate.
+	 */
+	reg_val = readl(hpriv->mmio + HOST_CAP);
+	if (!(reg_val & HOST_CAP_SSS)) {
+		reg_val |= HOST_CAP_SSS;
+		writel(reg_val, hpriv->mmio + HOST_CAP);
+	}
+	reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
+	if (!(reg_val & 0x1)) {
+		reg_val |= 0x1;
+		writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
+	}
+
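+	/* The Timer 1-ms register holds the number of AHB clock ticks per millisecond */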
+	reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+	writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+
+	ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
+				      &ahci_platform_sht);
+	if (ret)
+		goto disable_sata;
+
+	return 0;
+
+disable_sata:
+	imx_sata_disable(hpriv);
+disable_clk:
+	clk_disable_unprepare(imxpriv->sata_clk);
+	return ret;
+}
+
+static void ahci_imx_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+	imx_sata_disable(hpriv);
+	clk_disable_unprepare(imxpriv->sata_clk);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx_ahci_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int ret;
+
+	ret = ahci_platform_suspend_host(dev);
+	if (ret)
+		return ret;
+
+	imx_sata_disable(hpriv);
+
+	return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int ret;
+
+	ret = imx_sata_enable(hpriv);
+	if (ret)
+		return ret;
+
+	return ahci_platform_resume_host(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
+
+static struct platform_driver imx_ahci_driver = {
+	.probe = imx_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = imx_ahci_of_match,
+		.pm = &ahci_imx_pm_ops,
+	},
+};
+module_platform_driver(imx_ahci_driver);
+
+MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
+MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
new file mode 100644
index 0000000..8bc1a26
--- /dev/null
+++ b/drivers/ata/ahci_mtk.c
@@ -0,0 +1,196 @@
+/*
+ * MediaTek AHCI SATA driver
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "ahci.h"
+
+#define DRV_NAME		"ahci-mtk"
+
+#define SYS_CFG			0x14
+#define SYS_CFG_SATA_MSK	GENMASK(31, 30)
+#define SYS_CFG_SATA_EN		BIT(31)
+
+struct mtk_ahci_plat {
+	struct regmap *mode;
+	struct reset_control *axi_rst;
+	struct reset_control *sw_rst;
+	struct reset_control *reg_rst;
+};
+
+static const struct ata_port_info ahci_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv,
+				    struct device *dev)
+{
+	struct mtk_ahci_plat *plat = hpriv->plat_data;
+	int err;
+
+	/* reset AXI bus and PHY part */
+	plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi");
+	if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER)
+		return PTR_ERR(plat->axi_rst);
+
+	plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw");
+	if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER)
+		return PTR_ERR(plat->sw_rst);
+
+	plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg");
+	if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER)
+		return PTR_ERR(plat->reg_rst);
+
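+	/*
+	 * Assert all three resets, then release them in the reverse
+	 * order: the register interface first, then the PHY digital
+	 * part, then the AXI bus.
+	 */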
+	err = reset_control_assert(plat->axi_rst);
+	if (err) {
+		dev_err(dev, "failed to assert AXI bus\n");
+		return err;
+	}
+
+	err = reset_control_assert(plat->sw_rst);
+	if (err) {
+		dev_err(dev, "failed to assert PHY digital part\n");
+		return err;
+	}
+
+	err = reset_control_assert(plat->reg_rst);
+	if (err) {
+		dev_err(dev, "failed to assert PHY register part\n");
+		return err;
+	}
+
+	err = reset_control_deassert(plat->reg_rst);
+	if (err) {
+		dev_err(dev, "failed to deassert PHY register part\n");
+		return err;
+	}
+
+	err = reset_control_deassert(plat->sw_rst);
+	if (err) {
+		dev_err(dev, "failed to deassert PHY digital part\n");
+		return err;
+	}
+
+	err = reset_control_deassert(plat->axi_rst);
+	if (err) {
+		dev_err(dev, "failed to deassert AXI bus\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
+				   struct device *dev)
+{
+	struct mtk_ahci_plat *plat = hpriv->plat_data;
+	struct device_node *np = dev->of_node;
+
+	/* enable SATA function if needed */
+	if (of_find_property(np, "mediatek,phy-mode", NULL)) {
+		plat->mode = syscon_regmap_lookup_by_phandle(
+					np, "mediatek,phy-mode");
+		if (IS_ERR(plat->mode)) {
+			dev_err(dev, "missing phy-mode phandle\n");
+			return PTR_ERR(plat->mode);
+		}
+
+		regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK,
+				   SYS_CFG_SATA_EN);
+	}
+
+	of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map);
+
+	return 0;
+}
+
+static int mtk_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_ahci_plat *plat;
+	struct ahci_host_priv *hpriv;
+	int err;
+
+	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+	if (!plat)
+		return -ENOMEM;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	hpriv->plat_data = plat;
+
+	err = mtk_ahci_parse_property(hpriv, dev);
+	if (err)
+		return err;
+
+	err = mtk_ahci_platform_resets(hpriv, dev);
+	if (err)
+		return err;
+
+	err = ahci_platform_enable_resources(hpriv);
+	if (err)
+		return err;
+
+	err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+				      &ahci_platform_sht);
+	if (err)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct of_device_id ahci_of_match[] = {
+	{ .compatible = "mediatek,mtk-ahci", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static struct platform_driver mtk_ahci_driver = {
+	.probe = mtk_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_of_match,
+		.pm = &ahci_pm_ops,
+	},
+};
+module_platform_driver(mtk_ahci_driver);
+
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
new file mode 100644
index 0000000..f9cb51b
--- /dev/null
+++ b/drivers/ata/ahci_mvebu.c
@@ -0,0 +1,220 @@
+/*
+ * AHCI glue platform driver for Marvell EBU SOCs
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci-mvebu"
+
+#define AHCI_VENDOR_SPECIFIC_0_ADDR  0xa0
+#define AHCI_VENDOR_SPECIFIC_0_DATA  0xa4
+
+#define AHCI_WINDOW_CTRL(win)	(0x60 + ((win) << 4))
+#define AHCI_WINDOW_BASE(win)	(0x64 + ((win) << 4))
+#define AHCI_WINDOW_SIZE(win)	(0x68 + ((win) << 4))
+
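+/*
+ * The AHCI controller reaches DRAM through the Marvell MBus.  Mirror
+ * the SoC-level DRAM address decoding windows (one per chip-select)
+ * into the controller's local window registers so its DMA decodes the
+ * same address ranges.
+ */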
+static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
+				   const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		writel(0, hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(((cs->size - 1) & 0xffff0000),
+		       hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+}
+
+static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
+{
+	/*
+	 * Enable the regret bit to allow the SATA unit to regret a
+	 * request that didn't receive an acknowledge and avoid a
+	 * deadlock
+	 */
+	writel(0x4, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+}
+
+/**
+ * ahci_mvebu_stop_engine
+ *
+ * @ap:	Target ata port
+ *
+ * Errata Ref#226 - SATA Disk HOT swap issue when connected through
+ * Port Multiplier in FIS-based Switching mode.
+ *
+ * To avoid the issue, according to design, the bits[11:8, 0] of
+ * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
+ * changes its value from 1 to 0, i.e. falling edge of Port
+ * Command and Status bit[0] sends PULSE that resets PxFBS
+ * bits[11:8; 0].
+ *
+ * This function overrides ahci_stop_engine() from libahci.c with the
+ * mvebu workaround (WA): save the PxFBS value before PxCMD ST is
+ * written with 0, then restore the PxFBS value afterwards.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 tmp, port_fbs;
+
+	tmp = readl(port_mmio + PORT_CMD);
+
+	/* check if the HBA is idle */
+	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+		return 0;
+
+	/* save the port PxFBS register for later restore */
+	port_fbs = readl(port_mmio + PORT_FBS);
+
+	/* setting HBA to idle */
+	tmp &= ~PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	/*
+	 * Clearing PxCMD ST above has already pulsed the PxFBS reset
+	 * (see the errata description); restore the saved PxFBS value
+	 * right away, without waiting for PxCMD bit #15 to clear.
+	 */
+	writel(port_fbs, port_mmio + PORT_FBS);
+
+	/* wait for engine to stop. This could be as long as 500 msec */
+	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+	if (tmp & PORT_CMD_LIST_ON)
+		return -EIO;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return ahci_platform_suspend_host(&pdev->dev);
+}
+
+static int ahci_mvebu_resume(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	const struct mbus_dram_target_info *dram;
+
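+	/* The MBus windows and the regret option may be lost across suspend; reprogram them */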
+	dram = mv_mbus_dram_info();
+	if (dram)
+		ahci_mvebu_mbus_config(hpriv, dram);
+
+	ahci_mvebu_regret_option(hpriv);
+
+	return ahci_platform_resume_host(&pdev->dev);
+}
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif
+
+static const struct ata_port_info ahci_mvebu_port_info = {
+	.flags	   = AHCI_FLAG_COMMON,
+	.pio_mask  = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops  = &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_mvebu_probe(struct platform_device *pdev)
+{
+	struct ahci_host_priv *hpriv;
+	const struct mbus_dram_target_info *dram;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	hpriv->stop_engine = ahci_mvebu_stop_engine;
+
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "marvell,armada-380-ahci")) {
+		dram = mv_mbus_dram_info();
+		if (!dram)
+			return -ENODEV;
+
+		ahci_mvebu_mbus_config(hpriv, dram);
+		ahci_mvebu_regret_option(hpriv);
+	}
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static const struct of_device_id ahci_mvebu_of_match[] = {
+	{ .compatible = "marvell,armada-380-ahci", },
+	{ .compatible = "marvell,armada-3700-ahci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
+
+/*
+ * We currently don't provide power management related operations,
+ * since there is no suspend/resume support at the platform level for
+ * Armada 38x for the moment.
+ */
+static struct platform_driver ahci_mvebu_driver = {
+	.probe = ahci_mvebu_probe,
+	.remove = ata_platform_remove_one,
+	.suspend = ahci_mvebu_suspend,
+	.resume = ahci_mvebu_resume,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_mvebu_of_match,
+	},
+};
+module_platform_driver(ahci_mvebu_driver);
+
+MODULE_DESCRIPTION("Marvell EBU AHCI SATA driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>, Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci_mvebu");
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
new file mode 100644
index 0000000..5a44e08
--- /dev/null
+++ b/drivers/ata/ahci_octeon.c
@@ -0,0 +1,100 @@
+/*
+ * SATA glue for Cavium Octeon III SOCs.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010-2015 Cavium Networks
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/bitfield.h>
+
+#define CVMX_SATA_UCTL_SHIM_CFG		0xE8
+
+#define SATA_UCTL_ENDIAN_MODE_BIG	1
+#define SATA_UCTL_ENDIAN_MODE_LITTLE	0
+#define SATA_UCTL_ENDIAN_MODE_MASK	3
+
+#define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT	8
+#define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT	0
+#define SATA_UCTL_DMA_READ_CMD_SHIFT	12
+
+static int ahci_octeon_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+	u64 cfg;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
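+	/*
+	 * Program the UCTL shim so that both DMA and CSR accesses use
+	 * the CPU's native endianness, and set the DMA read command
+	 * bit.
+	 */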
+	cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG);
+
+	cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT);
+	cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT);
+
+#ifdef __BIG_ENDIAN
+	cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+	cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#else
+	cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+	cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#endif
+
+	cfg |= 1 << SATA_UCTL_DMA_READ_CMD_SHIFT;
+
+	cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg);
+
+	if (!node) {
+		dev_err(dev, "no device node, failed to add octeon sata\n");
+		return -ENODEV;
+	}
+
+	ret = of_platform_populate(node, NULL, NULL, dev);
+	if (ret) {
+		dev_err(dev, "failed to add ahci-platform core\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ahci_octeon_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id octeon_ahci_match[] = {
+	{ .compatible = "cavium,octeon-7130-sata-uctl", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, octeon_ahci_match);
+
+static struct platform_driver ahci_octeon_driver = {
+	.probe          = ahci_octeon_probe,
+	.remove         = ahci_octeon_remove,
+	.driver         = {
+		.name   = "octeon-ahci",
+		.of_match_table = octeon_ahci_match,
+	},
+};
+
+module_platform_driver(ahci_octeon_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium, Inc. Octeon III SATA glue driver");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 0000000..46f0bd7
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,109 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005  Red Hat, Inc.
+ *   Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010  MontaVista Software, LLC.
+ *   Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci"
+
+static const struct ata_port_info ahci_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev,
+					    AHCI_PLATFORM_GET_RESETS);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
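+	/*
+	 * An optional "ports-implemented" DT property may override the
+	 * port map advertised by the controller.
+	 */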
+	of_property_read_u32(dev->of_node,
+			     "ports-implemented", &hpriv->force_port_map);
+
+	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct of_device_id ahci_of_match[] = {
+	{ .compatible = "generic-ahci", },
+	/* Keep the following compatibles for device tree compatibility */
+	{ .compatible = "snps,spear-ahci", },
+	{ .compatible = "ibm,476gtr-ahci", },
+	{ .compatible = "snps,dwc-ahci", },
+	{ .compatible = "hisilicon,hisi-ahci", },
+	{ .compatible = "cavium,octeon-7130-ahci", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_driver = {
+	.probe = ahci_probe,
+	.remove = ata_platform_remove_one,
+	.shutdown = ahci_platform_shutdown,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_of_match,
+		.acpi_match_table = ahci_acpi_match,
+		.pm = &ahci_pm_ops,
+	},
+};
+module_platform_driver(ahci_driver);
+
+MODULE_DESCRIPTION("AHCI SATA platform driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
new file mode 100644
index 0000000..ce59253
--- /dev/null
+++ b/drivers/ata/ahci_qoriq.c
@@ -0,0 +1,347 @@
+/*
+ * Freescale QorIQ AHCI SATA platform driver
+ *
+ * Copyright 2015 Freescale, Inc.
+ *   Tang Yuantian <Yuantian.Tang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/ahci_platform.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci-qoriq"
+
+/* port register definition */
+#define PORT_PHY1	0xA8
+#define PORT_PHY2	0xAC
+#define PORT_PHY3	0xB0
+#define PORT_PHY4	0xB4
+#define PORT_PHY5	0xB8
+#define PORT_AXICC	0xBC
+#define PORT_TRANS	0xC8
+
+/* port register default value */
+#define AHCI_PORT_PHY_1_CFG	0xa003fffe
+#define AHCI_PORT_PHY2_CFG	0x28184d1f
+#define AHCI_PORT_PHY3_CFG	0x0e081509
+#define AHCI_PORT_TRANS_CFG	0x08000029
+#define AHCI_PORT_AXICC_CFG	0x3fffffff
+
+/* for ls1021a */
+#define LS1021A_PORT_PHY2	0x28183414
+#define LS1021A_PORT_PHY3	0x0e080e06
+#define LS1021A_PORT_PHY4	0x064a080b
+#define LS1021A_PORT_PHY5	0x2aa86470
+#define LS1021A_AXICC_ADDR	0xC0
+
+#define SATA_ECC_DISABLE	0x00020000
+#define ECC_DIS_ARMV8_CH2	0x80000000
+#define ECC_DIS_LS1088A		0x40000000
+
+enum ahci_qoriq_type {
+	AHCI_LS1021A,
+	AHCI_LS1043A,
+	AHCI_LS2080A,
+	AHCI_LS1046A,
+	AHCI_LS1088A,
+	AHCI_LS2088A,
+};
+
+struct ahci_qoriq_priv {
+	struct ccsr_ahci *reg_base;
+	enum ahci_qoriq_type type;
+	void __iomem *ecc_addr;
+	bool is_dmacoherent;
+};
+
+static const struct of_device_id ahci_qoriq_of_match[] = {
+	{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
+	{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
+	{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+	{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
+	{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
+	{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
+
+static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u32 px_cmd, px_is, px_val;
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_qoriq_priv *qoriq_priv = hpriv->plat_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	bool online;
+	int rc;
+	bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
+
+	DPRINTK("ENTER\n");
+
+	hpriv->stop_engine(ap);
+
+	/*
+	 * There is an erratum on LS1021A Rev 1.0 and Rev 2.0:
+	 * A-009042: The device detection initialization sequence
+	 * mistakenly resets some registers.
+	 *
+	 * Workaround for this is:
+	 * The software should read and store PxCMD and PxIS values
+	 * before issuing the device detection initialization sequence.
+	 * After the sequence is complete, software should restore the
+	 * PxCMD and PxIS with the stored values.
+	 */
+	if (ls1021a_workaround) {
+		px_cmd = readl(port_mmio + PORT_CMD);
+		px_is = readl(port_mmio + PORT_IRQ_STAT);
+	}
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = ATA_BUSY;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	rc = sata_link_hardreset(link, timing, deadline, &online,
+				 ahci_check_ready);
+
+	/* restore PxCMD and PxIS on LS1021A */
+	if (ls1021a_workaround) {
+		px_val = readl(port_mmio + PORT_CMD);
+		if (px_val != px_cmd)
+			writel(px_cmd, port_mmio + PORT_CMD);
+
+		px_val = readl(port_mmio + PORT_IRQ_STAT);
+		if (px_val != px_is)
+			writel(px_is, port_mmio + PORT_IRQ_STAT);
+	}
+
+	hpriv->start_engine(ap);
+
+	if (online)
+		*class = ahci_dev_classify(ap);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+static struct ata_port_operations ahci_qoriq_ops = {
+	.inherits	= &ahci_ops,
+	.hardreset	= ahci_qoriq_hardreset,
+};
+
+static const struct ata_port_info ahci_qoriq_port_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_qoriq_ops,
+};
+
+static struct scsi_host_template ahci_qoriq_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
+{
+	struct ahci_qoriq_priv *qpriv = hpriv->plat_data;
+	void __iomem *reg_base = hpriv->mmio;
+
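+	/*
+	 * Apply the per-SoC PHY configuration; several SoCs also need
+	 * their SATA ECC function disabled first.
+	 */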
+	switch (qpriv->type) {
+	case AHCI_LS1021A:
+		if (!qpriv->ecc_addr)
+			return -EINVAL;
+		writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
+		writel(LS1021A_PORT_PHY3, reg_base + PORT_PHY3);
+		writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
+		writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG,
+					reg_base + LS1021A_AXICC_ADDR);
+		break;
+
+	case AHCI_LS1043A:
+		if (!qpriv->ecc_addr)
+			return -EINVAL;
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+				qpriv->ecc_addr);
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
+
+	case AHCI_LS2080A:
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
+
+	case AHCI_LS1046A:
+		if (!qpriv->ecc_addr)
+			return -EINVAL;
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+				qpriv->ecc_addr);
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
+
+	case AHCI_LS1088A:
+		if (!qpriv->ecc_addr)
+			return -EINVAL;
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
+		       qpriv->ecc_addr);
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
+
+	case AHCI_LS2088A:
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		if (qpriv->is_dmacoherent)
+			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
+	}
+
+	return 0;
+}
+
+static int ahci_qoriq_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ahci_qoriq_priv *qoriq_priv;
+	const struct of_device_id *of_id;
+	struct resource *res;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	of_id = of_match_node(ahci_qoriq_of_match, np);
+	if (!of_id)
+		return -ENODEV;
+
+	qoriq_priv = devm_kzalloc(dev, sizeof(*qoriq_priv), GFP_KERNEL);
+	if (!qoriq_priv)
+		return -ENOMEM;
+
+	qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"sata-ecc");
+	if (res) {
+		qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qoriq_priv->ecc_addr))
+			return PTR_ERR(qoriq_priv->ecc_addr);
+	}
+	qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	hpriv->plat_data = qoriq_priv;
+	rc = ahci_qoriq_phy_init(hpriv);
+	if (rc)
+		goto disable_resources;
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
+				     &ahci_qoriq_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_qoriq_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_qoriq_phy_init(hpriv);
+	if (rc)
+		goto disable_resources;
+
+	rc = ahci_platform_resume_host(dev);
+	if (rc)
+		goto disable_resources;
+
+	/* We resumed so update PM runtime state */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+
+	return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
+			 ahci_qoriq_resume);
+
+static struct platform_driver ahci_qoriq_driver = {
+	.probe = ahci_qoriq_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_qoriq_of_match,
+		.pm = &ahci_qoriq_pm_ops,
+	},
+};
+module_platform_driver(ahci_qoriq_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ AHCI SATA platform driver");
+MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 0000000..e57b6f9
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit		Type		Description
+ * 31		RW		OD7.2 (activity)
+ * 30		RW		OD7.1 (locate)
+ * 29		RW		OD7.0 (fault)
+ * 28...8	RW		OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
+ * 7		RO		SGPIO feature flag
+ * 6:4		RO		Reserved
+ * 3:0		RO		Number of ports (0 means no port supported)
+ */
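+/* Per-port LED control bits start at bit 8: activity, locate, fault */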
+#define ACTIVITY_BIT_POS(x)		(8 + (3 * (x)))
+#define LOCATE_BIT_POS(x)		(ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)		(LOCATE_BIT_POS(x) + 1)
+
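+/* LED state bits within the enclosure-management LED message */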
+#define ACTIVITY_MASK			0x00010000
+#define LOCATE_MASK			0x00080000
+#define FAULT_MASK			0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size);
+
+struct seattle_plat_data {
+	void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+	.inherits		= &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+	.inherits		= &ahci_ops,
+	.transmit_led_message   = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+	.link_flags	= ATA_LFLAG_SW_ACTIVITY,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct seattle_plat_data *plat_data = hpriv->plat_data;
+	unsigned long flags;
+	int pmp;
+	struct ahci_em_priv *emp;
+	u32 val;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp >= EM_MAX_SLOTS)
+		return -EINVAL;
+	emp = &pp->em_priv[pmp];
+
+	val = ioread32(plat_data->sgpio_ctrl);
+	if (state & ACTIVITY_MASK)
+		val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+	if (state & LOCATE_MASK)
+		val |= 1 << LOCATE_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+	if (state & FAULT_MASK)
+		val |= 1 << FAULT_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+	iowrite32(val, plat_data->sgpio_ctrl);
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* save off new led state for port/slot */
+	emp->led_state = state;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+		struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+	struct device *dev = &pdev->dev;
+	struct seattle_plat_data *plat_data;
+	u32 val;
+
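+	/* On any failure below, fall back to plain AHCI without LED control */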
+	plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+	if (!plat_data)
+		return &ahci_port_info;
+
+	plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+			      platform_get_resource(pdev, IORESOURCE_MEM, 1));
+	if (IS_ERR(plat_data->sgpio_ctrl))
+		return &ahci_port_info;
+
+	val = ioread32(plat_data->sgpio_ctrl);
+
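+	/* Bits 3:0 give the number of supported ports; zero means no SGPIO */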
+	if (!(val & 0xf))
+		return &ahci_port_info;
+
+	hpriv->em_loc = 0;
+	hpriv->em_buf_sz = 4;
+	hpriv->em_msg_type = EM_MSG_TYPE_LED;
+	hpriv->plat_data = plat_data;
+
+	dev_info(dev, "SGPIO LED control is enabled.\n");
+	return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ahci_host_priv *hpriv;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_platform_init_host(pdev, hpriv,
+				     ahci_seattle_get_port_info(pdev, hpriv),
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+	{ "AMDI0600", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+	.probe = ahci_seattle_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.acpi_match_table = ahci_acpi_match,
+		.pm = &ahci_pm_ops,
+	},
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
new file mode 100644
index 0000000..21c5c44
--- /dev/null
+++ b/drivers/ata/ahci_st.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2012 STMicroelectronics Limited
+ *
+ * Authors: Francesco Virlinzi <francesco.virlinzi@st.com>
+ *	    Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/ahci_platform.h>
+#include <linux/libata.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ahci.h"
+
+#define DRV_NAME  "st_ahci"
+
+#define ST_AHCI_OOBR			0xbc
+#define ST_AHCI_OOBR_WE			BIT(31)
+#define ST_AHCI_OOBR_CWMIN_SHIFT	24
+#define ST_AHCI_OOBR_CWMAX_SHIFT	16
+#define ST_AHCI_OOBR_CIMIN_SHIFT	8
+#define ST_AHCI_OOBR_CIMAX_SHIFT	0
+
+struct st_ahci_drv_data {
+	struct platform_device *ahci;
+	struct reset_control *pwr;
+	struct reset_control *sw_rst;
+	struct reset_control *pwr_rst;
+};
+
+static void st_ahci_configure_oob(void __iomem *mmio)
+{
+	unsigned long old_val, new_val;
+
+	new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) |
+		  (0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) |
+		  (0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) |
+		  (0x0C << ST_AHCI_OOBR_CIMAX_SHIFT);
+
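+	/* Unlock OOBR via the WE bit, program the new OOB values, then relock */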
+	old_val = readl(mmio + ST_AHCI_OOBR);
+	writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+	writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+	writel(new_val, mmio + ST_AHCI_OOBR);
+}
+
+static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
+				struct device *dev)
+{
+	struct st_ahci_drv_data *drv_data = hpriv->plat_data;
+	int err;
+
+	if (drv_data->pwr) {
+		err = reset_control_deassert(drv_data->pwr);
+		if (err) {
+			dev_err(dev, "unable to bring out of pwrdwn\n");
+			return err;
+		}
+	}
+
+	if (drv_data->sw_rst) {
+		err = reset_control_deassert(drv_data->sw_rst);
+		if (err) {
+			dev_err(dev, "unable to bring out of sw-rst\n");
+			return err;
+		}
+	}
+
+	if (drv_data->pwr_rst) {
+		err = reset_control_deassert(drv_data->pwr_rst);
+		if (err) {
+			dev_err(dev, "unable to bring out of pwr-rst\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void st_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct st_ahci_drv_data *drv_data = hpriv->plat_data;
+	struct device *dev = host->dev;
+	int err;
+
+	if (drv_data->pwr) {
+		err = reset_control_assert(drv_data->pwr);
+		if (err)
+			dev_err(dev, "unable to pwrdwn\n");
+	}
+
+	ahci_platform_disable_resources(hpriv);
+}
+
+static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
+				struct device *dev)
+{
+	struct st_ahci_drv_data *drv_data = hpriv->plat_data;
+
+	drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
+	if (IS_ERR(drv_data->pwr)) {
+		dev_info(dev, "power reset control not defined\n");
+		drv_data->pwr = NULL;
+	}
+
+	drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
+	if (IS_ERR(drv_data->sw_rst)) {
+		dev_info(dev, "soft reset control not defined\n");
+		drv_data->sw_rst = NULL;
+	}
+
+	drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
+	if (IS_ERR(drv_data->pwr_rst)) {
+		dev_dbg(dev, "power soft reset control not defined\n");
+		drv_data->pwr_rst = NULL;
+	}
+
+	return st_ahci_deassert_resets(hpriv, dev);
+}
+
+static struct ata_port_operations st_ahci_port_ops = {
+	.inherits	= &ahci_platform_ops,
+	.host_stop	= st_ahci_host_stop,
+};
+
+static const struct ata_port_info st_ahci_port_info = {
+	.flags          = AHCI_FLAG_COMMON,
+	.pio_mask       = ATA_PIO4,
+	.udma_mask      = ATA_UDMA6,
+	.port_ops       = &st_ahci_port_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int st_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct st_ahci_drv_data *drv_data;
+	struct ahci_host_priv *hpriv;
+	int err;
+
+	drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+	hpriv->plat_data = drv_data;
+
+	err = st_ahci_probe_resets(hpriv, &pdev->dev);
+	if (err)
+		return err;
+
+	err = ahci_platform_enable_resources(hpriv);
+	if (err)
+		return err;
+
+	st_ahci_configure_oob(hpriv->mmio);
+
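+	/* Optional DT property overriding the implemented-ports bitmap */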
+	of_property_read_u32(dev->of_node,
+			     "ports-implemented", &hpriv->force_port_map);
+
+	err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
+				      &ahci_platform_sht);
+	if (err) {
+		ahci_platform_disable_resources(hpriv);
+		return err;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_ahci_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct st_ahci_drv_data *drv_data = hpriv->plat_data;
+	int err;
+
+	err = ahci_platform_suspend_host(dev);
+	if (err)
+		return err;
+
+	if (drv_data->pwr) {
+		err = reset_control_assert(drv_data->pwr);
+		if (err) {
+			dev_err(dev, "unable to pwrdwn\n");
+			return err;
+		}
+	}
+
+	ahci_platform_disable_resources(hpriv);
+
+	return 0;
+}
+
+static int st_ahci_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int err;
+
+	err = ahci_platform_enable_resources(hpriv);
+	if (err)
+		return err;
+
+	err = st_ahci_deassert_resets(hpriv, dev);
+	if (err) {
+		ahci_platform_disable_resources(hpriv);
+		return err;
+	}
+
+	st_ahci_configure_oob(hpriv->mmio);
+
+	return ahci_platform_resume_host(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+
+static const struct of_device_id st_ahci_match[] = {
+	{ .compatible = "st,ahci", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_ahci_match);
+
+static struct platform_driver st_ahci_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &st_ahci_pm_ops,
+		.of_match_table = of_match_ptr(st_ahci_match),
+	},
+	.probe = st_ahci_probe,
+	.remove = ata_platform_remove_one,
+};
+module_platform_driver(st_ahci_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
+MODULE_AUTHOR("Francesco Virlinzi <francesco.virlinzi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
new file mode 100644
index 0000000..631610b
--- /dev/null
+++ b/drivers/ata/ahci_sunxi.c
@@ -0,0 +1,270 @@
+/*
+ * Allwinner sunxi AHCI SATA platform driver
+ * Copyright 2013 Olliver Schinagl <oliver@schinagl.nl>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ * Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>,
+ * Daniel Wang <danielwang@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci-sunxi"
+
+/* Insmod parameters */
+static bool enable_pmp;
+module_param(enable_pmp, bool, 0);
+MODULE_PARM_DESC(enable_pmp,
+	"Enable support for sata port multipliers, only use if you use a pmp!");
+
+#define AHCI_BISTAFR	0x00a0
+#define AHCI_BISTCR	0x00a4
+#define AHCI_BISTFCTR	0x00a8
+#define AHCI_BISTSR	0x00ac
+#define AHCI_BISTDECR	0x00b0
+#define AHCI_DIAGNR0	0x00b4
+#define AHCI_DIAGNR1	0x00b8
+#define AHCI_OOBR	0x00bc
+#define AHCI_PHYCS0R	0x00c0
+#define AHCI_PHYCS1R	0x00c4
+#define AHCI_PHYCS2R	0x00c8
+#define AHCI_TIMER1MS	0x00e0
+#define AHCI_GPARAM1R	0x00e8
+#define AHCI_GPARAM2R	0x00ec
+#define AHCI_PPARAMR	0x00f0
+#define AHCI_TESTR	0x00f4
+#define AHCI_VERSIONR	0x00f8
+#define AHCI_IDR	0x00fc
+#define AHCI_RWCR	0x00fc
+#define AHCI_P0DMACR	0x0170
+#define AHCI_P0PHYCR	0x0178
+#define AHCI_P0PHYSR	0x017c
+
+static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val &= ~(clr_val);
+	writel(reg_val, reg);
+}
+
+static void sunxi_setbits(void __iomem *reg, u32 set_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val |= set_val;
+	writel(reg_val, reg);
+}
+
+static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
+{
+	u32 reg_val;
+
+	reg_val = readl(reg);
+	reg_val &= ~(clr_val);
+	reg_val |= set_val;
+	writel(reg_val, reg);
+}
+
+static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
+{
+	return (readl(reg) >> shift) & mask;
+}
+
+static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
+{
+	u32 reg_val;
+	int timeout;
+
+	/* This magic is from the original code */
+	writel(0, reg_base + AHCI_RWCR);
+	msleep(5);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+			 (0x7 << 24),
+			 (0x5 << 24) | BIT(23) | BIT(18));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
+			 (0x3 << 16) | (0x1f << 8) | (0x3 << 6),
+			 (0x2 << 16) | (0x6 << 8) | (0x2 << 6));
+	sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
+	sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+			 (0x7 << 20), (0x3 << 20));
+	sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
+			 (0x1f << 5), (0x19 << 5));
+	msleep(5);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
+
+	timeout = 250; /* Power up takes approx 50 us */
+	do {
+		reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
+		if (reg_val == 0x02)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(dev, "PHY power up failed.\n");
+			return -EIO;
+		}
+		udelay(1);
+	} while (1);
+
+	sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
+
+	timeout = 100; /* Calibration takes approx 10 us */
+	do {
+		reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
+		if (reg_val == 0x00)
+			break;
+
+		if (--timeout == 0) {
+			dev_err(dev, "PHY calibration failed.\n");
+			return -EIO;
+		}
+		udelay(1);
+	} while (1);
+
+	msleep(15);
+
+	writel(0x7, reg_base + AHCI_RWCR);
+
+	return 0;
+}
+
+static void ahci_sunxi_start_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	/* Setup DMA before DMA start */
+	sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+
+	/* Start DMA */
+	sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
+}
+
+static const struct ata_port_info ahci_sunxi_port_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_sunxi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	hpriv->start_engine = ahci_sunxi_start_engine;
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+	if (rc)
+		goto disable_resources;
+
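+	/* 32-bit DMA only, no MSI; NCQ is forced on for this controller */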
+	hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+		       AHCI_HFLAG_YES_NCQ;
+
+	/*
+	 * The sunxi SATA controller seems unable to complete a soft reset
+	 * successfully when no PMP is attached, so disable PMP use unless
+	 * explicitly requested; otherwise directly attached disks do not work.
+	 */
+	if (!enable_pmp)
+		hpriv->flags |= AHCI_HFLAG_NO_PMP;
+
+	rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_sunxi_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+	if (rc)
+		goto disable_resources;
+
+	rc = ahci_platform_resume_host(dev);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
+			 ahci_sunxi_resume);
+
+static const struct of_device_id ahci_sunxi_of_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-ahci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
+
+static struct platform_driver ahci_sunxi_driver = {
+	.probe = ahci_sunxi_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = ahci_sunxi_of_match,
+		.pm = &ahci_sunxi_pm_ops,
+	},
+};
+module_platform_driver(ahci_sunxi_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver");
+MODULE_AUTHOR("Olliver Schinagl <oliver@schinagl.nl>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
new file mode 100644
index 0000000..004f260
--- /dev/null
+++ b/drivers/ata/ahci_tegra.c
@@ -0,0 +1,596 @@
+/*
+ * drivers/ata/ahci_tegra.c
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author:
+ *	Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+#include "ahci.h"
+
+#define DRV_NAME "tegra-ahci"
+
+#define SATA_CONFIGURATION_0				0x180
+#define SATA_CONFIGURATION_0_EN_FPCI			BIT(0)
+#define SATA_CONFIGURATION_0_CLK_OVERRIDE			BIT(31)
+
+#define SCFG_OFFSET					0x1000
+
+#define T_SATA0_CFG_1					0x04
+#define T_SATA0_CFG_1_IO_SPACE				BIT(0)
+#define T_SATA0_CFG_1_MEMORY_SPACE			BIT(1)
+#define T_SATA0_CFG_1_BUS_MASTER			BIT(2)
+#define T_SATA0_CFG_1_SERR				BIT(8)
+
+#define T_SATA0_CFG_9					0x24
+#define T_SATA0_CFG_9_BASE_ADDRESS			0x40020000
+
+#define SATA_FPCI_BAR5					0x94
+#define SATA_FPCI_BAR5_START_MASK			(0xfffffff << 4)
+#define SATA_FPCI_BAR5_START				(0x0040020 << 4)
+#define SATA_FPCI_BAR5_ACCESS_TYPE			(0x1)
+
+#define SATA_INTR_MASK					0x188
+#define SATA_INTR_MASK_IP_INT_MASK			BIT(16)
+
+#define T_SATA0_CFG_35					0x94
+#define T_SATA0_CFG_35_IDP_INDEX_MASK			(0x7ff << 2)
+#define T_SATA0_CFG_35_IDP_INDEX			(0x2a << 2)
+
+#define T_SATA0_AHCI_IDP1				0x98
+#define T_SATA0_AHCI_IDP1_DATA				(0x400040)
+
+#define T_SATA0_CFG_PHY_1				0x12c
+#define T_SATA0_CFG_PHY_1_PADS_IDDQ_EN			BIT(23)
+#define T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN		BIT(22)
+
+#define T_SATA0_NVOOB                                   0x114
+#define T_SATA0_NVOOB_COMMA_CNT_MASK                    (0xff << 16)
+#define T_SATA0_NVOOB_COMMA_CNT                         (0x07 << 16)
+#define T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK          (0x3 << 24)
+#define T_SATA0_NVOOB_SQUELCH_FILTER_MODE               (0x1 << 24)
+#define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK        (0x3 << 26)
+#define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH             (0x3 << 26)
+
+#define T_SATA_CFG_PHY_0                                0x120
+#define T_SATA_CFG_PHY_0_USE_7BIT_ALIGN_DET_FOR_SPD     BIT(11)
+#define T_SATA_CFG_PHY_0_MASK_SQUELCH                   BIT(24)
+
+#define T_SATA0_CFG2NVOOB_2				0x134
+#define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK	(0x1ff << 18)
+#define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW	(0xc << 18)
+
+#define T_SATA0_AHCI_HBA_CAP_BKDR			0x300
+#define T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP	BIT(13)
+#define T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP	BIT(14)
+#define T_SATA0_AHCI_HBA_CAP_BKDR_SALP			BIT(26)
+#define T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM		BIT(17)
+#define T_SATA0_AHCI_HBA_CAP_BKDR_SNCQ			BIT(30)
+
+#define T_SATA0_BKDOOR_CC				0x4a4
+#define T_SATA0_BKDOOR_CC_CLASS_CODE_MASK		(0xffff << 16)
+#define T_SATA0_BKDOOR_CC_CLASS_CODE			(0x0106 << 16)
+#define T_SATA0_BKDOOR_CC_PROG_IF_MASK			(0xff << 8)
+#define T_SATA0_BKDOOR_CC_PROG_IF			(0x01 << 8)
+
+#define T_SATA0_CFG_SATA				0x54c
+#define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN		BIT(12)
+
+#define T_SATA0_CFG_MISC				0x550
+
+#define T_SATA0_INDEX					0x680
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN1			0x690
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK		0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT		0
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK		(0xff << 8)
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT	8
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN2			0x694
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK		0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT		0
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK		(0xff << 12)
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT	12
+
+#define T_SATA0_CHX_PHY_CTRL2				0x69c
+#define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1		0x23
+
+#define T_SATA0_CHX_PHY_CTRL11				0x6d0
+#define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ		(0x2800 << 16)
+
+#define T_SATA0_CHX_PHY_CTRL17_0			0x6e8
+#define T_SATA0_CHX_PHY_CTRL17_0_RX_EQ_CTRL_L_GEN1	0x55010000
+#define T_SATA0_CHX_PHY_CTRL18_0			0x6ec
+#define T_SATA0_CHX_PHY_CTRL18_0_RX_EQ_CTRL_L_GEN2	0x55010000
+#define T_SATA0_CHX_PHY_CTRL20_0			0x6f4
+#define T_SATA0_CHX_PHY_CTRL20_0_RX_EQ_CTRL_H_GEN1	0x1
+#define T_SATA0_CHX_PHY_CTRL21_0			0x6f8
+#define T_SATA0_CHX_PHY_CTRL21_0_RX_EQ_CTRL_H_GEN2	0x1
+
+/* AUX Registers */
+#define SATA_AUX_MISC_CNTL_1_0				0x8
+#define SATA_AUX_MISC_CNTL_1_0_DEVSLP_OVERRIDE		BIT(17)
+#define SATA_AUX_MISC_CNTL_1_0_SDS_SUPPORT		BIT(13)
+#define SATA_AUX_MISC_CNTL_1_0_DESO_SUPPORT		BIT(15)
+
+#define SATA_AUX_RX_STAT_INT_0				0xc
+#define SATA_AUX_RX_STAT_INT_0_SATA_DEVSLP		BIT(7)
+
+#define SATA_AUX_SPARE_CFG0_0				0x18
+#define SATA_AUX_SPARE_CFG0_0_MDAT_TIMER_AFTER_PG_VALID	BIT(14)
+
+#define FUSE_SATA_CALIB					0x124
+#define FUSE_SATA_CALIB_MASK				0x3
+
+struct sata_pad_calibration {
+	u8 gen1_tx_amp;
+	u8 gen1_tx_peak;
+	u8 gen2_tx_amp;
+	u8 gen2_tx_peak;
+};
+
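+/* One entry per FUSE_SATA_CALIB fuse value (0-3); see tegra124_ahci_init() */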
+static const struct sata_pad_calibration tegra124_pad_calibration[] = {
+	{0x18, 0x04, 0x18, 0x0a},
+	{0x0e, 0x04, 0x14, 0x0a},
+	{0x0e, 0x07, 0x1a, 0x0e},
+	{0x14, 0x0e, 0x1a, 0x0e},
+};
+
+struct tegra_ahci_ops {
+	int (*init)(struct ahci_host_priv *hpriv);
+};
+
+struct tegra_ahci_soc {
+	const char *const		*supply_names;
+	u32				num_supplies;
+	bool				supports_devslp;
+	const struct tegra_ahci_ops	*ops;
+};
+
+struct tegra_ahci_priv {
+	struct platform_device	   *pdev;
+	void __iomem		   *sata_regs;
+	void __iomem		   *sata_aux_regs;
+	struct reset_control	   *sata_rst;
+	struct reset_control	   *sata_oob_rst;
+	struct reset_control	   *sata_cold_rst;
+	/* Needs special handling, cannot use ahci_platform */
+	struct clk		   *sata_clk;
+	struct regulator_bulk_data *supplies;
+	const struct tegra_ahci_soc *soc;
+};
+
+static void tegra_ahci_handle_quirks(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	u32 val;
+
+	if (tegra->sata_aux_regs && !tegra->soc->supports_devslp) {
+		val = readl(tegra->sata_aux_regs + SATA_AUX_MISC_CNTL_1_0);
+		val &= ~SATA_AUX_MISC_CNTL_1_0_SDS_SUPPORT;
+		writel(val, tegra->sata_aux_regs + SATA_AUX_MISC_CNTL_1_0);
+	}
+}
+
+static int tegra124_ahci_init(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	struct sata_pad_calibration calib;
+	int ret;
+	u32 val;
+
+	/* Pad calibration */
+	ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val);
+	if (ret)
+		return ret;
+
+	calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
+
+	writel(BIT(0), tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+	val = readl(tegra->sata_regs +
+		    SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1);
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK;
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK;
+	val |= calib.gen1_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
+	val |= calib.gen1_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
+	writel(val, tegra->sata_regs + SCFG_OFFSET +
+	       T_SATA0_CHX_PHY_CTRL1_GEN1);
+
+	val = readl(tegra->sata_regs +
+		    SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2);
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK;
+	val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK;
+	val |= calib.gen2_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
+	val |= calib.gen2_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
+	writel(val, tegra->sata_regs + SCFG_OFFSET +
+	       T_SATA0_CHX_PHY_CTRL1_GEN2);
+
+	writel(T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ,
+	       tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11);
+	writel(T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1,
+	       tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2);
+
+	writel(0, tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+	return 0;
+}
+
+static int tegra_ahci_power_on(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	int ret;
+
+	ret = regulator_bulk_enable(tegra->soc->num_supplies,
+				    tegra->supplies);
+	if (ret)
+		return ret;
+
+	ret = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SATA,
+						tegra->sata_clk,
+						tegra->sata_rst);
+	if (ret)
+		goto disable_regulators;
+
+	reset_control_assert(tegra->sata_oob_rst);
+	reset_control_assert(tegra->sata_cold_rst);
+
+	ret = ahci_platform_enable_resources(hpriv);
+	if (ret)
+		goto disable_power;
+
+	reset_control_deassert(tegra->sata_cold_rst);
+	reset_control_deassert(tegra->sata_oob_rst);
+
+	return 0;
+
+disable_power:
+	clk_disable_unprepare(tegra->sata_clk);
+
+	tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+disable_regulators:
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+
+	return ret;
+}
+
+static void tegra_ahci_power_off(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+
+	ahci_platform_disable_resources(hpriv);
+
+	reset_control_assert(tegra->sata_rst);
+	reset_control_assert(tegra->sata_oob_rst);
+	reset_control_assert(tegra->sata_cold_rst);
+
+	clk_disable_unprepare(tegra->sata_clk);
+	tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+}
+
+static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
+{
+	struct tegra_ahci_priv *tegra = hpriv->plat_data;
+	int ret;
+	u32 val;
+
+	ret = tegra_ahci_power_on(hpriv);
+	if (ret) {
+		dev_err(&tegra->pdev->dev,
+			"failed to power on AHCI controller: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Program the following SATA IPFS registers to allow SW accesses to
+	 * SATA's MMIO register range.
+	 */
+	val = readl(tegra->sata_regs + SATA_FPCI_BAR5);
+	val &= ~(SATA_FPCI_BAR5_START_MASK | SATA_FPCI_BAR5_ACCESS_TYPE);
+	val |= SATA_FPCI_BAR5_START | SATA_FPCI_BAR5_ACCESS_TYPE;
+	writel(val, tegra->sata_regs + SATA_FPCI_BAR5);
+
+	/* Program the SATA IPFS register below to enable the SATA controller */
+	val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
+	val |= SATA_CONFIGURATION_0_EN_FPCI;
+	writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
+
+	/* Electrical settings for better link stability */
+	val = T_SATA0_CHX_PHY_CTRL17_0_RX_EQ_CTRL_L_GEN1;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL17_0);
+	val = T_SATA0_CHX_PHY_CTRL18_0_RX_EQ_CTRL_L_GEN2;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL18_0);
+	val = T_SATA0_CHX_PHY_CTRL20_0_RX_EQ_CTRL_H_GEN1;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL20_0);
+	val = T_SATA0_CHX_PHY_CTRL21_0_RX_EQ_CTRL_H_GEN2;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL21_0);
+
+	/* SQUELCH filter settings; also fixes Gen3 drives being detected as Gen1 */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA_CFG_PHY_0);
+	val |= T_SATA_CFG_PHY_0_MASK_SQUELCH;
+	val &= ~T_SATA_CFG_PHY_0_USE_7BIT_ALIGN_DET_FOR_SPD;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA_CFG_PHY_0);
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_NVOOB);
+	val &= ~(T_SATA0_NVOOB_COMMA_CNT_MASK |
+		 T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK |
+		 T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK);
+	val |= (T_SATA0_NVOOB_COMMA_CNT |
+		T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH |
+		T_SATA0_NVOOB_SQUELCH_FILTER_MODE);
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_NVOOB);
+
+	/*
+	 * Change CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW from 83.3 ns to 58.8 ns
+	 */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG2NVOOB_2);
+	val &= ~T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK;
+	val |= T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG2NVOOB_2);
+
+	if (tegra->soc->ops && tegra->soc->ops->init)
+		tegra->soc->ops->init(hpriv);
+
+	/*
+	 * Program the following SATA configuration registers to
+	 * initialize SATA
+	 */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+	val |= (T_SATA0_CFG_1_IO_SPACE | T_SATA0_CFG_1_MEMORY_SPACE |
+		T_SATA0_CFG_1_BUS_MASTER | T_SATA0_CFG_1_SERR);
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+	val = T_SATA0_CFG_9_BASE_ADDRESS;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_9);
+
+	/* Program Class Code and Programming interface for SATA */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+	val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
+	val &=
+	    ~(T_SATA0_BKDOOR_CC_CLASS_CODE_MASK |
+	      T_SATA0_BKDOOR_CC_PROG_IF_MASK);
+	val |= T_SATA0_BKDOOR_CC_CLASS_CODE | T_SATA0_BKDOOR_CC_PROG_IF;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+	val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+	/* Enabling LPM capabilities through Backdoor Programming */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR);
+	val |= (T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP |
+		T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP |
+		T_SATA0_AHCI_HBA_CAP_BKDR_SALP |
+		T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM);
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR);
+
+	/*
+	 * SATA Second Level Clock Gating configuration:
+	 * enable gating of Tx/Rx clocks and drive the Pad IDDQ and
+	 * Lane IDDQ signals.
+	 */
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_35);
+	val &= ~T_SATA0_CFG_35_IDP_INDEX_MASK;
+	val |= T_SATA0_CFG_35_IDP_INDEX;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_35);
+
+	val = T_SATA0_AHCI_IDP1_DATA;
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_AHCI_IDP1);
+
+	val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_PHY_1);
+	val |= (T_SATA0_CFG_PHY_1_PADS_IDDQ_EN |
+		T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN);
+	writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_PHY_1);
+
+	/* Enabling IPFS Clock Gating */
+	val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
+	val &= ~SATA_CONFIGURATION_0_CLK_OVERRIDE;
+	writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
+
+	tegra_ahci_handle_quirks(hpriv);
+
+	/* Unmask SATA interrupts */
+	val = readl(tegra->sata_regs + SATA_INTR_MASK);
+	val |= SATA_INTR_MASK_IP_INT_MASK;
+	writel(val, tegra->sata_regs + SATA_INTR_MASK);
+
+	return 0;
+}
+
+static void tegra_ahci_controller_deinit(struct ahci_host_priv *hpriv)
+{
+	tegra_ahci_power_off(hpriv);
+}
+
+static void tegra_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	tegra_ahci_controller_deinit(hpriv);
+}
+
+static struct ata_port_operations ahci_tegra_port_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= tegra_ahci_host_stop,
+};
+
+static const struct ata_port_info ahci_tegra_port_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_tegra_port_ops,
+};
+
+static const char *const tegra124_supply_names[] = {
+	"avdd", "hvdd", "vddio", "target-5v", "target-12v"
+};
+
+static const struct tegra_ahci_ops tegra124_ahci_ops = {
+	.init = tegra124_ahci_init,
+};
+
+static const struct tegra_ahci_soc tegra124_ahci_soc = {
+	.supply_names = tegra124_supply_names,
+	.num_supplies = ARRAY_SIZE(tegra124_supply_names),
+	.supports_devslp = false,
+	.ops = &tegra124_ahci_ops,
+};
+
+static const struct tegra_ahci_soc tegra210_ahci_soc = {
+	.supports_devslp = false,
+};
+
+static const struct of_device_id tegra_ahci_of_match[] = {
+	{
+		.compatible = "nvidia,tegra124-ahci",
+		.data = &tegra124_ahci_soc
+	},
+	{
+		.compatible = "nvidia,tegra210-ahci",
+		.data = &tegra210_ahci_soc
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static int tegra_ahci_probe(struct platform_device *pdev)
+{
+	struct ahci_host_priv *hpriv;
+	struct tegra_ahci_priv *tegra;
+	struct resource *res;
+	int ret;
+	unsigned int i;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	hpriv->plat_data = tegra;
+
+	tegra->pdev = pdev;
+	tegra->soc = of_device_get_match_data(&pdev->dev);
+
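+	/*
+	 * Resource 0 is the AHCI register region, already mapped by
+	 * ahci_platform_get_resources().
+	 */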
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->sata_regs))
+		return PTR_ERR(tegra->sata_regs);
+
+	/* The AUX register region is optional. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (res) {
+		tegra->sata_aux_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(tegra->sata_aux_regs))
+			return PTR_ERR(tegra->sata_aux_regs);
+	}
+
+	tegra->sata_rst = devm_reset_control_get(&pdev->dev, "sata");
+	if (IS_ERR(tegra->sata_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata reset\n");
+		return PTR_ERR(tegra->sata_rst);
+	}
+
+	tegra->sata_oob_rst = devm_reset_control_get(&pdev->dev, "sata-oob");
+	if (IS_ERR(tegra->sata_oob_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata-oob reset\n");
+		return PTR_ERR(tegra->sata_oob_rst);
+	}
+
+	tegra->sata_cold_rst = devm_reset_control_get(&pdev->dev, "sata-cold");
+	if (IS_ERR(tegra->sata_cold_rst)) {
+		dev_err(&pdev->dev, "Failed to get sata-cold reset\n");
+		return PTR_ERR(tegra->sata_cold_rst);
+	}
+
+	tegra->sata_clk = devm_clk_get(&pdev->dev, "sata");
+	if (IS_ERR(tegra->sata_clk)) {
+		dev_err(&pdev->dev, "Failed to get sata clock\n");
+		return PTR_ERR(tegra->sata_clk);
+	}
+
+	tegra->supplies = devm_kcalloc(&pdev->dev,
+				       tegra->soc->num_supplies,
+				       sizeof(*tegra->supplies), GFP_KERNEL);
+	if (!tegra->supplies)
+		return -ENOMEM;
+
+	for (i = 0; i < tegra->soc->num_supplies; i++)
+		tegra->supplies[i].supply = tegra->soc->supply_names[i];
+
+	ret = devm_regulator_bulk_get(&pdev->dev,
+				      tegra->soc->num_supplies,
+				      tegra->supplies);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to get regulators\n");
+		return ret;
+	}
+
+	ret = tegra_ahci_controller_init(hpriv);
+	if (ret)
+		return ret;
+
+	ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info,
+				      &ahci_platform_sht);
+	if (ret)
+		goto deinit_controller;
+
+	return 0;
+
+deinit_controller:
+	tegra_ahci_controller_deinit(hpriv);
+
+	return ret;
+};
+
+static struct platform_driver tegra_ahci_driver = {
+	.probe = tegra_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = tegra_ahci_of_match,
+	},
+	/* LP0 suspend support not implemented */
+};
+module_platform_driver(tegra_ahci_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHCI SATA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
new file mode 100644
index 0000000..7e157e1
--- /dev/null
+++ b/drivers/ata/ahci_xgene.c
@@ -0,0 +1,901 @@
+/*
+ * AppliedMicro X-Gene SoC SATA Host Controller Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ *         Tuan Phan <tphan@apm.com>
+ *         Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is not currently available.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include "ahci.h"
+
+#define DRV_NAME "xgene-ahci"
+
+/* Max # of disks per controller */
+#define MAX_AHCI_CHN_PERCTR		2
+
+/* MUX CSR */
+#define SATA_ENET_CONFIG_REG		0x00000000
+#define  CFG_SATA_ENET_SELECT_MASK	0x00000001
+
+/* SATA core host controller CSR */
+#define SLVRDERRATTRIBUTES		0x00000000
+#define SLVWRERRATTRIBUTES		0x00000004
+#define MSTRDERRATTRIBUTES		0x00000008
+#define MSTWRERRATTRIBUTES		0x0000000c
+#define BUSCTLREG			0x00000014
+#define IOFMSTRWAUX			0x00000018
+#define INTSTATUSMASK			0x0000002c
+#define ERRINTSTATUS			0x00000030
+#define ERRINTSTATUSMASK		0x00000034
+
+/* SATA host AHCI CSR */
+#define PORTCFG				0x000000a4
+#define  PORTADDR_SET(dst, src) \
+		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
+#define PORTPHY1CFG		0x000000a8
+#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
+		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
+#define PORTPHY2CFG			0x000000ac
+#define PORTPHY3CFG			0x000000b0
+#define PORTPHY4CFG			0x000000b4
+#define PORTPHY5CFG			0x000000b8
+#define SCTL0				0x0000012C
+#define PORTPHY5CFG_RTCHG_SET(dst, src) \
+		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
+#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
+		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
+#define PORTAXICFG			0x000000bc
+#define PORTAXICFG_OUTTRANS_SET(dst, src) \
+		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+#define PORTRANSCFG			0x000000c8
+#define PORTRANSCFG_RXWM_SET(dst, src)		\
+		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
+
+/* SATA host controller AXI CSR */
+#define INT_SLV_TMOMASK			0x00000010
+
+/* SATA diagnostic CSR */
+#define CFG_MEM_RAM_SHUTDOWN		0x00000070
+#define BLOCK_MEM_RDY			0x00000074
+
+/* Max retry for link down */
+#define MAX_LINK_DOWN_RETRY 3
+
+enum xgene_ahci_version {
+	XGENE_AHCI_V1 = 1,
+	XGENE_AHCI_V2,
+};
+
+struct xgene_ahci_context {
+	struct ahci_host_priv *hpriv;
+	struct device *dev;
+	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
+	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
+	void __iomem *csr_core;		/* Core CSR address of IP */
+	void __iomem *csr_diag;		/* Diag CSR address of IP */
+	void __iomem *csr_axi;		/* AXI CSR address of IP */
+	void __iomem *csr_mux;		/* MUX CSR address of IP */
+};
+
+static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+{
+	dev_dbg(ctx->dev, "Release memory from shutdown\n");
+	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
+	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
+	msleep(1);	/* reset may take up to 1ms */
+	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
+		dev_err(ctx->dev, "failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * xgene_ahci_poll_reg_val- Poll a register on a specific value.
+ * @ap : ATA port of interest.
+ * @reg : Register of interest.
+ * @val : Value to be attained.
+ * @interval : waiting interval for polling.
+ * @timeout : timeout for achieving the value.
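+ *
+ * Returns the last value read from @reg; this equals @val if the poll
+ * succeeded.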
+ */
+static int xgene_ahci_poll_reg_val(struct ata_port *ap,
+				   void __iomem *reg, unsigned
+				   int val, unsigned long interval,
+				   unsigned long timeout)
+{
+	unsigned long deadline;
+	unsigned int tmp;
+
+	tmp = ioread32(reg);
+	deadline = ata_deadline(jiffies, timeout);
+
+	while (tmp != val && time_before(jiffies, deadline)) {
+		ata_msleep(ap, interval);
+		tmp = ioread32(reg);
+	}
+
+	return tmp;
+}
+
+/**
+ * xgene_ahci_restart_engine - Restart the dma engine.
+ * @ap : ATA port of interest
+ *
+ * Waits for completion of multiple commands and restarts
+ * the DMA engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 fbs;
+
+	/*
+	 * In the PMP case, multiple IDENTIFY DEVICE commands can be
+	 * outstanding in PxCI, so poll PxCI for completion of all
+	 * outstanding IDENTIFY DEVICE commands before restarting the
+	 * DMA engine.
+	 */
+	if (xgene_ahci_poll_reg_val(ap, port_mmio +
+				    PORT_CMD_ISSUE, 0x0, 1, 100))
+		return -EBUSY;
+
+	hpriv->stop_engine(ap);
+	ahci_start_fis_rx(ap);
+
+	/*
+	 * Enable the PxFBS.FBS_EN bit as it
+	 * gets cleared due to stopping the engine.
+	 */
+	if (pp->fbs_supported) {
+		fbs = readl(port_mmio + PORT_FBS);
+		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+		fbs = readl(port_mmio + PORT_FBS);
+	}
+
+	hpriv->start_engine(ap);
+
+	return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
+ * cannot clear the BSY bit after receiving the PIO setup FIS, which causes
+ * the DMA state machine to enter the CMFatalErrorUpdate state and lock up.
+ * Restarting the DMA engine brings the controller out of the locked-up state.
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch and results in command completion failure. The
+ * workaround is to write the pmp value to PxFBS.DEV field before issuing
+ * any command to PMP.
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	int rc = 0;
+	u32 port_fbs;
+	void __iomem *port_mmio = ahci_port_base(ap);
+
+	/*
+	 * Write the pmp value to PxFBS.DEV
+	 * for the Port Multiplier case.
+	 */
+	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
+		port_fbs = readl(port_mmio + PORT_FBS);
+		port_fbs &= ~PORT_FBS_DEV_MASK;
+		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+		writel(port_fbs, port_mmio + PORT_FBS);
+	}
+
+	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+	    (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
+	    (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
+		xgene_ahci_restart_engine(ap);
+
+	rc = ahci_qc_issue(qc);
+
+	/* Save the last command issued */
+	ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+	return rc;
+}
+
+static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
+{
+	void __iomem *diagcsr = ctx->csr_diag;
+
+	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
+	        readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
+}
+
+/**
+ * xgene_ahci_read_id - Read ID data from the specified device
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * This custom read ID function is required due to the fact that the HW
+ * does not support DEVSLP.
+ */
+static unsigned int xgene_ahci_read_id(struct ata_device *dev,
+				       struct ata_taskfile *tf, u16 *id)
+{
+	u32 err_mask;
+
+	err_mask = ata_do_dev_read_id(dev, tf, id);
+	if (err_mask)
+		return err_mask;
+
+	/*
+	 * Mask the reserved area. Word 78 (Link Power Management support):
+	 * bit15-8: reserved
+	 * bit7: NCQ autosense
+	 * bit6: Software settings preservation supported
+	 * bit5: reserved
+	 * bit4: In-order SATA delivery supported
+	 * bit3: DIPM requests supported
+	 * bit2: DMA Setup FIS Auto-Activate optimization supported
+	 * bit1: DMA Setup FIS non-zero buffer offsets supported
+	 * bit0: Reserved
+	 *
+	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
+	 */
+	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+
+	return 0;
+}
+
+static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
+{
+	void __iomem *mmio = ctx->hpriv->mmio;
+	u32 val;
+
+	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
+		mmio, channel);
+	val = readl(mmio + PORTCFG);
+	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
+	writel(val, mmio + PORTCFG);
+	readl(mmio + PORTCFG);  /* Force a barrier */
+	/* Disable fix rate */
+	writel(0x0001fffe, mmio + PORTPHY1CFG);
+	readl(mmio + PORTPHY1CFG); /* Force a barrier */
+	writel(0x28183219, mmio + PORTPHY2CFG);
+	readl(mmio + PORTPHY2CFG); /* Force a barrier */
+	writel(0x13081008, mmio + PORTPHY3CFG);
+	readl(mmio + PORTPHY3CFG); /* Force a barrier */
+	writel(0x00480815, mmio + PORTPHY4CFG);
+	readl(mmio + PORTPHY4CFG); /* Force a barrier */
+	/* Set window negotiation */
+	val = readl(mmio + PORTPHY5CFG);
+	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
+	writel(val, mmio + PORTPHY5CFG);
+	readl(mmio + PORTPHY5CFG); /* Force a barrier */
+	val = readl(mmio + PORTAXICFG);
+	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
+	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
+	writel(val, mmio + PORTAXICFG);
+	readl(mmio + PORTAXICFG); /* Force a barrier */
+	/* Set the watermark threshold of the receive FIFO */
+	val = readl(mmio + PORTRANSCFG);
+	val = PORTRANSCFG_RXWM_SET(val, 0x30);
+	writel(val, mmio + PORTRANSCFG);
+}
+
+/**
+ * xgene_ahci_do_hardreset - Issue the actual COMRESET
+ * @link: link to reset
+ * @deadline: deadline jiffies for the operation
+ * @online: Return value to indicate if device online
+ *
+ * Due to a limitation of the hardware PHY, a different set of settings is
+ * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
+ * and Gen1 (1.5Gbps); otherwise, during long IO stress tests, the PHY will
+ * report disparity errors and the like. In addition, during COMRESET, errors
+ * can be reported in the PORT_SCR_ERR register. On SERR_DISPARITY and
+ * SERR_10B_8B_ERR errors, the PHY receiver line must be reset. Also, during
+ * long reboot-cycle regressions, the PHY sometimes reports link down even
+ * though the device is present, due to speed negotiation failure, so the
+ * COMRESET must be retried to bring the link up. The following algorithm is
+ * used to properly configure the hardware PHY during COMRESET:
+ *
+ * Alg Part 1:
+ * 1. Start the PHY at Gen3 speed (default setting)
+ * 2. Issue the COMRESET
+ * 3. If no link, go to Alg Part 3
+ * 4. If the link is up, determine whether the negotiated speed matches the
+ *    configured PHY speed
+ * 5. If they match, go to Alg Part 2
+ * 6. If they do not match and this is the first attempt, configure the PHY
+ *    for the negotiated disk speed and repeat step 2
+ * 7. Go to Alg Part 2
+ *
+ * Alg Part 2:
+ * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
+ *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
+ * 2. Go to Alg Part 4
+ *
+ * Alg Part 3:
+ * 1. Check PORT_SCR_STAT: if device presence was detected but PHY
+ *    communication could not be established, and fewer than
+ *    MAX_LINK_DOWN_RETRY (3) link-down attempts have been made, go to
+ *    Alg Part 1.
+ * 2. Go to Alg Part 4.
+ *
+ * Alg Part 4:
+ * 1. Clear any pending errors from the PORT_SCR_ERR register.
+ *
+ * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
+ *       until the underlying PHY supports a method to reset the receiver
+ *       line, a warning message will be printed when SERR_DISPARITY or
+ *       SERR_10B_8B_ERR errors are detected.
+ */
+static int xgene_ahci_do_hardreset(struct ata_link *link,
+				   unsigned long deadline, bool *online)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ata_taskfile tf;
+	int link_down_retry = 0;
+	int rc;
+	u32 val, sstatus;
+
+	do {
+		/* clear D2H reception area to properly wait for D2H FIS */
+		ata_tf_init(link->device, &tf);
+		tf.command = ATA_BUSY;
+		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+		rc = sata_link_hardreset(link, timing, deadline, online,
+				 ahci_check_ready);
+		if (*online) {
+			val = readl(port_mmio + PORT_SCR_ERR);
+			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+				dev_warn(ctx->dev, "link has error\n");
+			break;
+		}
+
+		sata_scr_read(link, SCR_STATUS, &sstatus);
+	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
+		 (sstatus & 0xff) == 0x1);
+
+	/* clear all errors if any pending */
+	val = readl(port_mmio + PORT_SCR_ERR);
+	writel(val, port_mmio + PORT_SCR_ERR);
+
+	return rc;
+}
+
+static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	bool online;
+	int rc;
+	u32 portcmd_saved;
+	u32 portclb_saved;
+	u32 portclbhi_saved;
+	u32 portrxfis_saved;
+	u32 portrxfishi_saved;
+
+	/* As hardreset resets these CSR, save it to restore later */
+	portcmd_saved = readl(port_mmio + PORT_CMD);
+	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
+	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
+	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+
+	hpriv->stop_engine(ap);
+
+	rc = xgene_ahci_do_hardreset(link, deadline, &online);
+
+	/* As controller hardreset clears them, restore them */
+	writel(portcmd_saved, port_mmio + PORT_CMD);
+	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
+	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
+	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
+	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
+
+	hpriv->start_engine(ap);
+
+	if (online)
+		*class = ahci_dev_classify(ap);
+
+	return rc;
+}
+
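+/**
+ * xgene_ahci_host_stop - Disable the resources held by the host.
+ * @host: ATA host to stop
+ */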
+static void xgene_ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	ahci_platform_disable_resources(hpriv);
+}
+
+/**
+ * xgene_ahci_pmp_softreset - Issue softreset to the drives connected
+ *                            to the Port Multiplier.
+ * @link: link to reset
+ * @class: Return value to indicate class of device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from the command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch, which results in command completion failure. The
+ * workaround is to write the pmp value to the PxFBS.DEV field before
+ * issuing any command to the PMP.
+ */
+static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	int pmp = sata_srst_pmp(link);
+	struct ata_port *ap = link->ap;
+	int rc;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 port_fbs;
+
+	/* Set PxFBS.DEV field with the pmp value */
+	port_fbs = readl(port_mmio + PORT_FBS);
+	port_fbs &= ~PORT_FBS_DEV_MASK;
+	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
+	writel(port_fbs, port_mmio + PORT_FBS);
+
+	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+
+	return rc;
+}
+
+/**
+ * xgene_ahci_softreset - Issue the softreset to the drive.
+ * @link: link to reset
+ * @class: Return value to indicate class of device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Due to H/W errata, the controller is unable to save the PMP
+ * field fetched from the command header before sending the H2D FIS.
+ * When the device returns the PMP port field in the D2H FIS, there is
+ * a mismatch, which results in command completion failure. The
+ * workaround is to write the pmp value to the PxFBS.DEV field before
+ * issuing any command to the PMP. Here is the algorithm used to detect
+ * a PMP:
+ *
+ * 1. Save the PxFBS value
+ * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
+ *    framework initially sends 0xF for both PMP and non-PMP
+ * 3. Issue softreset
+ * 4. If the signature class is PMP, go to 6
+ * 5. Restore the original PxFBS and go to 3
+ * 6. Return
+ */
+static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	int pmp = sata_srst_pmp(link);
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 port_fbs;
+	u32 port_fbs_save;
+	u32 retry = 1;
+	int rc;
+
+	port_fbs_save = readl(port_mmio + PORT_FBS);
+
+	/* Set PxFBS.DEV field with the pmp value */
+	port_fbs = readl(port_mmio + PORT_FBS);
+	port_fbs &= ~PORT_FBS_DEV_MASK;
+	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
+	writel(port_fbs, port_mmio + PORT_FBS);
+
+softreset_retry:
+	rc = ahci_do_softreset(link, class, pmp,
+			       deadline, ahci_check_ready);
+
+	ctx->class[ap->port_no] = *class;
+	if (*class != ATA_DEV_PMP) {
+		/*
+		 * Retry for normal drives without
+		 * setting PxFBS.DEV field with pmp value.
+		 */
+		if (retry--) {
+			writel(port_fbs_save, port_mmio + PORT_FBS);
+			goto softreset_retry;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @host: Host that received the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with a broken edge-triggered latch, the HOST_IRQ_STAT
+ * register misses an edge interrupt when the clearing of the
+ * HOST_IRQ_STAT register and the hardware reporting to the
+ * PORT_IRQ_STAT register happen in the same clock cycle.
+ * The algorithm below outlines the workaround.
+ *
+ * 1. Read the HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If the HOST_IRQ_STAT register equals zero, traverse the
+ *    PORT_IRQ_STAT registers of the remaining ports to check whether
+ *    an interrupt triggered at that point; otherwise go to step 6.
+ * 5. If the PORT_IRQ_STAT register of any of those ports is non-zero,
+ *    update the HOST_IRQ_STAT state saved in step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+					     u32 irq_masked)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *port_mmio;
+	int i;
+
+	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
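+		/*
+		 * HOST_IRQ_STAT read back as zero: poll the ports that are
+		 * not already flagged to pick up any PORT_IRQ_STAT events
+		 * the edge-triggered latch may have missed.
+		 */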
+		for (i = 0; i < host->n_ports; i++) {
+			if (irq_masked & (1 << i))
+				continue;
+
+			port_mmio = ahci_port_base(host->ports[i]);
+			if (readl(port_mmio + PORT_IRQ_STAT))
+				irq_masked |= (1 << i);
+		}
+	}
+
+	return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ahci_host_priv *hpriv;
+	unsigned int rc = 0;
+	void __iomem *mmio;
+	u32 irq_stat, irq_masked;
+
+	VPRINTK("ENTER\n");
+
+	hpriv = host->private_data;
+	mmio = hpriv->mmio;
+
+	/* sigh.  0xffffffff is a valid return from h/w */
+	irq_stat = readl(mmio + HOST_IRQ_STAT);
+	if (!irq_stat)
+		return IRQ_NONE;
+
+	irq_masked = irq_stat & hpriv->port_map;
+
+	spin_lock(&host->lock);
+
+	/*
+	 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+	 * it should be cleared before all the port events are cleared.
+	 */
+	writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+	spin_unlock(&host->lock);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(rc);
+}
+
+static struct ata_port_operations xgene_ahci_v1_ops = {
+	.inherits = &ahci_ops,
+	.host_stop = xgene_ahci_host_stop,
+	.hardreset = xgene_ahci_hardreset,
+	.read_id = xgene_ahci_read_id,
+	.qc_issue = xgene_ahci_qc_issue,
+	.softreset = xgene_ahci_softreset,
+	.pmp_softreset = xgene_ahci_pmp_softreset
+};
+
+static const struct ata_port_info xgene_ahci_v1_port_info = {
+	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
+	.pio_mask = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops = &xgene_ahci_v1_ops,
+};
+
+static struct ata_port_operations xgene_ahci_v2_ops = {
+	.inherits = &ahci_ops,
+	.host_stop = xgene_ahci_host_stop,
+	.hardreset = xgene_ahci_hardreset,
+	.read_id = xgene_ahci_read_id,
+};
+
+static const struct ata_port_info xgene_ahci_v2_port_info = {
+	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
+	.pio_mask = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops = &xgene_ahci_v2_ops,
+};
+
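+/**
+ * xgene_ahci_hw_init - One-time controller hardware initialization.
+ * @hpriv: AHCI host private data
+ *
+ * Bring the IP RAM out of shutdown, program the PHY configuration for
+ * each channel, unmask the AXI and error interrupts, and enable bus
+ * read/write coherency.
+ */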
+static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
+{
+	struct xgene_ahci_context *ctx = hpriv->plat_data;
+	int i;
+	int rc;
+	u32 val;
+
+	/* Remove IP RAM out of shutdown */
+	rc = xgene_ahci_init_memram(ctx);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
+		xgene_ahci_set_phy_cfg(ctx, i);
+
+	/* AXI disable Mask */
+	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
+	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
+	writel(0, ctx->csr_core + INTSTATUSMASK);
+	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
+	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
+		INTSTATUSMASK, val);
+
+	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
+	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
+	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
+	readl(ctx->csr_axi + INT_SLV_TMOMASK);
+
+	/* Enable AXI Interrupt */
+	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
+	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
+	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
+	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
+
+	/* Enable coherency */
+	val = readl(ctx->csr_core + BUSCTLREG);
+	val &= ~0x00000002;     /* Enable write coherency */
+	val &= ~0x00000001;     /* Enable read coherency */
+	writel(val, ctx->csr_core + BUSCTLREG);
+
+	val = readl(ctx->csr_core + IOFMSTRWAUX);
+	val |= (1 << 3);        /* Enable read coherency */
+	val |= (1 << 9);        /* Enable write coherency */
+	writel(val, ctx->csr_core + IOFMSTRWAUX);
+	val = readl(ctx->csr_core + IOFMSTRWAUX);
+	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
+		IOFMSTRWAUX, val);
+
+	return rc;
+}
+
+static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
+{
+	u32 val;
+
+	/* Check for optional MUX resource */
+	if (!ctx->csr_mux)
+		return 0;
+
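+	/*
+	 * Clear the SATA/ENET mux selection bits to route the shared PHY
+	 * to SATA, then read back to verify the selection took effect.
+	 */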
+	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+	val &= ~CFG_SATA_ENET_SELECT_MASK;
+	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
+	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
+}
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_ahci_acpi_match[] = {
+	{ "APMC0D0D", XGENE_AHCI_V1},
+	{ "APMC0D32", XGENE_AHCI_V2},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
+#endif
+
+static const struct of_device_id xgene_ahci_of_match[] = {
+	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
+	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
+
+static int xgene_ahci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct xgene_ahci_context *ctx;
+	struct resource *res;
+	const struct of_device_id *of_devid;
+	enum xgene_ahci_version version = XGENE_AHCI_V1;
+	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
+					      &xgene_ahci_v2_port_info };
+	int rc;
+
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	hpriv->plat_data = ctx;
+	ctx->hpriv = hpriv;
+	ctx->dev = dev;
+
+	/* Retrieve the IP core resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ctx->csr_core = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctx->csr_core))
+		return PTR_ERR(ctx->csr_core);
+
+	/* Retrieve the IP diagnostic resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	ctx->csr_diag = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctx->csr_diag))
+		return PTR_ERR(ctx->csr_diag);
+
+	/* Retrieve the IP AXI resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	ctx->csr_axi = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctx->csr_axi))
+		return PTR_ERR(ctx->csr_axi);
+
+	/* Retrieve the optional IP mux resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+	if (res) {
+		void __iomem *csr = devm_ioremap_resource(dev, res);
+		if (IS_ERR(csr))
+			return PTR_ERR(csr);
+
+		ctx->csr_mux = csr;
+	}
+
+	of_devid = of_match_device(xgene_ahci_of_match, dev);
+	if (of_devid) {
+		if (of_devid->data)
+			version = (enum xgene_ahci_version) of_devid->data;
+	}
+#ifdef CONFIG_ACPI
+	else {
+		const struct acpi_device_id *acpi_id;
+		struct acpi_device_info *info;
+		acpi_status status;
+
+		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
+		if (!acpi_id) {
+			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
+			version = XGENE_AHCI_V1;
+		} else if (acpi_id->driver_data) {
+			version = (enum xgene_ahci_version) acpi_id->driver_data;
+			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
+			if (ACPI_FAILURE(status)) {
+				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
+					__func__);
+				version = XGENE_AHCI_V1;
+			} else {
+				if (info->valid & ACPI_VALID_CID)
+					version = XGENE_AHCI_V2;
+				kfree(info);
+			}
+		}
+	}
+#endif
+
+	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
+		hpriv->mmio);
+
+	/* Select ATA */
+	rc = xgene_ahci_mux_select(ctx);
+	if (rc) {
+		dev_err(dev, "SATA mux selection failed error %d\n", rc);
+		return -ENODEV;
+	}
+
+	if (xgene_ahci_is_memram_inited(ctx)) {
+		dev_info(dev, "skip clock and PHY initialization\n");
+		goto skip_clk_phy;
+	}
+
+	/* Due to errata, HW requires full toggle transition */
+	rc = ahci_platform_enable_clks(hpriv);
+	if (rc)
+		goto disable_resources;
+	ahci_platform_disable_clks(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		goto disable_resources;
+
+	/* Configure the host controller */
+	xgene_ahci_hw_init(hpriv);
+skip_clk_phy:
+
+	switch (version) {
+	case XGENE_AHCI_V1:
+		hpriv->flags = AHCI_HFLAG_NO_NCQ;
+		break;
+	case XGENE_AHCI_V2:
+		hpriv->flags |= AHCI_HFLAG_YES_FBS;
+		hpriv->irq_handler = xgene_ahci_irq_intr;
+		break;
+	default:
+		break;
+	}
+
+	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static struct platform_driver xgene_ahci_driver = {
+	.probe = xgene_ahci_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = xgene_ahci_of_match,
+		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
+	},
+};
+
+module_platform_driver(xgene_ahci_driver);
+
+MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.4");
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
new file mode 100644
index 0000000..9ff545c
--- /dev/null
+++ b/drivers/ata/ata_generic.c
@@ -0,0 +1,258 @@
+/*
+ *  ata_generic.c - Generic PATA/SATA controller driver.
+ *  Copyright 2005 Red Hat Inc, all rights reserved.
+ *
+ *  Elements from ide/pci/generic.c
+ *	    Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
+ *	    Portions (C) Copyright 2002  Red Hat Inc <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  Driver for PCI IDE interfaces implementing the standard bus mastering
+ *  interface functionality. This assumes the BIOS did the drive set up and
+ *  tuning for us. By default we do not grab all IDE class devices as they
+ *  may have other drivers or need fixups to avoid problems. Instead we keep
+ *  a default list of stuff without documentation/driver that appears to
+ *  work.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "ata_generic"
+#define DRV_VERSION "0.2.15"
+
+/*
+ *	A generic parallel ATA driver using libata
+ */
+
+enum {
+	ATA_GEN_CLASS_MATCH		= (1 << 0),
+	ATA_GEN_FORCE_DMA		= (1 << 1),
+	ATA_GEN_INTEL_IDER		= (1 << 2),
+};
+
+/**
+ *	generic_set_mode	-	mode setting
+ *	@link: link to set up
+ *	@unused: returned device on error
+ *
+ *	Use a non standard set_mode function. We don't want to be tuned.
+ *	The BIOS configured everything. Our job is not to fiddle. We
+ *	read the dma enabled bits from the PCI configuration of the device
+ *	and respect them.
+ */
+
+static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_port *ap = link->ap;
+	const struct pci_device_id *id = ap->host->private_data;
+	int dma_enabled = 0;
+	struct ata_device *dev;
+
+	if (id->driver_data & ATA_GEN_FORCE_DMA) {
+		dma_enabled = 0xff;
+	} else if (ap->ioaddr.bmdma_addr) {
+		/* Bits 5 and 6 indicate if DMA is active on master/slave */
+		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	}
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = XFER_PIO_0;
+		dev->dma_mode = XFER_MW_DMA_0;
+		/* We do need the right mode information for DMA or PIO
+		   and this comes from the current configuration flags */
+		if (dma_enabled & (1 << (5 + dev->devno))) {
+			unsigned int xfer_mask = ata_id_xfermask(dev->id);
+			const char *name;
+
+			if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+				name = ata_mode_string(xfer_mask);
+			else {
+				/* SWDMA perhaps? */
+				name = "DMA";
+				xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
+			}
+
+			ata_dev_info(dev, "configured for %s\n", name);
+
+			dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
+			dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
+			dev->flags &= ~ATA_DFLAG_PIO;
+		} else {
+			ata_dev_info(dev, "configured for PIO\n");
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+	return 0;
+}
+
+static struct scsi_host_template generic_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations generic_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_mode	= generic_set_mode,
+};
+
+static int all_generic_ide;		/* Set to claim all devices */
+
+/**
+ *	is_intel_ider		-	identify intel IDE-R devices
+ *	@dev: PCI device
+ *
+ *	Distinguish Intel IDE-R controller devices from other Intel IDE
+ *	devices. IDE-R devices have no timing registers and are in
+ *	most respects virtual. They should be driven by the ata_generic
+ *	driver.
+ *
+ *	IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has
+ *	it non zero. All Intel ATA has 0x40 writable (timing), but it is
+ *	not writable on IDE-R devices (this is guaranteed).
+ */
+
+static int is_intel_ider(struct pci_dev *dev)
+{
+	/* For Intel IDE the value at 0xF8 is only zero on IDE-R
+	   interfaces */
+	u32 r;
+	u16 t;
+
+	/* Check the manufacturing ID, it will be zero for IDE-R */
+	pci_read_config_dword(dev, 0xF8, &r);
+	/* Not IDE-R: punt so that ata_(old)piix gets it */
+	if (r != 0)
+		return 0;
+	/* 0xF8 will also be zero on some early Intel IDE devices
+	   but they will have a sane timing register */
+	pci_read_config_word(dev, 0x40, &t);
+	if (t != 0)
+		return 0;
+	/* Finally check if the timing register is writable so that
+	   we eliminate any early devices hot-docked in a docking
+	   station */
+	pci_write_config_word(dev, 0x40, 1);
+	pci_read_config_word(dev, 0x40, &t);
+	if (t) {
+		pci_write_config_word(dev, 0x40, 0);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ *	ata_generic_init_one	-	attach generic IDE
+ *	@dev: PCI device found
+ *	@id: match entry
+ *
+ *	Called each time a matching IDE interface is found. We check if the
+ *	interface is one we wish to claim and if so we perform any chip
+ *	specific hacks then let the ATA layer do the heavy lifting.
+ */
+
+static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	u16 command;
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &generic_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	/* Don't use the generic entry unless instructed to do so */
+	if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
+		return -ENODEV;
+
+	if ((id->driver_data & ATA_GEN_INTEL_IDER) && !all_generic_ide)
+		if (!is_intel_ider(dev))
+			return -ENODEV;
+
+	/* Devices that need care */
+	if (dev->vendor == PCI_VENDOR_ID_UMC &&
+	    dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+	    (!(PCI_FUNC(dev->devfn) & 1)))
+		return -ENODEV;
+
+	if (dev->vendor == PCI_VENDOR_ID_OPTI &&
+	    dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
+	    (!(PCI_FUNC(dev->devfn) & 1)))
+		return -ENODEV;
+
+	/* Don't re-enable devices in generic mode or we will break some
+	   motherboards with disabled and unused IDE controllers */
+	pci_read_config_word(dev, PCI_COMMAND, &command);
+	if (!(command & PCI_COMMAND_IO))
+		return -ENODEV;
+
+	if (dev->vendor == PCI_VENDOR_ID_AL)
+		ata_pci_bmdma_clear_simplex(dev);
+
+	if (dev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(dev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(dev);
+	}
+	return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
+}
+
+static struct pci_device_id ata_generic[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8673F), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8886A), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8886BF), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HINT,   PCI_DEVICE_ID_HINT_VXPROII_IDE), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
+	  .driver_data = ATA_GEN_FORCE_DMA },
+#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3),  },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5),  },
+#endif
+	/* Intel, IDE class device */
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
+	  .driver_data = ATA_GEN_INTEL_IDER },
+	/* Must come last. If you add entries adjust this table appropriately */
+	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
+	  .driver_data = ATA_GEN_CLASS_MATCH },
+	{ 0, },
+};
+
+static struct pci_driver ata_generic_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ata_generic,
+	.probe 		= ata_generic_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(ata_generic_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for generic ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ata_generic);
+MODULE_VERSION(DRV_VERSION);
+
+module_param(all_generic_ide, int, 0);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
new file mode 100644
index 0000000..7ecb132
--- /dev/null
+++ b/drivers/ata/ata_piix.c
@@ -0,0 +1,1806 @@
+/*
+ *    ata_piix.c - Intel PATA/SATA controllers
+ *
+ *    Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *
+ *	Copyright 2003-2005 Red Hat Inc
+ *	Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ *	Copyright header from piix.c:
+ *
+ *  Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ *  Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ *  Copyright (C) 2003 Red Hat Inc
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available at http://developer.intel.com/
+ *
+ * Documentation
+ *	Publicly available from the Intel web site. Errata documentation
+ * is also publicly available. As an aid to anyone hacking on this
+ * driver, the list of relevant errata is below, going back to
+ * PIIX4. Older device documentation is now a bit tricky to find.
+ *
+ * The chipsets all follow very much the same design. The original Triton
+ * series chipsets do _not_ support independent device timings, but this
+ * is fixed in Triton II. With the odd mobile exception the chips then
+ * change little except in gaining more modes until SATA arrives. This
+ * driver supports only the chips with independent timing (that is those
+ * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
+ * for the early chip drivers.
+ *
+ * Errata of note:
+ *
+ * Unfixable
+ *	PIIX4    errata #9	- Only on ultra obscure hw
+ *	ICH3	 errata #13     - Not observed to affect real hw
+ *				  by Intel
+ *
+ * Things we must deal with
+ *	PIIX4	errata #10	- BM IDE hang with non UDMA
+ *				  (must stop/start dma to recover)
+ *	440MX   errata #15	- As PIIX4 errata #10
+ *	PIIX4	errata #15	- Must not read control registers
+ * 				  during a PIO transfer
+ *	440MX   errata #13	- As PIIX4 errata #15
+ *	ICH2	errata #21	- DMA mode 0 doesn't work right
+ *	ICH0/1  errata #55	- As ICH2 errata #21
+ *	ICH2	spec c #9	- Extra operations needed to handle
+ *				  drive hotswap [NOT YET SUPPORTED]
+ *	ICH2    spec c #20	- IDE PRD must not cross a 64K boundary
+ *				  and must be dword aligned
+ *	ICH2    spec c #24	- UDMA mode 4,5 t85/86 should be 6ns not 3.3
+ *	ICH7	errata #16	- MWDMA1 timings are incorrect
+ *
+ * Should have been BIOS fixed:
+ *	450NX:	errata #19	- DMA hangs on old 450NX
+ *	450NX:  errata #20	- DMA hangs on old 450NX
+ *	450NX:  errata #25	- Corruption with DMA on old 450NX
+ *	ICH3    errata #15      - IDE deadlock under high load
+ *				  (BIOS must set dev 31 fn 0 bit 23)
+ *	ICH3	errata #18	- Don't use native mode
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"ata_piix"
+#define DRV_VERSION	"2.13"
+
+enum {
+	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
+	ICH5_PMR		= 0x90, /* address map register */
+	ICH5_PCS		= 0x92,	/* port control and status */
+	PIIX_SIDPR_BAR		= 5,
+	PIIX_SIDPR_LEN		= 16,
+	PIIX_SIDPR_IDX		= 0,
+	PIIX_SIDPR_DATA		= 4,
+
+	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
+	PIIX_FLAG_SIDPR		= (1 << 29), /* SATA idx/data pair regs */
+
+	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
+	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+
+	PIIX_FLAG_PIO16		= (1 << 30), /* support 16bit PIO only */
+
+	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
+	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
+
+	/* constants for mapping table */
+	P0			= 0,  /* port 0 */
+	P1			= 1,  /* port 1 */
+	P2			= 2,  /* port 2 */
+	P3			= 3,  /* port 3 */
+	IDE			= -1, /* IDE */
+	NA			= -2, /* not available */
+	RV			= -3, /* reserved */
+
+	PIIX_AHCI_DEVICE	= 6,
+
+	/* host->flags bits */
+	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
+};
+
+enum piix_controller_ids {
+	/* controller IDs */
+	piix_pata_mwdma,	/* PIIX3 MWDMA only */
+	piix_pata_33,		/* PIIX4 at 33MHz */
+	ich_pata_33,		/* ICH up to UDMA 33 only */
+	ich_pata_66,		/* ICH up to 66 MHz */
+	ich_pata_100,		/* ICH up to UDMA 100 */
+	ich_pata_100_nomwdma1,	/* ICH up to UDMA 100 but with no MWDMA1*/
+	ich5_sata,
+	ich6_sata,
+	ich6m_sata,
+	ich8_sata,
+	ich8_2port_sata,
+	ich8m_apple_sata,	/* locks up on second port enable */
+	tolapai_sata,
+	piix_pata_vmw,			/* PIIX4 for VMware, spurious DMA_ERR */
+	ich8_sata_snb,
+	ich8_2port_sata_snb,
+	ich8_2port_sata_byt,
+};
+
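+/*
+ * Describes how a chipset's MAP register value routes the four logical
+ * devices (primary/secondary master/slave) onto SATA ports, legacy IDE,
+ * or nothing.  @mask selects the relevant MAP bits and @port_enable the
+ * PCS bits used to detect enabled ports.
+ */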
+struct piix_map_db {
+	const u32 mask;
+	const u16 port_enable;
+	const int map[][4];
+};
+
+struct piix_host_priv {
+	const int *map;
+	u32 saved_iocfg;
+	void __iomem *sidpr;
+};
+
+static unsigned int in_module_init = 1;
+
+static const struct pci_device_id piix_pci_tbl[] = {
+	/* Intel PIIX3 for the 430HX etc */
+	{ 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
+	/* VMware ICH4 */
+	{ 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
+	/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
+	/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
+	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX4 */
+	{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX4 */
+	{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX */
+	{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel ICH (i810, i815, i840) UDMA 66*/
+	{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
+	/* Intel ICH0 : UDMA 33*/
+	{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
+	/* Intel ICH2M */
+	{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
+	{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/*  Intel ICH3M */
+	{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH3 (E7500/1) UDMA 100 */
+	{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH4-L */
+	{ 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
+	{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH5 */
+	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* C-ICH (i810E2) */
+	{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ESB (855GME/875P + 6300ESB) UDMA 100  */
+	{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ICH6 (and 6) (i915) UDMA 100 */
+	{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ICH7/7-R (i945, i975) UDMA 100*/
+	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
+	{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
+	/* ICH8 Mobile PATA Controller */
+	{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+
+	/* SATA ports */
+
+	/* 82801EB (ICH5) */
+	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 82801EB (ICH5) */
+	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 6300ESB (ICH5 variant with broken PCS present bits) */
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 6300ESB pretending RAID */
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 82801FB/FW (ICH6/ICH6W) */
+	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FR/FRW (ICH6R/ICH6RW) */
+	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
+	 * Attach iff the controller is in IDE mode. */
+	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
+	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
+	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801GBM/GHM (ICH7M, identical to ICH6M)  */
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
+	/* Enterprise Southbridge 2 (631xESB/632xESB) */
+	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* SATA Controller 1 IDE (ICH8) */
+	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller 2 IDE (ICH8) */
+	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* Mobile SATA Controller IDE (ICH8M), Apple */
+	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
+	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
+	{ 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
+	/* Mobile SATA Controller IDE (ICH8M) */
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (Tolapai) */
+	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (ICH10) */
+	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PCH) */
+	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+	/* SATA Controller IDE (CPT) */
+	{ 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (CPT) */
+	{ 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (CPT) */
+	{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (CPT) */
+	{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (PBG) */
+	{ 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (PBG) */
+	{ 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Panther Point) */
+	{ 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Panther Point) */
+	{ 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Panther Point) */
+	{ 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Panther Point) */
+	{ 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Lynx Point) */
+	{ 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Lynx Point) */
+	{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Lynx Point) */
+	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+	/* SATA Controller IDE (Lynx Point) */
+	{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Lynx Point-LP) */
+	{ 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Lynx Point-LP) */
+	{ 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Lynx Point-LP) */
+	{ 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Lynx Point-LP) */
+	{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (DH89xxCC) */
+	{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Avoton) */
+	{ 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Avoton) */
+	{ 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Avoton) */
+	{ 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Avoton) */
+	{ 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (Wellsburg) */
+	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Wellsburg) */
+	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+	/* SATA Controller IDE (Wellsburg) */
+	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (Wellsburg) */
+	{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (BayTrail) */
+	{ 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+	{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+	/* SATA Controller IDE (Coleto Creek) */
+	{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (9 Series) */
+	{ 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+	/* SATA Controller IDE (9 Series) */
+	{ 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+	/* SATA Controller IDE (9 Series) */
+	{ 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+	/* SATA Controller IDE (9 Series) */
+	{ 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+
+	{ }	/* terminate list */
+};
+
+static const struct piix_map_db ich5_map_db = {
+	.mask = 0x7,
+	.port_enable = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP  */
+		{  P0,  NA,  P1,  NA }, /* 000b */
+		{  P1,  NA,  P0,  NA }, /* 001b */
+		{  RV,  RV,  RV,  RV },
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P1, IDE, IDE }, /* 100b */
+		{  P1,  P0, IDE, IDE }, /* 101b */
+		{ IDE, IDE,  P0,  P1 }, /* 110b */
+		{ IDE, IDE,  P1,  P0 }, /* 111b */
+	},
+};
+
+static const struct piix_map_db ich6_map_db = {
+	.mask = 0x3,
+	.port_enable = 0xf,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P2,  P1,  P3 }, /* 00b */
+		{ IDE, IDE,  P1,  P3 }, /* 01b */
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich6m_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x5,
+
+	/* Map 01b isn't specified in the doc but some notebooks use
+	 * it anyway.  MAP 01b has been spotted on both ICH6M and
+	 * ICH7M.
+	 */
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P2,  NA,  NA }, /* 00b */
+		{ IDE, IDE,  P1,  P3 }, /* 01b */
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8_map_db = {
+	.mask = 0x3,
+	.port_enable = 0xf,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b (IDE mode) */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8_2port_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  P1,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV }, /* 01b */
+		{  RV,  RV,  RV,  RV }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8m_apple_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x1,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  NA,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db tolapai_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  P1,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV }, /* 01b */
+		{  RV,  RV,  RV,  RV }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db *piix_map_db_table[] = {
+	[ich5_sata]		= &ich5_map_db,
+	[ich6_sata]		= &ich6_map_db,
+	[ich6m_sata]		= &ich6m_map_db,
+	[ich8_sata]		= &ich8_map_db,
+	[ich8_2port_sata]	= &ich8_2port_map_db,
+	[ich8m_apple_sata]	= &ich8m_apple_map_db,
+	[tolapai_sata]		= &tolapai_map_db,
+	[ich8_sata_snb]		= &ich8_map_db,
+	[ich8_2port_sata_snb]	= &ich8_2port_map_db,
+	[ich8_2port_sata_byt]	= &ich8_2port_map_db,
+};
+
+static const struct pci_bits piix_enable_bits[] = {
+	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+};
+
+MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+struct ich_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+/*
+ *	List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
+	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
+	{ 0x27DF, 0x1028, 0x02b0 },	/* ICH7 on unknown Dell */
+	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
+	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
+	{ 0x27DF, 0x103C, 0x361a },	/* ICH7 on unknown HP  */
+	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
+	{ 0x27DF, 0x152D, 0x0778 },	/* ICH7 on unknown Intel */
+	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
+	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
+	{ 0x24CA, 0x10CF, 0x11AB },	/* ICH4M on Fujitsu-Siemens Lifebook S6120 */
+	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
+	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
+	{ 0x27df, 0x104d, 0x900e },	/* ICH7 on Sony TZ-90 */
+	/* end marker */
+	{ 0, }
+};
+
+static int piix_port_start(struct ata_port *ap)
+{
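+	/* Enable 32-bit PIO transfers unless the chip supports 16-bit only */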
+	if (!(ap->flags & PIIX_FLAG_PIO16))
+		ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+
+	return ata_bmdma_port_start(ap);
+}
+
+/**
+ *	ich_pata_cable_detect - Probe host controller cable detect info
+ *	@ap: Port for which cable detect info is desired
+ *
+ *	Read 80c cable indicator from ATA PCI device's PCI config
+ *	register.  This register is normally set by firmware (BIOS).
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static int ich_pata_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct piix_host_priv *hpriv = ap->host->private_data;
+	const struct ich_laptop *lap = &ich_laptop[0];
+	u8 mask;
+
+	/* Check for specials */
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device)
+			return ATA_CBL_PATA40_SHORT;
+
+		lap++;
+	}
+
+	/* check BIOS cable detect results */
+	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
+	if ((hpriv->saved_iocfg & mask) == 0)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	piix_pata_prereset - prereset for PATA host controller
+ *	@link: Target link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_sff_prereset(link, deadline);
+}
+
+static DEFINE_SPINLOCK(piix_lock);
+
+static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
+			     u8 pio)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	unsigned int is_slave	= (adev->devno != 0);
+	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+	unsigned int slave_port	= 0x44;
+	u16 master_data;
+	u8 slave_data;
+	u8 udma_enable;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for ICH controllers.
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio >= 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE enable */
+	/* Intel specifies that the PPE functionality is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+	/*
+	 * If the drive MWDMA is faster than it can do PIO then
+	 * we must force PIO into PIO0
+	 */
+	if (adev->pio_mode < XFER_PIO_0 + pio)
+		/* Enable DMA timing only */
+		control |= 8;	/* PIO cycles in PIO0 */
+
+	spin_lock_irqsave(&piix_lock, flags);
+
+	/* PIO configuration clears DTE unconditionally.  It will be
+	 * programmed in set_dmamode which is guaranteed to be called
+	 * after set_piomode if any DMA mode is available.
+	 */
+	pci_read_config_word(dev, master_port, &master_data);
+	if (is_slave) {
+		/* clear TIME1|IE1|PPE1|DTE1 */
+		master_data &= 0xff0f;
+		/* enable PPE1, IE1 and TIME1 as needed */
+		master_data |= (control << 4);
+		pci_read_config_byte(dev, slave_port, &slave_data);
+		slave_data &= (ap->port_no ? 0x0f : 0xf0);
+		/* Load the timing nibble for this slave */
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+						<< (ap->port_no ? 4 : 0);
+	} else {
+		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+		master_data &= 0xccf0;
+		/* Enable PPE, IE and TIME as appropriate */
+		master_data |= control;
+		/* load ISP and RCT */
+		master_data |=
+			(timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	}
+
+	/* Enable SITRE (separate slave timing register) */
+	master_data |= 0x4000;
+	pci_write_config_word(dev, master_port, master_data);
+	if (is_slave)
+		pci_write_config_byte(dev, slave_port, slave_data);
+
+	/* Ensure the UDMA bit is off - it will be turned back on if
+	   UDMA is selected */
+
+	if (ap->udma_mask) {
+		pci_read_config_byte(dev, 0x48, &udma_enable);
+		udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+		pci_write_config_byte(dev, 0x48, udma_enable);
+	}
+
+	spin_unlock_irqrestore(&piix_lock, flags);
+}
+
+/**
+ *	piix_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	do_pata_set_dmamode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *	@isich: set if the chip is an ICH device
+ *
+ *	Set UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	u8 udma_enable		= 0;
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma = speed - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		spin_lock_irqsave(&piix_lock, flags);
+
+		pci_read_config_byte(dev, 0x48, &udma_enable);
+
+		/*
+		 * UDMA is handled by a combination of clock switching and
+		 * selection of dividers
+		 *
+		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
+		 *	       except UDMA0 which is 00
+		 */
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma == 5)
+			u_clock = 0x1000;	/* 100MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66MHz */
+		else
+			u_clock = 0;		/* 33MHz */
+
+		udma_enable |= (1 << devid);
+
+		/* Load the CT/RP selection */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= u_speed << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		if (isich) {
+			/* Select a 33/66/100MHz clock */
+			pci_read_config_word(dev, 0x54, &ideconf);
+			ideconf &= ~(0x1001 << devid);
+			ideconf |= u_clock << devid;
+			/* For ICH or later we should set bit 10 for better
+			   performance (WR_PingPong_En) */
+			pci_write_config_word(dev, 0x54, ideconf);
+		}
+
+		pci_write_config_byte(dev, 0x48, udma_enable);
+
+		spin_unlock_irqrestore(&piix_lock, flags);
+	} else {
+		/* MWDMA is driven by the PIO timings. */
+		unsigned int mwdma = speed - XFER_MW_DMA_0;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		/* XFER_PIO_0 is never used currently */
+		piix_set_timings(ap, adev, pio);
+	}
+}
+
+/**
+ *	piix_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	do_pata_set_dmamode(ap, adev, 0);
+}
+
+/**
+ *	ich_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	do_pata_set_dmamode(ap, adev, 1);
+}
+
+/*
+ * Serial ATA Index/Data Pair Superset Registers access
+ *
+ * Beginning from ICH8, there's a sane way to access SCRs using index
+ * and data register pair located at BAR5 which means that we have
+ * separate SCRs for master and slave.  This is handled using libata
+ * slave_link facility.
+ */
+static const int piix_sidx_map[] = {
+	[SCR_STATUS]	= 0,
+	[SCR_ERROR]	= 2,
+	[SCR_CONTROL]	= 1,
+};
+
+static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
+{
+	struct ata_port *ap = link->ap;
+	struct piix_host_priv *hpriv = ap->host->private_data;
+
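+	/* Index = device number (port_no * 2 + pmp) in bits 15:8, SCR in bits 7:0 */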
+	iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
+		  hpriv->sidpr + PIIX_SIDPR_IDX);
+}
+
+static int piix_sidpr_scr_read(struct ata_link *link,
+			       unsigned int reg, u32 *val)
+{
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	piix_sidpr_sel(link, reg);
+	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+	return 0;
+}
+
+static int piix_sidpr_scr_write(struct ata_link *link,
+				unsigned int reg, u32 val)
+{
+	struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+	if (reg >= ARRAY_SIZE(piix_sidx_map))
+		return -EINVAL;
+
+	piix_sidpr_sel(link, reg);
+	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+	return 0;
+}
+
+static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			      unsigned hints)
+{
+	return sata_link_scr_lpm(link, policy, false);
+}
+
+static bool piix_irq_check(struct ata_port *ap)
+{
+	if (unlikely(!ap->ioaddr.bmdma_addr))
+		return false;
+
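+	/* The interrupt bit in the BMDMA status register flags a pending IRQ */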
+	return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int piix_broken_suspend(void)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+			},
+		},
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
+			},
+		},
+		{
+			.ident = "TECRA M4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
+			},
+		},
+		{
+			.ident = "TECRA M4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
+			},
+		},
+		{
+			.ident = "TECRA M5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+			},
+		},
+		{
+			.ident = "TECRA M6",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
+			},
+		},
+		{
+			.ident = "TECRA M7",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
+			},
+		},
+		{
+			.ident = "TECRA A8",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
+			},
+		},
+		{
+			.ident = "Satellite R20",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
+			},
+		},
+		{
+			.ident = "Satellite R25",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
+			},
+		},
+		{
+			.ident = "Satellite Pro U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
+			},
+		},
+		{
+			.ident = "Satellite U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+			},
+		},
+		{
+			.ident = "SATELLITE U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
+			},
+		},
+		{
+			.ident = "Satellite Pro A120",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+			},
+		},
+		{
+			.ident = "Portege M500",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+			},
+		},
+		{
+			.ident = "VGN-BX297XP",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
+			},
+		},
+
+		{ }	/* terminate list */
+	};
+	static const char *oemstrs[] = {
+		"Tecra M3,",
+	};
+	int i;
+
+	if (dmi_check_system(sysids))
+		return 1;
+
+	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+			return 1;
+
+	/* TECRA M4 sometimes forgets its identity and reports bogus
+	 * DMI information.  As the bogus information is a bit
+	 * generic, match as many entries as possible.  This manual
+	 * matching is necessary because dmi_system_id.matches is
+	 * limited to four entries.
+	 */
+	if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_PRODUCT_NAME, "000000") &&
+	    dmi_match(DMI_PRODUCT_VERSION, "000000") &&
+	    dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
+	    dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
+	    dmi_match(DMI_BOARD_NAME, "Portable PC") &&
+	    dmi_match(DMI_BOARD_VERSION, "Version A0"))
+		return 1;
+
+	return 0;
+}
+
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	/* Some braindamaged ACPI suspend implementations expect the
+	 * controller to be awake on entry; otherwise, it burns cpu
+	 * cycles and power trying to do something to the sleeping
+	 * beauty.
+	 */
+	if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
+		pci_save_state(pdev);
+
+		/* mark its power state as "unknown", since we don't
+		 * know if e.g. the BIOS will change its device state
+		 * when we suspend.
+		 */
+		if (pdev->current_state == PCI_D0)
+			pdev->current_state = PCI_UNKNOWN;
+
+		/* tell resume that it's waking up from broken suspend */
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags |= PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+	} else
+		ata_pci_device_do_suspend(pdev, mesg);
+
+	return 0;
+}
+
+static int piix_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int rc;
+
+	if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		pci_set_power_state(pdev, PCI_D0);
+		pci_restore_state(pdev);
+
+		/* PCI device wasn't disabled during suspend.  Use
+		 * pci_reenable_device() to avoid affecting the enable
+		 * count.
+		 */
+		rc = pci_reenable_device(pdev);
+		if (rc)
+			dev_err(&pdev->dev,
+				"failed to enable device after resume (%d)\n",
+				rc);
+	} else
+		rc = ata_pci_device_do_resume(pdev);
+
+	if (rc == 0)
+		ata_host_resume(host);
+
+	return rc;
+}
+#endif
+
+static u8 piix_vmw_bmdma_status(struct ata_port *ap)
+{
+	return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
+}
+
+static struct scsi_host_template piix_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations piix_sata_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.sff_irq_check		= piix_irq_check,
+	.port_start		= piix_port_start,
+};
+
+static struct ata_port_operations piix_pata_ops = {
+	.inherits		= &piix_sata_ops,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= piix_set_piomode,
+	.set_dmamode		= piix_set_dmamode,
+	.prereset		= piix_pata_prereset,
+};
+
+static struct ata_port_operations piix_vmw_ops = {
+	.inherits		= &piix_pata_ops,
+	.bmdma_status		= piix_vmw_bmdma_status,
+};
+
+static struct ata_port_operations ich_pata_ops = {
+	.inherits		= &piix_pata_ops,
+	.cable_detect		= ich_pata_cable_detect,
+	.set_dmamode		= ich_set_dmamode,
+};
+
+static struct device_attribute *piix_sidpr_shost_attrs[] = {
+	&dev_attr_link_power_management_policy,
+	NULL
+};
+
+static struct scsi_host_template piix_sidpr_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.shost_attrs		= piix_sidpr_shost_attrs,
+};
+
+static struct ata_port_operations piix_sidpr_sata_ops = {
+	.inherits		= &piix_sata_ops,
+	.hardreset		= sata_std_hardreset,
+	.scr_read		= piix_sidpr_scr_read,
+	.scr_write		= piix_sidpr_scr_write,
+	.set_lpm		= piix_sidpr_set_lpm,
+};
+
+static struct ata_port_info piix_port_info[] = {
+	[piix_pata_mwdma] =	/* PIIX3 MWDMA only */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.port_ops	= &piix_pata_ops,
+	},
+
+	[piix_pata_33] =	/* PIIX4 at 33MHz */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &piix_pata_ops,
+	},
+
+	[ich_pata_33] =		/* ICH0 - ICH at 33MHz */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok  */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_66] =		/* ICH controllers up to 66MHz */
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
+		.udma_mask	= ATA_UDMA4,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_100] =
+	{
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich_pata_100_nomwdma1] =
+	{
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2_ONLY,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	[ich5_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich6_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich6m_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[tolapai_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8m_apple_sata] =
+	{
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[piix_pata_vmw] =
+	{
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.udma_mask	= ATA_UDMA2,
+		.port_ops	= &piix_vmw_ops,
+	},
+
+	/*
+	 * Some Sandy Bridge chipsets have broken 32-bit PIO mode to date;
+	 * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+	 */
+	[ich8_sata_snb] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata_snb] =
+	{
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
+					| PIIX_FLAG_PIO16,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
+	},
+
+	[ich8_2port_sata_byt] =
+	{
+		.flags          = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+		.pio_mask       = ATA_PIO4,
+		.mwdma_mask     = ATA_MWDMA2,
+		.udma_mask      = ATA_UDMA6,
+		.port_ops       = &piix_sata_ops,
+	},
+
+};
+
+#define AHCI_PCI_BAR 5
+#define AHCI_GLOBAL_CTL 0x04
+#define AHCI_ENABLE (1 << 31)
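+
+/* AHCI_GLOBAL_CTL is the AHCI GHC register in the AHCI MMIO BAR; bit 31
+ * is the AHCI Enable (AE) bit per the AHCI spec.  piix_disable_ahci()
+ * below clears it so the controller falls back to the legacy IDE mode
+ * this driver expects.
+ */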
+static int piix_disable_ahci(struct pci_dev *pdev)
+{
+	void __iomem *mmio;
+	u32 tmp;
+	int rc = 0;
+
+	/* BUG: pci_enable_device has not yet been called.  This
+	 * works because this device is usually set up by BIOS.
+	 */
+
+	if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
+	    !pci_resource_len(pdev, AHCI_PCI_BAR))
+		return 0;
+
+	mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
+	if (!mmio)
+		return -ENOMEM;
+
+	tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
+	if (tmp & AHCI_ENABLE) {
+		tmp &= ~AHCI_ENABLE;
+		iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);
+
+		tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
+		if (tmp & AHCI_ENABLE)
+			rc = -EIO;
+	}
+
+	pci_iounmap(pdev, mmio);
+	return rc;
+}
+
+/**
+ *	piix_check_450nx_errata	-	Check for problem 450NX setup
+ *	@ata_dev: the PCI device to check
+ *
+ *	Check for the presence of 450NX errata #19 and errata #25.  If
+ *	either is found, return an error code so we can turn off DMA.
+ */
+
+static int piix_check_450nx_errata(struct pci_dev *ata_dev)
+{
+	struct pci_dev *pdev = NULL;
+	u16 cfg;
+	int no_piix_dma = 0;
+
+	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
+		/* Look for 450NX PXB.  Check for problem configurations.
+		   A PCI quirk already checks bit 6. */
+		pci_read_config_word(pdev, 0x41, &cfg);
+		/* Only on the original revision: IDE DMA can hang */
+		if (pdev->revision == 0x00)
+			no_piix_dma = 1;
+		/* On all revisions below 5, the PXB bus lock must be disabled for IDE */
+		else if (cfg & (1<<14) && pdev->revision < 5)
+			no_piix_dma = 2;
+	}
+	if (no_piix_dma)
+		dev_warn(&ata_dev->dev,
+			 "450NX errata present, disabling IDE DMA%s\n",
+			 no_piix_dma == 2 ? " - a BIOS update may resolve this"
+			 : "");
+
+	return no_piix_dma;
+}
+
+static void piix_init_pcs(struct ata_host *host,
+			  const struct piix_map_db *map_db)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u16 pcs, new_pcs;
+
+	pci_read_config_word(pdev, ICH5_PCS, &pcs);
+
+	new_pcs = pcs | map_db->port_enable;
+
+	if (new_pcs != pcs) {
+		DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
+		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
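+		/* give the freshly enabled ports time to settle before
+		 * they are probed; the 150ms figure is presumably
+		 * empirical
+		 */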
+		msleep(150);
+	}
+}
+
+static const int *piix_init_sata_map(struct pci_dev *pdev,
+				     struct ata_port_info *pinfo,
+				     const struct piix_map_db *map_db)
+{
+	const int *map;
+	int i, invalid_map = 0;
+	u8 map_value;
+	char buf[32];
+	char *p = buf, *end = buf + sizeof(buf);
+
+	pci_read_config_byte(pdev, ICH5_PMR, &map_value);
+
+	map = map_db->map[map_value & map_db->mask];
+
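+	/* Each map entry is a SATA port number, IDE (a legacy IDE
+	 * channel, always in pairs), NA (not available) or RV
+	 * (reserved, i.e. an invalid configuration).
+	 */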
+	for (i = 0; i < 4; i++) {
+		switch (map[i]) {
+		case RV:
+			invalid_map = 1;
+			p += scnprintf(p, end - p, " XX");
+			break;
+
+		case NA:
+			p += scnprintf(p, end - p, " --");
+			break;
+
+		case IDE:
+			WARN_ON((i & 1) || map[i + 1] != IDE);
+			pinfo[i / 2] = piix_port_info[ich_pata_100];
+			i++;
+			p += scnprintf(p, end - p, " IDE IDE");
+			break;
+
+		default:
+			p += scnprintf(p, end - p, " P%d", map[i]);
+			if (i & 1)
+				pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
+			break;
+		}
+	}
+	dev_info(&pdev->dev, "MAP [%s ]\n", buf);
+
+	if (invalid_map)
+		dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
+
+	return map;
+}
+
+static bool piix_no_sidpr(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+
+	/*
+	 * Samsung DB-P70 only has three ATA ports exposed and
+	 * curiously the unconnected first port reports link online
+	 * while not responding to SRST protocol causing excessive
+	 * detection delay.
+	 *
+	 * Unfortunately, the system doesn't carry enough DMI
+	 * information to identify the machine but does have subsystem
+	 * vendor and device set.  As it's unclear whether the
+	 * subsystem vendor/device is used only for this specific
+	 * board, the port can't be disabled using that information
+	 * alone; however, turning off SIDPR access works around
+	 * the problem.  Turn it off.
+	 *
+	 * This problem is reported in bnc#441420.
+	 *
+	 * https://bugzilla.novell.com/show_bug.cgi?id=441420
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
+	    pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+	    pdev->subsystem_device == 0xb049) {
+		dev_warn(host->dev,
+			 "Samsung DB-P70 detected, disabling SIDPR\n");
+		return true;
+	}
+
+	return false;
+}
+
+static int piix_init_sidpr(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct piix_host_priv *hpriv = host->private_data;
+	struct ata_link *link0 = &host->ports[0]->link;
+	u32 scontrol;
+	int i, rc;
+
+	/* check for availability */
+	for (i = 0; i < 4; i++)
+		if (hpriv->map[i] == IDE)
+			return 0;
+
+	/* is it blacklisted? */
+	if (piix_no_sidpr(host))
+		return 0;
+
+	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
+		return 0;
+
+	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
+	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
+		return 0;
+
+	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
+		return 0;
+
+	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+	/* SCR access via SIDPR doesn't work on some configurations.
+	 * Give it a test drive by inhibiting power save modes, which
+	 * we'll do anyway.
+	 */
+	piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
+
+	/* if IPM is already 3, SCR access is probably working.  Don't
+	 * un-inhibit power save modes as BIOS might have inhibited
+	 * them for a reason.
+	 */
+	if ((scontrol & 0xf00) != 0x300) {
+		scontrol |= 0x300;
+		piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
+		piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
+
+		if ((scontrol & 0xf00) != 0x300) {
+			dev_info(host->dev,
+				 "SCR access via SIDPR is available but doesn't work\n");
+			return 0;
+		}
+	}
+
+	/* okay, SCRs available, set ops and ask libata for slave_link */
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ap->ops = &piix_sidpr_sata_ops;
+
+		if (ap->flags & ATA_FLAG_SLAVE_POSS) {
+			rc = ata_slave_link_init(ap);
+			if (rc)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void piix_iocfg_bit18_quirk(struct ata_host *host)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			/* Clevo M570U sets IOCFG bit 18 if the cdrom
+			 * isn't used to boot the system, which
+			 * disables the channel.
+			 */
+			.ident = "M570U",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
+				DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
+			},
+		},
+
+		{ }	/* terminate list */
+	};
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct piix_host_priv *hpriv = host->private_data;
+
+	if (!dmi_check_system(sysids))
+		return;
+
+	/* The datasheet says that bit 18 is NOOP but certain systems
+	 * seem to use it to disable a channel.  Clear the bit on the
+	 * affected systems.
+	 */
+	if (hpriv->saved_iocfg & (1 << 18)) {
+		dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
+		pci_write_config_dword(pdev, PIIX_IOCFG,
+				       hpriv->saved_iocfg & ~(1 << 18));
+	}
+}
+
+static bool piix_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq 2510p",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+		{
+			.ident = "HP Compaq nc6000",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
+static int prefer_ms_hyperv = 1;
+module_param(prefer_ms_hyperv, int, 0);
+MODULE_PARM_DESC(prefer_ms_hyperv,
+	"Prefer Hyper-V paravirtualization drivers instead of ATA, "
+	"0 - Use ATA drivers, "
+	"1 (Default) - Use the paravirtualization drivers.");
+
+static void piix_ignore_devices_quirk(struct ata_host *host)
+{
+#if IS_ENABLED(CONFIG_HYPERV_STORAGE)
+	static const struct dmi_system_id ignore_hyperv[] = {
+		{
+			/* On Hyper-V hypervisors the disks are exposed
+			 * both on the emulated SATA controller and via
+			 * the paravirtualised drivers.  The CD/DVD devices
+			 * are only exposed on the emulated controller.
+			 * Request we ignore ATA devices on this host.
+			 */
+			.ident = "Hyper-V Virtual Machine",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR,
+						"Microsoft Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+			},
+		},
+		{ }	/* terminate list */
+	};
+	static const struct dmi_system_id allow_virtual_pc[] = {
+		{
+			/* In MS Virtual PC guests the DMI ident is nearly
+			 * identical to a Hyper-V guest. One difference is the
+			 * product version which is used here to identify
+			 * a Virtual PC guest. This entry allows ata_piix to
+			 * drive the emulated hardware.
+			 */
+			.ident = "MS Virtual PC 2007",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR,
+						"Microsoft Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+			},
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
+	const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
+
+	if (ignore && !allow && prefer_ms_hyperv) {
+		host->flags |= ATA_HOST_IGNORE_ATA;
+		dev_info(host->dev, "%s detected, ATA device ignore set\n",
+			ignore->ident);
+	}
+#endif
+}
+
+/**
+ *	piix_init_one - Register PIIX ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in piix_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_port_info port_info[2];
+	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+	struct scsi_host_template *sht = &piix_sht;
+	unsigned long port_flags;
+	struct ata_host *host;
+	struct piix_host_priv *hpriv;
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* no hotplugging support for later devices (FIXME) */
+	if (!in_module_init && ent->driver_data >= ich5_sata)
+		return -ENODEV;
+
+	if (piix_broken_system_poweroff(pdev)) {
+		piix_port_info[ent->driver_data].flags |=
+				ATA_FLAG_NO_POWEROFF_SPINDOWN |
+					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
+		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
+				"on poweroff and hibernation\n");
+	}
+
+	port_info[0] = piix_port_info[ent->driver_data];
+	port_info[1] = piix_port_info[ent->driver_data];
+
+	port_flags = port_info[0].flags;
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	/* Save IOCFG; it is used for cable detection, quirk
+	 * detection and restoration on detach.  This is necessary
+	 * because some ACPI implementations mess up cable related
+	 * bits on _STM.  Reported in kernel bz#11879.
+	 */
+	pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
+
+	/* ICH6R may be driven by either ata_piix or ahci driver
+	 * regardless of BIOS configuration.  Make sure AHCI mode is
+	 * off.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
+		rc = piix_disable_ahci(pdev);
+		if (rc)
+			return rc;
+	}
+
+	/* SATA map init can change port_info, do it before prepping host */
+	if (port_flags & ATA_FLAG_SATA)
+		hpriv->map = piix_init_sata_map(pdev, port_info,
+					piix_map_db_table[ent->driver_data]);
+
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	host->private_data = hpriv;
+
+	/* initialize controller */
+	if (port_flags & ATA_FLAG_SATA) {
+		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
+		rc = piix_init_sidpr(host);
+		if (rc)
+			return rc;
+		if (host->ports[0]->ops == &piix_sidpr_sata_ops)
+			sht = &piix_sidpr_sht;
+	}
+
+	/* apply IOCFG bit18 quirk */
+	piix_iocfg_bit18_quirk(host);
+
+	/* On ICH5, some BIOSen disable the interrupt using the
+	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
+	 * On ICH6, this bit has the same effect, but only when
+	 * MSI is disabled (and it is disabled, as we don't use
+	 * message-signalled interrupts currently).
+	 */
+	if (port_flags & PIIX_FLAG_CHECKINTR)
+		pci_intx(pdev, 1);
+
+	if (piix_check_450nx_errata(pdev)) {
+		/* This writes into the master table, but it does not
+		   really matter for this errata as we will apply it to
+		   all the PIIX devices on the board. */
+		host->ports[0]->mwdma_mask = 0;
+		host->ports[0]->udma_mask = 0;
+		host->ports[1]->mwdma_mask = 0;
+		host->ports[1]->udma_mask = 0;
+	}
+	host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+	/* Allow hosts to specify device types to ignore when scanning. */
+	piix_ignore_devices_quirk(host);
+
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+}
+
+static void piix_remove_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct piix_host_priv *hpriv = host->private_data;
+
+	pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
+
+	ata_pci_remove_one(pdev);
+}
+
+static struct pci_driver piix_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= piix_pci_tbl,
+	.probe			= piix_init_one,
+	.remove			= piix_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= piix_pci_device_suspend,
+	.resume			= piix_pci_device_resume,
+#endif
+};
+
+static int __init piix_init(void)
+{
+	int rc;
+
+	DPRINTK("pci_register_driver\n");
+	rc = pci_register_driver(&piix_pci_driver);
+	if (rc)
+		return rc;
+
+	in_module_init = 0;
+
+	DPRINTK("done\n");
+	return 0;
+}
+
+static void __exit piix_exit(void)
+{
+	pci_unregister_driver(&piix_pci_driver);
+}
+
+module_init(piix_init);
+module_exit(piix_exit);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 0000000..b5f57c6
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2624 @@
+/*
+ *  libahci.c - Common AHCI SATA low-level routines
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/driver-api/libata.rst
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include <linux/pci.h>
+#include "ahci.h"
+#include "libata.h"
+
+static int ahci_skip_host_reset;
+int ahci_ignore_sss;
+EXPORT_SYMBOL_GPL(ahci_ignore_sss);
+
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
+
+module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
+MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
+
+static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			unsigned hints);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+			      size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+					ssize_t size);
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static void ahci_dev_config(struct ata_device *dev);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+				   enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_cap2(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+				      struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+				  struct device_attribute *attr, char *buf);
+static ssize_t ahci_read_em_buffer(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+static ssize_t ahci_store_em_buffer(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size);
+static ssize_t ahci_show_em_supported(struct device *dev,
+				      struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
+
+static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+		   ahci_read_em_buffer, ahci_store_em_buffer);
+static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
+
+struct device_attribute *ahci_shost_attrs[] = {
+	&dev_attr_link_power_management_policy,
+	&dev_attr_em_message_type,
+	&dev_attr_em_message,
+	&dev_attr_ahci_host_caps,
+	&dev_attr_ahci_host_cap2,
+	&dev_attr_ahci_host_version,
+	&dev_attr_ahci_port_cmd,
+	&dev_attr_em_buffer,
+	&dev_attr_em_message_supported,
+	NULL
+};
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
+
+struct device_attribute *ahci_sdev_attrs[] = {
+	&dev_attr_sw_activity,
+	&dev_attr_unload_heads,
+	&dev_attr_ncq_prio_enable,
+	NULL
+};
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
+
+struct ata_port_operations ahci_ops = {
+	.inherits		= &sata_pmp_port_ops,
+
+	.qc_defer		= ahci_pmp_qc_defer,
+	.qc_prep		= ahci_qc_prep,
+	.qc_issue		= ahci_qc_issue,
+	.qc_fill_rtf		= ahci_qc_fill_rtf,
+
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+	.softreset		= ahci_softreset,
+	.hardreset		= ahci_hardreset,
+	.postreset		= ahci_postreset,
+	.pmp_softreset		= ahci_softreset,
+	.error_handler		= ahci_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+	.dev_config		= ahci_dev_config,
+
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
+	.pmp_attach		= ahci_pmp_attach,
+	.pmp_detach		= ahci_pmp_detach,
+
+	.set_lpm		= ahci_set_lpm,
+	.em_show		= ahci_led_show,
+	.em_store		= ahci_led_store,
+	.sw_activity_show	= ahci_activity_show,
+	.sw_activity_store	= ahci_activity_store,
+	.transmit_led_message	= ahci_transmit_led_message,
+#ifdef CONFIG_PM
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+#endif
+	.port_start		= ahci_port_start,
+	.port_stop		= ahci_port_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_ops);
+
+struct ata_port_operations ahci_pmp_retry_srst_ops = {
+	.inherits		= &ahci_ops,
+	.softreset		= ahci_pmp_retry_softreset,
+};
+EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
+
+static bool ahci_em_messages __read_mostly = true;
+EXPORT_SYMBOL_GPL(ahci_em_messages);
+module_param(ahci_em_messages, bool, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
+
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
+module_param(devslp_idle_timeout, int, 0644);
+MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+	int i;
+	u32 tmp;
+
+	/* turn on AHCI_EN */
+	tmp = readl(mmio + HOST_CTL);
+	if (tmp & HOST_AHCI_EN)
+		return;
+
+	/* Some controllers need AHCI_EN to be written multiple times.
+	 * Try a few times before giving up.
+	 */
+	for (i = 0; i < 5; i++) {
+		tmp |= HOST_AHCI_EN;
+		writel(tmp, mmio + HOST_CTL);
+		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
+		if (tmp & HOST_AHCI_EN)
+			return;
+		msleep(10);
+	}
+
+	WARN_ON(1);
+}
+
+/**
+ *	ahci_rpm_get_port - Make sure the port is powered on
+ *	@ap: Port to power on
+ *
+ *	Whenever there is a need to access the AHCI host registers outside of
+ *	normal execution paths, call this function to make sure the host is
+ *	actually powered on.
+ */
+static int ahci_rpm_get_port(struct ata_port *ap)
+{
+	return pm_runtime_get_sync(ap->dev);
+}
+
+/**
+ *	ahci_rpm_put_port - Undoes ahci_rpm_get_port()
+ *	@ap: Port to power down
+ *
+ *	Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
+ *	if it has no more active users.
+ */
+static void ahci_rpm_put_port(struct ata_port *ap)
+{
+	pm_runtime_put(ap->dev);
+}
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_cap2(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	return sprintf(buf, "%x\n", hpriv->cap2);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	return sprintf(buf, "%x\n", hpriv->version);
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	void __iomem *port_mmio = ahci_port_base(ap);
+	ssize_t ret;
+
+	ahci_rpm_get_port(ap);
+	ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+	ahci_rpm_put_port(ap);
+
+	return ret;
+}
+
+static ssize_t ahci_read_em_buffer(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	void __iomem *em_mmio = mmio + hpriv->em_loc;
+	u32 em_ctl, msg;
+	unsigned long flags;
+	size_t count;
+	int i;
+
+	ahci_rpm_get_port(ap);
+	spin_lock_irqsave(ap->lock, flags);
+
+	em_ctl = readl(mmio + HOST_EM_CTL);
+	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
+	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		ahci_rpm_put_port(ap);
+		return -EINVAL;
+	}
+
+	if (!(em_ctl & EM_CTL_MR)) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		ahci_rpm_put_port(ap);
+		return -EAGAIN;
+	}
+
+	if (!(em_ctl & EM_CTL_SMB))
+		em_mmio += hpriv->em_buf_sz;
+
+	count = hpriv->em_buf_sz;
+
+	/* the count should not be larger than PAGE_SIZE */
+	if (count > PAGE_SIZE) {
+		if (printk_ratelimit())
+			ata_port_warn(ap,
+				      "EM read buffer size too large: "
+				      "buffer size %u, page size %lu\n",
+				      hpriv->em_buf_sz, PAGE_SIZE);
+		count = PAGE_SIZE;
+	}
+
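+	/* unpack each little-endian 32-bit EM word into the sysfs
+	 * buffer, low byte first
+	 */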
+	for (i = 0; i < count; i += 4) {
+		msg = readl(em_mmio + i);
+		buf[i] = msg & 0xff;
+		buf[i + 1] = (msg >> 8) & 0xff;
+		buf[i + 2] = (msg >> 16) & 0xff;
+		buf[i + 3] = (msg >> 24) & 0xff;
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	ahci_rpm_put_port(ap);
+
+	return i;
+}
+
+static ssize_t ahci_store_em_buffer(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	void __iomem *em_mmio = mmio + hpriv->em_loc;
+	const unsigned char *msg_buf = buf;
+	u32 em_ctl, msg;
+	unsigned long flags;
+	int i;
+
+	/* check size validity */
+	if (!(ap->flags & ATA_FLAG_EM) ||
+	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
+	    size % 4 || size > hpriv->em_buf_sz)
+		return -EINVAL;
+
+	ahci_rpm_get_port(ap);
+	spin_lock_irqsave(ap->lock, flags);
+
+	em_ctl = readl(mmio + HOST_EM_CTL);
+	if (em_ctl & EM_CTL_TM) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		ahci_rpm_put_port(ap);
+		return -EBUSY;
+	}
+
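+	/* pack the user buffer into little-endian 32-bit words and
+	 * write them into the EM transmit area
+	 */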
+	for (i = 0; i < size; i += 4) {
+		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
+		      msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
+		writel(msg, em_mmio + i);
+	}
+
+	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	ahci_rpm_put_port(ap);
+
+	return size;
+}
+
+static ssize_t ahci_show_em_supported(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 em_ctl;
+
+	ahci_rpm_get_port(ap);
+	em_ctl = readl(mmio + HOST_EM_CTL);
+	ahci_rpm_put_port(ap);
+
+	return sprintf(buf, "%s%s%s%s\n",
+		       em_ctl & EM_CTL_LED ? "led " : "",
+		       em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
+		       em_ctl & EM_CTL_SES ? "ses-2 " : "",
+		       em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
+}
+
+/**
+ *	ahci_save_initial_config - Save and fixup initial config values
+ *	@dev: target AHCI device
+ *	@hpriv: host private area to store config values
+ *
+ *	Some registers containing configuration info might be setup by
+ *	BIOS and might be cleared on reset.  This function saves the
+ *	initial values of those registers into @hpriv such that they
+ *	can be restored after controller reset.
+ *
+ *	If inconsistent, config values are fixed up by this function.
+ *
+ *	If it is not set already this function sets hpriv->start_engine to
+ *	ahci_start_engine.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+{
+	void __iomem *mmio = hpriv->mmio;
+	u32 cap, cap2, vers, port_map;
+	int i;
+
+	/* make sure AHCI mode is enabled before accessing CAP */
+	ahci_enable_ahci(mmio);
+
+	/* Values prefixed with saved_ are written back to host after
+	 * reset.  Values without are used for driver operation.
+	 */
+	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+	/* CAP2 register is only defined for AHCI 1.2 and later */
+	vers = readl(mmio + HOST_VERSION);
+	if ((vers >> 16) > 1 ||
+	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
+		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
+	else
+		hpriv->saved_cap2 = cap2 = 0;
+
+	/* some chips have errata preventing 64bit use */
+	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+		dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
+		cap &= ~HOST_CAP_64;
+	}
+
+	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+		dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
+		cap &= ~HOST_CAP_NCQ;
+	}
+
+	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+		dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
+		cap |= HOST_CAP_NCQ;
+	}
+
+	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+		dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
+		cap &= ~HOST_CAP_PMP;
+	}
+
+	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
+		dev_info(dev,
+			 "controller can't do SNTF, turning off CAP_SNTF\n");
+		cap &= ~HOST_CAP_SNTF;
+	}
+
+	if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+		dev_info(dev,
+			 "controller can't do DEVSLP, turning off\n");
+		cap2 &= ~HOST_CAP2_SDS;
+		cap2 &= ~HOST_CAP2_SADM;
+	}
+
+	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+		dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
+		cap |= HOST_CAP_FBS;
+	}
+
+	if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
+		dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
+		cap &= ~HOST_CAP_FBS;
+	}
+
+	if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
+		dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
+		cap |= HOST_CAP_ALPM;
+	}
+
+	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
+		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+			 port_map, hpriv->force_port_map);
+		port_map = hpriv->force_port_map;
+		hpriv->saved_port_map = port_map;
+	}
+
+	if (hpriv->mask_port_map) {
+		dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
+			port_map,
+			port_map & hpriv->mask_port_map);
+		port_map &= hpriv->mask_port_map;
+	}
+
+	/* cross check port_map and cap.n_ports */
+	if (port_map) {
+		int map_ports = 0;
+
+		for (i = 0; i < AHCI_MAX_PORTS; i++)
+			if (port_map & (1 << i))
+				map_ports++;
+
+		/* If PI has more ports than n_ports, whine, clear
+		 * port_map and let it be generated from n_ports.
+		 */
+		if (map_ports > ahci_nr_ports(cap)) {
+			dev_warn(dev,
+				 "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
+				 port_map, ahci_nr_ports(cap));
+			port_map = 0;
+		}
+	}
+
+	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+	if (!port_map && vers < 0x10300) {
+		port_map = (1 << ahci_nr_ports(cap)) - 1;
+		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+		/* write the fixed up value to the PI register */
+		hpriv->saved_port_map = port_map;
+	}
+
+	/* record values to use during operation */
+	hpriv->cap = cap;
+	hpriv->cap2 = cap2;
+	hpriv->version = readl(mmio + HOST_VERSION);
+	hpriv->port_map = port_map;
+
+	if (!hpriv->start_engine)
+		hpriv->start_engine = ahci_start_engine;
+
+	if (!hpriv->stop_engine)
+		hpriv->stop_engine = ahci_stop_engine;
+
+	if (!hpriv->irq_handler)
+		hpriv->irq_handler = ahci_single_level_irq_intr;
+}
+EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+/**
+ *	ahci_restore_initial_config - Restore initial config
+ *	@host: target ATA host
+ *
+ *	Restore initial config stored by ahci_save_initial_config().
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+
+	writel(hpriv->saved_cap, mmio + HOST_CAP);
+	if (hpriv->saved_cap2)
+		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
+	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+	static const int offset[] = {
+		[SCR_STATUS]		= PORT_SCR_STAT,
+		[SCR_CONTROL]		= PORT_SCR_CTL,
+		[SCR_ERROR]		= PORT_SCR_ERR,
+		[SCR_ACTIVE]		= PORT_SCR_ACT,
+		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
+	};
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	if (sc_reg < ARRAY_SIZE(offset) &&
+	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+		return offset[sc_reg];
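+	/* no SCR register lives at offset 0, so 0 doubles as "unsupported" */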
+	return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	int offset = ahci_scr_offset(link->ap, sc_reg);
+
+	if (offset) {
+		*val = readl(port_mmio + offset);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	int offset = ahci_scr_offset(link->ap, sc_reg);
+
+	if (offset) {
+		writel(val, port_mmio + offset);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+void ahci_start_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 tmp;
+
+	/* start DMA */
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+	readl(port_mmio + PORT_CMD); /* flush */
+}
+EXPORT_SYMBOL_GPL(ahci_start_engine);
+
+int ahci_stop_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u32 tmp;
+
+	/*
+	 * On some controllers, stopping a port's DMA engine while the port
+	 * is in ALPM state (partial or slumber) results in failures on
+	 * subsequent DMA engine starts.  For those controllers, put the
+	 * port back in active state before stopping its DMA engine.
+	 */
+	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+		return -EIO;
+	}
+
+	tmp = readl(port_mmio + PORT_CMD);
+
+	/* check if the HBA is idle */
+	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+		return 0;
+
+	/*
+	 * Don't try to issue commands but return with ENODEV if the
+	 * AHCI controller is no longer available (e.g. due to PCIe hot
+	 * unplugging).  Otherwise a 500ms delay is added for each port.
+	 */
+	if (tmp == 0xffffffff) {
+		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
+		return -ENODEV;
+	}
+
+	/* setting HBA to idle */
+	tmp &= ~PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	/* wait for engine to stop. This could be as long as 500 msec */
+	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+	if (tmp & PORT_CMD_LIST_ON)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_stop_engine);
+
+void ahci_start_fis_rx(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 tmp;
+
+	/* set FIS registers */
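+	/* the double 16-bit shift stays well-defined when dma_addr_t is
+	 * only 32 bits wide (a single >> 32 would be undefined)
+	 */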
+	if (hpriv->cap & HOST_CAP_64)
+		writel((pp->cmd_slot_dma >> 16) >> 16,
+		       port_mmio + PORT_LST_ADDR_HI);
+	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+	if (hpriv->cap & HOST_CAP_64)
+		writel((pp->rx_fis_dma >> 16) >> 16,
+		       port_mmio + PORT_FIS_ADDR_HI);
+	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+	/* enable FIS reception */
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_FIS_RX;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	/* flush */
+	readl(port_mmio + PORT_CMD);
+}
+EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 tmp;
+
+	/* disable FIS reception */
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp &= ~PORT_CMD_FIS_RX;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	/* wait for completion, spec says 500ms, give it 1000 */
+	tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+				PORT_CMD_FIS_ON, 10, 1000);
+	if (tmp & PORT_CMD_FIS_ON)
+		return -EBUSY;
+
+	return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 cmd;
+
+	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+	/* spin up device */
+	if (hpriv->cap & HOST_CAP_SSS) {
+		cmd |= PORT_CMD_SPIN_UP;
+		writel(cmd, port_mmio + PORT_CMD);
+	}
+
+	/* wake up link */
+	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			unsigned int hints)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+
+	if (policy != ATA_LPM_MAX_POWER) {
+		/* wakeup flag only applies to the max power policy */
+		hints &= ~ATA_LPM_WAKE_ONLY;
+
+		/*
+		 * Disable interrupts on Phy Ready. This keeps us from
+		 * getting woken up due to spurious phy ready
+		 * interrupts.
+		 */
+		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+		sata_link_scr_lpm(link, policy, false);
+	}
+
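+	/* Per the AHCI spec, ALPE enables aggressive host-initiated
+	 * link PM and ASP selects slumber (set) or partial (clear) as
+	 * the aggressive target state.
+	 */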
+	if (hpriv->cap & HOST_CAP_ALPM) {
+		u32 cmd = readl(port_mmio + PORT_CMD);
+
+		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
+			if (!(hints & ATA_LPM_WAKE_ONLY))
+				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+			cmd |= PORT_CMD_ICC_ACTIVE;
+
+			writel(cmd, port_mmio + PORT_CMD);
+			readl(port_mmio + PORT_CMD);
+
+			/* wait 10ms to be sure we've come out of LPM state */
+			ata_msleep(ap, 10);
+
+			if (hints & ATA_LPM_WAKE_ONLY)
+				return 0;
+		} else {
+			cmd |= PORT_CMD_ALPE;
+			if (policy == ATA_LPM_MIN_POWER)
+				cmd |= PORT_CMD_ASP;
+			else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
+				cmd &= ~PORT_CMD_ASP;
+
+			/* write out new cmd value */
+			writel(cmd, port_mmio + PORT_CMD);
+		}
+	}
+
+	/* set aggressive device sleep */
+	if ((hpriv->cap2 & HOST_CAP2_SDS) &&
+	    (hpriv->cap2 & HOST_CAP2_SADM) &&
+	    (link->device->flags & ATA_DFLAG_DEVSLP)) {
+		if (policy == ATA_LPM_MIN_POWER ||
+		    policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
+			ahci_set_aggressive_devslp(ap, true);
+		else
+			ahci_set_aggressive_devslp(ap, false);
+	}
+
+	if (policy == ATA_LPM_MAX_POWER) {
+		sata_link_scr_lpm(link, policy, false);
+
+		/* turn PHYRDY IRQ back on */
+		pp->intr_mask |= PORT_IRQ_PHYRDY;
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 cmd, scontrol;
+
+	if (!(hpriv->cap & HOST_CAP_SSS))
+		return;
+
+	/* put device into listen mode, first set PxSCTL.DET to 0 */
+	scontrol = readl(port_mmio + PORT_SCR_CTL);
+	scontrol &= ~0xf;
+	writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+	/* then set PxCMD.SUD to 0 */
+	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+	cmd &= ~PORT_CMD_SPIN_UP;
+	writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_link *link;
+	struct ahci_em_priv *emp;
+	ssize_t rc;
+	int i;
+
+	/* enable FIS reception */
+	ahci_start_fis_rx(ap);
+
+	/* enable DMA */
+	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
+		hpriv->start_engine(ap);
+
+	/* turn on LEDs */
+	if (ap->flags & ATA_FLAG_EM) {
+		ata_for_each_link(link, ap, EDGE) {
+			emp = &pp->em_priv[link->pmp];
+
+			/* EM Transmit bit may be busy during init */
+			for (i = 0; i < EM_MAX_RETRY; i++) {
+				rc = ap->ops->transmit_led_message(ap,
+							       emp->led_state,
+							       4);
+				/*
+				 * If busy, give a breather but do not
+				 * release EH ownership by using msleep()
+				 * instead of ata_msleep().  EM Transmit
+				 * bit is busy for the whole host and
+				 * releasing ownership will cause other
+				 * ports to fail the same way.
+				 */
+				if (rc == -EBUSY)
+					msleep(1);
+				else
+					break;
+			}
+		}
+	}
+
+	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+		ata_for_each_link(link, ap, EDGE)
+			ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+	int rc;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	/* disable DMA */
+	rc = hpriv->stop_engine(ap);
+	if (rc) {
+		*emsg = "failed to stop engine";
+		return rc;
+	}
+
+	/* disable FIS reception */
+	rc = ahci_stop_fis_rx(ap);
+	if (rc) {
+		*emsg = "failed stop FIS RX";
+		return rc;
+	}
+
+	return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 tmp;
+
+	/* We must be in AHCI mode before using anything
+	 * AHCI-specific, such as HOST_RESET.
+	 */
+	ahci_enable_ahci(mmio);
+
+	/* global controller reset */
+	if (!ahci_skip_host_reset) {
+		tmp = readl(mmio + HOST_CTL);
+		if ((tmp & HOST_RESET) == 0) {
+			writel(tmp | HOST_RESET, mmio + HOST_CTL);
+			readl(mmio + HOST_CTL); /* flush */
+		}
+
+		/*
+		 * To perform a host reset, the OS sets HOST_RESET
+		 * and polls until the bit reads back as "0".  The
+		 * reset must complete within 1 second, or the
+		 * hardware should be considered fried.
+		 */
+		tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
+					HOST_RESET, 10, 1000);
+
+		if (tmp & HOST_RESET) {
+			dev_err(host->dev, "controller reset failed (0x%x)\n",
+				tmp);
+			return -EIO;
+		}
+
+		/* turn on AHCI mode */
+		ahci_enable_ahci(mmio);
+
+		/* Some registers might be cleared on reset.  Restore
+		 * initial values.
+		 */
+		if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
+			ahci_restore_initial_config(host);
+	} else
+		dev_info(host->dev, "skipping global host reset\n");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_controller);
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+		return;
+
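+	/* count the activity and arm the blink timer; the callback
+	 * batches LED toggles at roughly 10ms granularity
+	 */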
+	emp->activity++;
+	if (!timer_pending(&emp->timer))
+		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(struct timer_list *t)
+{
+	struct ahci_em_priv *emp = from_timer(emp, t, timer);
+	struct ata_link *link = emp->link;
+	struct ata_port *ap = link->ap;
+
+	unsigned long led_message = emp->led_state;
+	u32 activity_led_state;
+	unsigned long flags;
+
+	led_message &= EM_MSG_LED_VALUE;
+	led_message |= ap->port_no | (link->pmp << 8);
+
+	/* check to see if we've had activity.  If so,
+	 * toggle state of LED and reset timer.  If not,
+	 * turn LED to desired idle state.
+	 */
+	spin_lock_irqsave(ap->lock, flags);
+	if (emp->saved_activity != emp->activity) {
+		emp->saved_activity = emp->activity;
+		/* get the current LED state */
+		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+		if (activity_led_state)
+			activity_led_state = 0;
+		else
+			activity_led_state = 1;
+
+		/* clear old state */
+		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+		/* toggle state */
+		led_message |= (activity_led_state << 16);
+		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+	} else {
+		/* switch to idle */
+		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+		if (emp->blink_policy == BLINK_OFF)
+			led_message |= (1 << 16);
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+	ap->ops->transmit_led_message(ap, led_message, 4);
+}
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+	/* init activity stats, setup timer */
+	emp->saved_activity = emp->activity = 0;
+	emp->link = link;
+	timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
+
+	/* check our blink policy and set flag for link if it's enabled */
+	if (emp->blink_policy)
+		link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+int ahci_reset_em(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 em_ctl;
+
+	em_ctl = readl(mmio + HOST_EM_CTL);
+	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+		return -EINVAL;
+
+	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_em);
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+					ssize_t size)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 em_ctl;
+	u32 message[] = {0, 0};
+	unsigned long flags;
+	int pmp;
+	struct ahci_em_priv *emp;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp < EM_MAX_SLOTS)
+		emp = &pp->em_priv[pmp];
+	else
+		return -EINVAL;
+
+	ahci_rpm_get_port(ap);
+	spin_lock_irqsave(ap->lock, flags);
+
+	/*
+	 * If we are still busy transmitting a previous message,
+	 * do not allow a new one to be queued.
+	 */
+	em_ctl = readl(mmio + HOST_EM_CTL);
+	if (em_ctl & EM_CTL_TM) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		ahci_rpm_put_port(ap);
+		return -EBUSY;
+	}
+
+	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
+		/*
+		 * create message header - this is all zero except for
+		 * the message size, which is 4 bytes.
+		 */
+		message[0] |= (4 << 8);
+
+		/* ignore 0:4 of byte zero, fill in port info yourself */
+		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+		/* write message to EM_LOC */
+		writel(message[0], mmio + hpriv->em_loc);
+		writel(message[1], mmio + hpriv->em_loc+4);
+
+		/*
+		 * tell hardware to transmit the message
+		 */
+		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+	}
+
+	/* save off new led state for port/slot */
+	emp->led_state = state;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	ahci_rpm_put_port(ap);
+
+	return size;
+}
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_link *link;
+	struct ahci_em_priv *emp;
+	int rc = 0;
+
+	ata_for_each_link(link, ap, EDGE) {
+		emp = &pp->em_priv[link->pmp];
+		rc += sprintf(buf, "%lx\n", emp->led_state);
+	}
+	return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+				size_t size)
+{
+	unsigned int state;
+	int pmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_em_priv *emp;
+
+	if (kstrtouint(buf, 0, &state) < 0)
+		return -EINVAL;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp < EM_MAX_SLOTS) {
+		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
+		emp = &pp->em_priv[pmp];
+	} else {
+		return -EINVAL;
+	}
+
+	/* mask off the activity bits if we are in sw_activity
+	 * mode, user should turn off sw_activity before setting
+	 * activity led through em_message
+	 */
+	if (emp->blink_policy)
+		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+	return ap->ops->transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+	u32 port_led_state = emp->led_state;
+
+	/* save the desired Activity LED behavior */
+	if (val == OFF) {
+		/* clear LFLAG */
+		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+		/* set the LED to OFF */
+		port_led_state &= EM_MSG_LED_VALUE_OFF;
+		port_led_state |= (ap->port_no | (link->pmp << 8));
+		ap->ops->transmit_led_message(ap, port_led_state, 4);
+	} else {
+		link->flags |= ATA_LFLAG_SW_ACTIVITY;
+		if (val == BLINK_OFF) {
+			/* set LED to ON for idle */
+			port_led_state &= EM_MSG_LED_VALUE_OFF;
+			port_led_state |= (ap->port_no | (link->pmp << 8));
+			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+			ap->ops->transmit_led_message(ap, port_led_state, 4);
+		}
+	}
+	emp->blink_policy = val;
+	return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+	/* display the saved value of activity behavior for this
+	 * disk.
+	 */
+	return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct device *dev, struct ata_port *ap,
+			   int port_no, void __iomem *mmio,
+			   void __iomem *port_mmio)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	const char *emsg = NULL;
+	int rc;
+	u32 tmp;
+
+	/* make sure port is not active */
+	rc = ahci_deinit_port(ap, &emsg);
+	if (rc)
+		dev_warn(dev, "%s (%d)\n", emsg, rc);
+
+	/* clear SError */
+	tmp = readl(port_mmio + PORT_SCR_ERR);
+	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+	writel(tmp, port_mmio + PORT_SCR_ERR);
+
+	/* clear port IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+	if (tmp)
+		writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+	writel(1 << port_no, mmio + HOST_IRQ_STAT);
+
+	/* mark esata ports */
+	tmp = readl(port_mmio + PORT_CMD);
+	if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
+		ap->pflags |= ATA_PFLAG_EXTERNAL;
+}
+
+void ahci_init_controller(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	int i;
+	void __iomem *port_mmio;
+	u32 tmp;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		port_mmio = ahci_port_base(ap);
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
+	}
+
+	tmp = readl(mmio + HOST_CTL);
+	VPRINTK("HOST_CTL 0x%x\n", tmp);
+	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+	tmp = readl(mmio + HOST_CTL);
+	VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+EXPORT_SYMBOL_GPL(ahci_init_controller);
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+	if (hpriv->flags & AHCI_HFLAG_SECT255) {
+		dev->max_sectors = 255;
+		ata_dev_info(dev,
+			     "SB600 AHCI: limiting to 255 sectors per cmd\n");
+	}
+}
+
+unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ata_taskfile tf;
+	u32 tmp;
+
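+	/* PxSIG latches the signature delivered by the device's initial
+	 * D2H Register FIS; unpack it into a taskfile so the generic
+	 * classifier can interpret it.
+	 */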
+	tmp = readl(port_mmio + PORT_SIG);
+	tf.lbah		= (tmp >> 24)	& 0xff;
+	tf.lbam		= (tmp >> 16)	& 0xff;
+	tf.lbal		= (tmp >> 8)	& 0xff;
+	tf.nsect	= (tmp)		& 0xff;
+
+	return ata_dev_classify(&tf);
+}
+EXPORT_SYMBOL_GPL(ahci_dev_classify);
+
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			u32 opts)
+{
+	dma_addr_t cmd_tbl_dma;
+
+	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+	pp->cmd_slot[tag].status = 0;
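+	/*
+	 * Split the 64-bit command table bus address across the two
+	 * 32-bit address fields.  The two-step 16-bit shift avoids an
+	 * invalid ">> 32" when dma_addr_t is only 32 bits wide.
+	 */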
+	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
+
+int ahci_kick_engine(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+	u32 tmp;
+	int busy, rc;
+
+	/* stop engine */
+	rc = hpriv->stop_engine(ap);
+	if (rc)
+		goto out_restart;
+
+	/* need to do CLO?
+	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
+	 */
+	busy = status & (ATA_BUSY | ATA_DRQ);
+	if (!busy && !sata_pmp_attached(ap)) {
+		rc = 0;
+		goto out_restart;
+	}
+
+	if (!(hpriv->cap & HOST_CAP_CLO)) {
+		rc = -EOPNOTSUPP;
+		goto out_restart;
+	}
+
+	/* perform CLO */
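+	/* PORT_CMD_CLO (command list override) forces the BSY and DRQ
+	 * bits in PxTFD clear so a new command list can be processed;
+	 * the HBA clears the bit again once the override completes.
+	 */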
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	rc = 0;
+	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		rc = -EIO;
+
+	/* restart engine */
+ out_restart:
+	hpriv->start_engine(ap);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+				struct ata_taskfile *tf, int is_cmd, u16 flags,
+				unsigned long timeout_msec)
+{
+	const u32 cmd_fis_len = 5; /* five dwords */
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u8 *fis = pp->cmd_tbl;
+	u32 tmp;
+
+	/* prep the command */
+	ata_tf_to_fis(tf, pmp, is_cmd, fis);
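+	/* polled commands always go through command slot 0 */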
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+	/* set port value for softreset of Port Multiplier */
+	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+		tmp = readl(port_mmio + PORT_FBS);
+		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		tmp |= pmp << PORT_FBS_DEV_OFFSET;
+		writel(tmp, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = pmp;
+	}
+
+	/* issue & wait */
+	writel(1, port_mmio + PORT_CMD_ISSUE);
+
+	if (timeout_msec) {
+		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
+					0x1, 0x1, 1, timeout_msec);
+		if (tmp & 0x1) {
+			ahci_kick_engine(ap);
+			return -EBUSY;
+		}
+	} else
+		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+
+	return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+		      int pmp, unsigned long deadline,
+		      int (*check_ready)(struct ata_link *link))
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	const char *reason = NULL;
+	unsigned long now, msecs;
+	struct ata_taskfile tf;
+	bool fbs_disabled = false;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* prepare for SRST (AHCI-1.1 10.4.1) */
+	rc = ahci_kick_engine(ap);
+	if (rc && rc != -EOPNOTSUPP)
+		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
+	/*
+	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+	 * clear PxFBS.EN to '0' prior to issuing software reset to devices
+	 * attached to a port multiplier.
+	 */
+	if (!ata_is_host_link(link) && pp->fbs_enabled) {
+		ahci_disable_fbs(ap);
+		fbs_disabled = true;
+	}
+
+	ata_tf_init(link->device, &tf);
+
+	/* issue the first H2D Register FIS */
+	msecs = 0;
+	now = jiffies;
+	if (time_after(deadline, now))
+		msecs = jiffies_to_msecs(deadline - now);
+
+	tf.ctl |= ATA_SRST;
+	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+		rc = -EIO;
+		reason = "1st FIS failed";
+		goto fail;
+	}
+
+	/* spec says at least 5us, but be generous and sleep for 1ms */
+	ata_msleep(ap, 1);
+
+	/* issue the second H2D Register FIS */
+	tf.ctl &= ~ATA_SRST;
+	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+	/* wait for link to become ready */
+	rc = ata_wait_after_reset(link, deadline, check_ready);
+	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+		/*
+		 * Workaround for cases where link online status can't
+		 * be trusted.  Treat device readiness timeout as link
+		 * offline.
+		 */
+		ata_link_info(link, "device not ready, treating as offline\n");
+		*class = ATA_DEV_NONE;
+	} else if (rc) {
+		/* link occupied, -ENODEV too is an error */
+		reason = "device not ready";
+		goto fail;
+	} else
+		*class = ahci_dev_classify(ap);
+
+	/* re-enable FBS if disabled before */
+	if (fbs_disabled)
+		ahci_enable_fbs(ap);
+
+	DPRINTK("EXIT, class=%u\n", *class);
+	return 0;
+
+ fail:
+	ata_link_err(link, "softreset failed (%s)\n", reason);
+	return rc;
+}
+
+int ahci_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+	return ata_check_ready(status);
+}
+EXPORT_SYMBOL_GPL(ahci_check_ready);
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	int pmp = sata_srst_pmp(link);
+
+	DPRINTK("ENTER\n");
+
+	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+EXPORT_SYMBOL_GPL(ahci_do_softreset);
+
+static int ahci_bad_pmp_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
+
+	/*
+	 * If BAD PMP is flagged due to the HW bug, there is no need to
+	 * check TFDATA, which saves the timeout delay.
+	 */
+	if (irq_status & PORT_IRQ_BAD_PMP)
+		return -EIO;
+
+	return ata_check_ready(status);
+}
+
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+				    unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int pmp = sata_srst_pmp(link);
+	int rc;
+	u32 irq_sts;
+
+	DPRINTK("ENTER\n");
+
+	rc = ahci_do_softreset(link, class, pmp, deadline,
+			       ahci_bad_pmp_check_ready);
+
+	/*
+	 * Soft reset fails with IPMS set when PMP is enabled but a
+	 * SATA HDD/ODD is connected to the SATA port; retry the soft
+	 * reset against port 0.
+	 */
+	if (rc == -EIO) {
+		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
+		if (irq_sts & PORT_IRQ_BAD_PMP) {
+			ata_link_warn(link,
+					"applying PMP SRST workaround "
+					"and retrying\n");
+			rc = ahci_do_softreset(link, class, 0, deadline,
+					       ahci_check_ready);
+		}
+	}
+
+	return rc;
+}
+
+int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
+		      unsigned long deadline, bool *online)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	hpriv->stop_engine(ap);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = ATA_BUSY;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	rc = sata_link_hardreset(link, timing, deadline, online,
+				 ahci_check_ready);
+
+	hpriv->start_engine(ap);
+
+	if (*online)
+		*class = ahci_dev_classify(ap);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_do_hardreset);
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	bool online;
+
+	return ahci_do_hardreset(link, class, deadline, &online);
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 new_tmp, tmp;
+
+	ata_std_postreset(link, class);
+
+	/* Make sure port's ATAPI bit is set appropriately */
+	new_tmp = tmp = readl(port_mmio + PORT_CMD);
+	if (*class == ATA_DEV_ATAPI)
+		new_tmp |= PORT_CMD_ATAPI;
+	else
+		new_tmp &= ~PORT_CMD_ATAPI;
+	if (new_tmp != tmp) {
+		writel(new_tmp, port_mmio + PORT_CMD);
+		readl(port_mmio + PORT_CMD); /* flush */
+	}
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+	struct scatterlist *sg;
+	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+	unsigned int si;
+
+	VPRINTK("ENTER\n");
+
+	/*
+	 * Next, the S/G list.
+	 */
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
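+		/* PRD entries store the byte count minus one in
+		 * flags_size; the high address dword again uses the
+		 * two-step shift for 32-bit dma_addr_t builds
+		 */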
+		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+	}
+
+	return si;
+}
+
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+
+	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+		return ata_std_qc_defer(qc);
+	else
+		return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = ata_is_atapi(qc->tf.protocol);
+	void *cmd_tbl;
+	u32 opts;
+	const u32 cmd_fis_len = 5; /* five dwords */
+	unsigned int n_elem;
+
+	/*
+	 * Fill in command table information.  First, the header,
+	 * a SATA Register - Host to Device command FIS.
+	 */
+	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+	if (is_atapi) {
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+	}
+
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+	/*
+	 * Fill in command slot information.
+	 */
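+	/* opts is command header DW0: bits 4:0 hold the FIS length in
+	 * dwords, bits 15:12 the PMP port and bits 31:16 the PRDT
+	 * entry count; AHCI_CMD_WRITE and AHCI_CMD_ATAPI flag write
+	 * direction and ATAPI commands respectively.
+	 */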
+	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+}
+
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 fbs = readl(port_mmio + PORT_FBS);
+	int retries = 3;
+
+	DPRINTK("ENTER\n");
+	BUG_ON(!pp->fbs_enabled);
+
+	/* The time to wait for DEC is not specified by the AHCI spec;
+	 * add a retry loop for safety.
+	 */
+	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+	fbs = readl(port_mmio + PORT_FBS);
+	while ((fbs & PORT_FBS_DEC) && retries--) {
+		udelay(1);
+		fbs = readl(port_mmio + PORT_FBS);
+	}
+
+	if (fbs & PORT_FBS_DEC)
+		dev_err(ap->host->dev, "failed to clear device error\n");
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *host_ehi = &ap->link.eh_info;
+	struct ata_link *link = NULL;
+	struct ata_queued_cmd *active_qc;
+	struct ata_eh_info *active_ehi;
+	bool fbs_need_dec = false;
+	u32 serror;
+
+	/* determine active link with error */
+	if (pp->fbs_enabled) {
+		void __iomem *port_mmio = ahci_port_base(ap);
+		u32 fbs = readl(port_mmio + PORT_FBS);
+		int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
+			link = &ap->pmp_link[pmp];
+			fbs_need_dec = true;
+		}
+
+	} else
+		ata_for_each_link(link, ap, EDGE)
+			if (ata_link_active(link))
+				break;
+
+	if (!link)
+		link = &ap->link;
+
+	active_qc = ata_qc_from_tag(ap, link->active_tag);
+	active_ehi = &link->eh_info;
+
+	/* record irq stat */
+	ata_ehi_clear_desc(host_ehi);
+	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+	ahci_scr_write(&ap->link, SCR_ERROR, serror);
+	host_ehi->serror |= serror;
+
+	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
+	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+		irq_stat &= ~PORT_IRQ_IF_ERR;
+
+	if (irq_stat & PORT_IRQ_TF_ERR) {
+		/* If a qc is active, charge it; otherwise, charge the
+		 * active link.  There's no active qc on NCQ errors; the
+		 * failed command will be determined by EH by reading
+		 * log page 10h.
+		 */
+		if (active_qc)
+			active_qc->err_mask |= AC_ERR_DEV;
+		else
+			active_ehi->err_mask |= AC_ERR_DEV;
+
+		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+			host_ehi->serror &= ~SERR_INTERNAL;
+	}
+
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = pp->rx_fis + RX_FIS_UNK;
+
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(active_ehi,
+				  "unknown FIS %08x %08x %08x %08x" ,
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
+
+	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(active_ehi, "incorrect PMP");
+	}
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		host_ehi->err_mask |= AC_ERR_HOST_BUS;
+		host_ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(host_ehi, "host bus error");
+	}
+
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		if (fbs_need_dec)
+			active_ehi->err_mask |= AC_ERR_DEV;
+		else {
+			host_ehi->err_mask |= AC_ERR_ATA_BUS;
+			host_ehi->action |= ATA_EH_RESET;
+		}
+
+		ata_ehi_push_desc(host_ehi, "interface fatal error");
+	}
+
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		ata_ehi_hotplugged(host_ehi);
+		ata_ehi_push_desc(host_ehi, "%s",
+			irq_stat & PORT_IRQ_CONNECT ?
+			"connection status changed" : "PHY RDY changed");
+	}
+
+	/* okay, let's hand over to EH */
+
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else if (fbs_need_dec) {
+		ata_link_abort(link);
+		ahci_fbs_dec_intr(ap);
+	} else
+		ata_port_abort(ap);
+}
+
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+				       void __iomem *port_mmio, u32 status)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+	u32 qc_active = 0;
+	int rc;
+
+	/* ignore BAD_PMP while resetting */
+	if (unlikely(resetting))
+		status &= ~PORT_IRQ_BAD_PMP;
+
+	if (sata_lpm_ignore_phy_events(&ap->link)) {
+		status &= ~PORT_IRQ_PHYRDY;
+		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
+	}
+
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
+	}
+
+	if (status & PORT_IRQ_SDB_FIS) {
+		/* If SNotification is available, leave notification
+		 * handling to sata_async_notification().  If not,
+		 * emulate it by snooping SDB FIS RX area.
+		 *
+		 * Snooping FIS RX area is probably cheaper than
+		 * poking SNotification but some controllers which
+		 * implement SNotification, ICH9 for example, don't
+		 * store AN SDB FIS into receive area.
+		 */
+		if (hpriv->cap & HOST_CAP_SNTF)
+			sata_async_notification(ap);
+		else {
+			/* If the 'N' bit in word 0 of the FIS is set,
+			 * we just received asynchronous notification.
+			 * Tell libata about it.
+			 *
+			 * Lack of SNotification should not appear in
+			 * ahci 1.2, so the workaround is unnecessary
+			 * when FBS is enabled.
+			 */
+			if (pp->fbs_enabled)
+				WARN_ON_ONCE(1);
+			else {
+				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+				u32 f0 = le32_to_cpu(f[0]);
+				if (f0 & (1 << 15))
+					sata_async_notification(ap);
+			}
+		}
+	}
+
+	/* pp->active_link is not reliable once FBS is enabled; both
+	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+	 * NCQ and non-NCQ commands may be in flight at the same time.
+	 */
+	if (pp->fbs_enabled) {
+		if (ap->qc_active) {
+			qc_active = readl(port_mmio + PORT_SCR_ACT);
+			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+		}
+	} else {
+		/* pp->active_link is valid iff any command is in flight */
+		if (ap->qc_active && pp->active_link->sactive)
+			qc_active = readl(port_mmio + PORT_SCR_ACT);
+		else
+			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+	}
+
+	rc = ata_qc_complete_multiple(ap, qc_active);
+
+	/* while resetting, invalid completions are expected */
+	if (unlikely(rc < 0 && !resetting)) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_RESET;
+		ata_port_freeze(ap);
+	}
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 status;
+
+	status = readl(port_mmio + PORT_IRQ_STAT);
+	writel(status, port_mmio + PORT_IRQ_STAT);
+
+	ahci_handle_port_interrupt(ap, port_mmio, status);
+}
+
+static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
+{
+	struct ata_port *ap = dev_instance;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 status;
+
+	VPRINTK("ENTER\n");
+
+	status = readl(port_mmio + PORT_IRQ_STAT);
+	writel(status, port_mmio + PORT_IRQ_STAT);
+
+	spin_lock(ap->lock);
+	ahci_handle_port_interrupt(ap, port_mmio, status);
+	spin_unlock(ap->lock);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_HANDLED;
+}
+
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+{
+	unsigned int i, handled = 0;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap;
+
+		if (!(irq_masked & (1 << i)))
+			continue;
+
+		ap = host->ports[i];
+		if (ap) {
+			ahci_port_intr(ap);
+			VPRINTK("port %u\n", i);
+		} else {
+			VPRINTK("port %u (no irq)\n", i);
+			if (ata_ratelimit())
+				dev_warn(host->dev,
+					 "interrupt on disabled port %u\n", i);
+		}
+
+		handled = 1;
+	}
+
+	return handled;
+}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
+
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ahci_host_priv *hpriv;
+	unsigned int rc = 0;
+	void __iomem *mmio;
+	u32 irq_stat, irq_masked;
+
+	VPRINTK("ENTER\n");
+
+	hpriv = host->private_data;
+	mmio = hpriv->mmio;
+
+	/* sigh.  0xffffffff is a valid return from h/w */
+	irq_stat = readl(mmio + HOST_IRQ_STAT);
+	if (!irq_stat)
+		return IRQ_NONE;
+
+	irq_masked = irq_stat & hpriv->port_map;
+
+	spin_lock(&host->lock);
+
+	rc = ahci_handle_port_intr(host, irq_masked);
+
+	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
+	 * it should be cleared after all the port events are cleared;
+	 * otherwise, it will raise a spurious interrupt after each
+	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
+	 * information.
+	 *
+	 * Also, use the unmasked value to clear the interrupt, as a
+	 * spurious pending event on a dummy port might cause a
+	 * screaming IRQ.
+	 */
+	writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+	spin_unlock(&host->lock);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(rc);
+}
+
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+
+	/* Keep track of the currently active link.  It will be used
+	 * in completion path to determine whether NCQ phase is in
+	 * progress.
+	 */
+	pp->active_link = qc->dev->link;
+
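+	/* For NCQ, the tag bit must be set in PxSACT before the same
+	 * bit is written to PxCI, per the AHCI NCQ issue rules.
+	 */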
+	if (ata_is_ncq(qc->tf.protocol))
+		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);
+
+	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+		u32 fbs = readl(port_mmio + PORT_FBS);
+		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+		writel(fbs, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = qc->dev->link->pmp;
+	}
+
+	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);
+
+	ahci_sw_activity(qc->dev->link);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_qc_issue);
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *rx_fis = pp->rx_fis;
+
+	if (pp->fbs_enabled)
+		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+	/*
+	 * After a successful execution of an ATA PIO data-in command,
+	 * the device doesn't send D2H Reg FIS to update the TF and
+	 * the host should take TF and E_Status from the preceding PIO
+	 * Setup FIS.
+	 */
+	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+	    !(qc->flags & ATA_QCFLAG_FAILED)) {
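+		/* byte 15 of the PIO Setup FIS holds E_Status, the
+		 * ending device status for the transfer
+		 */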
+		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
+		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+	} else
+		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+	return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+void ahci_error_handler(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
+		hpriv->stop_engine(ap);
+		hpriv->start_engine(ap);
+	}
+
+	sata_pmp_error_handler(ap);
+
+	if (!ata_dev_enabled(ap->link.device))
+		hpriv->stop_engine(ap);
+}
+EXPORT_SYMBOL_GPL(ahci_error_handler);
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		ahci_kick_engine(ap);
+}
+
+static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ata_device *dev = ap->link.device;
+	u32 devslp, dm, dito, mdat, deto, dito_conf;
+	int rc;
+	unsigned int err_mask;
+
+	devslp = readl(port_mmio + PORT_DEVSLP);
+	if (!(devslp & PORT_DEVSLP_DSP)) {
+		dev_info(ap->host->dev, "port does not support device sleep\n");
+		return;
+	}
+
+	/* disable device sleep */
+	if (!sleep) {
+		if (devslp & PORT_DEVSLP_ADSE) {
+			writel(devslp & ~PORT_DEVSLP_ADSE,
+			       port_mmio + PORT_DEVSLP);
+			err_mask = ata_dev_set_feature(dev,
+						       SETFEATURES_SATA_DISABLE,
+						       SATA_DEVSLP);
+			if (err_mask && err_mask != AC_ERR_DEV)
+				ata_dev_warn(dev, "failed to disable DEVSLP\n");
+		}
+		return;
+	}
+
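+	/* The effective device sleep idle timeout is DITO * (DM + 1)
+	 * milliseconds, so divide the requested timeout by the
+	 * multiplier and clamp the result to the 10-bit DITO field.
+	 */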
+	dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+	dito = devslp_idle_timeout / (dm + 1);
+	if (dito > 0x3ff)
+		dito = 0x3ff;
+
+	dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
+
+	/* device sleep was already enabled and same dito */
+	if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
+		return;
+
+	/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
+	rc = hpriv->stop_engine(ap);
+	if (rc)
+		return;
+
+	/* If the MDAT read from the device is zero, fall back to the
+	 * nominal value of 10 ms; likewise fall back to the nominal
+	 * 20 ms if DETO is zero.
+	 */
+	if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
+	    ATA_LOG_DEVSLP_VALID_MASK) {
+		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
+		       ATA_LOG_DEVSLP_MDAT_MASK;
+		if (!mdat)
+			mdat = 10;
+		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
+		if (!deto)
+			deto = 20;
+	} else {
+		mdat = 10;
+		deto = 20;
+	}
+
+	/* Clear the dito, mdat and deto bit fields */
+	devslp &= ~GENMASK_ULL(24, 2);
+	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
+		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
+		   (deto << PORT_DEVSLP_DETO_OFFSET) |
+		   PORT_DEVSLP_ADSE);
+	writel(devslp, port_mmio + PORT_DEVSLP);
+
+	hpriv->start_engine(ap);
+
+	/* enable device sleep feature for the drive */
+	err_mask = ata_dev_set_feature(dev,
+				       SETFEATURES_SATA_ENABLE,
+				       SATA_DEVSLP);
+	if (err_mask && err_mask != AC_ERR_DEV)
+		ata_dev_warn(dev, "failed to enable DEVSLP\n");
+}
+
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 fbs;
+	int rc;
+
+	if (!pp->fbs_supported)
+		return;
+
+	fbs = readl(port_mmio + PORT_FBS);
+	if (fbs & PORT_FBS_EN) {
+		pp->fbs_enabled = true;
+		pp->fbs_last_dev = -1; /* initialization */
+		return;
+	}
+
+	rc = hpriv->stop_engine(ap);
+	if (rc)
+		return;
+
+	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+	fbs = readl(port_mmio + PORT_FBS);
+	if (fbs & PORT_FBS_EN) {
+		dev_info(ap->host->dev, "FBS is enabled\n");
+		pp->fbs_enabled = true;
+		pp->fbs_last_dev = -1; /* initialization */
+	} else
+		dev_err(ap->host->dev, "Failed to enable FBS\n");
+
+	hpriv->start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 fbs;
+	int rc;
+
+	if (!pp->fbs_supported)
+		return;
+
+	fbs = readl(port_mmio + PORT_FBS);
+	if ((fbs & PORT_FBS_EN) == 0) {
+		pp->fbs_enabled = false;
+		return;
+	}
+
+	rc = hpriv->stop_engine(ap);
+	if (rc)
+		return;
+
+	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+	fbs = readl(port_mmio + PORT_FBS);
+	if (fbs & PORT_FBS_EN)
+		dev_err(ap->host->dev, "Failed to disable FBS\n");
+	else {
+		dev_info(ap->host->dev, "FBS is disabled\n");
+		pp->fbs_enabled = false;
+	}
+
+	hpriv->start_engine(ap);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 cmd;
+
+	cmd = readl(port_mmio + PORT_CMD);
+	cmd |= PORT_CMD_PMP;
+	writel(cmd, port_mmio + PORT_CMD);
+
+	ahci_enable_fbs(ap);
+
+	pp->intr_mask |= PORT_IRQ_BAD_PMP;
+
+	/*
+	 * We must not change the port interrupt mask register if the
+	 * port is marked frozen, the value in pp->intr_mask will be
+	 * restored later when the port is thawed.
+	 *
+	 * Note that during initialization, the port is marked as
+	 * frozen since the irq handler is not yet registered.
+	 */
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 cmd;
+
+	ahci_disable_fbs(ap);
+
+	cmd = readl(port_mmio + PORT_CMD);
+	cmd &= ~PORT_CMD_PMP;
+	writel(cmd, port_mmio + PORT_CMD);
+
+	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+
+	/* see comment above in ahci_pmp_attach() */
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+int ahci_port_resume(struct ata_port *ap)
+{
+	ahci_rpm_get_port(ap);
+
+	ahci_power_up(ap);
+	ahci_start_port(ap);
+
+	if (sata_pmp_attached(ap))
+		ahci_pmp_attach(ap);
+	else
+		ahci_pmp_detach(ap);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_port_resume);
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	const char *emsg = NULL;
+	int rc;
+
+	rc = ahci_deinit_port(ap, &emsg);
+	if (rc == 0)
+		ahci_power_down(ap);
+	else {
+		ata_port_err(ap, "%s (%d)\n", emsg, rc);
+		ata_port_freeze(ap);
+	}
+
+	ahci_rpm_put_port(ap);
+	return rc;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct device *dev = ap->host->dev;
+	struct ahci_port_priv *pp;
+	void *mem;
+	dma_addr_t mem_dma;
+	size_t dma_sz, rx_fis_sz;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	if (ap->host->n_ports > 1) {
+		pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
+		if (!pp->irq_desc) {
+			devm_kfree(dev, pp);
+			return -ENOMEM;
+		}
+		snprintf(pp->irq_desc, 8,
+			 "%s%d", dev_driver_string(dev), ap->port_no);
+	}
+
+	/* check FBS capability */
+	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = ahci_port_base(ap);
+		u32 cmd = readl(port_mmio + PORT_CMD);
+		if (cmd & PORT_CMD_FBSCP)
+			pp->fbs_supported = true;
+		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+				 ap->port_no);
+			pp->fbs_supported = true;
+		} else
+			dev_warn(dev, "port %d is not capable of FBS\n",
+				 ap->port_no);
+	}
+
+	if (pp->fbs_supported) {
+		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+	} else {
+		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+		rx_fis_sz = AHCI_RX_FIS_SZ;
+	}
+
+	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, dma_sz);
+
+	/*
+	 * First item in the chunk of DMA memory: the 32-slot command
+	 * list, each slot 32 bytes in size
+	 */
+	pp->cmd_slot = mem;
+	pp->cmd_slot_dma = mem_dma;
+
+	mem += AHCI_CMD_SLOT_SZ;
+	mem_dma += AHCI_CMD_SLOT_SZ;
+
+	/*
+	 * Second item: Received-FIS area
+	 */
+	pp->rx_fis = mem;
+	pp->rx_fis_dma = mem_dma;
+
+	mem += rx_fis_sz;
+	mem_dma += rx_fis_sz;
+
+	/*
+	 * Third item: data area for storing a single command
+	 * and its scatter-gather table
+	 */
+	pp->cmd_tbl = mem;
+	pp->cmd_tbl_dma = mem_dma;
+
+	/*
+	 * Save off initial list of interrupts to be enabled.
+	 * This could be changed later
+	 */
+	pp->intr_mask = DEF_PORT_IRQ;
+
+	/*
+	 * Switch to per-port locking in case each port has its own MSI vector.
+	 */
+	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
+		spin_lock_init(&pp->lock);
+		ap->lock = &pp->lock;
+	}
+
+	ap->private_data = pp;
+
+	/* engage engines, captain */
+	return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+	const char *emsg = NULL;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *host_mmio = hpriv->mmio;
+	int rc;
+
+	/* de-initialize port */
+	rc = ahci_deinit_port(ap, &emsg);
+	if (rc)
+		ata_port_warn(ap, "%s (%d)\n", emsg, rc);
+
+	/*
+	 * Clear GHC.IS to prevent stuck INTx after disabling MSI and
+	 * re-enabling INTx.
+	 */
+	writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
+
+	ahci_rpm_put_port(ap);
+}
+
+void ahci_print_info(struct ata_host *host, const char *scc_s)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	u32 vers, cap, cap2, impl, speed;
+	const char *speed_s;
+
+	vers = hpriv->version;
+	cap = hpriv->cap;
+	cap2 = hpriv->cap2;
+	impl = hpriv->port_map;
+
+	speed = (cap >> 20) & 0xf;
+	if (speed == 1)
+		speed_s = "1.5";
+	else if (speed == 2)
+		speed_s = "3";
+	else if (speed == 3)
+		speed_s = "6";
+	else
+		speed_s = "?";
+
+	dev_info(host->dev,
+		"AHCI %02x%02x.%02x%02x "
+		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+		,
+
+		(vers >> 24) & 0xff,
+		(vers >> 16) & 0xff,
+		(vers >> 8) & 0xff,
+		vers & 0xff,
+
+		((cap >> 8) & 0x1f) + 1,
+		(cap & 0x1f) + 1,
+		speed_s,
+		impl,
+		scc_s);
+
+	dev_info(host->dev,
+		"flags: "
+		"%s%s%s%s%s%s%s"
+		"%s%s%s%s%s%s%s"
+		"%s%s%s%s%s%s%s"
+		"%s%s\n"
+		,
+
+		cap & HOST_CAP_64 ? "64bit " : "",
+		cap & HOST_CAP_NCQ ? "ncq " : "",
+		cap & HOST_CAP_SNTF ? "sntf " : "",
+		cap & HOST_CAP_MPS ? "ilck " : "",
+		cap & HOST_CAP_SSS ? "stag " : "",
+		cap & HOST_CAP_ALPM ? "pm " : "",
+		cap & HOST_CAP_LED ? "led " : "",
+		cap & HOST_CAP_CLO ? "clo " : "",
+		cap & HOST_CAP_ONLY ? "only " : "",
+		cap & HOST_CAP_PMP ? "pmp " : "",
+		cap & HOST_CAP_FBS ? "fbs " : "",
+		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
+		cap & HOST_CAP_SSC ? "slum " : "",
+		cap & HOST_CAP_PART ? "part " : "",
+		cap & HOST_CAP_CCC ? "ccc " : "",
+		cap & HOST_CAP_EMS ? "ems " : "",
+		cap & HOST_CAP_SXS ? "sxs " : "",
+		cap2 & HOST_CAP2_DESO ? "deso " : "",
+		cap2 & HOST_CAP2_SADM ? "sadm " : "",
+		cap2 & HOST_CAP2_SDS ? "sds " : "",
+		cap2 & HOST_CAP2_APST ? "apst " : "",
+		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
+		cap2 & HOST_CAP2_BOH ? "boh " : ""
+		);
+}
+EXPORT_SYMBOL_GPL(ahci_print_info);
+
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+			  struct ata_port_info *pi)
+{
+	u8 messages;
+	void __iomem *mmio = hpriv->mmio;
+	u32 em_loc = readl(mmio + HOST_EM_LOC);
+	u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+	if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
+		return;
+
+	messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+	if (messages) {
+		/* HOST_EM_LOC holds the message buffer offset in its
+		 * upper 16 bits and the buffer size in its low bits,
+		 * both in dwords; convert them to bytes.
+		 */
+		hpriv->em_loc = ((em_loc >> 16) * 4);
+		hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
+		hpriv->em_msg_type = messages;
+		pi->flags |= ATA_FLAG_EM;
+		if (!(em_ctl & EM_CTL_ALHD))
+			pi->flags |= ATA_FLAG_SW_ACTIVITY;
+	}
+}
+EXPORT_SYMBOL_GPL(ahci_set_em_messages);
+
+static int ahci_host_activate_multi_irqs(struct ata_host *host,
+					 struct scsi_host_template *sht)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	int i, rc;
+
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+	/*
+	 * Request IRQs according to AHCI-1.1 when multiple MSIs were
+	 * allocated: one MSI per port, starting from @irq.
+	 */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ahci_port_priv *pp = host->ports[i]->private_data;
+		int irq = hpriv->get_irq_vector(host, i);
+
+		/* Do not receive interrupts sent by dummy ports */
+		if (!pp) {
+			disable_irq(irq);
+			continue;
+		}
+
+		rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard,
+				0, pp->irq_desc, host->ports[i]);
+
+		if (rc)
+			return rc;
+		ata_port_desc(host->ports[i], "irq %d", irq);
+	}
+
+	return ata_host_register(host, sht);
+}
+
+/**
+ *	ahci_host_activate - start AHCI host, request IRQs and register it
+ *	@host: target ATA host
+ *	@sht: scsi_host_template to use when registering the host
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	int irq = hpriv->irq;
+	int rc;
+
+	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
+		if (hpriv->irq_handler)
+			dev_warn(host->dev,
+			         "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+		if (!hpriv->get_irq_vector) {
+			dev_err(host->dev,
+				"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+			return -EIO;
+		}
+
+		rc = ahci_host_activate_multi_irqs(host, sht);
+	} else {
+		rc = ata_host_activate(host, irq, hpriv->irq_handler,
+				       IRQF_SHARED, sht);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_host_activate);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
new file mode 100644
index 0000000..c92c10d
--- /dev/null
+++ b/drivers/ata/libahci_platform.c
@@ -0,0 +1,800 @@
+/*
+ * AHCI SATA platform library
+ *
+ * Copyright 2004-2005  Red Hat, Inc.
+ *   Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010  MontaVista Software, LLC.
+ *   Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include "ahci.h"
+
+static void ahci_host_stop(struct ata_host *host);
+
+struct ata_port_operations ahci_platform_ops = {
+	.inherits	= &ahci_ops,
+	.host_stop	= ahci_host_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
+
+/**
+ * ahci_platform_enable_phys - Enable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the PHYs found in hpriv->phys, if any.
+ * If a PHY fails to be enabled, it disables all the PHYs already
+ * enabled in reverse order and returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+{
+	int rc, i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		rc = phy_init(hpriv->phys[i]);
+		if (rc)
+			goto disable_phys;
+
+		rc = phy_power_on(hpriv->phys[i]);
+		if (rc) {
+			phy_exit(hpriv->phys[i]);
+			goto disable_phys;
+		}
+	}
+
+	return 0;
+
+disable_phys:
+	while (--i >= 0) {
+		phy_power_off(hpriv->phys[i]);
+		phy_exit(hpriv->phys[i]);
+	}
+	return rc;
+}
+
+/**
+ * ahci_platform_disable_phys - Disable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all PHYs found in hpriv->phys.
+ */
+static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+{
+	int i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		phy_power_off(hpriv->phys[i]);
+		phy_exit(hpriv->phys[i]);
+	}
+}
+
+/**
+ * ahci_platform_enable_clks - Enable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the clks found in hpriv->clks, starting at
+ * index 0. If any clk fails to enable it disables all the clks already
+ * enabled in reverse order, and then returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+{
+	int c, rc;
+
+	for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
+		rc = clk_prepare_enable(hpriv->clks[c]);
+		if (rc)
+			goto disable_unprepare_clk;
+	}
+	return 0;
+
+disable_unprepare_clk:
+	while (--c >= 0)
+		clk_disable_unprepare(hpriv->clks[c]);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
+
+/**
+ * ahci_platform_disable_clks - Disable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all the clks found in hpriv->clks, in reverse
+ * order of ahci_platform_enable_clks (starting at the end of the array).
+ */
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
+{
+	int c;
+
+	for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
+		if (hpriv->clks[c])
+			clk_disable_unprepare(hpriv->clks[c]);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
+
+/**
+ * ahci_platform_enable_regulators - Enable regulators
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the regulators found in
+ * hpriv->target_pwrs, if any.  If a regulator fails to be enabled, it
+ * disables all the regulators already enabled in reverse order and
+ * returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
+{
+	int rc, i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		if (!hpriv->target_pwrs[i])
+			continue;
+
+		rc = regulator_enable(hpriv->target_pwrs[i]);
+		if (rc)
+			goto disable_target_pwrs;
+	}
+
+	return 0;
+
+disable_target_pwrs:
+	while (--i >= 0)
+		if (hpriv->target_pwrs[i])
+			regulator_disable(hpriv->target_pwrs[i]);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
+
+/**
+ * ahci_platform_disable_regulators - Disable regulators
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all regulators found in hpriv->target_pwrs.
+ */
+void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
+{
+	int i;
+
+	for (i = 0; i < hpriv->nports; i++) {
+		if (!hpriv->target_pwrs[i])
+			continue;
+		regulator_disable(hpriv->target_pwrs[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
+
+/**
+ * ahci_platform_enable_resources - Enable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all ahci_platform managed resources in the
+ * following order:
+ * 1) Regulator
+ * 2) Clocks (through ahci_platform_enable_clks)
+ * 3) Resets
+ * 4) Phys
+ *
+ * If resource enabling fails at any point the previous enabled resources
+ * are disabled in reverse order.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+	int rc;
+
+	rc = ahci_platform_enable_regulators(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_platform_enable_clks(hpriv);
+	if (rc)
+		goto disable_regulator;
+
+	rc = reset_control_deassert(hpriv->rsts);
+	if (rc)
+		goto disable_clks;
+
+	rc = ahci_platform_enable_phys(hpriv);
+	if (rc)
+		goto disable_resets;
+
+	return 0;
+
+disable_resets:
+	reset_control_assert(hpriv->rsts);
+
+disable_clks:
+	ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+	ahci_platform_disable_regulators(hpriv);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
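+
+/*
+ * Illustrative usage sketch, not part of this file: a platform driver's
+ * probe path typically chains this library's helpers roughly as follows
+ * ("foo_ahci_probe", "foo_port_info" and "foo_sht" are hypothetical
+ * names):
+ *
+ *	static int foo_ahci_probe(struct platform_device *pdev)
+ *	{
+ *		struct ahci_host_priv *hpriv;
+ *		int rc;
+ *
+ *		hpriv = ahci_platform_get_resources(pdev,
+ *						    AHCI_PLATFORM_GET_RESETS);
+ *		if (IS_ERR(hpriv))
+ *			return PTR_ERR(hpriv);
+ *
+ *		rc = ahci_platform_enable_resources(hpriv);
+ *		if (rc)
+ *			return rc;
+ *
+ *		rc = ahci_platform_init_host(pdev, hpriv, &foo_port_info,
+ *					     &foo_sht);
+ *		if (rc)
+ *			ahci_platform_disable_resources(hpriv);
+ *		return rc;
+ *	}
+ */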
+
+/**
+ * ahci_platform_disable_resources - Disable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all ahci_platform managed resources in the
+ * following order:
+ * 1) Phys
+ * 2) Resets
+ * 3) Clocks (through ahci_platform_disable_clks)
+ * 4) Regulator
+ */
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
+{
+	ahci_platform_disable_phys(hpriv);
+
+	reset_control_assert(hpriv->rsts);
+
+	ahci_platform_disable_clks(hpriv);
+
+	ahci_platform_disable_regulators(hpriv);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
+
+static void ahci_platform_put_resources(struct device *dev, void *res)
+{
+	struct ahci_host_priv *hpriv = res;
+	int c;
+
+	if (hpriv->got_runtime_pm) {
+		pm_runtime_put_sync(dev);
+		pm_runtime_disable(dev);
+	}
+
+	for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
+		clk_put(hpriv->clks[c]);
+	/*
+	 * The regulators are tied to the child node devices and not to
+	 * the SATA device itself, so we can't use devm to release them
+	 * automatically.  We have to do it manually here.
+	 */
+	for (c = 0; c < hpriv->nports; c++)
+		if (hpriv->target_pwrs && hpriv->target_pwrs[c])
+			regulator_put(hpriv->target_pwrs[c]);
+
+	kfree(hpriv->target_pwrs);
+}
+
+static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
+				struct device *dev, struct device_node *node)
+{
+	int rc;
+
+	hpriv->phys[port] = devm_of_phy_get(dev, node, NULL);
+
+	if (!IS_ERR(hpriv->phys[port]))
+		return 0;
+
+	rc = PTR_ERR(hpriv->phys[port]);
+	switch (rc) {
+	case -ENOSYS:
+		/* No PHY support. Check if PHY is required. */
+		if (of_find_property(node, "phys", NULL)) {
+			dev_err(dev,
+				"couldn't get PHY in node %s: ENOSYS\n",
+				node->name);
+			break;
+		}
+		/* fall through */
+	case -ENODEV:
+		/* continue normally */
+		hpriv->phys[port] = NULL;
+		rc = 0;
+		break;
+
+	default:
+		dev_err(dev,
+			"couldn't get PHY in node %s: %d\n",
+			node->name, rc);
+
+		break;
+	}
+
+	return rc;
+}
+
+static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
+				struct device *dev)
+{
+	struct regulator *target_pwr;
+	int rc = 0;
+
+	target_pwr = regulator_get_optional(dev, "target");
+
+	if (!IS_ERR(target_pwr))
+		hpriv->target_pwrs[port] = target_pwr;
+	else
+		rc = PTR_ERR(target_pwr);
+
+	return rc;
+}
+
+/**
+ * ahci_platform_get_resources - Get platform resources
+ * @pdev: platform device to get resources for
+ * @flags: bitmap representing the resource to get
+ *
+ * This function allocates an ahci_host_priv struct, and gets the following
+ * resources, storing a reference to them inside the returned struct:
+ *
+ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
+ * 2) regulator for controlling the target's power (optional)
+ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the device's devicetree
+ *    node, or a single clock for non-devicetree-enabled platforms
+ * 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
+ * 5) phys (optional)
+ *
+ * RETURNS:
+ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
+ */
+struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
+						   unsigned int flags)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct clk *clk;
+	struct device_node *child;
+	int i, enabled_ports = 0, rc = -ENOMEM, child_nodes;
+	u32 mask_port_map = 0;
+
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
+			     GFP_KERNEL);
+	if (!hpriv)
+		goto err_out;
+
+	devres_add(dev, hpriv);
+
+	hpriv->mmio = devm_ioremap_resource(dev,
+			      platform_get_resource(pdev, IORESOURCE_MEM, 0));
+	if (IS_ERR(hpriv->mmio)) {
+		dev_err(dev, "no mmio space\n");
+		rc = PTR_ERR(hpriv->mmio);
+		goto err_out;
+	}
+
+	for (i = 0; i < AHCI_MAX_CLKS; i++) {
+		/*
+		 * For now we must use clk_get(dev, NULL) for the first clock,
+		 * because some platforms (da850, spear13xx) are not yet
+		 * converted to use devicetree for clocks.  For new platforms
+		 * this is equivalent to of_clk_get(dev->of_node, 0).
+		 */
+		if (i == 0)
+			clk = clk_get(dev, NULL);
+		else
+			clk = of_clk_get(dev->of_node, i);
+
+		if (IS_ERR(clk)) {
+			rc = PTR_ERR(clk);
+			if (rc == -EPROBE_DEFER)
+				goto err_out;
+			break;
+		}
+		hpriv->clks[i] = clk;
+	}
+
+	if (flags & AHCI_PLATFORM_GET_RESETS) {
+		hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
+		if (IS_ERR(hpriv->rsts)) {
+			rc = PTR_ERR(hpriv->rsts);
+			goto err_out;
+		}
+	}
+
+	hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
+
+	/*
+	 * If no sub-node was found, we still need to set nports to
+	 * one in order to be able to use the
+	 * ahci_platform_[en|dis]able_[phys|regulators] functions.
+	 */
+	if (!child_nodes)
+		hpriv->nports = 1;
+
+	hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
+	if (!hpriv->phys) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	/*
+	 * We cannot use devm_ here, since ahci_platform_put_resources()
+	 * uses target_pwrs after devm_ would have freed the memory.
+	 */
+	hpriv->target_pwrs = kcalloc(hpriv->nports, sizeof(*hpriv->target_pwrs), GFP_KERNEL);
+	if (!hpriv->target_pwrs) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	if (child_nodes) {
+		for_each_child_of_node(dev->of_node, child) {
+			u32 port;
+			struct platform_device *port_dev __maybe_unused;
+
+			if (!of_device_is_available(child))
+				continue;
+
+			if (of_property_read_u32(child, "reg", &port)) {
+				rc = -EINVAL;
+				goto err_out;
+			}
+
+			if (port >= hpriv->nports) {
+				dev_warn(dev, "invalid port number %d\n", port);
+				continue;
+			}
+			mask_port_map |= BIT(port);
+
+#ifdef CONFIG_OF_ADDRESS
+			of_platform_device_create(child, NULL, NULL);
+
+			port_dev = of_find_device_by_node(child);
+
+			if (port_dev) {
+				rc = ahci_platform_get_regulator(hpriv, port,
+								&port_dev->dev);
+				if (rc == -EPROBE_DEFER)
+					goto err_out;
+			}
+#endif
+
+			rc = ahci_platform_get_phy(hpriv, port, dev, child);
+			if (rc)
+				goto err_out;
+
+			enabled_ports++;
+		}
+		if (!enabled_ports) {
+			dev_warn(dev, "No port enabled\n");
+			rc = -ENODEV;
+			goto err_out;
+		}
+
+		if (!hpriv->mask_port_map)
+			hpriv->mask_port_map = mask_port_map;
+	} else {
+		/*
+		 * If no sub-node was found, keep this for device tree
+		 * compatibility
+		 */
+		rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
+		if (rc)
+			goto err_out;
+
+		rc = ahci_platform_get_regulator(hpriv, 0, dev);
+		if (rc == -EPROBE_DEFER)
+			goto err_out;
+	}
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+	hpriv->got_runtime_pm = true;
+
+	devres_remove_group(dev, NULL);
+	return hpriv;
+
+err_out:
+	devres_release_group(dev, NULL);
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
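+
+/*
+ * Device-tree sketch of the layout parsed above (the compatible string
+ * "foo,ahci" and the cell values are placeholders; per-port sub-nodes
+ * carry a "reg" cell and optional "phys"):
+ *
+ *	sata@1a000000 {
+ *		compatible = "foo,ahci";
+ *		reg = <0x1a000000 0x1000>;
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *
+ *		sata-port@0 {
+ *			reg = <0>;
+ *			phys = <&sata_phy 0>;
+ *		};
+ *	};
+ */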
+
+/**
+ * ahci_platform_init_host - Bring up an ahci-platform host
+ * @pdev: platform device pointer for the host
+ * @hpriv: ahci-host private data for the host
+ * @pi_template: template for the ata_port_info to use
+ * @sht: scsi_host_template to use when registering
+ *
+ * This function does all the usual steps needed to bring up an
+ * ahci-platform host; note that any necessary resources (i.e. clks,
+ * phys, etc.) must be initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_init_host(struct platform_device *pdev,
+			    struct ahci_host_priv *hpriv,
+			    const struct ata_port_info *pi_template,
+			    struct scsi_host_template *sht)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_port_info pi = *pi_template;
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ata_host *host;
+	int i, irq, n_ports, rc;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		if (irq != -EPROBE_DEFER)
+			dev_err(dev, "no irq\n");
+		return irq;
+	}
+
+	hpriv->irq = irq;
+
+	/* prepare host */
+	pi.private_data = (void *)(unsigned long)hpriv->flags;
+
+	ahci_save_initial_config(dev, hpriv);
+
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port and at other times that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+
+	if (pi.flags & ATA_FLAG_EM)
+		ahci_reset_em(host);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_desc(ap, "mmio %pR",
+			      platform_get_resource(pdev, IORESOURCE_MEM, 0));
+		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+		/* set enclosure management message type */
+		if (ap->flags & ATA_FLAG_EM)
+			ap->em_message_type = hpriv->em_msg_type;
+
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	if (hpriv->cap & HOST_CAP_64) {
+		rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_coerce_mask_and_coherent(dev,
+							  DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(dev, "Failed to enable 64-bit DMA.\n");
+				return rc;
+			}
+			dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n");
+		}
+	}
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	ahci_init_controller(host);
+	ahci_print_info(host, "platform");
+
+	return ahci_host_activate(host, sht);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_init_host);
+
+static void ahci_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+
+	ahci_platform_disable_resources(hpriv);
+}
+
+/**
+ * ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
+ * @pdev: platform device pointer for the host
+ *
+ * This function is called during system shutdown and performs the minimal
+ * deconfiguration required to ensure that an ahci_platform host cannot
+ * corrupt or otherwise interfere with a new kernel being started with kexec.
+ */
+void ahci_platform_shutdown(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		/* Disable port interrupts */
+		if (ap->ops->freeze)
+			ap->ops->freeze(ap);
+
+		/* Stop the port DMA engines */
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+
+	/* Disable and clear host interrupts */
+	writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+	writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ahci_platform_suspend_host - Suspend an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to suspend an
+ * ahci-platform host; note that any necessary resources (i.e. clks,
+ * phys, etc.) must be disabled after calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend_host(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+
+	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(dev, "firmware update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	/*
+	 * AHCI spec rev1.1 section 8.3.3:
+	 * Software must disable interrupts prior to requesting a
+	 * transition of the HBA to D3 state.
+	 */
+	ctl = readl(mmio + HOST_CTL);
+	ctl &= ~HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+
+	return ata_host_suspend(host, PMSG_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
+
+/**
+ * ahci_platform_resume_host - Resume an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to resume an
+ * ahci-platform host; note that any necessary resources (i.e. clks,
+ * phys, etc.) must be initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume_host(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	int rc;
+
+	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
+
+/**
+ * ahci_platform_suspend - Suspend an ahci-platform device
+ * @dev: the platform device to suspend
+ *
+ * This function suspends the host associated with the device, followed by
+ * disabling all the resources of the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_platform_suspend_host(dev);
+	if (rc)
+		return rc;
+
+	ahci_platform_disable_resources(hpriv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend);
+
+/**
+ * ahci_platform_resume - Resume an ahci-platform device
+ * @dev: the platform device to resume
+ *
+ * This function enables all the resources of the device followed by
+ * resuming the host associated with the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_platform_resume_host(dev);
+	if (rc)
+		goto disable_resources;
+
+	/* We resumed so update PM runtime state */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume);
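+
+/*
+ * A minimal sketch of how a driver would wire the pair above into its
+ * dev_pm_ops ("foo" names are placeholders):
+ *
+ *	static SIMPLE_DEV_PM_OPS(foo_ahci_pm_ops, ahci_platform_suspend,
+ *				 ahci_platform_resume);
+ */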
+#endif
+
+MODULE_DESCRIPTION("AHCI SATA platform library");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
new file mode 100644
index 0000000..97a14fe
--- /dev/null
+++ b/drivers/ata/libata-acpi.c
@@ -0,0 +1,1054 @@
+/*
+ * libata-acpi.c
+ * Provides ACPI support for PATA/SATA.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Copyright (C) 2006 Randy Dunlap
+ */
+
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/libata.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <scsi/scsi_device.h>
+#include "libata.h"
+
+unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
+module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
+MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)");
+
+#define NO_PORT_MULT		0xffff
+#define SATA_ADR(root, pmp)	(((root) << 16) | (pmp))
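+/*
+ * Illustrative note: following the ACPI spec's SATA _ADR encoding, the
+ * root port number goes in the high word and the port multiplier port in
+ * the low word, so e.g. SATA_ADR(1, NO_PORT_MULT) yields 0x1ffff for a
+ * device attached directly to port 1.
+ */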
+
+#define REGS_PER_GTF		7
+struct ata_acpi_gtf {
+	u8	tf[REGS_PER_GTF];	/* regs. 0x1f1 - 0x1f7 */
+} __packed;
+
+static void ata_acpi_clear_gtf(struct ata_device *dev)
+{
+	kfree(dev->gtf_cache);
+	dev->gtf_cache = NULL;
+}
+
+struct ata_acpi_hotplug_context {
+	struct acpi_hotplug_context hp;
+	union {
+		struct ata_port *ap;
+		struct ata_device *dev;
+	} data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
+/**
+ * ata_dev_acpi_handle - provide the acpi_handle for an ata_device
+ * @dev: the acpi_handle returned will correspond to this device
+ *
+ * Returns the acpi_handle for the ACPI namespace object corresponding to
+ * the ata_device passed into the function, or NULL if no such object exists
+ * or ACPI is disabled for this device due to consecutive errors.
+ */
+acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
+{
+	return dev->flags & ATA_DFLAG_ACPI_DISABLED ?
+			NULL : ACPI_HANDLE(&dev->tdev);
+}
+
+/* @ap and @dev are the same as in ata_acpi_handle_hotplug() */
+static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
+{
+	if (dev)
+		dev->flags |= ATA_DFLAG_DETACH;
+	else {
+		struct ata_link *tlink;
+		struct ata_device *tdev;
+
+		ata_for_each_link(tlink, ap, EDGE)
+			ata_for_each_dev(tdev, tlink, ALL)
+				tdev->flags |= ATA_DFLAG_DETACH;
+	}
+
+	ata_port_schedule_eh(ap);
+}
+
+/**
+ * ata_acpi_handle_hotplug - ACPI event handler backend
+ * @ap: ATA port the ACPI event occurred on
+ * @dev: ATA device the ACPI event occurred on (can be NULL)
+ * @event: ACPI event which occurred
+ *
+ * All ACPI bay / device related events end up in this function.  If
+ * the event is port-wide, @dev is NULL.  If the event is specific to a
+ * device, @dev points to it.
+ *
+ * Hotplug (as opposed to unplug) notification is always handled as
+ * port-wide while unplug only kills the target device on device-wide
+ * event.
+ *
+ * LOCKING:
+ * ACPI notify handler context.  May sleep.
+ */
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
+				    u32 event)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	int wait = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+	/*
+	 * When dock driver calls into the routine, it will always use
+	 * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
+	 * ACPI_NOTIFY_EJECT_REQUEST for remove
+	 */
+	switch (event) {
+	case ACPI_NOTIFY_BUS_CHECK:
+	case ACPI_NOTIFY_DEVICE_CHECK:
+		ata_ehi_push_desc(ehi, "ACPI event");
+
+		ata_ehi_hotplugged(ehi);
+		ata_port_freeze(ap);
+		break;
+	case ACPI_NOTIFY_EJECT_REQUEST:
+		ata_ehi_push_desc(ehi, "ACPI event");
+
+		ata_acpi_detach_device(ap, dev);
+		wait = 1;
+		break;
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (wait)
+		ata_port_wait_eh(ap);
+}
+
+static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
+{
+	struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
+	ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+	return 0;
+}
+
+static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
+{
+	ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
+	return 0;
+}
+
+static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
+	u32 event)
+{
+	struct kobject *kobj = NULL;
+	char event_string[20];
+	char *envp[] = { event_string, NULL };
+
+	if (dev) {
+		if (dev->sdev)
+			kobj = &dev->sdev->sdev_gendev.kobj;
+	} else
+		kobj = &ap->dev->kobj;
+
+	if (kobj) {
+		snprintf(event_string, 20, "BAY_EVENT=%d", event);
+		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
+	}
+}
+
+static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
+{
+	ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
+}
+
+static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
+{
+	struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
+	ata_acpi_uevent(dev->link->ap, dev, event);
+}
+
+/* bind acpi handle to pata port */
+void ata_acpi_bind_port(struct ata_port *ap)
+{
+	struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+	struct acpi_device *adev;
+	struct ata_acpi_hotplug_context *context;
+
+	if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
+		return;
+
+	acpi_preset_companion(&ap->tdev, host_companion, ap->port_no);
+
+	if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+		ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
+
+	adev = ACPI_COMPANION(&ap->tdev);
+	if (!adev || adev->hp)
+		return;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return;
+
+	context->data.ap = ap;
+	acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
+				   ata_acpi_ap_uevent);
+}
+
+void ata_acpi_bind_dev(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
+	struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+	struct acpi_device *parent, *adev;
+	struct ata_acpi_hotplug_context *context;
+	u64 adr;
+
+	/*
+	 * For both sata/pata devices, host companion device is required.
+	 * For pata device, port companion device is also required.
+	 */
+	if (libata_noacpi || !host_companion ||
+			(!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion))
+		return;
+
+	if (ap->flags & ATA_FLAG_ACPI_SATA) {
+		if (!sata_pmp_attached(ap))
+			adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
+		else
+			adr = SATA_ADR(ap->port_no, dev->link->pmp);
+		parent = host_companion;
+	} else {
+		adr = dev->devno;
+		parent = port_companion;
+	}
+
+	acpi_preset_companion(&dev->tdev, parent, adr);
+	adev = ACPI_COMPANION(&dev->tdev);
+	if (!adev || adev->hp)
+		return;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return;
+
+	context->data.dev = dev;
+	acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
+				   ata_acpi_dev_uevent);
+}
+
+/**
+ * ata_acpi_dissociate - dissociate ATA host from ACPI objects
+ * @host: target ATA host
+ *
+ * This function is called during driver detach after the whole host
+ * is shut down.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_dissociate(struct ata_host *host)
+{
+	int i;
+
+	/* Restore initial _GTM values so that driver which attaches
+	 * afterward can use them too.
+	 */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+		if (ACPI_HANDLE(&ap->tdev) && gtm)
+			ata_acpi_stm(ap, gtm);
+	}
+}
+
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+{
+	struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
+	union acpi_object *out_obj;
+	acpi_status status;
+	int rc = 0;
+	acpi_handle handle = ACPI_HANDLE(&ap->tdev);
+
+	if (!handle)
+		return -EINVAL;
+
+	status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
+
+	rc = -ENOENT;
+	if (status == AE_NOT_FOUND)
+		goto out_free;
+
+	rc = -EINVAL;
+	if (ACPI_FAILURE(status)) {
+		ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n",
+			     status);
+		goto out_free;
+	}
+
+	out_obj = output.pointer;
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n",
+			      out_obj->type);
+
+		goto out_free;
+	}
+
+	if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
+		ata_port_err(ap, "_GTM returned invalid length %d\n",
+			     out_obj->buffer.length);
+		goto out_free;
+	}
+
+	memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm));
+	rc = 0;
+ out_free:
+	kfree(output.pointer);
+	return rc;
+}
+
+EXPORT_SYMBOL_GPL(ata_acpi_gtm);
+
+/**
+ * ata_acpi_stm - execute _STM
+ * @ap: target ATA port
+ * @stm: timing parameter to _STM
+ *
+ * Evaluate _STM with timing parameter @stm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure.
+ */
+int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
+{
+	acpi_status status;
+	struct ata_acpi_gtm		stm_buf = *stm;
+	struct acpi_object_list         input;
+	union acpi_object               in_params[3];
+
+	in_params[0].type = ACPI_TYPE_BUFFER;
+	in_params[0].buffer.length = sizeof(struct ata_acpi_gtm);
+	in_params[0].buffer.pointer = (u8 *)&stm_buf;
+	/* Buffers for id may need byteswapping? */
+	in_params[1].type = ACPI_TYPE_BUFFER;
+	in_params[1].buffer.length = 512;
+	in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
+	in_params[2].type = ACPI_TYPE_BUFFER;
+	in_params[2].buffer.length = 512;
+	in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
+
+	input.count = 3;
+	input.pointer = in_params;
+
+	status = acpi_evaluate_object(ACPI_HANDLE(&ap->tdev), "_STM",
+				      &input, NULL);
+
+	if (status == AE_NOT_FOUND)
+		return -ENOENT;
+	if (ACPI_FAILURE(status)) {
+		ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n",
+			     status);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(ata_acpi_stm);
+
+/**
+ * ata_dev_get_GTF - get the drive bootup default taskfile settings
+ * @dev: target ATA device
+ * @gtf: output parameter for buffer containing _GTF taskfile arrays
+ *
+ * This applies to both PATA and SATA drives.
+ *
+ * The _GTF method has no input parameters.
+ * It returns a variable number of register set values (registers
+ * hex 1F1..1F7, taskfiles).
+ * The <variable number> is not known in advance, so have ACPI-CA
+ * allocate the buffer as needed and return it, then free it later.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Number of taskfiles on success, 0 if _GTF doesn't exist.  -EINVAL
+ * if _GTF is invalid.
+ */
+static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
+{
+	struct ata_port *ap = dev->link->ap;
+	acpi_status status;
+	struct acpi_buffer output;
+	union acpi_object *out_obj;
+	int rc = 0;
+
+	/* if _GTF is cached, use the cached value */
+	if (dev->gtf_cache) {
+		out_obj = dev->gtf_cache;
+		goto done;
+	}
+
+	/* set up output buffer */
+	output.length = ACPI_ALLOCATE_BUFFER;
+	output.pointer = NULL;	/* ACPI-CA sets this; save/free it later */
+
+	if (ata_msg_probe(ap))
+		ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
+			    __func__, ap->port_no);
+
+	/* _GTF has no input parameters */
+	status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
+				      &output);
+	out_obj = dev->gtf_cache = output.pointer;
+
+	if (ACPI_FAILURE(status)) {
+		if (status != AE_NOT_FOUND) {
+			ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n",
+				     status);
+			rc = -EINVAL;
+		}
+		goto out_free;
+	}
+
+	if (!output.length || !output.pointer) {
+		if (ata_msg_probe(ap))
+			ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
+				    __func__,
+				    (unsigned long long)output.length,
+				    output.pointer);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n",
+			     out_obj->type);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (out_obj->buffer.length % REGS_PER_GTF) {
+		ata_dev_warn(dev, "unexpected _GTF length (%d)\n",
+			     out_obj->buffer.length);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+ done:
+	rc = out_obj->buffer.length / REGS_PER_GTF;
+	if (gtf) {
+		*gtf = (void *)out_obj->buffer.pointer;
+		if (ata_msg_probe(ap))
+			ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
+				    __func__, *gtf, rc);
+	}
+	return rc;
+
+ out_free:
+	ata_acpi_clear_gtf(dev);
+	return rc;
+}
+
+/**
+ * ata_acpi_gtm_xfermask - determine xfermask from GTM parameter
+ * @dev: target device
+ * @gtm: GTM parameter to use
+ *
+ * Determine xfermask for @dev from @gtm.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Determined xfermask.
+ */
+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
+				    const struct ata_acpi_gtm *gtm)
+{
+	unsigned long xfer_mask = 0;
+	unsigned int type;
+	int unit;
+	u8 mode;
+
+	/* we always use the 0 slot for crap hardware */
+	unit = dev->devno;
+	if (!(gtm->flags & 0x10))
+		unit = 0;
+
+	/* PIO */
+	mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
+	xfer_mask |= ata_xfer_mode2mask(mode);
+
+	/* See if we have MWDMA or UDMA data. We don't bother with
+	 * MWDMA if UDMA is available, as this means the BIOS set UDMA
+	 * and, on error, our speed-down path goes from UDMA straight
+	 * to PIO anyway.
+	 */
+	if (!(gtm->flags & (1 << (2 * unit))))
+		type = ATA_SHIFT_MWDMA;
+	else
+		type = ATA_SHIFT_UDMA;
+
+	mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
+	xfer_mask |= ata_xfer_mode2mask(mode);
+
+	return xfer_mask;
+}
+EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
+
+/**
+ * ata_acpi_cbl_80wire		-	Check for 80 wire cable
+ * @ap: Port to check
+ * @gtm: GTM data to use
+ *
+ * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ */
+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
+		unsigned long xfer_mask, udma_mask;
+
+		xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
+		ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
+
+		if (udma_mask & ~ATA_UDMA_MASK_40C)
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
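+
+/*
+ * A usage sketch (illustrative, not part of this file): PATA drivers
+ * typically consult this from their cable_detect hook, e.g. with a
+ * hypothetical foo_cable_detect():
+ *
+ *	static int foo_cable_detect(struct ata_port *ap)
+ *	{
+ *		const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+ *
+ *		if (gtm && ata_acpi_cbl_80wire(ap, gtm))
+ *			return ATA_CBL_PATA80;
+ *		return ATA_CBL_PATA40;
+ *	}
+ */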
+
+static void ata_acpi_gtf_to_tf(struct ata_device *dev,
+			       const struct ata_acpi_gtf *gtf,
+			       struct ata_taskfile *tf)
+{
+	ata_tf_init(dev, tf);
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->protocol = ATA_PROT_NODATA;
+	tf->feature = gtf->tf[0];	/* 0x1f1 */
+	tf->nsect   = gtf->tf[1];	/* 0x1f2 */
+	tf->lbal    = gtf->tf[2];	/* 0x1f3 */
+	tf->lbam    = gtf->tf[3];	/* 0x1f4 */
+	tf->lbah    = gtf->tf[4];	/* 0x1f5 */
+	tf->device  = gtf->tf[5];	/* 0x1f6 */
+	tf->command = gtf->tf[6];	/* 0x1f7 */
+}
+
+static int ata_acpi_filter_tf(struct ata_device *dev,
+			      const struct ata_taskfile *tf,
+			      const struct ata_taskfile *ptf)
+{
+	if (dev->gtf_filter & ATA_ACPI_FILTER_SETXFER) {
+		/* libata doesn't use ACPI to configure transfer mode.
+		 * It will only confuse device configuration.  Skip.
+		 */
+		if (tf->command == ATA_CMD_SET_FEATURES &&
+		    tf->feature == SETFEATURES_XFER)
+			return 1;
+	}
+
+	if (dev->gtf_filter & ATA_ACPI_FILTER_LOCK) {
+		/* BIOS writers, sorry but we don't wanna lock
+		 * features unless the user explicitly said so.
+		 */
+
+		/* DEVICE CONFIGURATION FREEZE LOCK */
+		if (tf->command == ATA_CMD_CONF_OVERLAY &&
+		    tf->feature == ATA_DCO_FREEZE_LOCK)
+			return 1;
+
+		/* SECURITY FREEZE LOCK */
+		if (tf->command == ATA_CMD_SEC_FREEZE_LOCK)
+			return 1;
+
+		/* SET MAX LOCK and SET MAX FREEZE LOCK */
+		if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) &&
+		    tf->command == ATA_CMD_SET_MAX &&
+		    (tf->feature == ATA_SET_MAX_LOCK ||
+		     tf->feature == ATA_SET_MAX_FREEZE_LOCK))
+			return 1;
+	}
+
+	if (tf->command == ATA_CMD_SET_FEATURES &&
+	    tf->feature == SETFEATURES_SATA_ENABLE) {
+		/* inhibit enabling DIPM */
+		if (dev->gtf_filter & ATA_ACPI_FILTER_DIPM &&
+		    tf->nsect == SATA_DIPM)
+			return 1;
+
+		/* inhibit FPDMA non-zero offset */
+		if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_OFFSET &&
+		    (tf->nsect == SATA_FPDMA_OFFSET ||
+		     tf->nsect == SATA_FPDMA_IN_ORDER))
+			return 1;
+
+		/* inhibit FPDMA auto activation */
+		if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_AA &&
+		    tf->nsect == SATA_FPDMA_AA)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * ata_acpi_run_tf - send taskfile registers to host controller
+ * @dev: target ATA device
+ * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
+ * @prev_gtf: previous _GTF taskfile, or NULL if there is none
+ *
+ * Executes the taskfile described by @gtf on @dev as an internal
+ * NODATA command, unless the command is caught by the _GTF command
+ * filter (see ata_acpi_filter_tf()) or the taskfile is all zeroes,
+ * in which case it is ignored.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 1 if command is executed successfully.  0 if ignored, rejected or
+ * filtered out, -errno on other errors.
+ */
+static int ata_acpi_run_tf(struct ata_device *dev,
+			   const struct ata_acpi_gtf *gtf,
+			   const struct ata_acpi_gtf *prev_gtf)
+{
+	struct ata_taskfile *pptf = NULL;
+	struct ata_taskfile tf, ptf, rtf;
+	unsigned int err_mask;
+	const char *level;
+	const char *descr;
+	char msg[60];
+	int rc;
+
+	if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
+	    && (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0)
+	    && (gtf->tf[6] == 0))
+		return 0;
+
+	ata_acpi_gtf_to_tf(dev, gtf, &tf);
+	if (prev_gtf) {
+		ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf);
+		pptf = &ptf;
+	}
+
+	if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
+		rtf = tf;
+		err_mask = ata_exec_internal(dev, &rtf, NULL,
+					     DMA_NONE, NULL, 0, 0);
+
+		switch (err_mask) {
+		case 0:
+			level = KERN_DEBUG;
+			snprintf(msg, sizeof(msg), "succeeded");
+			rc = 1;
+			break;
+
+		case AC_ERR_DEV:
+			level = KERN_INFO;
+			snprintf(msg, sizeof(msg),
+				 "rejected by device (Stat=0x%02x Err=0x%02x)",
+				 rtf.command, rtf.feature);
+			rc = 0;
+			break;
+
+		default:
+			level = KERN_ERR;
+			snprintf(msg, sizeof(msg),
+				 "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
+				 err_mask, rtf.command, rtf.feature);
+			rc = -EIO;
+			break;
+		}
+	} else {
+		level = KERN_INFO;
+		snprintf(msg, sizeof(msg), "filtered out");
+		rc = 0;
+	}
+	descr = ata_get_cmd_descript(tf.command);
+
+	ata_dev_printk(dev, level,
+		       "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
+		       tf.command, tf.feature, tf.nsect, tf.lbal,
+		       tf.lbam, tf.lbah, tf.device,
+		       (descr ? descr : "unknown"), msg);
+
+	return rc;
+}
+
+/**
+ * ata_acpi_exec_tfs - get then write drive taskfile settings
+ * @dev: target ATA device
+ * @nr_executed: out parameter for the number of executed commands
+ *
+ * Evaluate _GTF and execute returned taskfiles.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Number of executed taskfiles on success, 0 if _GTF doesn't exist.
+ * -errno on other errors.
+ */
+static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed)
+{
+	struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL;
+	int gtf_count, i, rc;
+
+	/* get taskfiles */
+	rc = ata_dev_get_GTF(dev, &gtf);
+	if (rc < 0)
+		return rc;
+	gtf_count = rc;
+
+	/* execute them */
+	for (i = 0; i < gtf_count; i++, gtf++) {
+		rc = ata_acpi_run_tf(dev, gtf, pgtf);
+		if (rc < 0)
+			break;
+		if (rc) {
+			(*nr_executed)++;
+			pgtf = gtf;
+		}
+	}
+
+	ata_acpi_clear_gtf(dev);
+
+	if (rc < 0)
+		return rc;
+	return 0;
+}
+
+/**
+ * ata_acpi_push_id - send Identify data to drive
+ * @dev: target ATA device
+ *
+ * _SDD ACPI object: for SATA mode only.
+ * Must be evaluated after Identify (Packet) Device, as it uses that data.
+ * _SDD is an optional method, and if it fails for whatever reason the
+ * caller should still just keep going.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _SDD doesn't exist, -errno on failure.
+ */
+static int ata_acpi_push_id(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	acpi_status status;
+	struct acpi_object_list input;
+	union acpi_object in_params[1];
+
+	if (ata_msg_probe(ap))
+		ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
+			    __func__, dev->devno, ap->port_no);
+
+	/* Give the drive Identify data to the drive via the _SDD method */
+	/* _SDD: set up input parameters */
+	input.count = 1;
+	input.pointer = in_params;
+	in_params[0].type = ACPI_TYPE_BUFFER;
+	in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS;
+	in_params[0].buffer.pointer = (u8 *)dev->id;
+	/* Output buffer: _SDD has no output */
+
+	/* It's OK for _SDD to be missing too. */
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+	status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_SDD", &input,
+				      NULL);
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	if (status == AE_NOT_FOUND)
+		return -ENOENT;
+
+	if (ACPI_FAILURE(status)) {
+		ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * ata_acpi_on_suspend - ATA ACPI hook called on suspend
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is about to be suspended.  All
+ * devices are already put to sleep but the port_suspend() callback
+ * hasn't been executed yet.  Error return from this function aborts
+ * suspend.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_acpi_on_suspend(struct ata_port *ap)
+{
+	/* nada */
+	return 0;
+}
+
+/**
+ * ata_acpi_on_resume - ATA ACPI hook called on resume
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is resumed - right after port
+ * itself is resumed but before any EH action is taken.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_resume(struct ata_port *ap)
+{
+	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+	struct ata_device *dev;
+
+	if (ACPI_HANDLE(&ap->tdev) && gtm) {
+		/* _GTM valid */
+
+		/* restore timing parameters */
+		ata_acpi_stm(ap, gtm);
+
+		/* _GTF should immediately follow _STM so that it can
+		 * use values set by _STM.  Cache _GTF result and
+		 * schedule _GTF.
+		 */
+		ata_for_each_dev(dev, &ap->link, ALL) {
+			ata_acpi_clear_gtf(dev);
+			if (ata_dev_enabled(dev) &&
+			    ata_dev_acpi_handle(dev) &&
+			    ata_dev_get_GTF(dev, NULL) >= 0)
+				dev->flags |= ATA_DFLAG_ACPI_PENDING;
+		}
+	} else {
+		/* SATA _GTF needs to be evaluated after _SDD and
+		 * there's no reason to evaluate IDE _GTF early
+		 * without _STM.  Clear cache and schedule _GTF.
+		 */
+		ata_for_each_dev(dev, &ap->link, ALL) {
+			ata_acpi_clear_gtf(dev);
+			if (ata_dev_enabled(dev))
+				dev->flags |= ATA_DFLAG_ACPI_PENDING;
+		}
+	}
+}
+
+static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
+{
+	int d_max_in = ACPI_STATE_D3_COLD;
+	if (!runtime)
+		goto out;
+
+	/*
+	 * For ATAPI, runtime D3 cold is only allowed
+	 * for ZPODD in zero power ready state
+	 */
+	if (dev->class == ATA_DEV_ATAPI &&
+	    !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
+		d_max_in = ACPI_STATE_D3_HOT;
+
+out:
+	return acpi_pm_device_sleep_state(&dev->tdev, NULL, d_max_in);
+}
+
+static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+	bool runtime = PMSG_IS_AUTO(state);
+	struct ata_device *dev;
+	acpi_handle handle;
+	int acpi_state;
+
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
+		handle = ata_dev_acpi_handle(dev);
+		if (!handle)
+			continue;
+
+		if (!(state.event & PM_EVENT_RESUME)) {
+			acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
+			if (acpi_state == ACPI_STATE_D0)
+				continue;
+			if (runtime && zpodd_dev_enabled(dev) &&
+			    acpi_state == ACPI_STATE_D3_COLD)
+				zpodd_enable_run_wake(dev);
+			acpi_bus_set_power(handle, acpi_state);
+		} else {
+			if (runtime && zpodd_dev_enabled(dev))
+				zpodd_disable_run_wake(dev);
+			acpi_bus_set_power(handle, ACPI_STATE_D0);
+		}
+	}
+}
+
+/* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */
+static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+	struct ata_device *dev;
+	acpi_handle port_handle;
+
+	port_handle = ACPI_HANDLE(&ap->tdev);
+	if (!port_handle)
+		return;
+
+	/* channel first and then drives for power on and vice versa
+	   for power off */
+	if (state.event & PM_EVENT_RESUME)
+		acpi_bus_set_power(port_handle, ACPI_STATE_D0);
+
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
+		acpi_handle dev_handle = ata_dev_acpi_handle(dev);
+		if (!dev_handle)
+			continue;
+
+		acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
+					ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
+	}
+
+	if (!(state.event & PM_EVENT_RESUME))
+		acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function sets a proper ACPI D state for the device on
+ * system and runtime PM operations.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+	if (ap->flags & ATA_FLAG_ACPI_SATA)
+		sata_acpi_set_state(ap, state);
+	else
+		pata_acpi_set_state(ap, state);
+}
+
+/**
+ * ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be configured.
+ * IDENTIFY data might have been modified after this hook is run.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Positive number if IDENTIFY data needs to be refreshed, 0 if not,
+ * -errno on failure.
+ */
+int ata_acpi_on_devcfg(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_eh_context *ehc = &ap->link.eh_context;
+	int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
+	int nr_executed = 0;
+	int rc;
+
+	if (!ata_dev_acpi_handle(dev))
+		return 0;
+
+	/* do we need to do _GTF? */
+	if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) &&
+	    !(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET)))
+		return 0;
+
+	/* do _SDD if SATA */
+	if (acpi_sata) {
+		rc = ata_acpi_push_id(dev);
+		if (rc && rc != -ENOENT)
+			goto acpi_err;
+	}
+
+	/* do _GTF */
+	rc = ata_acpi_exec_tfs(dev, &nr_executed);
+	if (rc)
+		goto acpi_err;
+
+	dev->flags &= ~ATA_DFLAG_ACPI_PENDING;
+
+	/* refresh IDENTIFY page if any _GTF command has been executed */
+	if (nr_executed) {
+		rc = ata_dev_reread_id(dev, 0);
+		if (rc < 0) {
+			ata_dev_err(dev,
+				    "failed to IDENTIFY after ACPI commands\n");
+			return rc;
+		}
+	}
+
+	return 0;
+
+ acpi_err:
+	/* ignore evaluation failure if we can continue safely */
+	if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+		return 0;
+
+	/* fail and let EH retry once more for unknown IO errors */
+	if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) {
+		dev->flags |= ATA_DFLAG_ACPI_FAILED;
+		return rc;
+	}
+
+	dev->flags |= ATA_DFLAG_ACPI_DISABLED;
+	ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
+
+	/* We can safely continue if no _GTF command has been executed
+	 * and port is not frozen.
+	 */
+	if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+		return 0;
+
+	return rc;
+}
+
+/**
+ * ata_acpi_on_disable - ATA ACPI hook called when a device is disabled
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be disabled.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_disable(struct ata_device *dev)
+{
+	ata_acpi_clear_gtf(dev);
+}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
new file mode 100644
index 0000000..b8c3f9e
--- /dev/null
+++ b/drivers/ata/libata-core.c
@@ -0,0 +1,7409 @@
+/*
+ *  libata-core.c - helper library for ATA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ *  Standards documents from:
+ *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
+ *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
+ *	http://www.sata-io.org (SATA)
+ *	http://www.compactflash.org (CF)
+ *	http://www.qic.org (QIC157 - Tape and DSC)
+ *	http://www.ce-ata.org (CE-ATA: not supported)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/glob.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
+#include <linux/leds.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/libata.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
+const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
+const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
+
+const struct ata_port_operations ata_base_port_ops = {
+	.prereset		= ata_std_prereset,
+	.postreset		= ata_std_postreset,
+	.error_handler		= ata_std_error_handler,
+	.sched_eh		= ata_std_sched_eh,
+	.end_eh			= ata_std_end_eh,
+};
+
+const struct ata_port_operations sata_port_ops = {
+	.inherits		= &ata_base_port_ops,
+
+	.qc_defer		= ata_std_qc_defer,
+	.hardreset		= sata_std_hardreset,
+};
+
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+
+atomic_t ata_print_id = ATOMIC_INIT(0);
+
+struct ata_force_param {
+	const char	*name;
+	unsigned int	cbl;
+	int		spd_limit;
+	unsigned long	xfer_mask;
+	unsigned int	horkage_on;
+	unsigned int	horkage_off;
+	unsigned int	lflags;
+};
+
+struct ata_force_ent {
+	int			port;
+	int			device;
+	struct ata_force_param	param;
+};
+
+static struct ata_force_ent *ata_force_tbl;
+static int ata_force_tbl_size;
+
+static char ata_force_param_buf[PAGE_SIZE] __initdata;
+/* param_buf is thrown away after initialization, disallow read */
+module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
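+/*
+ * An illustrative example (see kernel-parameters.rst for the full syntax):
+ * booting with
+ *
+ *	libata.force=1:40c,1.00:udma4
+ *
+ * forces a 40-conductor cable type on port 1 and limits device 1.00 to
+ * UDMA4.
+ */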
+
+static int atapi_enabled = 1;
+module_param(atapi_enabled, int, 0444);
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
+
+static int atapi_dmadir = 0;
+module_param(atapi_dmadir, int, 0444);
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
+
+int atapi_passthru16 = 1;
+module_param(atapi_passthru16, int, 0444);
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
+
+int libata_fua = 0;
+module_param_named(fua, libata_fua, int, 0444);
+MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
+
+static int ata_ignore_hpa;
+module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
+MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
+
+static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
+module_param_named(dma, libata_dma_mask, int, 0444);
+MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
+
+static int ata_probe_timeout;
+module_param(ata_probe_timeout, int, 0444);
+MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
+
+int libata_noacpi = 0;
+module_param_named(noacpi, libata_noacpi, int, 0444);
+MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
+
+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Library module for ATA devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
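+/* The low four bits of SStatus form the DET field; a value of 0x3 means a
+ * device is present and Phy communication is established.
+ */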
+static bool ata_sstatus_online(u32 sstatus)
+{
+	return (sstatus & 0xf) == 0x3;
+}
+
+/**
+ *	ata_link_next - link iteration helper
+ *	@link: the previous link, NULL to start
+ *	@ap: ATA port containing links to iterate
+ *	@mode: iteration mode, one of ATA_LITER_*
+ *
+ *	LOCKING:
+ *	Host lock or EH context.
+ *
+ *	RETURNS:
+ *	Pointer to the next link.
+ */
+struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
+			       enum ata_link_iter_mode mode)
+{
+	BUG_ON(mode != ATA_LITER_EDGE &&
+	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
+
+	/* NULL link indicates start of iteration */
+	if (!link)
+		switch (mode) {
+		case ATA_LITER_EDGE:
+		case ATA_LITER_PMP_FIRST:
+			if (sata_pmp_attached(ap))
+				return ap->pmp_link;
+			/* fall through */
+		case ATA_LITER_HOST_FIRST:
+			return &ap->link;
+		}
+
+	/* we just iterated over the host link, what's next? */
+	if (link == &ap->link)
+		switch (mode) {
+		case ATA_LITER_HOST_FIRST:
+			if (sata_pmp_attached(ap))
+				return ap->pmp_link;
+			/* fall through */
+		case ATA_LITER_PMP_FIRST:
+			if (unlikely(ap->slave_link))
+				return ap->slave_link;
+			/* fall through */
+		case ATA_LITER_EDGE:
+			return NULL;
+		}
+
+	/* slave_link excludes PMP */
+	if (unlikely(link == ap->slave_link))
+		return NULL;
+
+	/* we were over a PMP link */
+	if (++link < ap->pmp_link + ap->nr_pmp_links)
+		return link;
+
+	if (mode == ATA_LITER_PMP_FIRST)
+		return &ap->link;
+
+	return NULL;
+}
+
+/**
+ *	ata_dev_next - device iteration helper
+ *	@dev: the previous device, NULL to start
+ *	@link: ATA link containing devices to iterate
+ *	@mode: iteration mode, one of ATA_DITER_*
+ *
+ *	LOCKING:
+ *	Host lock or EH context.
+ *
+ *	RETURNS:
+ *	Pointer to the next device.
+ */
+struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
+				enum ata_dev_iter_mode mode)
+{
+	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
+	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
+
+	/* NULL dev indicates start of iteration */
+	if (!dev)
+		switch (mode) {
+		case ATA_DITER_ENABLED:
+		case ATA_DITER_ALL:
+			dev = link->device;
+			goto check;
+		case ATA_DITER_ENABLED_REVERSE:
+		case ATA_DITER_ALL_REVERSE:
+			dev = link->device + ata_link_max_devices(link) - 1;
+			goto check;
+		}
+
+ next:
+	/* move to the next one */
+	switch (mode) {
+	case ATA_DITER_ENABLED:
+	case ATA_DITER_ALL:
+		if (++dev < link->device + ata_link_max_devices(link))
+			goto check;
+		return NULL;
+	case ATA_DITER_ENABLED_REVERSE:
+	case ATA_DITER_ALL_REVERSE:
+		if (--dev >= link->device)
+			goto check;
+		return NULL;
+	}
+
+ check:
+	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
+	    !ata_dev_enabled(dev))
+		goto next;
+	return dev;
+}
+
+/**
+ *	ata_dev_phys_link - find physical link for a device
+ *	@dev: ATA device to look up physical link for
+ *
+ *	Look up physical link which @dev is attached to.  Note that
+ *	this is different from @dev->link only when @dev is on slave
+ *	link.  For all other cases, it's the same as @dev->link.
+ *
+ *	LOCKING:
+ *	Don't care.
+ *
+ *	RETURNS:
+ *	Pointer to the found physical link.
+ */
+struct ata_link *ata_dev_phys_link(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+
+	if (!ap->slave_link)
+		return dev->link;
+	if (!dev->devno)
+		return &ap->link;
+	return ap->slave_link;
+}
+
+/**
+ *	ata_force_cbl - force cable type according to libata.force
+ *	@ap: ATA port of interest
+ *
+ *	Force cable type according to libata.force and whine about it.
+ *	The last entry which has matching port number is used, so it
+ *	can be specified as part of device force parameters.  For
+ *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
+ *	same effect.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_force_cbl(struct ata_port *ap)
+{
+	int i;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != ap->print_id)
+			continue;
+
+		if (fe->param.cbl == ATA_CBL_NONE)
+			continue;
+
+		ap->cbl = fe->param.cbl;
+		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
+		return;
+	}
+}
+
+/**
+ *	ata_force_link_limits - force link limits according to libata.force
+ *	@link: ATA link of interest
+ *
+ *	Force link flags and SATA spd limit according to libata.force
+ *	and whine about it.  When only the port part is specified
+ *	(e.g. 1:), the limit applies to all links connected to both
+ *	the host link and all fan-out ports connected via PMP.  If the
+ *	device part is specified as 0 (e.g. 1.00:), it specifies the
+ *	first fan-out link not the host link.  Device number 15 always
+ *	points to the host link whether PMP is attached or not.  If the
+ *	controller has a slave link, device number 16 points to it.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+static void ata_force_link_limits(struct ata_link *link)
+{
+	bool did_spd = false;
+	int linkno = link->pmp;
+	int i;
+
+	if (ata_is_host_link(link))
+		linkno += 15;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != linkno)
+			continue;
+
+		/* only honor the first spd limit */
+		if (!did_spd && fe->param.spd_limit) {
+			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
+					fe->param.name);
+			did_spd = true;
+		}
+
+		/* let lflags stack */
+		if (fe->param.lflags) {
+			link->flags |= fe->param.lflags;
+			ata_link_notice(link,
+					"FORCE: link flag 0x%x forced -> 0x%x\n",
+					fe->param.lflags, link->flags);
+		}
+	}
+}
+
+/**
+ *	ata_force_xfermask - force xfermask according to libata.force
+ *	@dev: ATA device of interest
+ *
+ *	Force xfer_mask according to libata.force and whine about it.
+ *	For consistency with link selection, device number 15 selects
+ *	the first device connected to the host link.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+static void ata_force_xfermask(struct ata_device *dev)
+{
+	int devno = dev->link->pmp + dev->devno;
+	int alt_devno = devno;
+	int i;
+
+	/* allow n.15/16 for devices attached to host port */
+	if (ata_is_host_link(dev->link))
+		alt_devno += 15;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+		unsigned long pio_mask, mwdma_mask, udma_mask;
+
+		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != devno &&
+		    fe->device != alt_devno)
+			continue;
+
+		if (!fe->param.xfer_mask)
+			continue;
+
+		ata_unpack_xfermask(fe->param.xfer_mask,
+				    &pio_mask, &mwdma_mask, &udma_mask);
+		if (udma_mask)
+			dev->udma_mask = udma_mask;
+		else if (mwdma_mask) {
+			dev->udma_mask = 0;
+			dev->mwdma_mask = mwdma_mask;
+		} else {
+			dev->udma_mask = 0;
+			dev->mwdma_mask = 0;
+			dev->pio_mask = pio_mask;
+		}
+
+		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
+			       fe->param.name);
+		return;
+	}
+}
+
+/**
+ *	ata_force_horkage - force horkage according to libata.force
+ *	@dev: ATA device of interest
+ *
+ *	Force horkage according to libata.force and whine about it.
+ *	For consistency with link selection, device number 15 selects
+ *	the first device connected to the host link.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+static void ata_force_horkage(struct ata_device *dev)
+{
+	int devno = dev->link->pmp + dev->devno;
+	int alt_devno = devno;
+	int i;
+
+	/* allow n.15/16 for devices attached to host port */
+	if (ata_is_host_link(dev->link))
+		alt_devno += 15;
+
+	for (i = 0; i < ata_force_tbl_size; i++) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != devno &&
+		    fe->device != alt_devno)
+			continue;
+
+		if (!(~dev->horkage & fe->param.horkage_on) &&
+		    !(dev->horkage & fe->param.horkage_off))
+			continue;
+
+		dev->horkage |= fe->param.horkage_on;
+		dev->horkage &= ~fe->param.horkage_off;
+
+		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+			       fe->param.name);
+	}
+}
+
+/**
+ *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
+ *	@opcode: SCSI opcode
+ *
+ *	Determine ATAPI command type from @opcode.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
+ */
+int atapi_cmd_type(u8 opcode)
+{
+	switch (opcode) {
+	case GPCMD_READ_10:
+	case GPCMD_READ_12:
+		return ATAPI_READ;
+
+	case GPCMD_WRITE_10:
+	case GPCMD_WRITE_12:
+	case GPCMD_WRITE_AND_VERIFY_10:
+		return ATAPI_WRITE;
+
+	case GPCMD_READ_CD:
+	case GPCMD_READ_CD_MSF:
+		return ATAPI_READ_CD;
+
+	case ATA_16:
+	case ATA_12:
+		if (atapi_passthru16)
+			return ATAPI_PASS_THRU;
+		/* fall thru */
+	default:
+		return ATAPI_MISC;
+	}
+}
+
+/**
+ *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ *	@tf: Taskfile to convert
+ *	@pmp: Port multiplier port
+ *	@is_cmd: This FIS is for command
+ *	@fis: Buffer into which data will be output
+ *
+ *	Converts a standard ATA taskfile to a Serial ATA
+ *	FIS structure (Register - Host to Device).
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
+{
+	fis[0] = 0x27;			/* Register - Host to Device FIS */
+	fis[1] = pmp & 0xf;		/* Port multiplier number*/
+	if (is_cmd)
+		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
+
+	fis[2] = tf->command;
+	fis[3] = tf->feature;
+
+	fis[4] = tf->lbal;
+	fis[5] = tf->lbam;
+	fis[6] = tf->lbah;
+	fis[7] = tf->device;
+
+	fis[8] = tf->hob_lbal;
+	fis[9] = tf->hob_lbam;
+	fis[10] = tf->hob_lbah;
+	fis[11] = tf->hob_feature;
+
+	fis[12] = tf->nsect;
+	fis[13] = tf->hob_nsect;
+	fis[14] = 0;
+	fis[15] = tf->ctl;
+
+	fis[16] = tf->auxiliary & 0xff;
+	fis[17] = (tf->auxiliary >> 8) & 0xff;
+	fis[18] = (tf->auxiliary >> 16) & 0xff;
+	fis[19] = (tf->auxiliary >> 24) & 0xff;
+}
+
+/**
+ *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ *	@fis: Buffer from which data will be input
+ *	@tf: Taskfile to output
+ *
+ *	Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+	tf->command	= fis[2];	/* status */
+	tf->feature	= fis[3];	/* error */
+
+	tf->lbal	= fis[4];
+	tf->lbam	= fis[5];
+	tf->lbah	= fis[6];
+	tf->device	= fis[7];
+
+	tf->hob_lbal	= fis[8];
+	tf->hob_lbam	= fis[9];
+	tf->hob_lbah	= fis[10];
+
+	tf->nsect	= fis[12];
+	tf->hob_nsect	= fis[13];
+}
+
+static const u8 ata_rw_cmds[] = {
+	/* pio multi */
+	ATA_CMD_READ_MULTI,
+	ATA_CMD_WRITE_MULTI,
+	ATA_CMD_READ_MULTI_EXT,
+	ATA_CMD_WRITE_MULTI_EXT,
+	0,
+	0,
+	0,
+	ATA_CMD_WRITE_MULTI_FUA_EXT,
+	/* pio */
+	ATA_CMD_PIO_READ,
+	ATA_CMD_PIO_WRITE,
+	ATA_CMD_PIO_READ_EXT,
+	ATA_CMD_PIO_WRITE_EXT,
+	0,
+	0,
+	0,
+	0,
+	/* dma */
+	ATA_CMD_READ,
+	ATA_CMD_WRITE,
+	ATA_CMD_READ_EXT,
+	ATA_CMD_WRITE_EXT,
+	0,
+	0,
+	0,
+	ATA_CMD_WRITE_FUA_EXT
+};
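+
+/*
+ * Descriptive note: ata_rwcmd_protocol() below indexes ata_rw_cmds[] as
+ * base + fua + lba48 + write, where the base selects an 8-entry group
+ * (0 = PIO multi, 8 = PIO, 16 = DMA), FUA adds 4, LBA48 adds 2 and a
+ * write adds 1; zero entries mark unsupported combinations.
+ */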
+
+/**
+ *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ *	@tf: command to examine and configure
+ *	@dev: device tf belongs to
+ *
+ *	Examine the device configuration and tf->flags to calculate
+ *	the proper read/write commands and protocol to use.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
+{
+	u8 cmd;
+
+	int index, fua, lba48, write;
+
+	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
+	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	if (dev->flags & ATA_DFLAG_PIO) {
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 8;
+	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
+		/* Unable to use DMA due to host limitation */
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 8;
+	} else {
+		tf->protocol = ATA_PROT_DMA;
+		index = 16;
+	}
+
+	cmd = ata_rw_cmds[index + fua + lba48 + write];
+	if (cmd) {
+		tf->command = cmd;
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ *	ata_tf_read_block - Read block address from ATA taskfile
+ *	@tf: ATA taskfile of interest
+ *	@dev: ATA device @tf belongs to
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Read block address from @tf.  This function can handle all
+ *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
+ *	flags select the address format to use.
+ *
+ *	RETURNS:
+ *	Block address read from @tf.
+ */
+u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+{
+	u64 block = 0;
+
+	if (tf->flags & ATA_TFLAG_LBA) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			block |= (u64)tf->hob_lbah << 40;
+			block |= (u64)tf->hob_lbam << 32;
+			block |= (u64)tf->hob_lbal << 24;
+		} else
+			block |= (tf->device & 0xf) << 24;
+
+		block |= tf->lbah << 16;
+		block |= tf->lbam << 8;
+		block |= tf->lbal;
+	} else {
+		u32 cyl, head, sect;
+
+		cyl = tf->lbam | (tf->lbah << 8);
+		head = tf->device & 0xf;
+		sect = tf->lbal;
+
+		if (!sect) {
+			ata_dev_warn(dev,
+				     "device reported invalid CHS sector 0\n");
+			return U64_MAX;
+		}
+
+		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
+	}
+
+	return block;
+}
+
+/**
+ *	ata_build_rw_tf - Build ATA taskfile for given read/write request
+ *	@tf: Target ATA taskfile
+ *	@dev: ATA device @tf belongs to
+ *	@block: Block address
+ *	@n_block: Number of blocks
+ *	@tf_flags: RW/FUA etc...
+ *	@tag: tag
+ *	@class: IO priority class
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Build ATA taskfile @tf for read/write request described by
+ *	@block, @n_block, @tf_flags and @tag on @dev.
+ *
+ *	RETURNS:
+ *
+ *	0 on success, -ERANGE if the request is too large for @dev,
+ *	-EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+		    u64 block, u32 n_block, unsigned int tf_flags,
+		    unsigned int tag, int class)
+{
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->flags |= tf_flags;
+
+	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
+		/* yay, NCQ */
+		if (!lba_48_ok(block, n_block))
+			return -ERANGE;
+
+		tf->protocol = ATA_PROT_NCQ;
+		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			tf->command = ATA_CMD_FPDMA_WRITE;
+		else
+			tf->command = ATA_CMD_FPDMA_READ;
+
+		tf->nsect = tag << 3;
+		tf->hob_feature = (n_block >> 8) & 0xff;
+		tf->feature = n_block & 0xff;
+
+		tf->hob_lbah = (block >> 40) & 0xff;
+		tf->hob_lbam = (block >> 32) & 0xff;
+		tf->hob_lbal = (block >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device = ATA_LBA;
+		if (tf->flags & ATA_TFLAG_FUA)
+			tf->device |= 1 << 7;
+
+		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
+			if (class == IOPRIO_CLASS_RT)
+				tf->hob_nsect |= ATA_PRIO_HIGH <<
+						 ATA_SHIFT_PRIO;
+		}
+	} else if (dev->flags & ATA_DFLAG_LBA) {
+		tf->flags |= ATA_TFLAG_LBA;
+
+		if (lba_28_ok(block, n_block)) {
+			/* use LBA28 */
+			tf->device |= (block >> 24) & 0xf;
+		} else if (lba_48_ok(block, n_block)) {
+			if (!(dev->flags & ATA_DFLAG_LBA48))
+				return -ERANGE;
+
+			/* use LBA48 */
+			tf->flags |= ATA_TFLAG_LBA48;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else
+			/* request too large even for LBA48 */
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		tf->nsect = n_block & 0xff;
+
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
+	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* The request -may- be too large for CHS addressing. */
+		if (!lba_28_ok(block, n_block))
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
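+		/*
+		 * Worked example (illustrative): with dev->heads == 16 and
+		 * dev->sectors == 63, block 5000 converts to track 79,
+		 * cyl 4, head 15, sect 24.
+		 */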
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit.
+		 * Cylinder: 0-65535
+		 * Head: 0-15
+		 * Sector: 1-255
+		 */
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			return -ERANGE;
+
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
+ *	@pio_mask: pio_mask
+ *	@mwdma_mask: mwdma_mask
+ *	@udma_mask: udma_mask
+ *
+ *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
+ *	unsigned int xfer_mask.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Packed xfer_mask.
+ */
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+				unsigned long mwdma_mask,
+				unsigned long udma_mask)
+{
+	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
+		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
+		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
+}
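+
+/*
+ * A small usage sketch (illustrative): a driver-advertised mode set such
+ * as
+ *
+ *	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
+ *						    ATA_UDMA6);
+ *
+ * yields a single mask covering PIO0-4, MWDMA0-2 and UDMA0-6.
+ */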
+
+/**
+ *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
+ *	@xfer_mask: xfer_mask to unpack
+ *	@pio_mask: resulting pio_mask
+ *	@mwdma_mask: resulting mwdma_mask
+ *	@udma_mask: resulting udma_mask
+ *
+ *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
+ *	Any NULL destination masks will be ignored.
+ */
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
+			 unsigned long *mwdma_mask, unsigned long *udma_mask)
+{
+	if (pio_mask)
+		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
+	if (mwdma_mask)
+		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
+	if (udma_mask)
+		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
+}
+
+static const struct ata_xfer_ent {
+	int shift, bits;
+	u8 base;
+} ata_xfer_tbl[] = {
+	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
+	{ -1, },
+};
+
+/**
+ *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
+ *	@xfer_mask: xfer_mask of interest
+ *
+ *	Return matching XFER_* value for @xfer_mask.  Only the highest
+ *	bit of @xfer_mask is considered.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Matching XFER_* value, 0xff if no match found.
+ */
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
+{
+	int highbit = fls(xfer_mask) - 1;
+	const struct ata_xfer_ent *ent;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
+			return ent->base + highbit - ent->shift;
+	return 0xff;
+}
+
+/**
+ *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
+ *	@xfer_mode: XFER_* of interest
+ *
+ *	Return matching xfer_mask for @xfer_mode.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Matching xfer_mask, 0 if no match found.
+ */
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
+{
+	const struct ata_xfer_ent *ent;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
+			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+				& ~((1 << ent->shift) - 1);
+	return 0;
+}
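+
+/*
+ * For example (illustrative): passing XFER_MW_DMA_2 returns a mask with
+ * the MWDMA0..MWDMA2 bits set, i.e. the given mode plus every lower mode
+ * in the same transfer class.
+ */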
+
+/**
+ *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
+ *	@xfer_mode: XFER_* of interest
+ *
+ *	Return matching xfer_shift for @xfer_mode.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Matching xfer_shift, -1 if no match found.
+ */
+int ata_xfer_mode2shift(unsigned long xfer_mode)
+{
+	const struct ata_xfer_ent *ent;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
+			return ent->shift;
+	return -1;
+}
+
+/**
+ *	ata_mode_string - convert xfer_mask to string
+ *	@xfer_mask: mask of bits supported; only highest bit counts.
+ *
+ *	Determine string which represents the highest speed
+ *	(highest bit in @xfer_mask).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Constant C string representing highest speed listed in
+ *	@xfer_mask, or the constant C string "<n/a>".
+ */
+const char *ata_mode_string(unsigned long xfer_mask)
+{
+	static const char * const xfer_mode_str[] = {
+		"PIO0",
+		"PIO1",
+		"PIO2",
+		"PIO3",
+		"PIO4",
+		"PIO5",
+		"PIO6",
+		"MWDMA0",
+		"MWDMA1",
+		"MWDMA2",
+		"MWDMA3",
+		"MWDMA4",
+		"UDMA/16",
+		"UDMA/25",
+		"UDMA/33",
+		"UDMA/44",
+		"UDMA/66",
+		"UDMA/100",
+		"UDMA/133",
+		"UDMA7",
+	};
+	int highbit;
+
+	highbit = fls(xfer_mask) - 1;
+	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
+		return xfer_mode_str[highbit];
+	return "<n/a>";
+}
+
+const char *sata_spd_string(unsigned int spd)
+{
+	static const char * const spd_str[] = {
+		"1.5 Gbps",
+		"3.0 Gbps",
+		"6.0 Gbps",
+	};
+
+	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
+		return "<unknown>";
+	return spd_str[spd - 1];
+}
+
+/**
+ *	ata_dev_classify - determine device type based on ATA-spec signature
+ *	@tf: ATA taskfile register set for device to be identified
+ *
+ *	Determine from taskfile register contents whether a device is
+ *	ATA or ATAPI, as per "Signature and persistence" section
+ *	of ATA/PI spec (volume 1, sect 5.14).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
+ *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
+ */
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
+{
+	/* Apple's open source Darwin code hints that some devices only
+	 * put a proper signature into the LBA mid/high registers,
+	 * so we only check those.  It's sufficient for uniqueness.
+	 *
+	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
+	 * signatures for ATA and ATAPI devices attached on SerialATA,
+	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
+	 * spec never mentioned using different signatures for
+	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port
+	 * Multiplier specification began to use 0x69/0x96 to identify
+	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
+	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
+	 * 0x69/0x96 and described them as reserved for
+	 * SerialATA.
+	 *
+	 * We follow the current spec and consider that 0x69/0x96
+	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
+	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
+	 * SEMB signature.  This is worked around in
+	 * ata_dev_read_id().
+	 */
+	if ((tf->lbam == 0) && (tf->lbah == 0)) {
+		DPRINTK("found ATA device by sig\n");
+		return ATA_DEV_ATA;
+	}
+
+	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
+		DPRINTK("found ATAPI device by sig\n");
+		return ATA_DEV_ATAPI;
+	}
+
+	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
+		DPRINTK("found PMP device by sig\n");
+		return ATA_DEV_PMP;
+	}
+
+	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
+		DPRINTK("found SEMB device by sig (could be ATA device)\n");
+		return ATA_DEV_SEMB;
+	}
+
+	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
+		DPRINTK("found ZAC device by sig\n");
+		return ATA_DEV_ZAC;
+	}
+
+	DPRINTK("unknown device\n");
+	return ATA_DEV_UNKNOWN;
+}
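+
+/*
+ * Illustration (not driver code): after reset, an ATAPI device leaves
+ * the signature 0x14/0xeb in the LBA mid/high registers, so a taskfile
+ * snapshot classifies as follows:
+ *
+ *	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };
+ *
+ *	WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
+ */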
+
+/**
+ *	ata_id_string - Convert IDENTIFY DEVICE page into string
+ *	@id: IDENTIFY DEVICE results we will examine
+ *	@s: string into which data is output
+ *	@ofs: offset into identify device page
+ *	@len: length of string to return. Must be an even number.
+ *
+ *	The strings in the IDENTIFY DEVICE page are broken up into
+ *	16-bit chunks.  Run through the string, and output each
+ *	8-bit chunk linearly, regardless of platform.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_id_string(const u16 *id, unsigned char *s,
+		   unsigned int ofs, unsigned int len)
+{
+	unsigned int c;
+
+	BUG_ON(len & 1);
+
+	while (len > 0) {
+		c = id[ofs] >> 8;
+		*s = c;
+		s++;
+
+		c = id[ofs] & 0xff;
+		*s = c;
+		s++;
+
+		ofs++;
+		len -= 2;
+	}
+}
+
+/**
+ *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
+ *	@id: IDENTIFY DEVICE results we will examine
+ *	@s: string into which data is output
+ *	@ofs: offset into identify device page
+ *	@len: length of string to return. Must be an odd number.
+ *
+ *	This function is identical to ata_id_string except that it
+ *	trims trailing spaces and terminates the resulting string with
+ *	a NUL.  @len must be the actual maximum length (even number) + 1.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_id_c_string(const u16 *id, unsigned char *s,
+		     unsigned int ofs, unsigned int len)
+{
+	unsigned char *p;
+
+	ata_id_string(id, s, ofs, len - 1);
+
+	p = s + strnlen(s, len - 1);
+	while (p > s && p[-1] == ' ')
+		p--;
+	*p = '\0';
+}
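+
+/*
+ * Typical use, mirroring what ata_dev_configure() does below: pull the
+ * product string out of the IDENTIFY data into a NUL-terminated buffer.
+ *
+ *	unsigned char model[ATA_ID_PROD_LEN + 1];
+ *
+ *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
+ */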
+
+static u64 ata_id_n_sectors(const u16 *id)
+{
+	if (ata_id_has_lba(id)) {
+		if (ata_id_has_lba48(id))
+			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+		else
+			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+	} else {
+		if (ata_id_current_chs_valid(id))
+			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
+			       id[ATA_ID_CUR_SECTORS];
+		else
+			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
+			       id[ATA_ID_SECTORS];
+	}
+}
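+
+/*
+ * Worked example: a pre-LBA drive reporting the classic CHS geometry
+ * 16383/16/63 yields 16383 * 16 * 63 = 16514064 sectors, i.e. the
+ * well-known ~8.4 GB ATA CHS addressing ceiling.
+ */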
+
+u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
+{
+	u64 sectors = 0;
+
+	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
+	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
+	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
+	sectors |= (tf->lbah & 0xff) << 16;
+	sectors |= (tf->lbam & 0xff) << 8;
+	sectors |= (tf->lbal & 0xff);
+
+	return sectors;
+}
+
+u64 ata_tf_to_lba(const struct ata_taskfile *tf)
+{
+	u64 sectors = 0;
+
+	sectors |= (tf->device & 0x0f) << 24;
+	sectors |= (tf->lbah & 0xff) << 16;
+	sectors |= (tf->lbam & 0xff) << 8;
+	sectors |= (tf->lbal & 0xff);
+
+	return sectors;
+}
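+
+/*
+ * Note on the packing above: LBA28 combines 4 bits from the device
+ * register with 24 bits from lbah/lbam/lbal for a 2^28-sector address
+ * space (about 137 GB at 512 bytes per sector), while LBA48 extends
+ * this to 2^48 sectors via the hob_* shadow registers.
+ */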
+
+/**
+ *	ata_read_native_max_address - Read native max address
+ *	@dev: target device
+ *	@max_sectors: out parameter for the result native max address
+ *
+ *	Perform an LBA48 or LBA28 native size query upon the device in
+ *	question.
+ *
+ *	RETURNS:
+ *	0 on success, -EACCES if command is aborted by the drive.
+ *	-EIO on other errors.
+ */
+static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
+{
+	unsigned int err_mask;
+	struct ata_taskfile tf;
+	int lba48 = ata_id_has_lba48(dev->id);
+
+	ata_tf_init(dev, &tf);
+
+	/* always clear all address registers */
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+
+	if (lba48) {
+		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
+		tf.flags |= ATA_TFLAG_LBA48;
+	} else
+		tf.command = ATA_CMD_READ_NATIVE_MAX;
+
+	tf.protocol = ATA_PROT_NODATA;
+	tf.device |= ATA_LBA;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (err_mask) {
+		ata_dev_warn(dev,
+			     "failed to read native max address (err_mask=0x%x)\n",
+			     err_mask);
+		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+			return -EACCES;
+		return -EIO;
+	}
+
+	if (lba48)
+		*max_sectors = ata_tf_to_lba48(&tf) + 1;
+	else
+		*max_sectors = ata_tf_to_lba(&tf) + 1;
+	/* some drives report native max address off by one */
+	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+		(*max_sectors)--;
+	return 0;
+}
+
+/**
+ *	ata_set_max_sectors - Set max sectors
+ *	@dev: target device
+ *	@new_sectors: new max sectors value to set for the device
+ *
+ *	Set max sectors of @dev to @new_sectors.
+ *
+ *	RETURNS:
+ *	0 on success, -EACCES if command is aborted or denied (due to
+ *	previous non-volatile SET_MAX) by the drive.  -EIO on other
+ *	errors.
+ */
+static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
+{
+	unsigned int err_mask;
+	struct ata_taskfile tf;
+	int lba48 = ata_id_has_lba48(dev->id);
+
+	new_sectors--;
+
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+
+	if (lba48) {
+		tf.command = ATA_CMD_SET_MAX_EXT;
+		tf.flags |= ATA_TFLAG_LBA48;
+
+		tf.hob_lbal = (new_sectors >> 24) & 0xff;
+		tf.hob_lbam = (new_sectors >> 32) & 0xff;
+		tf.hob_lbah = (new_sectors >> 40) & 0xff;
+	} else {
+		tf.command = ATA_CMD_SET_MAX;
+
+		tf.device |= (new_sectors >> 24) & 0xf;
+	}
+
+	tf.protocol = ATA_PROT_NODATA;
+	tf.device |= ATA_LBA;
+
+	tf.lbal = (new_sectors >> 0) & 0xff;
+	tf.lbam = (new_sectors >> 8) & 0xff;
+	tf.lbah = (new_sectors >> 16) & 0xff;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (err_mask) {
+		ata_dev_warn(dev,
+			     "failed to set max address (err_mask=0x%x)\n",
+			     err_mask);
+		if (err_mask == AC_ERR_DEV &&
+		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
+			return -EACCES;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_hpa_resize		-	Resize a device with an HPA set
+ *	@dev: Device to resize
+ *
+ *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
+ *	it, if required, to the full size of the media.  The caller must check
+ *	that the drive has the HPA feature set enabled.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int ata_hpa_resize(struct ata_device *dev)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
+	u64 sectors = ata_id_n_sectors(dev->id);
+	u64 native_sectors;
+	int rc;
+
+	/* do we need to do it? */
+	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
+	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
+	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+		return 0;
+
+	/* read native max address */
+	rc = ata_read_native_max_address(dev, &native_sectors);
+	if (rc) {
+		/* If device aborted the command or HPA isn't going to
+		 * be unlocked, skip HPA resizing.
+		 */
+		if (rc == -EACCES || !unlock_hpa) {
+			ata_dev_warn(dev,
+				     "HPA support seems broken, skipping HPA handling\n");
+			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+
+			/* we can continue if device aborted the command */
+			if (rc == -EACCES)
+				rc = 0;
+		}
+
+		return rc;
+	}
+	dev->n_native_sectors = native_sectors;
+
+	/* nothing to do? */
+	if (native_sectors <= sectors || !unlock_hpa) {
+		if (!print_info || native_sectors == sectors)
+			return 0;
+
+		if (native_sectors > sectors)
+			ata_dev_info(dev,
+				"HPA detected: current %llu, native %llu\n",
+				(unsigned long long)sectors,
+				(unsigned long long)native_sectors);
+		else if (native_sectors < sectors)
+			ata_dev_warn(dev,
+				"native sectors (%llu) is smaller than sectors (%llu)\n",
+				(unsigned long long)native_sectors,
+				(unsigned long long)sectors);
+		return 0;
+	}
+
+	/* let's unlock HPA */
+	rc = ata_set_max_sectors(dev, native_sectors);
+	if (rc == -EACCES) {
+		/* if device aborted the command, skip HPA resizing */
+		ata_dev_warn(dev,
+			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
+			     (unsigned long long)sectors,
+			     (unsigned long long)native_sectors);
+		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+		return 0;
+	} else if (rc)
+		return rc;
+
+	/* re-read IDENTIFY data */
+	rc = ata_dev_reread_id(dev, 0);
+	if (rc) {
+		ata_dev_err(dev,
+			    "failed to re-read IDENTIFY data after HPA resizing\n");
+		return rc;
+	}
+
+	if (print_info) {
+		u64 new_sectors = ata_id_n_sectors(dev->id);
+		ata_dev_info(dev,
+			"HPA unlocked: %llu -> %llu, native %llu\n",
+			(unsigned long long)sectors,
+			(unsigned long long)new_sectors,
+			(unsigned long long)native_sectors);
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_dump_id - IDENTIFY DEVICE info debugging output
+ *	@id: IDENTIFY DEVICE page to dump
+ *
+ *	Dump selected 16-bit words from the given IDENTIFY DEVICE
+ *	page.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static inline void ata_dump_id(const u16 *id)
+{
+	DPRINTK("49==0x%04x  "
+		"53==0x%04x  "
+		"63==0x%04x  "
+		"64==0x%04x  "
+		"75==0x%04x  \n",
+		id[49],
+		id[53],
+		id[63],
+		id[64],
+		id[75]);
+	DPRINTK("80==0x%04x  "
+		"81==0x%04x  "
+		"82==0x%04x  "
+		"83==0x%04x  "
+		"84==0x%04x  \n",
+		id[80],
+		id[81],
+		id[82],
+		id[83],
+		id[84]);
+	DPRINTK("88==0x%04x  "
+		"93==0x%04x\n",
+		id[88],
+		id[93]);
+}
+
+/**
+ *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
+ *	@id: IDENTIFY data to compute xfer mask from
+ *
+ *	Compute the xfermask for this device.  This is not as trivial
+ *	as it seems, since early devices must be considered correctly.
+ *
+ *	FIXME: pre IDE drive timing (do we care ?).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Computed xfermask
+ */
+unsigned long ata_id_xfermask(const u16 *id)
+{
+	unsigned long pio_mask, mwdma_mask, udma_mask;
+
+	/* Usual case. Word 53 indicates word 64 is valid */
+	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
+		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
+		pio_mask <<= 3;
+		pio_mask |= 0x7;
+	} else {
+		/* If word 64 isn't valid then Word 51 high byte holds
+		 * the PIO timing number for the maximum. Turn it into
+		 * a mask.
+		 */
+		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
+		if (mode < 5)	/* Valid PIO range */
+			pio_mask = (2 << mode) - 1;
+		else
+			pio_mask = 1;
+
+		/* But wait... there's more. Design your standards by
+		 * committee and you too can get a free iordy field to
+		 * process. However it's the speeds, not the modes, that
+		 * are supported... Note drivers using the timing API
+		 * will get this right anyway.
+		 */
+	}
+
+	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
+
+	if (ata_id_is_cfa(id)) {
+		/*
+		 *	Process compact flash extended modes
+		 */
+		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
+		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
+
+		if (pio)
+			pio_mask |= (1 << 5);
+		if (pio > 1)
+			pio_mask |= (1 << 6);
+		if (dma)
+			mwdma_mask |= (1 << 3);
+		if (dma > 1)
+			mwdma_mask |= (1 << 4);
+	}
+
+	udma_mask = 0;
+	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
+		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
+
+	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+}
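+
+/*
+ * Sketch of the inverse operation (illustrative): a packed xfer_mask
+ * can be split back into its per-type masks with ata_unpack_xfermask():
+ *
+ *	unsigned long pio, mwdma, udma;
+ *
+ *	ata_unpack_xfermask(ata_id_xfermask(dev->id), &pio, &mwdma, &udma);
+ */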
+
+static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+{
+	struct completion *waiting = qc->private_data;
+
+	complete(waiting);
+}
+
+/**
+ *	ata_exec_internal_sg - execute libata internal command
+ *	@dev: Device to which the command is sent
+ *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
+ *	@dma_dir: Data transfer direction of the command
+ *	@sgl: sg list for the data buffer of the command
+ *	@n_elem: Number of sg entries
+ *	@timeout: Timeout in msecs (0 for default)
+ *
+ *	Executes libata internal command with timeout.  @tf contains
+ *	command on entry and result on return.  Timeout and error
+ *	conditions are reported via return value.  No recovery action
+ *	is taken after a command times out.  It's the caller's duty to
+ *	clean up after a timeout.
+ *
+ *	LOCKING:
+ *	None.  Should be called with kernel context, might sleep.
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal_sg(struct ata_device *dev,
+			      struct ata_taskfile *tf, const u8 *cdb,
+			      int dma_dir, struct scatterlist *sgl,
+			      unsigned int n_elem, unsigned long timeout)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	u8 command = tf->command;
+	int auto_timeout = 0;
+	struct ata_queued_cmd *qc;
+	unsigned int preempted_tag;
+	u32 preempted_sactive;
+	u64 preempted_qc_active;
+	int preempted_nr_active_links;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	unsigned long flags;
+	unsigned int err_mask;
+	int rc;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* no internal command while frozen */
+	if (ap->pflags & ATA_PFLAG_FROZEN) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return AC_ERR_SYSTEM;
+	}
+
+	/* initialize internal qc */
+	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+
+	qc->tag = ATA_TAG_INTERNAL;
+	qc->hw_tag = 0;
+	qc->scsicmd = NULL;
+	qc->ap = ap;
+	qc->dev = dev;
+	ata_qc_reinit(qc);
+
+	preempted_tag = link->active_tag;
+	preempted_sactive = link->sactive;
+	preempted_qc_active = ap->qc_active;
+	preempted_nr_active_links = ap->nr_active_links;
+	link->active_tag = ATA_TAG_POISON;
+	link->sactive = 0;
+	ap->qc_active = 0;
+	ap->nr_active_links = 0;
+
+	/* prepare & issue qc */
+	qc->tf = *tf;
+	if (cdb)
+		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+
+	/* some SATA bridges need us to indicate data xfer direction */
+	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
+	    dma_dir == DMA_FROM_DEVICE)
+		qc->tf.feature |= ATAPI_DMADIR;
+
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
+	qc->dma_dir = dma_dir;
+	if (dma_dir != DMA_NONE) {
+		unsigned int i, buflen = 0;
+		struct scatterlist *sg;
+
+		for_each_sg(sgl, sg, n_elem, i)
+			buflen += sg->length;
+
+		ata_sg_init(qc, sgl, n_elem);
+		qc->nbytes = buflen;
+	}
+
+	qc->private_data = &wait;
+	qc->complete_fn = ata_qc_complete_internal;
+
+	ata_qc_issue(qc);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (!timeout) {
+		if (ata_probe_timeout)
+			timeout = ata_probe_timeout * 1000;
+		else {
+			timeout = ata_internal_cmd_timeout(dev, command);
+			auto_timeout = 1;
+		}
+	}
+
+	if (ap->ops->error_handler)
+		ata_eh_release(ap);
+
+	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
+
+	if (ap->ops->error_handler)
+		ata_eh_acquire(ap);
+
+	ata_sff_flush_pio_task(ap);
+
+	if (!rc) {
+		spin_lock_irqsave(ap->lock, flags);
+
+		/* We're racing with irq here.  If we lose, the
+		 * following test prevents us from completing the qc
+		 * twice.  If we win, the port is frozen and will be
+		 * cleaned up by ->post_internal_cmd().
+		 */
+		if (qc->flags & ATA_QCFLAG_ACTIVE) {
+			qc->err_mask |= AC_ERR_TIMEOUT;
+
+			if (ap->ops->error_handler)
+				ata_port_freeze(ap);
+			else
+				ata_qc_complete(qc);
+
+			if (ata_msg_warn(ap))
+				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
+					     command);
+		}
+
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	/* do post_internal_cmd */
+	if (ap->ops->post_internal_cmd)
+		ap->ops->post_internal_cmd(qc);
+
+	/* perform minimal error analysis */
+	if (qc->flags & ATA_QCFLAG_FAILED) {
+		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+			qc->err_mask |= AC_ERR_DEV;
+
+		if (!qc->err_mask)
+			qc->err_mask |= AC_ERR_OTHER;
+
+		if (qc->err_mask & ~AC_ERR_OTHER)
+			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
+	}
+
+	/* finish up */
+	spin_lock_irqsave(ap->lock, flags);
+
+	*tf = qc->result_tf;
+	err_mask = qc->err_mask;
+
+	ata_qc_free(qc);
+	link->active_tag = preempted_tag;
+	link->sactive = preempted_sactive;
+	ap->qc_active = preempted_qc_active;
+	ap->nr_active_links = preempted_nr_active_links;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
+		ata_internal_cmd_timed_out(dev, command);
+
+	return err_mask;
+}
+
+/**
+ *	ata_exec_internal - execute libata internal command
+ *	@dev: Device to which the command is sent
+ *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
+ *	@dma_dir: Data transfer direction of the command
+ *	@buf: Data buffer of the command
+ *	@buflen: Length of data buffer
+ *	@timeout: Timeout in msecs (0 for default)
+ *
+ *	Wrapper around ata_exec_internal_sg() which takes simple
+ *	buffer instead of sg list.
+ *
+ *	LOCKING:
+ *	None.  Should be called with kernel context, might sleep.
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal(struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen,
+			   unsigned long timeout)
+{
+	struct scatterlist *psg = NULL, sg;
+	unsigned int n_elem = 0;
+
+	if (dma_dir != DMA_NONE) {
+		WARN_ON(!buf);
+		sg_init_one(&sg, buf, buflen);
+		psg = &sg;
+		n_elem++;
+	}
+
+	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
+				    timeout);
+}
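+
+/*
+ * Usage pattern (see ata_read_native_max_address() above for a real
+ * caller): initialize a taskfile, pick a command and protocol, issue
+ * it, then inspect the returned AC_ERR_* mask.  A minimal sketch for
+ * a no-data command:
+ *
+ *	struct ata_taskfile tf;
+ *	unsigned int err_mask;
+ *
+ *	ata_tf_init(dev, &tf);
+ *	tf.command  = ATA_CMD_CHK_POWER;
+ *	tf.flags   |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ *	tf.protocol = ATA_PROT_NODATA;
+ *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ *
+ * where a non-zero err_mask reports the failure.
+ */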
+
+/**
+ *	ata_pio_need_iordy	-	check if iordy needed
+ *	@adev: ATA device
+ *
+ *	Check if the current speed of the device requires IORDY. Used
+ *	by various controllers for chip configuration.
+ */
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+	/* Don't set IORDY if we're preparing for reset.  IORDY may
+	 * lead to controller lock up on certain controllers if the
+	 * port is not occupied.  See bko#11703 for details.
+	 */
+	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
+		return 0;
+	/* Controller doesn't support IORDY.  Probably a pointless
+	 * check as the caller should know this.
+	 */
+	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
+		return 0;
+	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
+	if (ata_id_is_cfa(adev->id)
+	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
+		return 0;
+	/* PIO3 and higher it is mandatory */
+	if (adev->pio_mode > XFER_PIO_2)
+		return 1;
+	/* We turn it on when possible */
+	if (ata_id_has_iordy(adev->id))
+		return 1;
+	return 0;
+}
+
+/**
+ *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
+ *	@adev: ATA device
+ *
+ *	Compute the mask of PIO modes usable without IORDY.  The
+ *	returned mask always allows at least PIO0 and PIO1.
+ */
+static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
+{
+	/* If we have no drive-specific rule, then PIO 2 is non IORDY */
+	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
+		u16 pio = adev->id[ATA_ID_EIDE_PIO];
+		/* Is the speed faster than the drive allows non IORDY? */
+		if (pio) {
+			/* These are cycle times, not frequencies - watch the logic! */
+			if (pio > 240)	/* PIO2 is 240nS per cycle */
+				return 3 << ATA_SHIFT_PIO;
+			return 7 << ATA_SHIFT_PIO;
+		}
+	}
+	return 3 << ATA_SHIFT_PIO;
+}
+
+/**
+ *	ata_do_dev_read_id		-	default ID read method
+ *	@dev: device
+ *	@tf: proposed taskfile
+ *	@id: data buffer
+ *
+ *	Issue the identify taskfile and hand back the buffer containing
+ *	identify data.  For some RAID controllers and for pre-ATA devices
+ *	this function is wrapped or replaced by the driver.
+ */
+unsigned int ata_do_dev_read_id(struct ata_device *dev,
+					struct ata_taskfile *tf, u16 *id)
+{
+	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
+				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
+}
+
+/**
+ *	ata_dev_read_id - Read ID data from the specified device
+ *	@dev: target device
+ *	@p_class: pointer to class of the target device (may be changed)
+ *	@flags: ATA_READID_* flags
+ *	@id: buffer to read IDENTIFY data into
+ *
+ *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
+ *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
+ *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
+ *	for pre-ATA4 drives.
+ *
+ *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
+ *	now we abort if we hit that case.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+		    unsigned int flags, u16 *id)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int class = *p_class;
+	struct ata_taskfile tf;
+	unsigned int err_mask = 0;
+	const char *reason;
+	bool is_semb = class == ATA_DEV_SEMB;
+	int may_fallback = 1, tried_spinup = 0;
+	int rc;
+
+	if (ata_msg_ctl(ap))
+		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
+
+retry:
+	ata_tf_init(dev, &tf);
+
+	switch (class) {
+	case ATA_DEV_SEMB:
+		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
+		/* fall through */
+	case ATA_DEV_ATA:
+	case ATA_DEV_ZAC:
+		tf.command = ATA_CMD_ID_ATA;
+		break;
+	case ATA_DEV_ATAPI:
+		tf.command = ATA_CMD_ID_ATAPI;
+		break;
+	default:
+		rc = -ENODEV;
+		reason = "unsupported class";
+		goto err_out;
+	}
+
+	tf.protocol = ATA_PROT_PIO;
+
+	/* Some devices choke if TF registers contain garbage.  Make
+	 * sure those are properly initialized.
+	 */
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+
+	/* Device presence detection is unreliable on some
+	 * controllers.  Always poll IDENTIFY if available.
+	 */
+	tf.flags |= ATA_TFLAG_POLLING;
+
+	if (ap->ops->read_id)
+		err_mask = ap->ops->read_id(dev, &tf, id);
+	else
+		err_mask = ata_do_dev_read_id(dev, &tf, id);
+
+	if (err_mask) {
+		if (err_mask & AC_ERR_NODEV_HINT) {
+			ata_dev_dbg(dev, "NODEV after polling detection\n");
+			return -ENOENT;
+		}
+
+		if (is_semb) {
+			ata_dev_info(dev,
+		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
+			/* SEMB is not supported yet */
+			*p_class = ATA_DEV_SEMB_UNSUP;
+			return 0;
+		}
+
+		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+			/* Device or controller might have reported
+			 * the wrong device class.  Give a shot at the
+			 * other IDENTIFY if the current one is
+			 * aborted by the device.
+			 */
+			if (may_fallback) {
+				may_fallback = 0;
+
+				if (class == ATA_DEV_ATA)
+					class = ATA_DEV_ATAPI;
+				else
+					class = ATA_DEV_ATA;
+				goto retry;
+			}
+
+			/* Control reaches here iff the device aborted
+			 * both flavors of IDENTIFYs which happens
+			 * sometimes with phantom devices.
+			 */
+			ata_dev_dbg(dev,
+				    "both IDENTIFYs aborted, assuming NODEV\n");
+			return -ENOENT;
+		}
+
+		rc = -EIO;
+		reason = "I/O error";
+		goto err_out;
+	}
+
+	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+		ata_dev_dbg(dev, "dumping IDENTIFY data, "
+			    "class=%d may_fallback=%d tried_spinup=%d\n",
+			    class, may_fallback, tried_spinup);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
+	}
+
+	/* Falling back doesn't make sense if ID data was read
+	 * successfully at least once.
+	 */
+	may_fallback = 0;
+
+	swap_buf_le16(id, ATA_ID_WORDS);
+
+	/* sanity check */
+	rc = -EINVAL;
+	reason = "device reports invalid type";
+
+	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
+		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
+			goto err_out;
+		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
+							ata_id_is_ata(id)) {
+			ata_dev_dbg(dev,
+				"host indicates ignore ATA devices, ignored\n");
+			return -ENOENT;
+		}
+	} else {
+		if (ata_id_is_ata(id))
+			goto err_out;
+	}
+
+	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
+		tried_spinup = 1;
+		/*
+		 * Drive powered-up in standby mode, and requires a specific
+		 * SET_FEATURES spin-up subcommand before it will accept
+		 * anything other than the original IDENTIFY command.
+		 */
+		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
+		if (err_mask && id[2] != 0x738c) {
+			rc = -EIO;
+			reason = "SPINUP failed";
+			goto err_out;
+		}
+		/*
+		 * If the drive initially returned incomplete IDENTIFY info,
+		 * we now must reissue the IDENTIFY command.
+		 */
+		if (id[2] == 0x37c8)
+			goto retry;
+	}
+
+	if ((flags & ATA_READID_POSTRESET) &&
+	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
+		/*
+		 * The exact sequence expected by certain pre-ATA4 drives is:
+		 * SRST RESET
+		 * IDENTIFY (optional in early ATA)
+		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
+		 * anything else..
+		 * Some drives were very specific about that exact sequence.
+		 *
+		 * Note that ATA4 says lba is mandatory so the second check
+		 * should never trigger.
+		 */
+		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
+			err_mask = ata_dev_init_params(dev, id[3], id[6]);
+			if (err_mask) {
+				rc = -EIO;
+				reason = "INIT_DEV_PARAMS failed";
+				goto err_out;
+			}
+
+			/* current CHS translation info (id[53-58]) might be
+			 * changed. reread the identify device info.
+			 */
+			flags &= ~ATA_READID_POSTRESET;
+			goto retry;
+		}
+	}
+
+	*p_class = class;
+
+	return 0;
+
+ err_out:
+	if (ata_msg_warn(ap))
+		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
+			     reason, err_mask);
+	return rc;
+}
+
+/**
+ *	ata_read_log_page - read a specific log page
+ *	@dev: target device
+ *	@log: log to read
+ *	@page: page to read
+ *	@buf: buffer to store read page
+ *	@sectors: number of sectors to read
+ *
+ *	Read log page using READ_LOG_EXT command.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
+ */
+unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+			       u8 page, void *buf, unsigned int sectors)
+{
+	unsigned long ap_flags = dev->link->ap->flags;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+	bool dma = false;
+
+	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+
+	/*
+	 * Return error without actually issuing the command on controllers
+	 * which e.g. lock up on a read log page.
+	 */
+	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+		return AC_ERR_DEV;
+
+retry:
+	ata_tf_init(dev, &tf);
+	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
+	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
+		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
+		tf.protocol = ATA_PROT_DMA;
+		dma = true;
+	} else {
+		tf.command = ATA_CMD_READ_LOG_EXT;
+		tf.protocol = ATA_PROT_PIO;
+		dma = false;
+	}
+	tf.lbal = log;
+	tf.lbam = page;
+	tf.nsect = sectors;
+	tf.hob_nsect = sectors >> 8;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
+				     buf, sectors * ATA_SECT_SIZE, 0);
+
+	if (err_mask && dma) {
+		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
+		goto retry;
+	}
+
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
+}
+
+static bool ata_log_supported(struct ata_device *dev, u8 log)
+{
+	struct ata_port *ap = dev->link->ap;
+
+	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+		return false;
+	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
+}
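+
+/*
+ * The lookup above relies on the General Purpose Logging directory
+ * layout: log page 0 is the log directory, and the little-endian word
+ * at byte offset log * 2 holds the number of sectors available at that
+ * log address, so a non-zero word means the log is supported.
+ */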
+
+static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err, i;
+
+	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
+		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
+		return false;
+	}
+
+	/*
+	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
+	 * supported.
+	 */
+	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
+				1);
+	if (err) {
+		ata_dev_info(dev,
+			     "failed to get Device Identify Log Emask 0x%x\n",
+			     err);
+		return false;
+	}
+
+	for (i = 0; i < ap->sector_buf[8]; i++) {
+		if (ap->sector_buf[9 + i] == page)
+			return true;
+	}
+
+	return false;
+}
+
+static int ata_do_link_spd_horkage(struct ata_device *dev)
+{
+	struct ata_link *plink = ata_dev_phys_link(dev);
+	u32 target, target_limit;
+
+	if (!sata_scr_valid(plink))
+		return 0;
+
+	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
+		target = 1;
+	else
+		return 0;
+
+	target_limit = (1 << target) - 1;
+
+	/* if already on stricter limit, no need to push further */
+	if (plink->sata_spd_limit <= target_limit)
+		return 0;
+
+	plink->sata_spd_limit = target_limit;
+
+	/* Request another EH round by returning -EAGAIN if link is
+	 * going faster than the target speed.  Forward progress is
+	 * guaranteed by setting sata_spd_limit to target_limit above.
+	 */
+	if (plink->sata_spd > target) {
+		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+			     sata_spd_string(target));
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static inline u8 ata_dev_knobble(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+
+	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
+		return 0;
+
+	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+}
+
+static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+
+	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
+		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
+		return;
+	}
+	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
+			    err_mask);
+	} else {
+		u8 *cmds = dev->ncq_send_recv_cmds;
+
+		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
+		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+			ata_dev_dbg(dev, "disabling queued TRIM support\n");
+			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+		}
+	}
+}
+
+static void ata_dev_config_ncq_non_data(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+
+	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
+		ata_dev_warn(dev,
+			     "NCQ Non-Data Log not supported\n");
+		return;
+	}
+	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
+				     0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
+			    err_mask);
+	} else {
+		u8 *cmds = dev->ncq_non_data_cmds;
+
+		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
+	}
+}
+
+static void ata_dev_config_ncq_prio(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+
+	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+		return;
+	}
+
+	err_mask = ata_read_log_page(dev,
+				     ATA_LOG_IDENTIFY_DEVICE,
+				     ATA_LOG_SATA_SETTINGS,
+				     ap->sector_buf,
+				     1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get Identify Device data, Emask 0x%x\n",
+			    err_mask);
+		return;
+	}
+
+	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
+		dev->flags |= ATA_DFLAG_NCQ_PRIO;
+	} else {
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+		ata_dev_dbg(dev, "SATA page does not support priority\n");
+	}
+}
+
+static int ata_dev_config_ncq(struct ata_device *dev,
+			       char *desc, size_t desc_sz)
+{
+	struct ata_port *ap = dev->link->ap;
+	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+	unsigned int err_mask;
+	char *aa_desc = "";
+
+	if (!ata_id_has_ncq(dev->id)) {
+		desc[0] = '\0';
+		return 0;
+	}
+	if (dev->horkage & ATA_HORKAGE_NONCQ) {
+		snprintf(desc, desc_sz, "NCQ (not used)");
+		return 0;
+	}
+	if (ap->flags & ATA_FLAG_NCQ) {
+		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
+		dev->flags |= ATA_DFLAG_NCQ;
+	}
+
+	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+		(ap->flags & ATA_FLAG_FPDMA_AA) &&
+		ata_id_has_fpdma_aa(dev->id)) {
+		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
+			SATA_FPDMA_AA);
+		if (err_mask) {
+			ata_dev_err(dev,
+				    "failed to enable AA (error_mask=0x%x)\n",
+				    err_mask);
+			if (err_mask != AC_ERR_DEV) {
+				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+				return -EIO;
+			}
+		} else
+			aa_desc = ", AA";
+	}
+
+	if (hdepth >= ddepth)
+		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
+	else
+		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
+			ddepth, aa_desc);
+
+	if (ap->flags & ATA_FLAG_FPDMA_AUX) {
+		if (ata_id_has_ncq_send_and_recv(dev->id))
+			ata_dev_config_ncq_send_recv(dev);
+		if (ata_id_has_ncq_non_data(dev->id))
+			ata_dev_config_ncq_non_data(dev);
+		if (ata_id_has_ncq_prio(dev->id))
+			ata_dev_config_ncq_prio(dev);
+	}
+
+	return 0;
+}
+
+static void ata_dev_config_sense_reporting(struct ata_device *dev)
+{
+	unsigned int err_mask;
+
+	if (!ata_id_has_sense_reporting(dev->id))
+		return;
+
+	if (ata_id_sense_reporting_enabled(dev->id))
+		return;
+
+	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
+			    err_mask);
+	}
+}
+
+static void ata_dev_config_zac(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+	u8 *identify_buf = ap->sector_buf;
+
+	dev->zac_zones_optimal_open = U32_MAX;
+	dev->zac_zones_optimal_nonseq = U32_MAX;
+	dev->zac_zones_max_open = U32_MAX;
+
+	/*
+	 * Always set the 'ZAC' flag for Host-managed devices.
+	 */
+	if (dev->class == ATA_DEV_ZAC)
+		dev->flags |= ATA_DFLAG_ZAC;
+	else if (ata_id_zoned_cap(dev->id) == 0x01)
+		/*
+		 * Check for host-aware devices.
+		 */
+		dev->flags |= ATA_DFLAG_ZAC;
+
+	if (!(dev->flags & ATA_DFLAG_ZAC))
+		return;
+
+	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
+		ata_dev_warn(dev,
+			     "ATA Zoned Information Log not supported\n");
+		return;
+	}
+
+	/*
+	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+				     ATA_LOG_ZONED_INFORMATION,
+				     identify_buf, 1);
+	if (!err_mask) {
+		u64 zoned_cap, opt_open, opt_nonseq, max_open;
+
+		zoned_cap = get_unaligned_le64(&identify_buf[8]);
+		if ((zoned_cap >> 63))
+			dev->zac_zoned_cap = (zoned_cap & 1);
+		opt_open = get_unaligned_le64(&identify_buf[24]);
+		if ((opt_open >> 63))
+			dev->zac_zones_optimal_open = (u32)opt_open;
+		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
+		if ((opt_nonseq >> 63))
+			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
+		max_open = get_unaligned_le64(&identify_buf[40]);
+		if ((max_open >> 63))
+			dev->zac_zones_max_open = (u32)max_open;
+	}
+}
+
+static void ata_dev_config_trusted(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	u64 trusted_cap;
+	unsigned int err;
+
+	if (!ata_id_has_trusted(dev->id))
+		return;
+
+	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
+		ata_dev_warn(dev,
+			     "Security Log not supported\n");
+		return;
+	}
+
+	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
+			ap->sector_buf, 1);
+	if (err) {
+		ata_dev_dbg(dev,
+			    "failed to read Security Log, Emask 0x%x\n", err);
+		return;
+	}
+
+	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
+	if (!(trusted_cap & (1ULL << 63))) {
+		ata_dev_dbg(dev,
+			    "Trusted Computing capability qword not valid!\n");
+		return;
+	}
+
+	if (trusted_cap & (1 << 0))
+		dev->flags |= ATA_DFLAG_TRUSTED;
+}
+
+/**
+ *	ata_dev_configure - Configure the specified ATA/ATAPI device
+ *	@dev: Target device to configure
+ *
+ *	Configure @dev according to @dev->id.  Generic and low-level
+ *	driver specific fixups are also applied.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise
+ */
+int ata_dev_configure(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+	const u16 *id = dev->id;
+	unsigned long xfer_mask;
+	unsigned int err_mask;
+	char revbuf[7];		/* XYZ-99\0 */
+	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
+	char modelbuf[ATA_ID_PROD_LEN+1];
+	int rc;
+
+	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
+		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
+		return 0;
+	}
+
+	if (ata_msg_probe(ap))
+		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
+
+	/* set horkage */
+	dev->horkage |= ata_dev_blacklisted(dev);
+	ata_force_horkage(dev);
+
+	if (dev->horkage & ATA_HORKAGE_DISABLE) {
+		ata_dev_info(dev, "unsupported device, disabling\n");
+		ata_dev_disable(dev);
+		return 0;
+	}
+
+	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
+	    dev->class == ATA_DEV_ATAPI) {
+		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
+			     atapi_enabled ? "not supported with this driver"
+			     : "disabled");
+		ata_dev_disable(dev);
+		return 0;
+	}
+
+	rc = ata_do_link_spd_horkage(dev);
+	if (rc)
+		return rc;
+
+	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
+	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
+	if (ap->flags & ATA_FLAG_NO_LPM)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
+	if (dev->horkage & ATA_HORKAGE_NOLPM) {
+		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+	}
+
+	/* let ACPI work its magic */
+	rc = ata_acpi_on_devcfg(dev);
+	if (rc)
+		return rc;
+
+	/* massage HPA, do it early as it might change IDENTIFY data */
+	rc = ata_hpa_resize(dev);
+	if (rc)
+		return rc;
+
+	/* print device capabilities */
+	if (ata_msg_probe(ap))
+		ata_dev_dbg(dev,
+			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
+			    __func__,
+			    id[49], id[82], id[83], id[84],
+			    id[85], id[86], id[87], id[88]);
+
+	/* initialize to-be-configured parameters */
+	dev->flags &= ~ATA_DFLAG_CFG_MASK;
+	dev->max_sectors = 0;
+	dev->cdb_len = 0;
+	dev->n_sectors = 0;
+	dev->cylinders = 0;
+	dev->heads = 0;
+	dev->sectors = 0;
+	dev->multi_count = 0;
+
+	/*
+	 * common ATA, ATAPI feature tests
+	 */
+
+	/* find max transfer mode; for printk only */
+	xfer_mask = ata_id_xfermask(id);
+
+	if (ata_msg_probe(ap))
+		ata_dump_id(id);
+
+	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
+	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
+			sizeof(fwrevbuf));
+
+	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
+			sizeof(modelbuf));
+
+	/* ATA-specific feature tests */
+	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
+		if (ata_id_is_cfa(id)) {
+			/* CPRM may make this media unusable */
+			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
+				ata_dev_warn(dev,
+	"supports DRM functions and may not be fully accessible\n");
+			snprintf(revbuf, 7, "CFA");
+		} else {
+			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+			/* Warn the user if the device has TPM extensions */
+			if (ata_id_has_tpm(id))
+				ata_dev_warn(dev,
+	"supports DRM functions and may not be fully accessible\n");
+		}
+
+		dev->n_sectors = ata_id_n_sectors(id);
+
+		/* get current R/W Multiple count setting */
+		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
+			unsigned int max = dev->id[47] & 0xff;
+			unsigned int cnt = dev->id[59] & 0xff;
+			/* only recognize/allow powers of two here */
+			if (is_power_of_2(max) && is_power_of_2(cnt))
+				if (cnt <= max)
+					dev->multi_count = cnt;
+		}
+
+		if (ata_id_has_lba(id)) {
+			const char *lba_desc;
+			char ncq_desc[24];
+
+			lba_desc = "LBA";
+			dev->flags |= ATA_DFLAG_LBA;
+			if (ata_id_has_lba48(id)) {
+				dev->flags |= ATA_DFLAG_LBA48;
+				lba_desc = "LBA48";
+
+				if (dev->n_sectors >= (1UL << 28) &&
+				    ata_id_has_flush_ext(id))
+					dev->flags |= ATA_DFLAG_FLUSH_EXT;
+			}
+
+			/* config NCQ */
+			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+			if (rc)
+				return rc;
+
+			/* print device info to dmesg */
+			if (ata_msg_drv(ap) && print_info) {
+				ata_dev_info(dev, "%s: %s, %s, max %s\n",
+					     revbuf, modelbuf, fwrevbuf,
+					     ata_mode_string(xfer_mask));
+				ata_dev_info(dev,
+					     "%llu sectors, multi %u: %s %s\n",
+					(unsigned long long)dev->n_sectors,
+					dev->multi_count, lba_desc, ncq_desc);
+			}
+		} else {
+			/* CHS */
+
+			/* Default translation */
+			dev->cylinders	= id[1];
+			dev->heads	= id[3];
+			dev->sectors	= id[6];
+
+			if (ata_id_current_chs_valid(id)) {
+				/* Current CHS translation is valid. */
+				dev->cylinders = id[54];
+				dev->heads     = id[55];
+				dev->sectors   = id[56];
+			}
+
+			/* print device info to dmesg */
+			if (ata_msg_drv(ap) && print_info) {
+				ata_dev_info(dev, "%s: %s, %s, max %s\n",
+					     revbuf,	modelbuf, fwrevbuf,
+					     ata_mode_string(xfer_mask));
+				ata_dev_info(dev,
+					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
+					     (unsigned long long)dev->n_sectors,
+					     dev->multi_count, dev->cylinders,
+					     dev->heads, dev->sectors);
+			}
+		}
+
+		/* Check and mark DevSlp capability. Get DevSlp timing variables
+		 * from SATA Settings page of Identify Device Data Log.
+		 */
+		if (ata_id_has_devslp(dev->id)) {
+			u8 *sata_setting = ap->sector_buf;
+			int i, j;
+
+			dev->flags |= ATA_DFLAG_DEVSLP;
+			err_mask = ata_read_log_page(dev,
+						     ATA_LOG_IDENTIFY_DEVICE,
+						     ATA_LOG_SATA_SETTINGS,
+						     sata_setting,
+						     1);
+			if (err_mask)
+				ata_dev_dbg(dev,
+					    "failed to get Identify Device Data, Emask 0x%x\n",
+					    err_mask);
+			else
+				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+					j = ATA_LOG_DEVSLP_OFFSET + i;
+					dev->devslp_timing[i] = sata_setting[j];
+				}
+		}
+		ata_dev_config_sense_reporting(dev);
+		ata_dev_config_zac(dev);
+		ata_dev_config_trusted(dev);
+		dev->cdb_len = 32;
+	}
+
+	/* ATAPI-specific feature tests */
+	else if (dev->class == ATA_DEV_ATAPI) {
+		const char *cdb_intr_string = "";
+		const char *atapi_an_string = "";
+		const char *dma_dir_string = "";
+		u32 sntf;
+
+		rc = atapi_cdb_len(id);
+		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
+			if (ata_msg_warn(ap))
+				ata_dev_warn(dev, "unsupported CDB len\n");
+			rc = -EINVAL;
+			goto err_out_nosup;
+		}
+		dev->cdb_len = (unsigned int) rc;
+
+		/* Enable ATAPI AN if both the host and device have
+		 * the support.  If PMP is attached, SNTF is required
+		 * to enable ATAPI AN to discern between PHY status
+		 * changed notifications and ATAPI ANs.
+		 */
+		if (atapi_an &&
+		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+		    (!sata_pmp_attached(ap) ||
+		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
+			/* issue SET feature command to turn this on */
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_ENABLE, SATA_AN);
+			if (err_mask)
+				ata_dev_err(dev,
+					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
+					    err_mask);
+			else {
+				dev->flags |= ATA_DFLAG_AN;
+				atapi_an_string = ", ATAPI AN";
+			}
+		}
+
+		if (ata_id_cdb_intr(dev->id)) {
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+			cdb_intr_string = ", CDB intr";
+		}
+
+		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) ||
+		    atapi_id_dmadir(dev->id)) {
+			dev->flags |= ATA_DFLAG_DMADIR;
+			dma_dir_string = ", DMADIR";
+		}
+
+		if (ata_id_has_da(dev->id)) {
+			dev->flags |= ATA_DFLAG_DA;
+			zpodd_init(dev);
+		}
+
+		/* print device info to dmesg */
+		if (ata_msg_drv(ap) && print_info)
+			ata_dev_info(dev,
+				     "ATAPI: %s, %s, max %s%s%s%s\n",
+				     modelbuf, fwrevbuf,
+				     ata_mode_string(xfer_mask),
+				     cdb_intr_string, atapi_an_string,
+				     dma_dir_string);
+	}
+
+	/* determine max_sectors */
+	dev->max_sectors = ATA_MAX_SECTORS;
+	if (dev->flags & ATA_DFLAG_LBA48)
+		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
+	/* Limit PATA drive on SATA cable bridge transfers to udma5,
+	   200 sectors */
+	if (ata_dev_knobble(dev)) {
+		if (ata_msg_drv(ap) && print_info)
+			ata_dev_info(dev, "applying bridge limits\n");
+		dev->udma_mask &= ATA_UDMA5;
+		dev->max_sectors = ATA_MAX_SECTORS;
+	}
+
+	if ((dev->class == ATA_DEV_ATAPI) &&
+	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
+		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
+		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+	}
+
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
+		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+					 dev->max_sectors);
+
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+					 dev->max_sectors);
+
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
+	if (ap->ops->dev_config)
+		ap->ops->dev_config(dev);
+
+	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+		/* Let the user know. We don't want to disallow opens for
+		   rescue purposes, or in case the vendor is just a blithering
+		   idiot. Do this after the dev_config call as some controllers
+		   with buggy firmware may want to avoid reporting false device
+		   bugs */
+
+		if (print_info) {
+			ata_dev_warn(dev,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+			ata_dev_warn(dev,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+		}
+	}
+
+	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
+		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
+		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
+	}
+
+	return 0;
+
+err_out_nosup:
+	if (ata_msg_probe(ap))
+		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
+	return rc;
+}
+
+/**
+ *	ata_cable_40wire	-	return 40 wire cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which want to hardwire 40 wire cable
+ *	detection.
+ */
+
+int ata_cable_40wire(struct ata_port *ap)
+{
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	ata_cable_80wire	-	return 80 wire cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which want to hardwire 80 wire cable
+ *	detection.
+ */
+
+int ata_cable_80wire(struct ata_port *ap)
+{
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	ata_cable_unknown	-	return unknown PATA cable.
+ *	@ap: port
+ *
+ *	Helper method for drivers which have no PATA cable detection.
+ */
+
+int ata_cable_unknown(struct ata_port *ap)
+{
+	return ATA_CBL_PATA_UNK;
+}
+
+/**
+ *	ata_cable_ignore	-	return ignored PATA cable.
+ *	@ap: port
+ *
+ *	Helper method for drivers which don't use cable type to limit
+ *	transfer mode.
+ */
+int ata_cable_ignore(struct ata_port *ap)
+{
+	return ATA_CBL_PATA_IGN;
+}
+
+/**
+ *	ata_cable_sata	-	return SATA cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which have SATA cables
+ */
+
+int ata_cable_sata(struct ata_port *ap)
+{
+	return ATA_CBL_SATA;
+}
+
+/**
+ *	ata_bus_probe - Reset and probe ATA bus
+ *	@ap: Bus to probe
+ *
+ *	Master ATA bus probing function.  Initiates a hardware-dependent
+ *	bus reset, then attempts to identify any devices found on
+ *	the bus.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno otherwise.
+ */
+
+int ata_bus_probe(struct ata_port *ap)
+{
+	unsigned int classes[ATA_MAX_DEVICES];
+	int tries[ATA_MAX_DEVICES];
+	int rc;
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, &ap->link, ALL)
+		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
+
+ retry:
+	ata_for_each_dev(dev, &ap->link, ALL) {
+		/* If we issue an SRST then an ATA drive (not ATAPI)
+		 * may change configuration and be in PIO0 timing. If
+		 * we do a hard reset (or are coming from power on)
+		 * this is true for ATA or ATAPI. Until we've set a
+		 * suitable controller mode we should not touch the
+		 * bus as we may be talking too fast.
+		 */
+		dev->pio_mode = XFER_PIO_0;
+		dev->dma_mode = 0xff;
+
+		/* If the controller has a pio mode setup function
+		 * then use it to set the chipset to rights. Don't
+		 * touch the DMA setup as that will be dealt with when
+		 * configuring devices.
+		 */
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
+
+	/* reset and determine device classes */
+	ap->ops->phy_reset(ap);
+
+	ata_for_each_dev(dev, &ap->link, ALL) {
+		if (dev->class != ATA_DEV_UNKNOWN)
+			classes[dev->devno] = dev->class;
+		else
+			classes[dev->devno] = ATA_DEV_NONE;
+
+		dev->class = ATA_DEV_UNKNOWN;
+	}
+
+	/* read IDENTIFY page and configure devices. We have to do the identify
+	   specific sequence bass-ackwards so that PDIAG- is released by
+	   the slave device */
+
+	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
+		if (tries[dev->devno])
+			dev->class = classes[dev->devno];
+
+		if (!ata_dev_enabled(dev))
+			continue;
+
+		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
+				     dev->id);
+		if (rc)
+			goto fail;
+	}
+
+	/* Now ask for the cable type as PDIAG- should have been released */
+	if (ap->ops->cable_detect)
+		ap->cbl = ap->ops->cable_detect(ap);
+
+	/* We may have SATA bridge glue hiding here irrespective of
+	 * the reported cable types and sensed types.  When SATA
+	 * drives indicate we have a bridge, we don't know which end
+	 * of the link the bridge is on, which is a problem.
+	 */
+	ata_for_each_dev(dev, &ap->link, ENABLED)
+		if (ata_id_is_sata(dev->id))
+			ap->cbl = ATA_CBL_SATA;
+
+	/* After the identify sequence we can now set up the devices. We do
+	   this in the normal order so that the user doesn't get confused */
+
+	ata_for_each_dev(dev, &ap->link, ENABLED) {
+		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
+		rc = ata_dev_configure(dev);
+		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
+		if (rc)
+			goto fail;
+	}
+
+	/* configure transfer mode */
+	rc = ata_set_mode(&ap->link, &dev);
+	if (rc)
+		goto fail;
+
+	ata_for_each_dev(dev, &ap->link, ENABLED)
+		return 0;
+
+	return -ENODEV;
+
+ fail:
+	tries[dev->devno]--;
+
+	switch (rc) {
+	case -EINVAL:
+		/* eeek, something went very wrong, give up */
+		tries[dev->devno] = 0;
+		break;
+
+	case -ENODEV:
+		/* give it just one more chance */
+		tries[dev->devno] = min(tries[dev->devno], 1);
+		/* fall through */
+	case -EIO:
+		if (tries[dev->devno] == 1) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(&ap->link, 0);
+			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
+	}
+
+	if (!tries[dev->devno])
+		ata_dev_disable(dev);
+
+	goto retry;
+}
+
+/**
+ *	sata_print_link_status - Print SATA link status
+ *	@link: SATA link to printk link status about
+ *
+ *	This function prints link speed and status of a SATA link.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void sata_print_link_status(struct ata_link *link)
+{
+	u32 sstatus, scontrol, tmp;
+
+	if (sata_scr_read(link, SCR_STATUS, &sstatus))
+		return;
+	sata_scr_read(link, SCR_CONTROL, &scontrol);
+
+	if (ata_phys_link_online(link)) {
+		tmp = (sstatus >> 4) & 0xf;
+		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
+			      sata_spd_string(tmp), sstatus, scontrol);
+	} else {
+		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
+			      sstatus, scontrol);
+	}
+}
+
+/**
+ *	ata_dev_pair		-	return other device on cable
+ *	@adev: device
+ *
+ *	Obtain the other device on the same cable; if none is
+ *	present, NULL is returned.
+ */
+
+struct ata_device *ata_dev_pair(struct ata_device *adev)
+{
+	struct ata_link *link = adev->link;
+	struct ata_device *pair = &link->device[1 - adev->devno];
+	if (!ata_dev_enabled(pair))
+		return NULL;
+	return pair;
+}
+
+/**
+ *	sata_down_spd_limit - adjust SATA spd limit downward
+ *	@link: Link to adjust SATA spd limit for
+ *	@spd_limit: Additional limit
+ *
+ *	Adjust SATA spd limit of @link downward.  Note that this
+ *	function only adjusts the limit.  The change must be applied
+ *	using sata_set_spd().
+ *
+ *	If @spd_limit is non-zero, the speed is limited to @spd_limit or
+ *	lower, if such a speed is supported.  If @spd_limit is slower
+ *	than any supported speed, only the lowest supported speed is
+ *	allowed.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+	u32 sstatus, spd, mask;
+	int rc, bit;
+
+	if (!sata_scr_valid(link))
+		return -EOPNOTSUPP;
+
+	/* If SCR can be read, use it to determine the current SPD.
+	 * If not, use cached value in link->sata_spd.
+	 */
+	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+	if (rc == 0 && ata_sstatus_online(sstatus))
+		spd = (sstatus >> 4) & 0xf;
+	else
+		spd = link->sata_spd;
+
+	mask = link->sata_spd_limit;
+	if (mask <= 1)
+		return -EINVAL;
+
+	/* unconditionally mask off the highest bit */
+	bit = fls(mask) - 1;
+	mask &= ~(1 << bit);
+
+	/*
+	 * Mask off all speeds higher than or equal to the current one.  At
+	 * this point, if current SPD is not available and we previously
+	 * recorded the link speed from SStatus, the driver has already
+	 * masked off the highest bit so mask should already be 1 or 0.
+	 * Otherwise, we should not force 1.5Gbps on a link where we have
+	 * not previously recorded speed from SStatus.  Just return in this
+	 * case.
+	 */
+	if (spd > 1)
+		mask &= (1 << (spd - 1)) - 1;
+	else
+		return -EINVAL;
+
+	/* were we already at the bottom? */
+	if (!mask)
+		return -EINVAL;
+
+	if (spd_limit) {
+		if (mask & ((1 << spd_limit) - 1))
+			mask &= (1 << spd_limit) - 1;
+		else {
+			bit = ffs(mask) - 1;
+			mask = 1 << bit;
+		}
+	}
+
+	link->sata_spd_limit = mask;
+
+	ata_link_warn(link, "limiting SATA link speed to %s\n",
+		      sata_spd_string(fls(mask)));
+
+	return 0;
+}
+
+static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
+{
+	struct ata_link *host_link = &link->ap->link;
+	u32 limit, target, spd;
+
+	limit = link->sata_spd_limit;
+
+	/* Don't configure downstream link faster than upstream link.
+	 * It doesn't speed up anything and some PMPs choke on such
+	 * configuration.
+	 */
+	if (!ata_is_host_link(link) && host_link->sata_spd)
+		limit &= (1 << host_link->sata_spd) - 1;
+
+	if (limit == UINT_MAX)
+		target = 0;
+	else
+		target = fls(limit);
+
+	spd = (*scontrol >> 4) & 0xf;
+	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
+
+	return spd != target;
+}
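+
+/*
+ * Background for the helper above: SControl bits 7:4 (SPD) hold the
+ * allowed-speed limit, where 0 means no restriction, 1 limits the link
+ * to 1.5 Gbps, 2 to 3.0 Gbps, and so on; hence fls(limit) is written
+ * into that nibble.
+ */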
+
+/**
+ *	sata_set_spd_needed - is SATA spd configuration needed
+ *	@link: Link in question
+ *
+ *	Test whether the spd limit in SControl matches
+ *	@link->sata_spd_limit.  This function is used to determine
+ *	whether hardreset is necessary to apply SATA spd
+ *	configuration.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if SATA spd configuration is needed, 0 otherwise.
+ */
+static int sata_set_spd_needed(struct ata_link *link)
+{
+	u32 scontrol;
+
+	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+		return 1;
+
+	return __sata_set_spd_needed(link, &scontrol);
+}
+
+/**
+ *	sata_set_spd - set SATA spd according to spd limit
+ *	@link: Link to set SATA spd for
+ *
+ *	Set SATA spd of @link according to sata_spd_limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 if spd doesn't need to be changed, 1 if spd has been
+ *	changed.  Negative errno if SCR registers are inaccessible.
+ */
+int sata_set_spd(struct ata_link *link)
+{
+	u32 scontrol;
+	int rc;
+
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+		return rc;
+
+	if (!__sata_set_spd_needed(link, &scontrol))
+		return 0;
+
+	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+		return rc;
+
+	return 1;
+}
+
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
+ */
+
+static const struct ata_timing ata_timing[] = {
+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
+	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
+	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
+	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
+	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
+	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
+	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
+	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
+
+	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
+	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
+	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
+
+	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
+	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
+	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
+	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
+	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
+
+/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
+	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
+	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
+	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
+	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
+	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
+	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
+	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
+
+	{ 0xFF }
+};
+
+#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
+#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
+
+static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
+{
+	q->setup	= EZ(t->setup,       T);
+	q->act8b	= EZ(t->act8b,       T);
+	q->rec8b	= EZ(t->rec8b,       T);
+	q->cyc8b	= EZ(t->cyc8b,       T);
+	q->active	= EZ(t->active,      T);
+	q->recover	= EZ(t->recover,     T);
+	q->dmack_hold	= EZ(t->dmack_hold,  T);
+	q->cycle	= EZ(t->cycle,       T);
+	q->udma		= EZ(t->udma,       UT);
+}
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+		      struct ata_timing *m, unsigned int what)
+{
+	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
+	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+}
+
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+{
+	const struct ata_timing *t = ata_timing;
+
+	while (xfer_mode > t->mode)
+		t++;
+
+	if (xfer_mode == t->mode)
+		return t;
+
+	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+			__func__, xfer_mode);
+
+	return NULL;
+}
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+		       struct ata_timing *t, int T, int UT)
+{
+	const u16 *id = adev->id;
+	const struct ata_timing *s;
+	struct ata_timing p;
+
+	/*
+	 * Find the mode.
+	 */
+
+	if (!(s = ata_timing_find_mode(speed)))
+		return -EINVAL;
+
+	memcpy(t, s, sizeof(*s));
+
+	/*
+	 * If the drive is an EIDE drive, it can tell us it needs extended
+	 * PIO/MW_DMA cycle timing.
+	 */
+
+	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
+		memset(&p, 0, sizeof(p));
+
+		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
+			if (speed <= XFER_PIO_2)
+				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+			else if ((speed <= XFER_PIO_4) ||
+				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
+
+		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+	}
+
+	/*
+	 * Convert the timing to bus clock counts.
+	 */
+
+	ata_timing_quantize(t, t, T, UT);
+
+	/*
+	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+	 * S.M.A.R.T. and some other commands.  We have to ensure that the
+	 * DMA cycle timing is no faster than the fastest PIO timing.
+	 */
+
+	if (speed > XFER_PIO_6) {
+		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+	}
+
+	/*
+	 * Lengthen active & recovery time so that cycle time is correct.
+	 */
+
+	if (t->act8b + t->rec8b < t->cyc8b) {
+		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+		t->rec8b = t->cyc8b - t->act8b;
+	}
+
+	if (t->active + t->recover < t->cycle) {
+		t->active += (t->cycle - (t->active + t->recover)) / 2;
+		t->recover = t->cycle - t->active;
+	}
+
+	/* In a few cases quantisation may produce enough errors to
+	   leave t->cycle too low for the sum of active and recovery.
+	   If so, we must correct this. */
+	if (t->active + t->recover > t->cycle)
+		t->cycle = t->active + t->recover;
+
+	return 0;
+}
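+
+/*
+ * Worked example (illustrative): for XFER_PIO_0 with T = 30000 ps,
+ * quantization gives cycle = 20, active = 6 and recover = 5 clocks.
+ * Since 6 + 5 < 20, the lengthening step above adds (20 - 11) / 2 = 4
+ * clocks to active (making it 10) and sets recover = 20 - 10 = 10, so
+ * that active + recover again fills the whole cycle.
+ */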
+
+/**
+ *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
+ *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
+ *	@cycle: cycle duration in ns
+ *
+ *	Return matching xfer mode for @cycle.  The returned mode is of
+ *	the transfer type specified by @xfer_shift.  If @cycle is too
+ *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
+ *	than the fastest known mode, the fastest mode is returned.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Matching xfer_mode, 0xff if no match found.
+ */
+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
+{
+	u8 base_mode = 0xff, last_mode = 0xff;
+	const struct ata_xfer_ent *ent;
+	const struct ata_timing *t;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (ent->shift == xfer_shift)
+			base_mode = ent->base;
+
+	for (t = ata_timing_find_mode(base_mode);
+	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
+		unsigned short this_cycle;
+
+		switch (xfer_shift) {
+		case ATA_SHIFT_PIO:
+		case ATA_SHIFT_MWDMA:
+			this_cycle = t->cycle;
+			break;
+		case ATA_SHIFT_UDMA:
+			this_cycle = t->udma;
+			break;
+		default:
+			return 0xff;
+		}
+
+		if (cycle > this_cycle)
+			break;
+
+		last_mode = t->mode;
+	}
+
+	return last_mode;
+}
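+
+/*
+ * Example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60)
+ * walks the UDMA rows of ata_timing[] above and returns XFER_UDMA_2,
+ * the fastest mode whose 60 ns cycle is still no shorter than the
+ * requested duration.  Asking for 10 ns yields XFER_UDMA_6 (the
+ * fastest known mode), while anything above 120 ns returns 0xff.
+ */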
+
+/**
+ *	ata_down_xfermask_limit - adjust dev xfer masks downward
+ *	@dev: Device to adjust xfer masks
+ *	@sel: ATA_DNXFER_* selector
+ *
+ *	Adjust xfer masks of @dev downward.  Note that this function
+ *	does not apply the change.  Invoking ata_set_mode() afterwards
+ *	will apply the limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
+{
+	char buf[32];
+	unsigned long orig_mask, xfer_mask;
+	unsigned long pio_mask, mwdma_mask, udma_mask;
+	int quiet, highbit;
+
+	quiet = !!(sel & ATA_DNXFER_QUIET);
+	sel &= ~ATA_DNXFER_QUIET;
+
+	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
+						  dev->mwdma_mask,
+						  dev->udma_mask);
+	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
+
+	switch (sel) {
+	case ATA_DNXFER_PIO:
+		highbit = fls(pio_mask) - 1;
+		pio_mask &= ~(1 << highbit);
+		break;
+
+	case ATA_DNXFER_DMA:
+		if (udma_mask) {
+			highbit = fls(udma_mask) - 1;
+			udma_mask &= ~(1 << highbit);
+			if (!udma_mask)
+				return -ENOENT;
+		} else if (mwdma_mask) {
+			highbit = fls(mwdma_mask) - 1;
+			mwdma_mask &= ~(1 << highbit);
+			if (!mwdma_mask)
+				return -ENOENT;
+		}
+		break;
+
+	case ATA_DNXFER_40C:
+		udma_mask &= ATA_UDMA_MASK_40C;
+		break;
+
+	case ATA_DNXFER_FORCE_PIO0:
+		pio_mask &= 1;
+		/* fall through */
+	case ATA_DNXFER_FORCE_PIO:
+		mwdma_mask = 0;
+		udma_mask = 0;
+		break;
+
+	default:
+		BUG();
+	}
+
+	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+
+	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
+		return -ENOENT;
+
+	if (!quiet) {
+		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+			snprintf(buf, sizeof(buf), "%s:%s",
+				 ata_mode_string(xfer_mask),
+				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
+		else
+			snprintf(buf, sizeof(buf), "%s",
+				 ata_mode_string(xfer_mask));
+
+		ata_dev_warn(dev, "limiting speed to %s\n", buf);
+	}
+
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+			    &dev->udma_mask);
+
+	return 0;
+}
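+
+/*
+ * Example (illustrative): for a device whose udma_mask is 0x3f
+ * (UDMA/16..UDMA/100), ATA_DNXFER_DMA clears the highest bit, leaving
+ * 0x1f and capping the device at UDMA/66.  The caller then invokes
+ * ata_set_mode() to actually program the reduced mode.
+ */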
+
+static int ata_dev_set_mode(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
+	const char *dev_err_whine = "";
+	int ign_dev_err = 0;
+	unsigned int err_mask = 0;
+	int rc;
+
+	dev->flags &= ~ATA_DFLAG_PIO;
+	if (dev->xfer_shift == ATA_SHIFT_PIO)
+		dev->flags |= ATA_DFLAG_PIO;
+
+	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
+		dev_err_whine = " (SET_XFERMODE skipped)";
+	else {
+		if (nosetxfer)
+			ata_dev_warn(dev,
+				     "NOSETXFER but PATA detected - can't "
+				     "skip SETXFER, might malfunction\n");
+		err_mask = ata_dev_set_xfermode(dev);
+	}
+
+	if (err_mask & ~AC_ERR_DEV)
+		goto fail;
+
+	/* revalidate */
+	ehc->i.flags |= ATA_EHI_POST_SETMODE;
+	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
+	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
+	if (rc)
+		return rc;
+
+	if (dev->xfer_shift == ATA_SHIFT_PIO) {
+		/* Old CFA may refuse this command, which is just fine */
+		if (ata_id_is_cfa(dev->id))
+			ign_dev_err = 1;
+		/* Catch several broken garbage emulations plus some
+		   pre-ATA devices */
+		if (ata_id_major_version(dev->id) == 0 &&
+					dev->pio_mode <= XFER_PIO_2)
+			ign_dev_err = 1;
+		/* Some very old devices and some bad newer ones fail
+		   any kind of SET_XFERMODE request but support PIO0-2
+		   timings and no IORDY */
+		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
+			ign_dev_err = 1;
+	}
+	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
+	   Don't fail an MWDMA0 set if the device indicates it is
+	   already in MWDMA0 */
+	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
+	    dev->dma_mode == XFER_MW_DMA_0 &&
+	    (dev->id[63] >> 8) & 1)
+		ign_dev_err = 1;
+
+	/* if the device is actually configured correctly, ignore dev err */
+	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
+		ign_dev_err = 1;
+
+	if (err_mask & AC_ERR_DEV) {
+		if (!ign_dev_err)
+			goto fail;
+		else
+			dev_err_whine = " (device error ignored)";
+	}
+
+	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
+		dev->xfer_shift, (int)dev->xfer_mode);
+
+	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
+	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
+		ata_dev_info(dev, "configured for %s%s\n",
+			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+			     dev_err_whine);
+
+	return 0;
+
+ fail:
+	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
+	return -EIO;
+}
+
+/**
+ *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ *	@link: link on which timings will be programmed
+ *	@r_failed_dev: out parameter for failed device
+ *
+ *	Standard implementation of the function used to tune and set
+ *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+ *	ata_dev_set_mode() fails, pointer to the failing device is
+ *	returned in @r_failed_dev.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+
+int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+	int rc = 0, used_dma = 0, found = 0;
+
+	/* step 1: calculate xfer_mask */
+	ata_for_each_dev(dev, link, ENABLED) {
+		unsigned long pio_mask, dma_mask;
+		unsigned int mode_mask;
+
+		mode_mask = ATA_DMA_MASK_ATA;
+		if (dev->class == ATA_DEV_ATAPI)
+			mode_mask = ATA_DMA_MASK_ATAPI;
+		else if (ata_id_is_cfa(dev->id))
+			mode_mask = ATA_DMA_MASK_CFA;
+
+		ata_dev_xfermask(dev);
+		ata_force_xfermask(dev);
+
+		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
+
+		if (libata_dma_mask & mode_mask)
+			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
+						     dev->udma_mask);
+		else
+			dma_mask = 0;
+
+		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
+		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
+
+		found = 1;
+		if (ata_dma_enabled(dev))
+			used_dma = 1;
+	}
+	if (!found)
+		goto out;
+
+	/* step 2: always set host PIO timings */
+	ata_for_each_dev(dev, link, ENABLED) {
+		if (dev->pio_mode == 0xff) {
+			ata_dev_warn(dev, "no PIO support\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		dev->xfer_mode = dev->pio_mode;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
+
+	/* step 3: set host DMA timings */
+	ata_for_each_dev(dev, link, ENABLED) {
+		if (!ata_dma_enabled(dev))
+			continue;
+
+		dev->xfer_mode = dev->dma_mode;
+		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
+		if (ap->ops->set_dmamode)
+			ap->ops->set_dmamode(ap, dev);
+	}
+
+	/* step 4: update devices' xfer mode */
+	ata_for_each_dev(dev, link, ENABLED) {
+		rc = ata_dev_set_mode(dev);
+		if (rc)
+			goto out;
+	}
+
+	/* Record simplex status. If we selected DMA then the other
+	 * host channels are not permitted to do so.
+	 */
+	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
+		ap->host->simplex_claimed = ap;
+
+ out:
+	if (rc)
+		*r_failed_dev = dev;
+	return rc;
+}
+
+/**
+ *	ata_wait_ready - wait for link to become ready
+ *	@link: link to be waited on
+ *	@deadline: deadline jiffies for the operation
+ *	@check_ready: callback to check link readiness
+ *
+ *	Wait for @link to become ready.  @check_ready should return a
+ *	positive number if @link is ready, 0 if it isn't, -ENODEV if
+ *	the link doesn't seem to be occupied, and another errno for
+ *	other error conditions.
+ *
+ *	Transient -ENODEV conditions are allowed for
+ *	ATA_TMOUT_FF_WAIT.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	0 if @link is ready before @deadline; otherwise, -errno.
+ */
+int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+		   int (*check_ready)(struct ata_link *link))
+{
+	unsigned long start = jiffies;
+	unsigned long nodev_deadline;
+	int warned = 0;
+
+	/* choose which 0xff timeout to use, read comment in libata.h */
+	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+	else
+		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+
+	/* Slave readiness can't be tested separately from the master.
+	 * In a master/slave emulation configuration, this function
+	 * should be called only on the master; it will handle both
+	 * master and slave.
+	 */
+	WARN_ON(link == link->ap->slave_link);
+
+	if (time_after(nodev_deadline, deadline))
+		nodev_deadline = deadline;
+
+	while (1) {
+		unsigned long now = jiffies;
+		int ready, tmp;
+
+		ready = tmp = check_ready(link);
+		if (ready > 0)
+			return 0;
+
+		/*
+		 * -ENODEV could be transient.  Ignore -ENODEV if link
+		 * is online.  Also, some SATA devices take a long
+		 * time to clear 0xff after reset.  Wait for
+		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+		 * offline.
+		 *
+		 * Note that some PATA controllers (pata_ali) explode
+		 * if status register is read more than once when
+		 * there's no device attached.
+		 */
+		if (ready == -ENODEV) {
+			if (ata_link_online(link))
+				ready = 0;
+			else if ((link->ap->flags & ATA_FLAG_SATA) &&
+				 !ata_link_offline(link) &&
+				 time_before(now, nodev_deadline))
+				ready = 0;
+		}
+
+		if (ready)
+			return ready;
+		if (time_after(now, deadline))
+			return -EBUSY;
+
+		if (!warned && time_after(now, start + 5 * HZ) &&
+		    (deadline - now > 3 * HZ)) {
+			ata_link_warn(link,
+				"link is slow to respond, please be patient "
+				"(ready=%d)\n", tmp);
+			warned = 1;
+		}
+
+		ata_msleep(link->ap, 50);
+	}
+}
+
+/**
+ *	ata_wait_after_reset - wait for link to become ready after reset
+ *	@link: link to be waited on
+ *	@deadline: deadline jiffies for the operation
+ *	@check_ready: callback to check link readiness
+ *
+ *	Wait for @link to become ready after reset.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	0 if @link is ready before @deadline; otherwise, -errno.
+ */
+int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+				int (*check_ready)(struct ata_link *link))
+{
+	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
+
+	return ata_wait_ready(link, deadline, check_ready);
+}
+
+/**
+ *	sata_link_debounce - debounce SATA phy status
+ *	@link: ATA link to debounce SATA phy status for
+ *	@params: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Make sure SStatus of @link reaches a stable state, determined by
+ *	holding the same value where DET is not 1 for @duration polled
+ *	every @interval, before @timeout.  Timeout constrains the
+ *	beginning of the stable state.  Because DET gets stuck at 1 on
+ *	some controllers after hot unplugging, this function waits
+ *	until timeout and then returns 0 if DET is stable at 1.
+ *
+ *	@timeout is further limited by @deadline.  The sooner of the
+ *	two is used.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+		       unsigned long deadline)
+{
+	unsigned long interval = params[0];
+	unsigned long duration = params[1];
+	unsigned long last_jiffies, t;
+	u32 last, cur;
+	int rc;
+
+	t = ata_deadline(jiffies, params[2]);
+	if (time_before(t, deadline))
+		deadline = t;
+
+	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+		return rc;
+	cur &= 0xf;
+
+	last = cur;
+	last_jiffies = jiffies;
+
+	while (1) {
+		ata_msleep(link->ap, interval);
+		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+			return rc;
+		cur &= 0xf;
+
+		/* DET stable? */
+		if (cur == last) {
+			if (cur == 1 && time_before(jiffies, deadline))
+				continue;
+			if (time_after(jiffies,
+				       ata_deadline(last_jiffies, duration)))
+				return 0;
+			continue;
+		}
+
+		/* unstable, start over */
+		last = cur;
+		last_jiffies = jiffies;
+
+		/* Check deadline.  If debouncing failed, return
+		 * -EPIPE to tell upper layer to lower link speed.
+		 */
+		if (time_after(jiffies, deadline))
+			return -EPIPE;
+	}
+}
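+
+/*
+ * Example (illustrative): the stock debounce parameter sets defined by
+ * libata follow the { interval, duration, timeout } layout, e.g.
+ * sata_deb_timing_normal = { 5, 100, 2000 } polls SStatus every 5 ms,
+ * declares the link stable once DET has held the same non-1 value for
+ * 100 ms, and gives up 2000 ms after the first read.
+ */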
+
+/**
+ *	sata_link_resume - resume SATA link
+ *	@link: ATA link to resume SATA
+ *	@params: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Resume SATA phy @link and debounce it.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_link_resume(struct ata_link *link, const unsigned long *params,
+		     unsigned long deadline)
+{
+	int tries = ATA_LINK_RESUME_TRIES;
+	u32 scontrol, serror;
+	int rc;
+
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+		return rc;
+
+	/*
+	 * Writes to SControl sometimes get ignored on certain
+	 * controllers (ata_piix SIDPR).  Make sure DET actually is
+	 * cleared.
+	 */
+	do {
+		scontrol = (scontrol & 0x0f0) | 0x300;
+		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+			return rc;
+		/*
+		 * Some PHYs react badly if SStatus is pounded
+		 * immediately after resuming.  Delay 200ms before
+		 * debouncing.
+		 */
+		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
+			ata_msleep(link->ap, 200);
+
+		/* is SControl restored correctly? */
+		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+			return rc;
+	} while ((scontrol & 0xf0f) != 0x300 && --tries);
+
+	if ((scontrol & 0xf0f) != 0x300) {
+		ata_link_warn(link, "failed to resume link (SControl %X)\n",
+			     scontrol);
+		return 0;
+	}
+
+	if (tries < ATA_LINK_RESUME_TRIES)
+		ata_link_warn(link, "link resume succeeded after %d retries\n",
+			      ATA_LINK_RESUME_TRIES - tries);
+
+	if ((rc = sata_link_debounce(link, params, deadline)))
+		return rc;
+
+	/* clear SError, some PHYs require this even for SRST to work */
+	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+		rc = sata_scr_write(link, SCR_ERROR, serror);
+
+	return rc != -EINVAL ? rc : 0;
+}
+
+/**
+ *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
+ *	@link: ATA link to manipulate SControl for
+ *	@policy: LPM policy to configure
+ *	@spm_wakeup: initiate LPM transition to active state
+ *
+ *	Manipulate the IPM field of the SControl register of @link
+ *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
+ *	@spm_wakeup is %true, the SPM field is manipulated to wake up
+ *	the link.  This function also clears PHYRDY_CHG before
+ *	returning.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+		      bool spm_wakeup)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	bool woken_up = false;
+	u32 scontrol;
+	int rc;
+
+	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+	if (rc)
+		return rc;
+
+	switch (policy) {
+	case ATA_LPM_MAX_POWER:
+		/* disable all LPM transitions */
+		scontrol |= (0x7 << 8);
+		/* initiate transition to active state */
+		if (spm_wakeup) {
+			scontrol |= (0x4 << 12);
+			woken_up = true;
+		}
+		break;
+	case ATA_LPM_MED_POWER:
+		/* allow LPM to PARTIAL */
+		scontrol &= ~(0x1 << 8);
+		scontrol |= (0x6 << 8);
+		break;
+	case ATA_LPM_MED_POWER_WITH_DIPM:
+	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
+	case ATA_LPM_MIN_POWER:
+		if (ata_link_nr_enabled(link) > 0)
+			/* no restrictions on LPM transitions */
+			scontrol &= ~(0x7 << 8);
+		else {
+			/* empty port, power off */
+			scontrol &= ~0xf;
+			scontrol |= (0x1 << 2);
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+	if (rc)
+		return rc;
+
+	/* give the link time to transit out of LPM state */
+	if (woken_up)
+		msleep(10);
+
+	/* clear PHYRDY_CHG from SError */
+	ehc->i.serror &= ~SERR_PHYRDY_CHG;
+	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+}
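+
+/*
+ * Bit layout note: per the SATA SControl definition, DET lives in
+ * bits 3:0, SPD in bits 7:4, IPM in bits 11:8 and SPM in bits 15:12.
+ * The IPM writes above thus set or clear the "transitions to
+ * PARTIAL/SLUMBER/DevSleep disabled" bits (0x1/0x2/0x4 within the
+ * field), while (0x4 << 12) requests an SPM transition to active.
+ */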
+
+/**
+ *	ata_std_prereset - prepare for reset
+ *	@link: ATA link to be reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	@link is about to be reset.  Initialize it.  Failure from
+ *	prereset makes libata abort the whole reset sequence and give
+ *	up on that port, so prereset should be best-effort.  It does
+ *	its best to prepare for the reset sequence, but if things go
+ *	wrong, it should just whine, not fail.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_std_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
+	int rc;
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	/* if SATA, resume link */
+	if (ap->flags & ATA_FLAG_SATA) {
+		rc = sata_link_resume(link, timing, deadline);
+		/* whine about phy resume failure but proceed */
+		if (rc && rc != -EOPNOTSUPP)
+			ata_link_warn(link,
+				      "failed to resume link for reset (errno=%d)\n",
+				      rc);
+	}
+
+	/* no point in trying softreset on offline link */
+	if (ata_phys_link_offline(link))
+		ehc->i.action &= ~ATA_EH_SOFTRESET;
+
+	return 0;
+}
+
+/**
+ *	sata_link_hardreset - reset link via SATA phy reset
+ *	@link: link to reset
+ *	@timing: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
+ *	@online: optional out parameter indicating link onlineness
+ *	@check_ready: optional callback to check link readiness
+ *
+ *	SATA phy-reset @link using DET bits of SControl register.
+ *	After hardreset, link readiness is waited upon using
+ *	ata_wait_ready() if @check_ready is specified.  LLDs may omit
+ *	@check_ready and do the waiting themselves after this function
+ *	returns.  Device classification is the LLD's responsibility.
+ *
+ *	*@online is set to one iff reset succeeded and @link is online
+ *	after reset.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+			unsigned long deadline,
+			bool *online, int (*check_ready)(struct ata_link *))
+{
+	u32 scontrol;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	if (online)
+		*online = false;
+
+	if (sata_set_spd_needed(link)) {
+		/* SATA spec says nothing about how to reconfigure
+		 * spd.  To be on the safe side, turn off phy during
+		 * reconfiguration.  This works for at least ICH7 AHCI
+		 * and Sil3124.
+		 */
+		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+			goto out;
+
+		scontrol = (scontrol & 0x0f0) | 0x304;
+
+		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+			goto out;
+
+		sata_set_spd(link);
+	}
+
+	/* issue phy wake/reset */
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+		goto out;
+
+	scontrol = (scontrol & 0x0f0) | 0x301;
+
+	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
+		goto out;
+
+	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
+	 * 10.4.2 says at least 1 ms.
+	 */
+	ata_msleep(link->ap, 1);
+
+	/* bring link back */
+	rc = sata_link_resume(link, timing, deadline);
+	if (rc)
+		goto out;
+	/* if link is offline nothing more to do */
+	if (ata_phys_link_offline(link))
+		goto out;
+
+	/* Link is online.  From this point, -ENODEV too is an error. */
+	if (online)
+		*online = true;
+
+	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+		/* If PMP is supported, we have to do follow-up SRST.
+		 * Some PMPs don't send D2H Reg FIS after hardreset if
+		 * the first port is empty.  Wait only for
+		 * ATA_TMOUT_PMP_SRST_WAIT.
+		 */
+		if (check_ready) {
+			unsigned long pmp_deadline;
+
+			pmp_deadline = ata_deadline(jiffies,
+						    ATA_TMOUT_PMP_SRST_WAIT);
+			if (time_after(pmp_deadline, deadline))
+				pmp_deadline = deadline;
+			ata_wait_ready(link, pmp_deadline, check_ready);
+		}
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	rc = 0;
+	if (check_ready)
+		rc = ata_wait_ready(link, deadline, check_ready);
+ out:
+	if (rc && rc != -EAGAIN) {
+		/* online is set iff link is online && reset succeeded */
+		if (online)
+			*online = false;
+		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
+	}
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
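+
+/*
+ * Note: the SControl DET values written above come from the SATA
+ * spec: DET = 0x1 performs the interface initialization sequence
+ * (COMRESET), DET = 0x4 disables the phy and takes the link offline,
+ * and DET = 0x0 (restored via sata_link_resume()) resumes normal
+ * operation.
+ */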
+
+/**
+ *	sata_std_hardreset - COMRESET w/o waiting or classification
+ *	@link: link to reset
+ *	@class: resulting class of attached device
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Standard SATA COMRESET w/o waiting or classification.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 if link offline, -EAGAIN if link online, -errno on errors.
+ */
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+		       unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	bool online;
+	int rc;
+
+	/* do hardreset */
+	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+	return online ? -EAGAIN : rc;
+}
+
+/**
+ *	ata_std_postreset - standard postreset callback
+ *	@link: the target ata_link
+ *	@classes: classes of attached devices
+ *
+ *	This function is invoked after a successful reset.  Note that
+ *	the device might have been reset more than once using
+ *	different reset methods before postreset is invoked.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_std_postreset(struct ata_link *link, unsigned int *classes)
+{
+	u32 serror;
+
+	DPRINTK("ENTER\n");
+
+	/* reset complete, clear SError */
+	if (!sata_scr_read(link, SCR_ERROR, &serror))
+		sata_scr_write(link, SCR_ERROR, serror);
+
+	/* print link status */
+	sata_print_link_status(link);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_dev_same_device - Determine whether new ID matches configured device
+ *	@dev: device to compare against
+ *	@new_class: class of the new device
+ *	@new_id: IDENTIFY page of the new device
+ *
+ *	Compare @new_class and @new_id against @dev and determine
+ *	whether @dev is the device indicated by @new_class and
+ *	@new_id.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if @dev matches @new_class and @new_id, 0 otherwise.
+ */
+static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
+			       const u16 *new_id)
+{
+	const u16 *old_id = dev->id;
+	unsigned char model[2][ATA_ID_PROD_LEN + 1];
+	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
+
+	if (dev->class != new_class) {
+		ata_dev_info(dev, "class mismatch %d != %d\n",
+			     dev->class, new_class);
+		return 0;
+	}
+
+	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
+	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
+	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
+	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
+
+	if (strcmp(model[0], model[1])) {
+		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
+			     model[0], model[1]);
+		return 0;
+	}
+
+	if (strcmp(serial[0], serial[1])) {
+		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
+			     serial[0], serial[1]);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ *	ata_dev_reread_id - Re-read IDENTIFY data
+ *	@dev: target ATA device
+ *	@readid_flags: read ID flags
+ *
+ *	Re-read IDENTIFY page and make sure @dev is still attached to
+ *	the port.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
+{
+	unsigned int class = dev->class;
+	u16 *id = (void *)dev->link->ap->sector_buf;
+	int rc;
+
+	/* read ID data */
+	rc = ata_dev_read_id(dev, &class, readid_flags, id);
+	if (rc)
+		return rc;
+
+	/* is the device still there? */
+	if (!ata_dev_same_device(dev, class, id))
+		return -ENODEV;
+
+	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
+	return 0;
+}
+
+/**
+ *	ata_dev_revalidate - Revalidate ATA device
+ *	@dev: device to revalidate
+ *	@new_class: new class code
+ *	@readid_flags: read ID flags
+ *
+ *	Re-read IDENTIFY page, make sure @dev is still attached to the
+ *	port and reconfigure it according to the new IDENTIFY page.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+		       unsigned int readid_flags)
+{
+	u64 n_sectors = dev->n_sectors;
+	u64 n_native_sectors = dev->n_native_sectors;
+	int rc;
+
+	if (!ata_dev_enabled(dev))
+		return -ENODEV;
+
+	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
+	if (ata_class_enabled(new_class) &&
+	    new_class != ATA_DEV_ATA &&
+	    new_class != ATA_DEV_ATAPI &&
+	    new_class != ATA_DEV_ZAC &&
+	    new_class != ATA_DEV_SEMB) {
+		ata_dev_info(dev, "class mismatch %u != %u\n",
+			     dev->class, new_class);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	/* re-read ID */
+	rc = ata_dev_reread_id(dev, readid_flags);
+	if (rc)
+		goto fail;
+
+	/* configure device according to the new ID */
+	rc = ata_dev_configure(dev);
+	if (rc)
+		goto fail;
+
+	/* verify n_sectors hasn't changed */
+	if (dev->class != ATA_DEV_ATA || !n_sectors ||
+	    dev->n_sectors == n_sectors)
+		return 0;
+
+	/* n_sectors has changed */
+	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
+		     (unsigned long long)n_sectors,
+		     (unsigned long long)dev->n_sectors);
+
+	/*
+	 * Something could have caused HPA to be unlocked
+	 * involuntarily.  If n_native_sectors hasn't changed and the
+	 * new size matches it, keep the device.
+	 */
+	if (dev->n_native_sectors == n_native_sectors &&
+	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
+		ata_dev_warn(dev,
+			     "new n_sectors matches native, probably "
+			     "late HPA unlock, n_sectors updated\n");
+		/* use the larger n_sectors */
+		return 0;
+	}
+
+	/*
+	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
+	 * unlocking HPA in those cases.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
+	 */
+	if (dev->n_native_sectors == n_native_sectors &&
+	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
+	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
+		ata_dev_warn(dev,
+			     "old n_sectors matches native, probably "
+			     "late HPA lock, will try to unlock HPA\n");
+		/* try unlocking HPA */
+		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
+		rc = -EIO;
+	} else
+		rc = -ENODEV;
+
+	/* restore original n_[native_]sectors and fail */
+	dev->n_native_sectors = n_native_sectors;
+	dev->n_sectors = n_sectors;
+ fail:
+	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
+	return rc;
+}
+
+struct ata_blacklist_entry {
+	const char *model_num;
+	const char *model_rev;
+	unsigned long horkage;
+};
+
+static const struct ata_blacklist_entry ata_device_blacklist [] = {
+	/* Devices with DMA related problems under Linux */
+	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
+	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
+	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
+	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
+	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
+	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
+	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
+	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
+	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
+	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
+	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
+	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
+	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
+	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
+	/* Odd clown on sil3726/4726 PMPs */
+	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
+
+	/* Weird ATAPI devices */
+	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
+	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
+	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
+	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
+
+	/*
+	 * Causes silent data corruption with higher max sects.
+	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+	 */
+	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
+
+	/*
+	 * These devices time out with higher max sects.
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+	 */
+	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+
+	/* Devices we expect to fail diagnostics */
+
+	/* Devices where NCQ should be avoided */
+	/* NCQ is slow */
+	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
+	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
+	/* http://thread.gmane.org/gmane.linux.ide/14907 */
+	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
+	/* NCQ is broken */
+	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
+	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
+	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
+	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
+
+	/* Seagate NCQ + FLUSH CACHE firmware bug */
+	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+						ATA_HORKAGE_FIRMWARE_WARN },
+
+	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+						ATA_HORKAGE_FIRMWARE_WARN },
+
+	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+						ATA_HORKAGE_FIRMWARE_WARN },
+
+	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+						ATA_HORKAGE_FIRMWARE_WARN },
+
+	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
+	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
+
+	/* Blacklist entries taken from Silicon Image 3124/3132
+	   Windows driver .inf file - also several Linux problem reports */
+	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
+	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
+	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
+
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
+
+	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
+	   SD7SN6S256G and SD8SN8U256G */
+	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
+
+	/* devices which puke on READ_NATIVE_MAX */
+	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
+	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
+
+	/* this one allows HPA unlocking but fails IOs on the area */
+	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
+
+	/* Devices which report 1 sector over size HPA */
+	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
+	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
+	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
+
+	/* Devices which get the IVB wrong */
+	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+	/* Maybe we should just blacklist TSSTcorp... */
+	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
+
+	/* Devices that do not need bridging limits applied */
+	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
+	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
+
+	/* Devices which aren't very happy with higher link speeds */
+	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
+	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
+
+	/*
+	 * Devices which choke on SETXFER.  Applies only if both the
+	 * device and controller are SATA.
+	 */
+	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+
+	/* Crucial BX100 SSD 500GB has broken LPM support */
+	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
+
+	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+	/* 512GB MX100 with newer firmware has only LPM issues */
+	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+
+	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
+	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM |
+						ATA_HORKAGE_NOLPM, },
+
+	/* These specific Samsung models/firmware-revs do not handle LPM well */
+	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
+
+	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+	/* devices that don't properly handle TRIM commands */
+	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
+
+	/*
+	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+	 * (Return Zero After Trim) flags in the ATA Command Set are
+	 * unreliable in the sense that they only define what happens if
+	 * the device successfully executed the DSM TRIM command. TRIM
+	 * is only advisory, however, and the device is free to silently
+	 * ignore all or parts of the request.
+	 *
+	 * Whitelist drives that are known to reliably return zeroes
+	 * after TRIM.
+	 */
+
+	/*
+	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+	 * that model before whitelisting all other intel SSDs.
+	 */
+	{ "INTEL*SSDSC2MH*",		NULL,	0, },
+
+	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+	/*
+	 * Some WD SATA-I drives spin up and down erratically when the link
+	 * is put into the slumber mode.  We don't have full list of the
+	 * affected devices.  Disable LPM if the device matches one of the
+	 * known prefixes and is SATA-1.  As a side effect LPM partial is
+	 * lost too.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
+	 */
+	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
+
+	/* End Marker */
+	{ }
+};
+
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
+	const struct ata_blacklist_entry *ad = ata_device_blacklist;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
+
+	while (ad->model_num) {
+		if (glob_match(ad->model_num, model_num)) {
+			if (ad->model_rev == NULL)
+				return ad->horkage;
+			if (glob_match(ad->model_rev, model_rev))
+				return ad->horkage;
+		}
+		ad++;
+	}
+	return 0;
+}
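+
+/*
+ * Example (illustrative): the patterns above are shell-style globs
+ * matched by glob_match(), so an IDENTIFY model of "ST3320613AS" with
+ * firmware revision "SD17" matches the "ST3320[68]13AS" / "SD1[5-9]"
+ * entry and inherits ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN.
+ */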
+
+static int ata_dma_blacklisted(const struct ata_device *dev)
+{
+	/* We don't support polling DMA.
+	 * Blacklist DMA for (and use PIO with) those ATAPI devices with
+	 * CDB-intr if the LLDD handles interrupts only in the
+	 * HSM_ST_LAST state.
+	 */
+	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
+	    (dev->flags & ATA_DFLAG_CDB_INTR))
+		return 1;
+	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+}
+
+/**
+ *	ata_is_40wire		-	check drive side detection
+ *	@dev: device
+ *
+ *	Perform drive side detection decoding, allowing for device vendors
+ *	who can't follow the documentation.
+ */
+
+static int ata_is_40wire(struct ata_device *dev)
+{
+	if (dev->horkage & ATA_HORKAGE_IVB)
+		return ata_drive_40wire_relaxed(dev->id);
+	return ata_drive_40wire(dev->id);
+}
+
+/**
+ *	cable_is_40wire		-	40/80/SATA decider
+ *	@ap: port to consider
+ *
+ *	This function encapsulates the policy for speed management
+ *	in one place. At the moment we don't cache the result but
+ *	there is a good case for setting ap->cbl to the result when
+ *	we are called with unknown cables (and figuring out if it
+ *	impacts hotplug at all).
+ *
+ *	Return 1 if the cable appears to be 40 wire.
+ */
+
+static int cable_is_40wire(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+
+	/* If the controller thinks we are 40 wire, we are. */
+	if (ap->cbl == ATA_CBL_PATA40)
+		return 1;
+
+	/* If the controller thinks we are 80 wire, we are. */
+	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
+		return 0;
+
+	/* If the system is known to be a 40 wire short cable (e.g.
+	 * a laptop), then we allow 80 wire modes even if the drive
+	 * isn't sure.
+	 */
+	if (ap->cbl == ATA_CBL_PATA40_SHORT)
+		return 0;
+
+	/* If the controller doesn't know, we scan.
+	 *
+	 * Note: We look for all 40 wire detects at this point.  Any
+	 *       80 wire detect is taken to be 80 wire cable because
+	 * - in many setups only the one drive (slave if present) will
+	 *   give a valid detect
+	 * - if you have a non detect capable drive you don't want it
+	 *   to colour the choice
+	 */
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			if (!ata_is_40wire(dev))
+				return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ *	ata_dev_xfermask - Compute supported xfermask of the given device
+ *	@dev: Device to compute xfermask for
+ *
+ *	Compute supported xfermask of @dev and store it in
+ *	dev->*_mask.  This function is responsible for applying all
+ *	known limits including host controller limits, device
+ *	blacklist, etc...
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_dev_xfermask(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_host *host = ap->host;
+	unsigned long xfer_mask;
+
+	/* controller modes available */
+	xfer_mask = ata_pack_xfermask(ap->pio_mask,
+				      ap->mwdma_mask, ap->udma_mask);
+
+	/* drive modes available */
+	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
+				       dev->mwdma_mask, dev->udma_mask);
+	xfer_mask &= ata_id_xfermask(dev->id);
+
+	/*
+	 * CFA Advanced TrueIDE timings are not allowed on a shared
+	 * cable.
+	 */
+	if (ata_dev_pair(dev)) {
+		/* No PIO5 or PIO6 */
+		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
+		/* No MWDMA3 or MWDMA4 */
+		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
+	}
+
+	if (ata_dma_blacklisted(dev)) {
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+		ata_dev_warn(dev,
+			     "device is on DMA blacklist, disabling DMA\n");
+	}
+
+	if ((host->flags & ATA_HOST_SIMPLEX) &&
+	    host->simplex_claimed && host->simplex_claimed != ap) {
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+		ata_dev_warn(dev,
+			     "simplex DMA is claimed by other device, disabling DMA\n");
+	}
+
+	if (ap->flags & ATA_FLAG_NO_IORDY)
+		xfer_mask &= ata_pio_mask_no_iordy(dev);
+
+	if (ap->ops->mode_filter)
+		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
+
+	/* Apply cable rule here.  Don't apply it early because when
+	 * we handle hot plug the cable type can itself change.
+	 * Check this last so that we know if the transfer rate was
+	 * solely limited by the cable.
+	 * Unknown or 80 wire cables reported host side are checked
+	 * drive side as well. Cases where we know a 40wire cable
+	 * is used safely for 80 are not checked here.
+	 */
+	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+		/* UDMA/44 or higher would be available */
+		if (cable_is_40wire(ap)) {
+			ata_dev_warn(dev,
+				     "limited to UDMA/33 due to 40-wire cable\n");
+			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+		}
+
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
+			    &dev->mwdma_mask, &dev->udma_mask);
+}
+
+/**
+ *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
+ *	@dev: Device to which command will be sent
+ *
+ *	Issue SET FEATURES - XFER MODE command to device @dev
+ *	on port @ap.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
+ */
+
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+{
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	/* set up set-features taskfile */
+	DPRINTK("set features - xfer mode\n");
+
+	/* Some controllers and ATAPI devices show flaky interrupt
+	 * behavior after setting xfer mode.  Use polling instead.
+	 */
+	ata_tf_init(dev, &tf);
+	tf.command = ATA_CMD_SET_FEATURES;
+	tf.feature = SETFEATURES_XFER;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
+	tf.protocol = ATA_PROT_NODATA;
+	/* If we are using IORDY we must send the mode setting command */
+	if (ata_pio_need_iordy(dev))
+		tf.nsect = dev->xfer_mode;
+	/* If the device has IORDY and the controller does not - turn it off */
+	else if (ata_id_has_iordy(dev->id))
+		tf.nsect = 0x01;
+	else /* In the ancient relic department - skip all of this */
+		return 0;
+
+	/* On some disks, this command causes spin-up, so we need longer timeout */
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
+
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
+}
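+
+/*
+ * Note (illustrative): the mode written to the sector count register
+ * uses the standard SET FEATURES xfer-mode encoding, which is how the
+ * XFER_* constants are defined: 0x08 + n selects PIO flow-control
+ * mode n, 0x20 + n MWDMA mode n and 0x40 + n UDMA mode n, while
+ * nsect = 0x01 selects PIO default mode with IORDY disabled.
+ */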
+
+/**
+ *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
+ *	@dev: Device to which command will be sent
+ *	@enable: Whether to enable or disable the feature
+ *	@feature: The feature to set, passed in the sector count field
+ *
+ *	Issue SET FEATURES - SATA FEATURES command to device @dev
+ *	on port @ap, passing @feature in the sector count field.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
+ */
+unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
+{
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+	unsigned long timeout = 0;
+
+	/* set up set-features taskfile */
+	DPRINTK("set features - SATA features\n");
+
+	ata_tf_init(dev, &tf);
+	tf.command = ATA_CMD_SET_FEATURES;
+	tf.feature = enable;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.nsect = feature;
+
+	if (enable == SETFEATURES_SPINUP)
+		timeout = ata_probe_timeout ?
+			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
+
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
+}
+EXPORT_SYMBOL_GPL(ata_dev_set_feature);
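+
+/*
+ * Usage sketch (illustrative): libata's LPM handling enables device
+ * initiated power management with
+ *
+ *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
+ *				       SATA_DIPM);
+ *
+ * i.e. @enable selects the enable/disable subcommand and @feature
+ * names the SATA feature carried in the sector count field.
+ */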
+
+/**
+ *	ata_dev_init_params - Issue INIT DEV PARAMS command
+ *	@dev: Device to which command will be sent
+ *	@heads: Number of heads (taskfile parameter)
+ *	@sectors: Number of sectors (taskfile parameter)
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
+ */
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors)
+{
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	/* Number of sectors per track 1-255. Number of heads 1-16 */
+	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+		return AC_ERR_INVALID;
+
+	/* set up init dev params taskfile */
+	DPRINTK("init dev params \n");
+
+	ata_tf_init(dev, &tf);
+	tf.command = ATA_CMD_INIT_DEV_PARAMS;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.nsect = sectors;
+	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/* A clean abort indicates an original or just-out-of-spec drive,
+	   and we should continue as we issue the setup based on the
+	   drive-reported working geometry */
+	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+		err_mask = 0;
+
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
+}
+
+/**
+ *	atapi_check_dma - Check whether ATAPI DMA can be supported
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	Allow low-level driver to filter ATA PACKET commands, returning
+ *	a status indicating whether or not it is OK to use DMA for the
+ *	supplied PACKET command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *               nonzero otherwise
+ */
+int atapi_check_dma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
+	 * Quite a few ATAPI devices choke on such DMA requests.
+	 */
+	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
+	    unlikely(qc->nbytes & 15))
+		return 1;
+
+	if (ap->ops->check_atapi_dma)
+		return ap->ops->check_atapi_dma(qc);
+
+	return 0;
+}
+
+/**
+ *	ata_std_qc_defer - Check whether a qc needs to be deferred
+ *	@qc: ATA command in question
+ *
+ *	Non-NCQ commands cannot run with any other command, NCQ or
+ *	not.  As the upper layer only knows the queue depth, we are
+ *	responsible for maintaining exclusion.  This function checks
+ *	whether a new command @qc can be issued.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int ata_std_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		if (!ata_tag_valid(link->active_tag))
+			return 0;
+	} else {
+		if (!ata_tag_valid(link->active_tag) && !link->sactive)
+			return 0;
+	}
+
+	return ATA_DEFER_LINK;
+}
+
+void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+
+/**
+ *	ata_sg_init - Associate command with scatter-gather table.
+ *	@qc: Command to be associated
+ *	@sg: Scatter-gather table.
+ *	@n_elem: Number of elements in s/g table.
+ *
+ *	Initialize the data-related elements of queued_cmd @qc
+ *	to point to a scatter-gather table @sg, containing @n_elem
+ *	elements.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+		 unsigned int n_elem)
+{
+	qc->sg = sg;
+	qc->n_elem = n_elem;
+	qc->cursg = qc->sg;
+}
+
+#ifdef CONFIG_HAS_DMA
+
+/**
+ *	ata_sg_clean - Unmap DMA memory associated with command
+ *	@qc: Command containing DMA memory to be released
+ *
+ *	Unmap all mapped DMA memory associated with this command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg = qc->sg;
+	int dir = qc->dma_dir;
+
+	WARN_ON_ONCE(sg == NULL);
+
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+	if (qc->n_elem)
+		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
+
+	qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	qc->sg = NULL;
+}
+
+/**
+ *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ *	@qc: Command with scatter-gather table to be mapped.
+ *
+ *	DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ *
+ */
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int n_elem;
+
+	VPRINTK("ENTER, ata%u\n", ap->print_id);
+
+	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+	if (n_elem < 1)
+		return -1;
+
+	VPRINTK("%d sg elements mapped\n", n_elem);
+	qc->orig_n_elem = qc->n_elem;
+	qc->n_elem = n_elem;
+	qc->flags |= ATA_QCFLAG_DMAMAP;
+
+	return 0;
+}
+
+#else /* !CONFIG_HAS_DMA */
+
+static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
+static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
+
+#endif /* !CONFIG_HAS_DMA */
+
+/**
+ *	swap_buf_le16 - swap halves of 16-bit words in place
+ *	@buf:  Buffer to swap
+ *	@buf_words:  Number of 16-bit words in buffer.
+ *
+ *	Swap halves of 16-bit words if needed to convert from
+ *	little-endian byte order to native cpu byte order, or
+ *	vice-versa.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+	unsigned int i;
+
+	for (i = 0; i < buf_words; i++)
+		buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
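+
+/*
+ * Worked example (illustrative): IDENTIFY data arrives as 16-bit
+ * little-endian words.  On a big-endian CPU the word 0x1234 comes off
+ * the wire as bytes 0x34, 0x12 and would read back as 0x3412; the
+ * le16_to_cpu() pass above swaps it back to 0x1234.  On little-endian
+ * builds the loop body compiles away entirely.
+ */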
+
+/**
+ *	ata_qc_new_init - Request an available ATA command, and initialize it
+ *	@dev: Device from whom we request an available command structure
+ *	@tag: tag
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_queued_cmd *qc;
+
+	/* no command while frozen */
+	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+		return NULL;
+
+	/* libsas case */
+	if (ap->flags & ATA_FLAG_SAS_HOST) {
+		tag = ata_sas_allocate_tag(ap);
+		if (tag < 0)
+			return NULL;
+	}
+
+	qc = __ata_qc_from_tag(ap, tag);
+	qc->tag = qc->hw_tag = tag;
+	qc->scsicmd = NULL;
+	qc->ap = ap;
+	qc->dev = dev;
+
+	ata_qc_reinit(qc);
+
+	return qc;
+}
+
+/**
+ *	ata_qc_free - free unused ata_queued_cmd
+ *	@qc: Command to complete
+ *
+ *	Designed to free an unused ata_queued_cmd object
+ *	in case something prevents it from being used.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap;
+	unsigned int tag;
+
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	ap = qc->ap;
+
+	qc->flags = 0;
+	tag = qc->tag;
+	if (ata_tag_valid(tag)) {
+		qc->tag = ATA_TAG_POISON;
+		if (ap->flags & ATA_FLAG_SAS_HOST)
+			ata_sas_free_tag(tag, ap);
+	}
+}
+
+void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap;
+	struct ata_link *link;
+
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	ap = qc->ap;
+	link = qc->dev->link;
+
+	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
+		ata_sg_clean(qc);
+
+	/* command should be marked inactive atomically with qc completion */
+	if (ata_is_ncq(qc->tf.protocol)) {
+		link->sactive &= ~(1 << qc->hw_tag);
+		if (!link->sactive)
+			ap->nr_active_links--;
+	} else {
+		link->active_tag = ATA_TAG_POISON;
+		ap->nr_active_links--;
+	}
+
+	/* clear exclusive status */
+	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
+		     ap->excl_link == link))
+		ap->excl_link = NULL;
+
+	/* atapi: mark qc as inactive to prevent the interrupt handler
+	 * from completing the command twice later, before the error handler
+	 * is called. (when rc != 0 and atapi request sense is needed)
+	 */
+	qc->flags &= ~ATA_QCFLAG_ACTIVE;
+	ap->qc_active &= ~(1ULL << qc->tag);
+
+	/* call completion callback */
+	qc->complete_fn(qc);
+}
+
+static void fill_result_tf(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	qc->result_tf.flags = qc->tf.flags;
+	ap->ops->qc_fill_rtf(qc);
+}
+
+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+{
+	struct ata_device *dev = qc->dev;
+
+	if (!ata_is_data(qc->tf.protocol))
+		return;
+
+	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
+		return;
+
+	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
+}
+
+/**
+ *	ata_qc_complete - Complete an active ATA command
+ *	@qc: Command to complete
+ *
+ *	Indicate to the mid and upper layers that an ATA command has
+ *	completed, with either an ok or not-ok status.
+ *
+ *	Refrain from calling this function multiple times when
+ *	successfully completing multiple NCQ commands.
+ *	ata_qc_complete_multiple() should be used instead, which will
+ *	properly update IRQ expect state.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* Trigger the LED (if available) */
+	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
+
+	/* XXX: New EH and old EH use different mechanisms to
+	 * synchronize EH with regular execution path.
+	 *
+	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
+	 * Normal execution path is responsible for not accessing a
+	 * failed qc.  libata core enforces the rule by returning NULL
+	 * from ata_qc_from_tag() for failed qcs.
+	 *
+	 * Old EH depends on ata_qc_complete() nullifying completion
+	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
+	 * not synchronize with interrupt handler.  Only PIO task is
+	 * taken care of.
+	 */
+	if (ap->ops->error_handler) {
+		struct ata_device *dev = qc->dev;
+		struct ata_eh_info *ehi = &dev->link->eh_info;
+
+		if (unlikely(qc->err_mask))
+			qc->flags |= ATA_QCFLAG_FAILED;
+
+		/*
+		 * Finish internal commands without any further processing
+		 * and always with the result TF filled.
+		 */
+		if (unlikely(ata_tag_internal(qc->tag))) {
+			fill_result_tf(qc);
+			trace_ata_qc_complete_internal(qc);
+			__ata_qc_complete(qc);
+			return;
+		}
+
+		/*
+		 * Non-internal qc has failed.  Fill the result TF and
+		 * summon EH.
+		 */
+		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+			fill_result_tf(qc);
+			trace_ata_qc_complete_failed(qc);
+			ata_qc_schedule_eh(qc);
+			return;
+		}
+
+		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
+
+		/* read result TF if requested */
+		if (qc->flags & ATA_QCFLAG_RESULT_TF)
+			fill_result_tf(qc);
+
+		trace_ata_qc_complete_done(qc);
+		/* Some commands need post-processing after successful
+		 * completion.
+		 */
+		switch (qc->tf.command) {
+		case ATA_CMD_SET_FEATURES:
+			if (qc->tf.feature != SETFEATURES_WC_ON &&
+			    qc->tf.feature != SETFEATURES_WC_OFF &&
+			    qc->tf.feature != SETFEATURES_RA_ON &&
+			    qc->tf.feature != SETFEATURES_RA_OFF)
+				break;
+			/* fall through */
+		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+		case ATA_CMD_SET_MULTI: /* multi_count changed */
+			/* revalidate device */
+			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+			ata_port_schedule_eh(ap);
+			break;
+
+		case ATA_CMD_SLEEP:
+			dev->flags |= ATA_DFLAG_SLEEPING;
+			break;
+		}
+
+		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+			ata_verify_xfer(qc);
+
+		__ata_qc_complete(qc);
+	} else {
+		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
+			return;
+
+		/* read result TF if failed or requested */
+		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
+			fill_result_tf(qc);
+
+		__ata_qc_complete(qc);
+	}
+}
+
+/**
+ *	ata_qc_complete_multiple - Complete multiple qcs successfully
+ *	@ap: port in question
+ *	@qc_active: new qc_active mask
+ *
+ *	Complete in-flight commands.  This function is meant to be
+ *	called from the low-level driver's interrupt routine to complete
+ *	requests normally.  ap->qc_active and @qc_active are compared
+ *	and commands are completed accordingly.
+ *
+ *	Always use this function when completing multiple NCQ commands
+ *	from IRQ handlers instead of calling ata_qc_complete()
+ *	multiple times to keep IRQ expect status properly in sync.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of completed commands on success, -errno otherwise.
+ */
+int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
+{
+	u64 done_mask, ap_qc_active = ap->qc_active;
+	int nr_done = 0;
+
+	/*
+	 * If the internal tag is set on ap->qc_active, then we care about
+	 * bit0 on the passed in qc_active mask. Move that bit up to match
+	 * the internal tag.
+	 */
+	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+		qc_active ^= qc_active & 0x01;
+	}
+
+	done_mask = ap_qc_active ^ qc_active;
+
+	if (unlikely(done_mask & qc_active)) {
+		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
+			     ap->qc_active, qc_active);
+		return -EINVAL;
+	}
+
+	while (done_mask) {
+		struct ata_queued_cmd *qc;
+		unsigned int tag = __ffs64(done_mask);
+
+		qc = ata_qc_from_tag(ap, tag);
+		if (qc) {
+			ata_qc_complete(qc);
+			nr_done++;
+		}
+		done_mask &= ~(1ULL << tag);
+	}
+
+	return nr_done;
+}
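+
+/*
+ * Usage sketch (illustrative only, not compiled): an NCQ-capable LLD's
+ * interrupt handler typically reads the controller's mask of commands
+ * that are still outstanding (e.g. AHCI's PxSACT/PxCI) and passes it
+ * in as the new qc_active mask in a single call.  my_read_active_mask()
+ * is a hypothetical controller accessor:
+ *
+ *	static void my_port_intr(struct ata_port *ap)
+ *	{
+ *		u64 qc_active = my_read_active_mask(ap);
+ *
+ *		ata_qc_complete_multiple(ap, qc_active);
+ *	}
+ */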
+
+/**
+ *	ata_qc_issue - issue taskfile to device
+ *	@qc: command to issue to device
+ *
+ *	Prepare an ATA command for submission to the device.
+ *	This includes mapping the data into a DMA-able
+ *	area, filling in the S/G table, and finally
+ *	writing the taskfile to hardware, starting the command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
+	u8 prot = qc->tf.protocol;
+
+	/* Make sure only one non-NCQ command is outstanding.  The
+	 * check is skipped for old EH because it reuses active qc to
+	 * request ATAPI sense.
+	 */
+	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+
+	if (ata_is_ncq(prot)) {
+		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
+
+		if (!link->sactive)
+			ap->nr_active_links++;
+		link->sactive |= 1 << qc->hw_tag;
+	} else {
+		WARN_ON_ONCE(link->sactive);
+
+		ap->nr_active_links++;
+		link->active_tag = qc->tag;
+	}
+
+	qc->flags |= ATA_QCFLAG_ACTIVE;
+	ap->qc_active |= 1ULL << qc->tag;
+
+	/*
+	 * We guarantee to LLDs that they will have at least one
+	 * non-zero sg if the command is a data command.
+	 */
+	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
+		goto sys_err;
+
+	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
+				 (ap->flags & ATA_FLAG_PIO_DMA)))
+		if (ata_sg_setup(qc))
+			goto sys_err;
+
+	/* if device is sleeping, schedule reset and abort the link */
+	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
+		link->eh_info.action |= ATA_EH_RESET;
+		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
+		ata_link_abort(link);
+		return;
+	}
+
+	ap->ops->qc_prep(qc);
+	trace_ata_qc_issue(qc);
+	qc->err_mask |= ap->ops->qc_issue(qc);
+	if (unlikely(qc->err_mask))
+		goto err;
+	return;
+
+sys_err:
+	qc->err_mask |= AC_ERR_SYSTEM;
+err:
+	ata_qc_complete(qc);
+}
+
+/**
+ *	sata_scr_valid - test whether SCRs are accessible
+ *	@link: ATA link to test SCR accessibility for
+ *
+ *	Test whether SCRs are accessible for @link.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if SCRs are accessible, 0 otherwise.
+ */
+int sata_scr_valid(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+
+	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
+}
+
+/**
+ *	sata_scr_read - read SCR register of the specified port
+ *	@link: ATA link to read SCR for
+ *	@reg: SCR to read
+ *	@val: Place to store read value
+ *
+ *	Read SCR register @reg of @link into *@val.  This function is
+ *	guaranteed to succeed if @link is ap->link, the cable type of
+ *	the port is SATA and the port implements ->scr_read.
+ *
+ *	LOCKING:
+ *	None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+	if (ata_is_host_link(link)) {
+		if (sata_scr_valid(link))
+			return link->ap->ops->scr_read(link, reg, val);
+		return -EOPNOTSUPP;
+	}
+
+	return sata_pmp_scr_read(link, reg, val);
+}
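+
+/*
+ * Usage sketch (illustrative only, not compiled): reading SStatus and
+ * testing the DET field (bits 3:0) for an established device
+ * connection, mirroring what ata_sstatus_online() does:
+ *
+ *	u32 sstatus;
+ *
+ *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+ *	    (sstatus & 0xf) == 0x3)
+ *		the PHY reports device presence and established comm
+ */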
+
+/**
+ *	sata_scr_write - write SCR register of the specified port
+ *	@link: ATA link to write SCR for
+ *	@reg: SCR to write
+ *	@val: value to write
+ *
+ *	Write @val to SCR register @reg of @link.  This function is
+ *	guaranteed to succeed if @link is ap->link, the cable type of
+ *	the port is SATA and the port implements ->scr_write.
+ *
+ *	LOCKING:
+ *	None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+	if (ata_is_host_link(link)) {
+		if (sata_scr_valid(link))
+			return link->ap->ops->scr_write(link, reg, val);
+		return -EOPNOTSUPP;
+	}
+
+	return sata_pmp_scr_write(link, reg, val);
+}
+
+/**
+ *	sata_scr_write_flush - write SCR register of the specified port and flush
+ *	@link: ATA link to write SCR for
+ *	@reg: SCR to write
+ *	@val: value to write
+ *
+ *	This function is identical to sata_scr_write() except that it
+ *	performs a flush (by reading the register back) after writing.
+ *
+ *	LOCKING:
+ *	None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure.
+ */
+int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+	if (ata_is_host_link(link)) {
+		int rc;
+
+		if (sata_scr_valid(link)) {
+			rc = link->ap->ops->scr_write(link, reg, val);
+			if (rc == 0)
+				rc = link->ap->ops->scr_read(link, reg, &val);
+			return rc;
+		}
+		return -EOPNOTSUPP;
+	}
+
+	return sata_pmp_scr_write(link, reg, val);
+}
+
+/**
+ *	ata_phys_link_online - test whether the given link is online
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is online.  Note that this function returns
+ *	false if the online status of @link cannot be obtained, so
+ *	ata_phys_link_online(link) != !ata_phys_link_offline(link).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port online status is available and online.
+ */
+bool ata_phys_link_online(struct ata_link *link)
+{
+	u32 sstatus;
+
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+	    ata_sstatus_online(sstatus))
+		return true;
+	return false;
+}
+
+/**
+ *	ata_phys_link_offline - test whether the given link is offline
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is offline.  Note that this function returns
+ *	false if the offline status of @link cannot be obtained, so
+ *	ata_phys_link_offline(link) != !ata_phys_link_online(link).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port offline status is available and offline.
+ */
+bool ata_phys_link_offline(struct ata_link *link)
+{
+	u32 sstatus;
+
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+	    !ata_sstatus_online(sstatus))
+		return true;
+	return false;
+}
+
+/**
+ *	ata_link_online - test whether the given link is online
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is online.  This is identical to
+ *	ata_phys_link_online() when there's no slave link.  When
+ *	there's a slave link, this function should only be called on
+ *	the master link and will return true if any of M/S links is
+ *	online.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port online status is available and online.
+ */
+bool ata_link_online(struct ata_link *link)
+{
+	struct ata_link *slave = link->ap->slave_link;
+
+	WARN_ON(link == slave);	/* shouldn't be called on slave link */
+
+	return ata_phys_link_online(link) ||
+		(slave && ata_phys_link_online(slave));
+}
+
+/**
+ *	ata_link_offline - test whether the given link is offline
+ *	@link: ATA link to test
+ *
+ *	Test whether @link is offline.  This is identical to
+ *	ata_phys_link_offline() when there's no slave link.  When
+ *	there's a slave link, this function should only be called on
+ *	the master link and will return true if both M/S links are
+ *	offline.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the port offline status is available and offline.
+ */
+bool ata_link_offline(struct ata_link *link)
+{
+	struct ata_link *slave = link->ap->slave_link;
+
+	WARN_ON(link == slave);	/* shouldn't be called on slave link */
+
+	return ata_phys_link_offline(link) &&
+		(!slave || ata_phys_link_offline(slave));
+}
+
+#ifdef CONFIG_PM
+static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+				unsigned int action, unsigned int ehi_flags,
+				bool async)
+{
+	struct ata_link *link;
+	unsigned long flags;
+
+	/* A previous PM operation might still be in progress.
+	 * Wait for ATA_PFLAG_PM_PENDING to clear.
+	 */
+	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+		ata_port_wait_eh(ap);
+		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+	}
+
+	/* request PM ops to EH */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->pm_mesg = mesg;
+	ap->pflags |= ATA_PFLAG_PM_PENDING;
+	ata_for_each_link(link, ap, HOST_FIRST) {
+		link->eh_info.action |= action;
+		link->eh_info.flags |= ehi_flags;
+	}
+
+	ata_port_schedule_eh(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (!async) {
+		ata_port_wait_eh(ap);
+		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+	}
+}
+
+/*
+ * On some hardware, a device fails to respond after being spun down for
+ * suspend.  As the device won't be used before being resumed, we don't
+ * need to touch it.  Ask EH to skip the usual stuff and proceed directly
+ * to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+						 | ATA_EHI_NO_AUTOPSY
+						 | ATA_EHI_NO_RECOVERY;
+
+static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
+}
+
+static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
+{
+	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
+}
+
+static int ata_port_pm_suspend(struct device *dev)
+{
+	struct ata_port *ap = to_ata_port(dev);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	ata_port_suspend(ap, PMSG_SUSPEND);
+	return 0;
+}
+
+static int ata_port_pm_freeze(struct device *dev)
+{
+	struct ata_port *ap = to_ata_port(dev);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	ata_port_suspend(ap, PMSG_FREEZE);
+	return 0;
+}
+
+static int ata_port_pm_poweroff(struct device *dev)
+{
+	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
+	return 0;
+}
+
+static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
+						| ATA_EHI_QUIET;
+
+static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
+{
+	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
+}
+
+static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
+{
+	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
+}
+
+static int ata_port_pm_resume(struct device *dev)
+{
+	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	return 0;
+}
+
+/*
+ * For ODDs, the upper layer will poll for media change every few seconds,
+ * which will make it enter and leave suspend state every few seconds.  As
+ * each suspend will cause a hard/soft reset, the gain of runtime suspend
+ * is very little and the ODD may malfunction after constantly being reset.
+ * So the idle callback here will not proceed to suspend if a non-ZPODD capable
+ * ODD is attached to the port.
+ */
+static int ata_port_runtime_idle(struct device *dev)
+{
+	struct ata_port *ap = to_ata_port(dev);
+	struct ata_link *link;
+	struct ata_device *adev;
+
+	ata_for_each_link(link, ap, HOST_FIRST) {
+		ata_for_each_dev(adev, link, ENABLED)
+			if (adev->class == ATA_DEV_ATAPI &&
+			    !zpodd_dev_enabled(adev))
+				return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int ata_port_runtime_suspend(struct device *dev)
+{
+	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
+	return 0;
+}
+
+static int ata_port_runtime_resume(struct device *dev)
+{
+	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
+	return 0;
+}
+
+static const struct dev_pm_ops ata_port_pm_ops = {
+	.suspend = ata_port_pm_suspend,
+	.resume = ata_port_pm_resume,
+	.freeze = ata_port_pm_freeze,
+	.thaw = ata_port_pm_resume,
+	.poweroff = ata_port_pm_poweroff,
+	.restore = ata_port_pm_resume,
+
+	.runtime_suspend = ata_port_runtime_suspend,
+	.runtime_resume = ata_port_runtime_resume,
+	.runtime_idle = ata_port_runtime_idle,
+};
+
+/* sas ports don't participate in pm runtime management of ata_ports,
+ * and need to resume ata devices at the domain level, not the per-port
+ * level. sas suspend/resume is async to allow parallel port recovery
+ * since sas has multiple ata_port instances per Scsi_Host.
+ */
+void ata_sas_port_suspend(struct ata_port *ap)
+{
+	ata_port_suspend_async(ap, PMSG_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
+
+void ata_sas_port_resume(struct ata_port *ap)
+{
+	ata_port_resume_async(ap, PMSG_RESUME);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_resume);
+
+/**
+ *	ata_host_suspend - suspend host
+ *	@host: host to suspend
+ *	@mesg: PM message
+ *
+ *	Suspend @host.  Actual operation is performed by port suspend.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+	host->dev->power.power_state = mesg;
+	return 0;
+}
+
+/**
+ *	ata_host_resume - resume host
+ *	@host: host to resume
+ *
+ *	Resume @host.  Actual operation is performed by port resume.
+ */
+void ata_host_resume(struct ata_host *host)
+{
+	host->dev->power.power_state = PMSG_ON;
+}
+#endif
+
+const struct device_type ata_port_type = {
+	.name = "ata_port",
+#ifdef CONFIG_PM
+	.pm = &ata_port_pm_ops,
+#endif
+};
+
+/**
+ *	ata_dev_init - Initialize an ata_device structure
+ *	@dev: Device structure to initialize
+ *
+ *	Initialize @dev in preparation for probing.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_dev_init(struct ata_device *dev)
+{
+	struct ata_link *link = ata_dev_phys_link(dev);
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+
+	/* SATA spd limit is bound to the attached device, reset together */
+	link->sata_spd_limit = link->hw_sata_spd_limit;
+	link->sata_spd = 0;
+
+	/* High bits of dev->flags are used to record warm plug
+	 * requests which occur asynchronously.  Synchronize using
+	 * host lock.
+	 */
+	spin_lock_irqsave(ap->lock, flags);
+	dev->flags &= ~ATA_DFLAG_INIT_MASK;
+	dev->horkage = 0;
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
+	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
+	dev->pio_mask = UINT_MAX;
+	dev->mwdma_mask = UINT_MAX;
+	dev->udma_mask = UINT_MAX;
+}
+
+/**
+ *	ata_link_init - Initialize an ata_link structure
+ *	@ap: ATA port link is attached to
+ *	@link: Link structure to initialize
+ *	@pmp: Port multiplier port number
+ *
+ *	Initialize @link.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
+{
+	int i;
+
+	/* clear everything except for devices */
+	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
+	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
+
+	link->ap = ap;
+	link->pmp = pmp;
+	link->active_tag = ATA_TAG_POISON;
+	link->hw_sata_spd_limit = UINT_MAX;
+
+	/* can't use iterator, ap isn't initialized yet */
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &link->device[i];
+
+		dev->link = link;
+		dev->devno = dev - link->device;
+#ifdef CONFIG_ATA_ACPI
+		dev->gtf_filter = ata_acpi_gtf_filter;
+#endif
+		ata_dev_init(dev);
+	}
+}
+
+/**
+ *	sata_link_init_spd - Initialize link->sata_spd_limit
+ *	@link: Link to configure sata_spd_limit for
+ *
+ *	Initialize @link->[hw_]sata_spd_limit to the currently
+ *	configured value.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_link_init_spd(struct ata_link *link)
+{
+	u8 spd;
+	int rc;
+
+	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
+	if (rc)
+		return rc;
+
+	spd = (link->saved_scontrol >> 4) & 0xf;
+	if (spd)
+		link->hw_sata_spd_limit &= (1 << spd) - 1;
+
+	ata_force_link_limits(link);
+
+	link->sata_spd_limit = link->hw_sata_spd_limit;
+
+	return 0;
+}
+
+/**
+ *	ata_port_alloc - allocate and initialize basic ATA port resources
+ *	@host: ATA host this allocated port belongs to
+ *
+ *	Allocate and initialize basic ATA port resources.
+ *
+ *	RETURNS:
+ *	Allocated ATA port on success, NULL on failure.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+struct ata_port *ata_port_alloc(struct ata_host *host)
+{
+	struct ata_port *ap;
+
+	DPRINTK("ENTER\n");
+
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+	if (!ap)
+		return NULL;
+
+	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
+	ap->lock = &host->lock;
+	ap->print_id = -1;
+	ap->local_port_no = -1;
+	ap->host = host;
+	ap->dev = host->dev;
+
+#if defined(ATA_VERBOSE_DEBUG)
+	/* turn on all debugging levels */
+	ap->msg_enable = 0x00FF;
+#elif defined(ATA_DEBUG)
+	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
+#else
+	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
+#endif
+
+	mutex_init(&ap->scsi_scan_mutex);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+	INIT_LIST_HEAD(&ap->eh_done_q);
+	init_waitqueue_head(&ap->eh_wait_q);
+	init_completion(&ap->park_req_pending);
+	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
+		    TIMER_DEFERRABLE);
+
+	ap->cbl = ATA_CBL_NONE;
+
+	ata_link_init(ap, &ap->link, 0);
+
+#ifdef ATA_IRQ_TRAP
+	ap->stats.unhandled_irq = 1;
+	ap->stats.idle_irq = 1;
+#endif
+	ata_sff_port_init(ap);
+
+	return ap;
+}
+
+static void ata_devres_release(struct device *gendev, void *res)
+{
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (!ap)
+			continue;
+
+		if (ap->scsi_host)
+			scsi_host_put(ap->scsi_host);
+
+	}
+
+	dev_set_drvdata(gendev, NULL);
+	ata_host_put(host);
+}
+
+static void ata_host_release(struct kref *kref)
+{
+	struct ata_host *host = container_of(kref, struct ata_host, kref);
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		kfree(ap->pmp_link);
+		kfree(ap->slave_link);
+		kfree(ap);
+		host->ports[i] = NULL;
+	}
+	kfree(host);
+}
+
+void ata_host_get(struct ata_host *host)
+{
+	kref_get(&host->kref);
+}
+
+void ata_host_put(struct ata_host *host)
+{
+	kref_put(&host->kref, ata_host_release);
+}
+
+/**
+ *	ata_host_alloc - allocate and init basic ATA host resources
+ *	@dev: generic device this host is associated with
+ *	@max_ports: maximum number of ATA ports associated with this host
+ *
+ *	Allocate and initialize basic ATA host resources.  An LLD calls
+ *	this function to allocate a host, initializes it fully, and
+ *	attaches it using ata_host_register().
+ *
+ *	@max_ports ports are allocated and host->n_ports is
+ *	initialized to @max_ports.  The caller is allowed to decrease
+ *	host->n_ports before calling ata_host_register().  The unused
+ *	ports will be automatically freed on registration.
+ *
+ *	RETURNS:
+ *	Allocated ATA host on success, NULL on failure.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+{
+	struct ata_host *host;
+	size_t sz;
+	int i;
+	void *dr;
+
+	DPRINTK("ENTER\n");
+
+	/* alloc a container for our list of ATA ports (buses) */
+	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+	host = kzalloc(sz, GFP_KERNEL);
+	if (!host)
+		return NULL;
+
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		goto err_free;
+
+	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+	if (!dr)
+		goto err_out;
+
+	devres_add(dev, dr);
+	dev_set_drvdata(dev, host);
+
+	spin_lock_init(&host->lock);
+	mutex_init(&host->eh_mutex);
+	host->dev = dev;
+	host->n_ports = max_ports;
+	kref_init(&host->kref);
+
+	/* allocate ports bound to this host */
+	for (i = 0; i < max_ports; i++) {
+		struct ata_port *ap;
+
+		ap = ata_port_alloc(host);
+		if (!ap)
+			goto err_out;
+
+		ap->port_no = i;
+		host->ports[i] = ap;
+	}
+
+	devres_remove_group(dev, NULL);
+	return host;
+
+ err_out:
+	devres_release_group(dev, NULL);
+ err_free:
+	kfree(host);
+	return NULL;
+}
+
+/**
+ *	ata_host_alloc_pinfo - alloc host and init with port_info array
+ *	@dev: generic device this host is associated with
+ *	@ppi: array of ATA port_info to initialize host with
+ *	@n_ports: number of ATA ports attached to this host
+ *
+ *	Allocate an ATA host and initialize it with info from @ppi.  If
+ *	NULL-terminated, @ppi may contain fewer entries than @n_ports.
+ *	The last entry will be used for the remaining ports.
+ *
+ *	RETURNS:
+ *	Allocated ATA host on success, NULL on failure.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+				      const struct ata_port_info * const * ppi,
+				      int n_ports)
+{
+	const struct ata_port_info *pi;
+	struct ata_host *host;
+	int i, j;
+
+	host = ata_host_alloc(dev, n_ports);
+	if (!host)
+		return NULL;
+
+	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ppi[j])
+			pi = ppi[j++];
+
+		ap->pio_mask = pi->pio_mask;
+		ap->mwdma_mask = pi->mwdma_mask;
+		ap->udma_mask = pi->udma_mask;
+		ap->flags |= pi->flags;
+		ap->link.flags |= pi->link_flags;
+		ap->ops = pi->port_ops;
+
+		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
+			host->ops = pi->port_ops;
+	}
+
+	return host;
+}
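+
+/*
+ * Usage sketch (illustrative only, not compiled): a two-port controller
+ * whose ports share one ata_port_info can pass a NULL-terminated,
+ * single-entry array; the last entry is reused for the remaining port.
+ * my_port_info is a hypothetical LLD-defined template:
+ *
+ *	static const struct ata_port_info my_port_info = {
+ *		.flags		= ATA_FLAG_SATA,
+ *		.pio_mask	= ATA_PIO4,
+ *		.udma_mask	= ATA_UDMA6,
+ *		.port_ops	= &sata_port_ops,
+ *	};
+ *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
+ *	struct ata_host *host;
+ *
+ *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ *	if (!host)
+ *		return -ENOMEM;
+ */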
+
+/**
+ *	ata_slave_link_init - initialize slave link
+ *	@ap: port to initialize slave link for
+ *
+ *	Create and initialize slave link for @ap.  This enables slave
+ *	link handling on the port.
+ *
+ *	In libata, a port contains links and a link contains devices.
+ *	There is a single host link but if a PMP is attached to it,
+ *	there can be multiple fan-out links.  On SATA, there's usually
+ *	a single device connected to a link but PATA and SATA
+ *	controllers emulating TF based interface can have two - master
+ *	and slave.
+ *
+ *	However, there are a few controllers which don't fit into this
+ *	abstraction too well - SATA controllers which emulate TF
+ *	interface with both master and slave devices but also have
+ *	separate SCR register sets for each device.  These controllers
+ *	need separate links for physical link handling
+ *	(e.g. onlineness, link speed) but should be treated like a
+ *	traditional M/S controller for everything else (e.g. command
+ *	issue, softreset).
+ *
+ *	slave_link is libata's way of handling this class of
+ *	controllers without impacting core layer too much.  For
+ *	anything other than physical link handling, the default host
+ *	link is used for both master and slave.  For physical link
+ *	handling, separate @ap->slave_link is used.  All dirty details
+ *	are implemented inside libata core layer.  From LLD's POV, the
+ *	only difference is that prereset, hardreset and postreset are
+ *	called once more for the slave link, so the reset sequence
+ *	looks like the following.
+ *
+ *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ *	softreset(M) -> postreset(M) -> postreset(S)
+ *
+ *	Note that softreset is called only for the master.  Softreset
+ *	resets both M/S by definition, so SRST on master should handle
+ *	both (the standard method will work just fine).
+ *
+ *	LOCKING:
+ *	Should be called before host is registered.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	WARN_ON(ap->slave_link);
+	WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	ata_link_init(ap, link, 1);
+	ap->slave_link = link;
+	return 0;
+}
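+
+/*
+ * Usage sketch (illustrative only, not compiled): a driver for such a
+ * controller creates the slave link after allocating the host and
+ * before registering it:
+ *
+ *	rc = ata_slave_link_init(host->ports[0]);
+ *	if (rc)
+ *		return rc;
+ */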
+
+static void ata_host_stop(struct device *gendev, void *res)
+{
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
+
+	WARN_ON(!(host->flags & ATA_HOST_STARTED));
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+
+	if (host->ops->host_stop)
+		host->ops->host_stop(host);
+}
+
+/**
+ *	ata_finalize_port_ops - finalize ata_port_operations
+ *	@ops: ata_port_operations to finalize
+ *
+ *	An ata_port_operations can inherit from another ops and that
+ *	ops can again inherit from another.  This can go on as many
+ *	times as necessary as long as there is no loop in the
+ *	inheritance chain.
+ *
+ *	Ops tables are finalized when the host is started.  NULL or
+ *	unspecified entries are inherited from the closest ancestor
+ *	which has the method and the entry is populated with it.
+ *	After finalization, the ops table directly points to all the
+ *	methods and ->inherits is no longer necessary and cleared.
+ *
+ *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_finalize_port_ops(struct ata_port_operations *ops)
+{
+	static DEFINE_SPINLOCK(lock);
+	const struct ata_port_operations *cur;
+	void **begin = (void **)ops;
+	void **end = (void **)&ops->inherits;
+	void **pp;
+
+	if (!ops || !ops->inherits)
+		return;
+
+	spin_lock(&lock);
+
+	for (cur = ops->inherits; cur; cur = cur->inherits) {
+		void **inherit = (void **)cur;
+
+		for (pp = begin; pp < end; pp++, inherit++)
+			if (!*pp)
+				*pp = *inherit;
+	}
+
+	for (pp = begin; pp < end; pp++)
+		if (IS_ERR(*pp))
+			*pp = NULL;
+
+	ops->inherits = NULL;
+
+	spin_unlock(&lock);
+}
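+
+/*
+ * Usage sketch (illustrative only, not compiled): an LLD declares only
+ * the methods it overrides and points ->inherits at an ancestor table;
+ * everything else is filled in here at host start.  my_hardreset is a
+ * hypothetical method:
+ *
+ *	static struct ata_port_operations my_port_ops = {
+ *		.inherits	= &sata_port_ops,
+ *		.hardreset	= my_hardreset,
+ *		.softreset	= ATA_OP_NULL,
+ *	};
+ *
+ * where the ATA_OP_NULL entry forces the inherited ->softreset to NULL.
+ */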
+
+/**
+ *	ata_host_start - start and freeze ports of an ATA host
+ *	@host: ATA host to start ports for
+ *
+ *	Start and then freeze ports of @host.  Started status is
+ *	recorded in host->flags, so this function can be called
+ *	multiple times.  Ports are guaranteed to get started only
+ *	once.  If host->ops isn't initialized yet, it's set to the
+ *	first non-dummy port ops.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 if all ports are started successfully, -errno otherwise.
+ */
+int ata_host_start(struct ata_host *host)
+{
+	int have_stop = 0;
+	void *start_dr = NULL;
+	int i, rc;
+
+	if (host->flags & ATA_HOST_STARTED)
+		return 0;
+
+	ata_finalize_port_ops(host->ops);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_finalize_port_ops(ap->ops);
+
+		if (!host->ops && !ata_port_is_dummy(ap))
+			host->ops = ap->ops;
+
+		if (ap->ops->port_stop)
+			have_stop = 1;
+	}
+
+	if (host->ops->host_stop)
+		have_stop = 1;
+
+	if (have_stop) {
+		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
+		if (!start_dr)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->ops->port_start) {
+			rc = ap->ops->port_start(ap);
+			if (rc) {
+				if (rc != -ENODEV)
+					dev_err(host->dev,
+						"failed to start port %d (errno=%d)\n",
+						i, rc);
+				goto err_out;
+			}
+		}
+		ata_eh_freeze_port(ap);
+	}
+
+	if (start_dr)
+		devres_add(host->dev, start_dr);
+	host->flags |= ATA_HOST_STARTED;
+	return 0;
+
+ err_out:
+	while (--i >= 0) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+	devres_free(start_dr);
+	return rc;
+}
+
+/**
+ *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
+ *	@host:	host to initialize
+ *	@dev:	device host is attached to
+ *	@ops:	port_ops
+ */
+void ata_host_init(struct ata_host *host, struct device *dev,
+		   struct ata_port_operations *ops)
+{
+	spin_lock_init(&host->lock);
+	mutex_init(&host->eh_mutex);
+	host->n_tags = ATA_MAX_QUEUE;
+	host->dev = dev;
+	host->ops = ops;
+	kref_init(&host->kref);
+}
+
+void __ata_port_probe(struct ata_port *ap)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	unsigned long flags;
+
+	/* kick EH for boot probing */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ehi->probe_mask |= ATA_ALL_DEVICES;
+	ehi->action |= ATA_EH_RESET;
+	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
+	ap->pflags |= ATA_PFLAG_LOADING;
+	ata_port_schedule_eh(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+int ata_port_probe(struct ata_port *ap)
+{
+	int rc = 0;
+
+	if (ap->ops->error_handler) {
+		__ata_port_probe(ap);
+		ata_port_wait_eh(ap);
+	} else {
+		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: bus probe end\n", ap->print_id);
+	}
+	return rc;
+}
+
+static void async_port_probe(void *data, async_cookie_t cookie)
+{
+	struct ata_port *ap = data;
+
+	/*
+	 * If we're not allowed to scan this host in parallel,
+	 * we need to wait until all previous scans have completed
+	 * before going further.
+	 * Jeff Garzik says this is only within a controller, so we
+	 * don't need to wait for port 0, only for later ports.
+	 */
+	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
+		async_synchronize_cookie(cookie);
+
+	(void)ata_port_probe(ap);
+
+	/* in order to keep device order, we need to synchronize at this point */
+	async_synchronize_cookie(cookie);
+
+	ata_scsi_scan_host(ap, 1);
+}
+
+/**
+ *	ata_host_register - register initialized ATA host
+ *	@host: ATA host to register
+ *	@sht: template for SCSI host
+ *
+ *	Register initialized ATA host.  @host is allocated using
+ *	ata_host_alloc() and fully initialized by LLD.  This function
+ *	starts ports, registers @host with ATA and SCSI layers and
+ *	probes registered devices.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+{
+	int i, rc;
+
+	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
+
+	/* host must have been started */
+	if (!(host->flags & ATA_HOST_STARTED)) {
+		dev_err(host->dev, "BUG: trying to register unstarted host\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/* Blow away unused ports.  This happens when LLD can't
+	 * determine the exact number of ports to allocate at
+	 * allocation time.
+	 */
+	for (i = host->n_ports; host->ports[i]; i++)
+		kfree(host->ports[i]);
+
+	/* give ports names and add SCSI hosts */
+	for (i = 0; i < host->n_ports; i++) {
+		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+		host->ports[i]->local_port_no = i + 1;
+	}
+
+	/* Create associated sysfs transport objects  */
+	for (i = 0; i < host->n_ports; i++) {
+		rc = ata_tport_add(host->dev, host->ports[i]);
+		if (rc)
+			goto err_tadd;
+	}
+
+	rc = ata_scsi_add_hosts(host, sht);
+	if (rc)
+		goto err_tadd;
+
+	/* set cable, sata_spd_limit and report */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned long xfer_mask;
+
+		/* set SATA cable type if still unset */
+		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
+			ap->cbl = ATA_CBL_SATA;
+
+		/* init sata_spd_limit to the current value */
+		sata_link_init_spd(&ap->link);
+		if (ap->slave_link)
+			sata_link_init_spd(ap->slave_link);
+
+		/* print per-port info to dmesg */
+		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
+					      ap->udma_mask);
+
+		if (!ata_port_is_dummy(ap)) {
+			ata_port_info(ap, "%cATA max %s %s\n",
+				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
+				      ata_mode_string(xfer_mask),
+				      ap->link.eh_info.desc);
+			ata_ehi_clear_desc(&ap->link.eh_info);
+		} else
+			ata_port_info(ap, "DUMMY\n");
+	}
+
+	/* perform each probe asynchronously */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		async_schedule(async_port_probe, ap);
+	}
+
+	return 0;
+
+ err_tadd:
+	while (--i >= 0) {
+		ata_tport_delete(host->ports[i]);
+	}
+	return rc;
+}
+
+/**
+ *	ata_host_activate - start host, request IRQ and register it
+ *	@host: target ATA host
+ *	@irq: IRQ to request
+ *	@irq_handler: irq_handler used when requesting IRQ
+ *	@irq_flags: irq_flags used when requesting IRQ
+ *	@sht: scsi_host_template to use when registering the host
+ *
+ *	After allocating an ATA host and initializing it, most libata
+ *	LLDs perform three steps to activate the host - start host,
+ *	request IRQ and register it.  This helper takes necessary
+ *	arguments and performs the three steps in one go.
+ *
+ *	An invalid IRQ skips the IRQ registration and expects the host to
+ *	have set polling mode on the port. In this case, @irq_handler
+ *	should be NULL.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_host_activate(struct ata_host *host, int irq,
+		      irq_handler_t irq_handler, unsigned long irq_flags,
+		      struct scsi_host_template *sht)
+{
+	int i, rc;
+	char *irq_desc;
+
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+
+	/* Special case for polling mode */
+	if (!irq) {
+		WARN_ON(irq_handler);
+		return ata_host_register(host, sht);
+	}
+
+	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
+				  dev_driver_string(host->dev),
+				  dev_name(host->dev));
+	if (!irq_desc)
+		return -ENOMEM;
+
+	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
+			      irq_desc, host);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < host->n_ports; i++)
+		ata_port_desc(host->ports[i], "irq %d", irq);
+
+	rc = ata_host_register(host, sht);
+	/* if failed, just free the IRQ and leave ports alone */
+	if (rc)
+		devm_free_irq(host->dev, irq, host);
+
+	return rc;
+}
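+
+/*
+ * Usage sketch (illustrative only, not compiled): the typical tail of
+ * an LLD's probe routine once the host is allocated and its resources
+ * are mapped.  my_interrupt and my_sht are hypothetical LLD symbols:
+ *
+ *	return ata_host_activate(host, pdev->irq, my_interrupt,
+ *				 IRQF_SHARED, &my_sht);
+ */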
+
+/**
+ *	ata_port_detach - Detach ATA port in preparation of device removal
+ *	@ap: ATA port to be detached
+ *
+ *	Detach all ATA devices and the associated SCSI devices of @ap;
+ *	then, remove the associated SCSI host.  @ap is guaranteed to
+ *	be quiescent on return from this function.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_port_detach(struct ata_port *ap)
+{
+	unsigned long flags;
+	struct ata_link *link;
+	struct ata_device *dev;
+
+	if (!ap->ops->error_handler)
+		goto skip_eh;
+
+	/* tell EH we're leaving & flush EH */
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags |= ATA_PFLAG_UNLOADING;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* wait till EH commits suicide */
+	ata_port_wait_eh(ap);
+
+	/* it better be dead now */
+	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
+
+	cancel_delayed_work_sync(&ap->hotplug_task);
+
+ skip_eh:
+	/* clean up zpodd on port removal */
+	ata_for_each_link(link, ap, HOST_FIRST) {
+		ata_for_each_dev(dev, link, ALL) {
+			if (zpodd_dev_enabled(dev))
+				zpodd_exit(dev);
+		}
+	}
+	if (ap->pmp_link) {
+		int i;
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_tlink_delete(&ap->pmp_link[i]);
+	}
+	/* remove the associated SCSI host */
+	scsi_remove_host(ap->scsi_host);
+	ata_tport_delete(ap);
+}
+
+/**
+ *	ata_host_detach - Detach all ports of an ATA host
+ *	@host: Host to detach
+ *
+ *	Detach all ports of @host.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_host_detach(struct ata_host *host)
+{
+	int i;
+
+	for (i = 0; i < host->n_ports; i++)
+		ata_port_detach(host->ports[i]);
+
+	/* the host is dead now, dissociate ACPI */
+	ata_acpi_dissociate(host);
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ *	ata_pci_remove_one - PCI layer callback for device removal
+ *	@pdev: PCI device that was removed
+ *
+ *	PCI layer indicates to libata via this hook that hot-unplug or
+ *	module unload event has occurred.  Detach all ports.  Resource
+ *	release is handled via devres.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ */
+void ata_pci_remove_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+
+	ata_host_detach(host);
+}
+
+/* move to PCI subsystem */
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+{
+	unsigned long tmp = 0;
+
+	switch (bits->width) {
+	case 1: {
+		u8 tmp8 = 0;
+		pci_read_config_byte(pdev, bits->reg, &tmp8);
+		tmp = tmp8;
+		break;
+	}
+	case 2: {
+		u16 tmp16 = 0;
+		pci_read_config_word(pdev, bits->reg, &tmp16);
+		tmp = tmp16;
+		break;
+	}
+	case 4: {
+		u32 tmp32 = 0;
+		pci_read_config_dword(pdev, bits->reg, &tmp32);
+		tmp = tmp32;
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	tmp &= bits->mask;
+
+	return (tmp == bits->val) ? 1 : 0;
+}
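+
+/*
+ * Usage sketch (illustrative only, not compiled): a PATA driver can
+ * check whether a channel is enabled by testing a bit in PCI config
+ * space.  The initializer below means "read 1 byte at offset 0x41,
+ * mask it with 0x80 and compare against 0x80"; the offset and bit
+ * are hypothetical:
+ *
+ *	static const struct pci_bits my_enable_bits = {
+ *		0x41, 1, 0x80, 0x80
+ *	};
+ *
+ *	if (!pci_test_config_bits(pdev, &my_enable_bits))
+ *		return -ENOENT;
+ */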
+
+#ifdef CONFIG_PM
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+
+	if (mesg.event & PM_EVENT_SLEEP)
+		pci_set_power_state(pdev, PCI_D3hot);
+}
+
+int ata_pci_device_do_resume(struct pci_dev *pdev)
+{
+	int rc;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"failed to enable device after resume (%d)\n", rc);
+		return rc;
+	}
+
+	pci_set_master(pdev);
+	return 0;
+}
+
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	ata_pci_device_do_suspend(pdev, mesg);
+
+	return 0;
+}
+
+int ata_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc == 0)
+		ata_host_resume(host);
+	return rc;
+}
+#endif /* CONFIG_PM */
+
+#endif /* CONFIG_PCI */
+
+/**
+ *	ata_platform_remove_one - Platform layer callback for device removal
+ *	@pdev: Platform device that was removed
+ *
+ *	Platform layer indicates to libata via this hook that hot-unplug or
+ *	module unload event has occurred.  Detach all ports.  Resource
+ *	release is handled via devres.
+ *
+ *	LOCKING:
+ *	Inherited from platform layer (may sleep).
+ */
+int ata_platform_remove_one(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+
+	ata_host_detach(host);
+
+	return 0;
+}
+
+static int __init ata_parse_force_one(char **cur,
+				      struct ata_force_ent *force_ent,
+				      const char **reason)
+{
+	static const struct ata_force_param force_tbl[] __initconst = {
+		{ "40c",	.cbl		= ATA_CBL_PATA40 },
+		{ "80c",	.cbl		= ATA_CBL_PATA80 },
+		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
+		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
+		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
+		{ "sata",	.cbl		= ATA_CBL_SATA },
+		{ "1.5Gbps",	.spd_limit	= 1 },
+		{ "3.0Gbps",	.spd_limit	= 2 },
+		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
+		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
+		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
+		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
+		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
+		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
+		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
+		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
+		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
+		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
+		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
+		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
+		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
+		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
+		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
+		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
+		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
+		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
+		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
+		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
+		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
+		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
+		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
+		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
+		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
+		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
+		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
+		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
+		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
+		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
+	};
+	char *start = *cur, *p = *cur;
+	char *id, *val, *endp;
+	const struct ata_force_param *match_fp = NULL;
+	int nr_matches = 0, i;
+
+	/* find where this param ends and update *cur */
+	while (*p != '\0' && *p != ',')
+		p++;
+
+	if (*p == '\0')
+		*cur = p;
+	else
+		*cur = p + 1;
+
+	*p = '\0';
+
+	/* parse */
+	p = strchr(start, ':');
+	if (!p) {
+		val = strstrip(start);
+		goto parse_val;
+	}
+	*p = '\0';
+
+	id = strstrip(start);
+	val = strstrip(p + 1);
+
+	/* parse id */
+	p = strchr(id, '.');
+	if (p) {
+		*p++ = '\0';
+		force_ent->device = simple_strtoul(p, &endp, 10);
+		if (p == endp || *endp != '\0') {
+			*reason = "invalid device";
+			return -EINVAL;
+		}
+	}
+
+	force_ent->port = simple_strtoul(id, &endp, 10);
+	if (id == endp || *endp != '\0') {
+		*reason = "invalid port/link";
+		return -EINVAL;
+	}
+
+ parse_val:
+	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
+	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
+		const struct ata_force_param *fp = &force_tbl[i];
+
+		if (strncasecmp(val, fp->name, strlen(val)))
+			continue;
+
+		nr_matches++;
+		match_fp = fp;
+
+		if (strcasecmp(val, fp->name) == 0) {
+			nr_matches = 1;
+			break;
+		}
+	}
+
+	if (!nr_matches) {
+		*reason = "unknown value";
+		return -EINVAL;
+	}
+	if (nr_matches > 1) {
+		*reason = "ambiguous value";
+		return -EINVAL;
+	}
+
+	force_ent->param = *match_fp;
+
+	return 0;
+}
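+
+/*
+ * Parameter format sketch (illustrative only): each comma-separated
+ * entry parsed above is [PORT[.DEVICE]:]VALUE, where VALUE is one of
+ * the names in force_tbl.  For example:
+ *
+ *	libata.force=1:noncq,2.0:udma/33,norst
+ *
+ * disables NCQ on port 1, limits device 0 on port 2 to UDMA/33, and,
+ * since an entry without an ID inherits the previous one's ID, also
+ * applies norst to the same device.
+ */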
+
+static void __init ata_parse_force_param(void)
+{
+	int idx = 0, size = 1;
+	int last_port = -1, last_device = -1;
+	char *p, *cur, *next;
+
+	/* calculate maximum number of params and allocate force_tbl */
+	for (p = ata_force_param_buf; *p; p++)
+		if (*p == ',')
+			size++;
+
+	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
+	if (!ata_force_tbl) {
+		printk(KERN_WARNING "ata: failed to extend force table, "
+		       "libata.force ignored\n");
+		return;
+	}
+
+	/* parse and populate the table */
+	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
+		const char *reason = "";
+		struct ata_force_ent te = { .port = -1, .device = -1 };
+
+		next = cur;
+		if (ata_parse_force_one(&next, &te, &reason)) {
+			printk(KERN_WARNING "ata: failed to parse force "
+			       "parameter \"%s\" (%s)\n",
+			       cur, reason);
+			continue;
+		}
+
+		if (te.port == -1) {
+			te.port = last_port;
+			te.device = last_device;
+		}
+
+		ata_force_tbl[idx++] = te;
+
+		last_port = te.port;
+		last_device = te.device;
+	}
+
+	ata_force_tbl_size = idx;
+}
+
+static int __init ata_init(void)
+{
+	int rc;
+
+	ata_parse_force_param();
+
+	rc = ata_sff_init();
+	if (rc) {
+		kfree(ata_force_tbl);
+		return rc;
+	}
+
+	libata_transport_init();
+	ata_scsi_transport_template = ata_attach_transport();
+	if (!ata_scsi_transport_template) {
+		ata_sff_exit();
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+	return 0;
+
+err_out:
+	return rc;
+}
+
+static void __exit ata_exit(void)
+{
+	ata_release_transport(ata_scsi_transport_template);
+	libata_transport_exit();
+	ata_sff_exit();
+	kfree(ata_force_tbl);
+}
+
+subsys_initcall(ata_init);
+module_exit(ata_exit);
+
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
+
+int ata_ratelimit(void)
+{
+	return __ratelimit(&ratelimit);
+}
+
+/**
+ *	ata_msleep - ATA EH owner aware msleep
+ *	@ap: ATA port to attribute the sleep to
+ *	@msecs: duration to sleep in milliseconds
+ *
+ *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
+ *	ownership is released before going to sleep and reacquired
+ *	after the sleep is complete.  IOW, other ports sharing the
+ *	@ap->host will be allowed to own the EH while this task is
+ *	sleeping.
+ *
+ *	LOCKING:
+ *	Might sleep.
+ */
+void ata_msleep(struct ata_port *ap, unsigned int msecs)
+{
+	bool owns_eh = ap && ap->host->eh_owner == current;
+
+	if (owns_eh)
+		ata_eh_release(ap);
+
+	if (msecs < 20) {
+		unsigned long usecs = msecs * USEC_PER_MSEC;
+		usleep_range(usecs, usecs + 50);
+	} else {
+		msleep(msecs);
+	}
+
+	if (owns_eh)
+		ata_eh_acquire(ap);
+}
+
+/**
+ *	ata_wait_register - wait until register value changes
+ *	@ap: ATA port to wait register for, can be NULL
+ *	@reg: IO-mapped register
+ *	@mask: Mask to apply to read register value
+ *	@val: Wait condition
+ *	@interval: polling interval in milliseconds
+ *	@timeout: timeout in milliseconds
+ *
+ *	Waiting for some bits of register to change is a common
+ *	operation for ATA controllers.  This function reads 32bit LE
+ *	IO-mapped register @reg and tests for the following condition.
+ *
+ *	(*@reg & mask) != val
+ *
+ *	If the condition is met, it returns; otherwise, the process is
+ *	repeated after @interval until timeout.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	The final register value.
+ */
+u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
+		      unsigned long interval, unsigned long timeout)
+{
+	unsigned long deadline;
+	u32 tmp;
+
+	tmp = ioread32(reg);
+
+	/* Calculate timeout _after_ the first read to make sure
+	 * preceding writes reach the controller before starting to
+	 * eat away the timeout.
+	 */
+	deadline = ata_deadline(jiffies, timeout);
+
+	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
+		ata_msleep(ap, interval);
+		tmp = ioread32(reg);
+	}
+
+	return tmp;
+}
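+
+/*
+ * Usage sketch (illustrative only, not compiled): polling every 10ms,
+ * for up to one second, until a controller clears a busy bit in an
+ * MMIO status register.  mmio, MY_STATUS and MY_BUSY are hypothetical:
+ *
+ *	u32 status;
+ *
+ *	status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY, MY_BUSY,
+ *				   10, 1000);
+ *	if (status & MY_BUSY)
+ *		return -EBUSY;
+ */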
+
+/**
+ *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ *	@link: Link receiving the event
+ *
+ *	Test whether the received PHY event has to be ignored or not.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+	unsigned long lpm_timeout = link->last_lpm_change +
+				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+	/* if LPM is enabled, PHYRDY doesn't mean anything */
+	if (link->lpm_policy > ATA_LPM_MAX_POWER)
+		return true;
+
+	/* ignore the first PHY event after the LPM policy changed
+	 * as it might be spurious
+	 */
+	if ((link->flags & ATA_LFLAG_CHANGED) &&
+	    time_before(jiffies, lpm_timeout))
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
+/*
+ * Dummy port_ops
+ */
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+{
+	return AC_ERR_SYSTEM;
+}
+
+static void ata_dummy_error_handler(struct ata_port *ap)
+{
+	/* truly dummy */
+}
+
+struct ata_port_operations ata_dummy_port_ops = {
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= ata_dummy_qc_issue,
+	.error_handler		= ata_dummy_error_handler,
+	.sched_eh		= ata_std_sched_eh,
+	.end_eh			= ata_std_end_eh,
+};
+
+const struct ata_port_info ata_dummy_port_info = {
+	.port_ops		= &ata_dummy_port_ops,
+};
+
+/*
+ * Utility print functions
+ */
+void ata_port_printk(const struct ata_port *ap, const char *level,
+		     const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sata%u: %pV", level, ap->print_id, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(ata_port_printk);
+
+void ata_link_printk(const struct ata_link *link, const char *level,
+		     const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
+		printk("%sata%u.%02u: %pV",
+		       level, link->ap->print_id, link->pmp, &vaf);
+	else
+		printk("%sata%u: %pV",
+		       level, link->ap->print_id, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(ata_link_printk);
+
+void ata_dev_printk(const struct ata_device *dev, const char *level,
+		    const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sata%u.%02u: %pV",
+	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
+	       &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(ata_dev_printk);
+
+void ata_print_version(const struct device *dev, const char *version)
+{
+	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
+}
+EXPORT_SYMBOL(ata_print_version);
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers.  As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_base_port_ops);
+EXPORT_SYMBOL_GPL(sata_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
+EXPORT_SYMBOL_GPL(ata_link_next);
+EXPORT_SYMBOL_GPL(ata_dev_next);
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
+EXPORT_SYMBOL_GPL(ata_host_init);
+EXPORT_SYMBOL_GPL(ata_host_alloc);
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
+EXPORT_SYMBOL_GPL(ata_host_start);
+EXPORT_SYMBOL_GPL(ata_host_register);
+EXPORT_SYMBOL_GPL(ata_host_activate);
+EXPORT_SYMBOL_GPL(ata_host_detach);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
+EXPORT_SYMBOL_GPL(ata_mode_string);
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
+EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
+EXPORT_SYMBOL_GPL(ata_dev_disable);
+EXPORT_SYMBOL_GPL(sata_set_spd);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
+EXPORT_SYMBOL_GPL(sata_link_debounce);
+EXPORT_SYMBOL_GPL(sata_link_resume);
+EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
+EXPORT_SYMBOL_GPL(ata_std_prereset);
+EXPORT_SYMBOL_GPL(sata_link_hardreset);
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+EXPORT_SYMBOL_GPL(ata_std_postreset);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_pair);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
+EXPORT_SYMBOL_GPL(ata_msleep);
+EXPORT_SYMBOL_GPL(ata_wait_register);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+EXPORT_SYMBOL_GPL(sata_scr_valid);
+EXPORT_SYMBOL_GPL(sata_scr_read);
+EXPORT_SYMBOL_GPL(sata_scr_write);
+EXPORT_SYMBOL_GPL(sata_scr_write_flush);
+EXPORT_SYMBOL_GPL(ata_link_online);
+EXPORT_SYMBOL_GPL(ata_link_offline);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL_GPL(ata_host_suspend);
+EXPORT_SYMBOL_GPL(ata_host_resume);
+#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ata_id_string);
+EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
+EXPORT_SYMBOL_GPL(ata_timing_compute);
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
+EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_resume);
+#endif /* CONFIG_PM */
+#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
+EXPORT_SYMBOL_GPL(ata_port_desc);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
+#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
+EXPORT_SYMBOL_GPL(ata_link_abort);
+EXPORT_SYMBOL_GPL(ata_port_abort);
+EXPORT_SYMBOL_GPL(ata_port_freeze);
+EXPORT_SYMBOL_GPL(sata_async_notification);
+EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
+EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
+EXPORT_SYMBOL_GPL(ata_do_eh);
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
+
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
+EXPORT_SYMBOL_GPL(ata_cable_sata);
+EXPORT_SYMBOL_GPL(ata_host_get);
+EXPORT_SYMBOL_GPL(ata_host_put);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
new file mode 100644
index 0000000..01306c0
--- /dev/null
+++ b/drivers/ata/libata-eh.c
@@ -0,0 +1,4191 @@
+/*
+ *  libata-eh.c - libata error handling
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2006 Tejun Heo <htejun@gmail.com>
+ *
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License as
+ *  published by the Free Software Foundation; either version 2, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ *  USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include "../scsi/scsi_transport_api.h"
+
+#include <linux/libata.h>
+
+#include <trace/events/libata.h>
+#include "libata.h"
+
+enum {
+	/* speed down verdicts */
+	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
+	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
+	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
+	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
+
+	/* error flags */
+	ATA_EFLAG_IS_IO			= (1 << 0),
+	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
+	ATA_EFLAG_OLD_ER                = (1 << 31),
+
+	/* error categories */
+	ATA_ECAT_NONE			= 0,
+	ATA_ECAT_ATA_BUS		= 1,
+	ATA_ECAT_TOUT_HSM		= 2,
+	ATA_ECAT_UNK_DEV		= 3,
+	ATA_ECAT_DUBIOUS_NONE		= 4,
+	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
+	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
+	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
+	ATA_ECAT_NR			= 8,
+
+	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
+
+	/* always put at least this amount of time between resets */
+	ATA_EH_RESET_COOL_DOWN		=  5000,
+
+	/* Waiting in ->prereset can never be reliable.  It's
+	 * sometimes nice to wait there but it can't be depended upon;
+	 * otherwise, we wouldn't be resetting.  Just give it enough
+	 * time for most drives to spin up.
+	 */
+	ATA_EH_PRERESET_TIMEOUT		= 10000,
+	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
+
+	ATA_EH_UA_TRIES			= 5,
+
+	/* probe speed down parameters, see ata_eh_schedule_probe() */
+	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
+	ATA_EH_PROBE_TRIALS		= 2,
+};
+
+/* The following table determines how we sequence resets.  Each entry
+ * represents timeout for that try.  The first try can be soft or
+ * hardreset.  All others are hardreset if available.  In most cases
+ * the first reset w/ 10sec timeout should succeed.  Following entries
+ * are mostly for error handling, hotplug and those outlier devices that
+ * take an exceptionally long time to recover from reset.
+ */
+static const unsigned long ata_eh_reset_timeouts[] = {
+	10000,	/* most drives spin up by 10sec */
+	10000,	/* > 99% working drives spin up before 20sec */
+	35000,	/* give > 30 secs of idleness for outlier devices */
+	 5000,	/* and sweet one last chance */
+	ULONG_MAX, /* > 1 min has elapsed, give up */
+};
+
+static const unsigned long ata_eh_identify_timeouts[] = {
+	 5000,	/* covers > 99% of successes and not too boring on failures */
+	10000,  /* combined time till here is enough even for media access */
+	30000,	/* for true idiots */
+	ULONG_MAX,
+};
+
+static const unsigned long ata_eh_flush_timeouts[] = {
+	15000,	/* be generous with flush */
+	15000,  /* ditto */
+	30000,	/* and even more generous */
+	ULONG_MAX,
+};
+
+static const unsigned long ata_eh_other_timeouts[] = {
+	 5000,	/* same rationale as identify timeout */
+	10000,	/* ditto */
+	/* but no merciful 30sec for other commands, it just isn't worth it */
+	ULONG_MAX,
+};
+
+struct ata_eh_cmd_timeout_ent {
+	const u8		*commands;
+	const unsigned long	*timeouts;
+};
+
+/* The following table determines timeouts to use for EH internal
+ * commands.  Each table entry is a command class and matches the
+ * commands the entry applies to and the timeout table to use.
+ *
+ * On the retry after a command timed out, the next timeout value from
+ * the table is used.  If the table doesn't contain further entries,
+ * the last value is used.
+ *
+ * ehc->cmd_timeout_idx keeps track of which timeout to use per
+ * command class, so if SET_FEATURES times out on the first try, the
+ * next try will use the second timeout value only for that class.
+ */
+#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
+static const struct ata_eh_cmd_timeout_ent
+ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
+	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
+	  .timeouts = ata_eh_identify_timeouts, },
+	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
+	  .timeouts = ata_eh_other_timeouts, },
+	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
+	  .timeouts = ata_eh_flush_timeouts },
+};
+#undef CMDS
+
+static void __ata_port_freeze(struct ata_port *ap);
+#ifdef CONFIG_PM
+static void ata_eh_handle_port_suspend(struct ata_port *ap);
+static void ata_eh_handle_port_resume(struct ata_port *ap);
+#else /* CONFIG_PM */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{ }
+
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{ }
+#endif /* CONFIG_PM */
+
+static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
+				 const char *fmt, va_list args)
+{
+	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+				     ATA_EH_DESC_LEN - ehi->desc_len,
+				     fmt, args);
+}
+
+/**
+ *	__ata_ehi_push_desc - push error description without adding separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_push_desc - push error description with separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *	If @ehi->desc is not empty, ", " is added in-between.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	if (ehi->desc_len)
+		__ata_ehi_push_desc(ehi, ", ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_clear_desc - clean error description
+ *	@ehi: target EHI
+ *
+ *	Clear @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+	ehi->desc[0] = '\0';
+	ehi->desc_len = 0;
+}
+
+/**
+ *	ata_port_desc - append port description
+ *	@ap: target ATA port
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to port
+ *	description.  If port description is not empty, " " is added
+ *	in-between.  This function is to be used while initializing
+ *	ata_host.  The description is printed on host registration.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
+{
+	va_list args;
+
+	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
+
+	if (ap->link.eh_info.desc_len)
+		__ata_ehi_push_desc(&ap->link.eh_info, " ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
+	va_end(args);
+}
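+/*
+ * Illustrative sketch of typical use from a host driver during port
+ * initialization ("irq" and "mmio_base" are hypothetical locals):
+ *
+ *	ata_port_desc(ap, "irq %d", irq);
+ *	ata_port_desc(ap, "mmio 0x%llx", (unsigned long long)mmio_base);
+ *
+ * Both fragments end up space-separated in the single description line
+ * printed when the host is registered.
+ */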
+
+#ifdef CONFIG_PCI
+
+/**
+ *	ata_port_pbar_desc - append PCI BAR description
+ *	@ap: target ATA port
+ *	@bar: target PCI BAR
+ *	@offset: offset into PCI BAR
+ *	@name: name of the area
+ *
+ *	If @offset is negative, this function formats a string which
+ *	contains the name, address, size and type of the BAR and
+ *	appends it to the port description.  If @offset is zero or
+ *	positive, only the name and offset address are appended.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+			const char *name)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	char *type = "";
+	unsigned long long start, len;
+
+	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+		type = "m";
+	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+		type = "i";
+
+	start = (unsigned long long)pci_resource_start(pdev, bar);
+	len = (unsigned long long)pci_resource_len(pdev, bar);
+
+	if (offset < 0)
+		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
+	else
+		ata_port_desc(ap, "%s 0x%llx", name,
+				start + (unsigned long long)offset);
+}
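+/*
+ * Illustrative sketch: a PCI host driver can describe the whole BAR and
+ * a per-port window inside it (BAR number and offsets are hypothetical):
+ *
+ *	ata_port_pbar_desc(ap, 5, -1, "abar");
+ *	ata_port_pbar_desc(ap, 5, 0x100 + ap->port_no * 0x80, "port");
+ *
+ * The first call appends something like "abar m8192@0xf0000000", the
+ * second only the name and offset address, e.g. "port 0xf0000100".
+ */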
+
+#endif /* CONFIG_PCI */
+
+static int ata_lookup_timeout_table(u8 cmd)
+{
+	int i;
+
+	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
+		const u8 *cur;
+
+		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
+			if (*cur == cmd)
+				return i;
+	}
+
+	return -1;
+}
+
+/**
+ *	ata_internal_cmd_timeout - determine timeout for an internal command
+ *	@dev: target device
+ *	@cmd: internal command to be issued
+ *
+ *	Determine timeout for internal command @cmd for @dev.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	Determined timeout.
+ */
+unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return ATA_EH_CMD_DFL_TIMEOUT;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
+}
+
+/**
+ *	ata_internal_cmd_timed_out - notification for internal command timeout
+ *	@dev: target device
+ *	@cmd: internal command which timed out
+ *
+ *	Notify EH that internal command @cmd for @dev timed out.  This
+ *	function should be called only for commands whose timeouts are
+ *	determined using ata_internal_cmd_timeout().
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int ent = ata_lookup_timeout_table(cmd);
+	int idx;
+
+	if (ent < 0)
+		return;
+
+	idx = ehc->cmd_timeout_idx[dev->devno][ent];
+	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
+		ehc->cmd_timeout_idx[dev->devno][ent]++;
+}
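+/*
+ * For example, IDENTIFY (ATA_CMD_ID_ATA) starts out with the 5000ms
+ * entry of ata_eh_identify_timeouts above.  After a timeout
+ * notification, the next lookup returns the following entry:
+ *
+ *	t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	// 5000
+ *	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
+ *	t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	// 10000
+ *
+ * Once the next entry would be ULONG_MAX, the index stops advancing
+ * and the last finite value keeps being used.
+ */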
+
+static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
+			     unsigned int err_mask)
+{
+	struct ata_ering_entry *ent;
+
+	WARN_ON(!err_mask);
+
+	ering->cursor++;
+	ering->cursor %= ATA_ERING_SIZE;
+
+	ent = &ering->ring[ering->cursor];
+	ent->eflags = eflags;
+	ent->err_mask = err_mask;
+	ent->timestamp = get_jiffies_64();
+}
+
+static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
+{
+	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
+
+	if (ent->err_mask)
+		return ent;
+	return NULL;
+}
+
+int ata_ering_map(struct ata_ering *ering,
+		  int (*map_fn)(struct ata_ering_entry *, void *),
+		  void *arg)
+{
+	int idx, rc = 0;
+	struct ata_ering_entry *ent;
+
+	idx = ering->cursor;
+	do {
+		ent = &ering->ring[idx];
+		if (!ent->err_mask)
+			break;
+		rc = map_fn(ent, arg);
+		if (rc)
+			break;
+		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
+	} while (idx != ering->cursor);
+
+	return rc;
+}
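+/*
+ * Note the iteration order: ata_ering_map() starts at the most recent
+ * entry (ering->cursor) and walks backwards in time, stopping at the
+ * first unused slot or when @map_fn returns non-zero.  A callback that
+ * only cares about sufficiently recent errors can thus return a
+ * negative value to end the walk early, as speed_down_verdict_cb()
+ * below does.
+ */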
+
+static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	ent->eflags |= ATA_EFLAG_OLD_ER;
+	return 0;
+}
+
+static void ata_ering_clear(struct ata_ering *ering)
+{
+	ata_ering_map(ering, ata_ering_clear_cb, NULL);
+}
+
+static unsigned int ata_eh_dev_action(struct ata_device *dev)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+
+	return ehc->i.action | ehc->i.dev_action[dev->devno];
+}
+
+static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
+				struct ata_eh_info *ehi, unsigned int action)
+{
+	struct ata_device *tdev;
+
+	if (!dev) {
+		ehi->action &= ~action;
+		ata_for_each_dev(tdev, link, ALL)
+			ehi->dev_action[tdev->devno] &= ~action;
+	} else {
+		/* doesn't make sense for port-wide EH actions */
+		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
+
+		/* break ehi->action into ehi->dev_action */
+		if (ehi->action & action) {
+			ata_for_each_dev(tdev, link, ALL)
+				ehi->dev_action[tdev->devno] |=
+					ehi->action & action;
+			ehi->action &= ~action;
+		}
+
+		/* turn off the specified per-dev action */
+		ehi->dev_action[dev->devno] &= ~action;
+	}
+}
+
+/**
+ *	ata_eh_acquire - acquire EH ownership
+ *	@ap: ATA port to acquire EH ownership for
+ *
+ *	Acquire EH ownership for @ap.  This is the basic exclusion
+ *	mechanism for ports sharing a host.  Only one port hanging off
+ *	the same host can claim the ownership of EH.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_eh_acquire(struct ata_port *ap)
+{
+	mutex_lock(&ap->host->eh_mutex);
+	WARN_ON_ONCE(ap->host->eh_owner);
+	ap->host->eh_owner = current;
+}
+
+/**
+ *	ata_eh_release - release EH ownership
+ *	@ap: ATA port to release EH ownership for
+ *
+ *	Release EH ownership for @ap.  The caller must have acquired
+ *	EH ownership using ata_eh_acquire() previously.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_eh_release(struct ata_port *ap)
+{
+	WARN_ON_ONCE(ap->host->eh_owner != current);
+	ap->host->eh_owner = NULL;
+	mutex_unlock(&ap->host->eh_mutex);
+}
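+/*
+ * Long sleeps in EH context should be done with ownership released so
+ * that EH on sibling ports sharing the host can make progress.  When
+ * called by the current owner, ata_msleep() does this dance
+ * internally; the effect is roughly:
+ *
+ *	ata_eh_release(ap);
+ *	msleep(msecs);
+ *	ata_eh_acquire(ap);
+ */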
+
+static void ata_eh_unload(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/* Restore SControl IPM and SPD for the next driver and
+	 * disable attached devices.
+	 */
+	ata_for_each_link(link, ap, PMP_FIRST) {
+		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
+		ata_for_each_dev(dev, link, ALL)
+			ata_dev_disable(dev);
+	}
+
+	/* freeze and set UNLOADED */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_port_freeze(ap);			/* won't be thawed */
+	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
+	ap->pflags |= ATA_PFLAG_UNLOADED;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_scsi_error - SCSI layer error handler callback
+ *	@host: SCSI host on which error occurred
+ *
+ *	Handles SCSI-layer-thrown error events.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ *
+ *	RETURNS:
+ *	Zero.
+ */
+void ata_scsi_error(struct Scsi_Host *host)
+{
+	struct ata_port *ap = ata_shost_to_port(host);
+	unsigned long flags;
+	LIST_HEAD(eh_work_q);
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(host->host_lock, flags);
+	list_splice_init(&host->eh_cmd_q, &eh_work_q);
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
+
+	/* If the timeout raced with normal completion and there is
+	   nothing to recover (nr_timedout == 0), why exactly are we
+	   doing error recovery? */
+	ata_scsi_port_error_handler(host, ap);
+
+	/* finish or retry handled scmd's and clean up */
+	WARN_ON(!list_empty(&eh_work_q));
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_scsi_cmd_error_handler - error callback for a list of commands
+ * @host:	scsi host containing the port
+ * @ap:		ATA port within the host
+ * @eh_work_q:	list of commands to process
+ *
+ * Process the given list of commands and return those that have
+ * finished to ap->eh_done_q.  This function is the first part of the
+ * libata error handler, which processes a given list of failed commands.
+ */
+void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+				struct list_head *eh_work_q)
+{
+	int i;
+	unsigned long flags;
+
+	/* make sure sff pio task is not running */
+	ata_sff_flush_pio_task(ap);
+
+	/* synchronize with host lock and sort out timeouts */
+
+	/* For new EH, all qcs are finished in one of three ways -
+	 * normal completion, error completion, and SCSI timeout.
+	 * Both completions can race against SCSI timeout.  When normal
+	 * completion wins, the qc never reaches EH.  When error
+	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
+	 *
+	 * When SCSI timeout wins, things are a bit more complex.
+	 * Normal or error completion can occur after the timeout but
+	 * before this point.  In such cases, both types of
+	 * completions are honored.  A scmd is determined to have
+	 * timed out iff its associated qc is active and not failed.
+	 */
+	spin_lock_irqsave(ap->lock, flags);
+	if (ap->ops->error_handler) {
+		struct scsi_cmnd *scmd, *tmp;
+		int nr_timedout = 0;
+
+		/* This must occur under ap->lock as we don't want
+		   a polled recovery to race the real interrupt handler.
+
+		   The lost_interrupt handler checks for any completed but
+		   non-notified command and completes much like an IRQ handler.
+
+		   We then fall into the error recovery code which will treat
+		   this as if normal completion won the race. */
+
+		if (ap->ops->lost_interrupt)
+			ap->ops->lost_interrupt(ap);
+
+		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
+			struct ata_queued_cmd *qc;
+
+			ata_qc_for_each_raw(ap, qc, i) {
+				if (qc->flags & ATA_QCFLAG_ACTIVE &&
+				    qc->scsicmd == scmd)
+					break;
+			}
+
+			if (i < ATA_MAX_QUEUE) {
+				/* the scmd has an associated qc */
+				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
+					/* which hasn't failed yet, timeout */
+					qc->err_mask |= AC_ERR_TIMEOUT;
+					qc->flags |= ATA_QCFLAG_FAILED;
+					nr_timedout++;
+				}
+			} else {
+				/* Normal completion occurred after
+				 * SCSI timeout but before this point.
+				 * Successfully complete it.
+				 */
+				scmd->retries = scmd->allowed;
+				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+			}
+		}
+
+		/* If we have timed out qcs, they belong to EH from
+		 * this point on, but the state of the controller is
+		 * unknown.  Freeze the port to make sure the IRQ
+		 * handler doesn't diddle with those qcs.  This must
+		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
+		 */
+		if (nr_timedout)
+			__ata_port_freeze(ap);
+
+		/* initialize eh_tries */
+		ap->eh_tries = ATA_EH_MAX_TRIES;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
+
+/**
+ * ata_scsi_port_error_handler - recover the port after the commands
+ * @host:	SCSI host containing the port
+ * @ap:		the ATA port
+ *
+ * Handle the recovery of the port @ap after all the commands
+ * have been recovered.
+ */
+void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+{
+	unsigned long flags;
+
+	/* invoke error handler */
+	if (ap->ops->error_handler) {
+		struct ata_link *link;
+
+		/* acquire EH ownership */
+		ata_eh_acquire(ap);
+ repeat:
+		/* kill fast drain timer */
+		del_timer_sync(&ap->fastdrain_timer);
+
+		/* process port resume request */
+		ata_eh_handle_port_resume(ap);
+
+		/* fetch & clear EH info */
+		spin_lock_irqsave(ap->lock, flags);
+
+		ata_for_each_link(link, ap, HOST_FIRST) {
+			struct ata_eh_context *ehc = &link->eh_context;
+			struct ata_device *dev;
+
+			memset(&link->eh_context, 0, sizeof(link->eh_context));
+			link->eh_context.i = link->eh_info;
+			memset(&link->eh_info, 0, sizeof(link->eh_info));
+
+			ata_for_each_dev(dev, link, ENABLED) {
+				int devno = dev->devno;
+
+				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+				if (ata_ncq_enabled(dev))
+					ehc->saved_ncq_enabled |= 1 << devno;
+			}
+		}
+
+		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
+
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* invoke EH, skip if unloading or suspended */
+		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
+			ap->ops->error_handler(ap);
+		else {
+			/* if unloading, commence suicide */
+			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
+			    !(ap->pflags & ATA_PFLAG_UNLOADED))
+				ata_eh_unload(ap);
+			ata_eh_finish(ap);
+		}
+
+		/* process port suspend request */
+		ata_eh_handle_port_suspend(ap);
+
+		/* Exception might have happened after ->error_handler
+		 * recovered the port but before this point.  Repeat
+		 * EH in such case.
+		 */
+		spin_lock_irqsave(ap->lock, flags);
+
+		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
+			if (--ap->eh_tries) {
+				spin_unlock_irqrestore(ap->lock, flags);
+				goto repeat;
+			}
+			ata_port_err(ap,
+				     "EH pending after %d tries, giving up\n",
+				     ATA_EH_MAX_TRIES);
+			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+		}
+
+		/* this run is complete, make sure EH info is clear */
+		ata_for_each_link(link, ap, HOST_FIRST)
+			memset(&link->eh_info, 0, sizeof(link->eh_info));
+
+		/* end eh (clear host_eh_scheduled) while holding
+		 * ap->lock such that if exception occurs after this
+		 * point but before EH completion, SCSI midlayer will
+		 * re-initiate EH.
+		 */
+		ap->ops->end_eh(ap);
+
+		spin_unlock_irqrestore(ap->lock, flags);
+		ata_eh_release(ap);
+	} else {
+		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
+		ap->ops->eng_timeout(ap);
+	}
+
+	scsi_eh_flush_done_q(&ap->eh_done_q);
+
+	/* clean up */
+	spin_lock_irqsave(ap->lock, flags);
+
+	if (ap->pflags & ATA_PFLAG_LOADING)
+		ap->pflags &= ~ATA_PFLAG_LOADING;
+	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+		!(ap->flags & ATA_FLAG_SAS_HOST))
+		schedule_delayed_work(&ap->hotplug_task, 0);
+
+	if (ap->pflags & ATA_PFLAG_RECOVERED)
+		ata_port_info(ap, "EH complete\n");
+
+	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
+
+	/* tell wait_eh that we're done */
+	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
+	wake_up_all(&ap->eh_wait_q);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
+
+/**
+ *	ata_port_wait_eh - Wait for the currently pending EH to complete
+ *	@ap: Port to wait EH for
+ *
+ *	Wait until the currently pending EH is complete.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_port_wait_eh(struct ata_port *ap)
+{
+	unsigned long flags;
+	DEFINE_WAIT(wait);
+
+ retry:
+	spin_lock_irqsave(ap->lock, flags);
+
+	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock_irqrestore(ap->lock, flags);
+		schedule();
+		spin_lock_irqsave(ap->lock, flags);
+	}
+	finish_wait(&ap->eh_wait_q, &wait);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* make sure SCSI EH is complete */
+	if (scsi_host_in_recovery(ap->scsi_host)) {
+		ata_msleep(ap, 10);
+		goto retry;
+	}
+}
+EXPORT_SYMBOL_GPL(ata_port_wait_eh);
+
+static int ata_eh_nr_in_flight(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned int tag;
+	int nr = 0;
+
+	/* count only non-internal commands */
+	ata_qc_for_each(ap, qc, tag) {
+		if (qc)
+			nr++;
+	}
+
+	return nr;
+}
+
+void ata_eh_fastdrain_timerfn(struct timer_list *t)
+{
+	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
+	unsigned long flags;
+	int cnt;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	cnt = ata_eh_nr_in_flight(ap);
+
+	/* are we done? */
+	if (!cnt)
+		goto out_unlock;
+
+	if (cnt == ap->fastdrain_cnt) {
+		struct ata_queued_cmd *qc;
+		unsigned int tag;
+
+		/* No progress during the last interval, tag all
+		 * in-flight qcs as timed out and freeze the port.
+		 */
+		ata_qc_for_each(ap, qc, tag) {
+			if (qc)
+				qc->err_mask |= AC_ERR_TIMEOUT;
+		}
+
+		ata_port_freeze(ap);
+	} else {
+		/* some qcs have finished, give it another chance */
+		ap->fastdrain_cnt = cnt;
+		ap->fastdrain_timer.expires =
+			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+		add_timer(&ap->fastdrain_timer);
+	}
+
+ out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ *	@ap: target ATA port
+ *	@fastdrain: activate fast drain
+ *
+ *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ *	is non-zero and EH wasn't pending before.  Fast drain ensures
+ *	that EH kicks in in a timely manner.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+{
+	int cnt;
+
+	/* already scheduled? */
+	if (ap->pflags & ATA_PFLAG_EH_PENDING)
+		return;
+
+	ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+	if (!fastdrain)
+		return;
+
+	/* do we have in-flight qcs? */
+	cnt = ata_eh_nr_in_flight(ap);
+	if (!cnt)
+		return;
+
+	/* activate fast drain */
+	ap->fastdrain_cnt = cnt;
+	ap->fastdrain_timer.expires =
+		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+	add_timer(&ap->fastdrain_timer);
+}
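+/*
+ * Fast drain timeline, for illustration: with 4 qcs in flight when EH
+ * is scheduled, the timer fires after ATA_EH_FASTDRAIN_INTERVAL (3s).
+ * If, say, only 2 qcs remain, progress was made and the timer is
+ * re-armed for another interval; if the count hasn't moved, all
+ * remaining qcs are marked AC_ERR_TIMEOUT and the port is frozen so
+ * that EH can take over immediately.
+ */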
+
+/**
+ *	ata_qc_schedule_eh - schedule qc for error handling
+ *	@qc: command to schedule error handling for
+ *
+ *	Schedule error handling for @qc.  EH will kick in as soon as
+ *	other commands are drained.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct request_queue *q = qc->scsicmd->device->request_queue;
+	unsigned long flags;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	qc->flags |= ATA_QCFLAG_FAILED;
+	ata_eh_set_pending(ap, 1);
+
+	/* The following will fail if timeout has already expired.
+	 * ata_scsi_error() takes care of such scmds on EH entry.
+	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
+	 * this function completes.
+	 */
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_abort_request(qc->scsicmd->request);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
+ * @ap: ATA port to schedule EH for
+ *
+ *	LOCKING: inherited from ata_port_schedule_eh
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_std_sched_eh(struct ata_port *ap)
+{
+	WARN_ON(!ap->ops->error_handler);
+
+	if (ap->pflags & ATA_PFLAG_INITIALIZING)
+		return;
+
+	ata_eh_set_pending(ap, 1);
+	scsi_schedule_eh(ap->scsi_host);
+
+	DPRINTK("port EH scheduled\n");
+}
+EXPORT_SYMBOL_GPL(ata_std_sched_eh);
+
+/**
+ * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
+ * @ap: ATA port to end EH for
+ *
+ * In the libata object model there is a 1:1 mapping of ata_port to
+ * shost, so host fields can be directly manipulated under ap->lock.  In
+ * the libsas case we need to hold a lock at the ha level to coordinate
+ * these events.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_std_end_eh(struct ata_port *ap)
+{
+	struct Scsi_Host *host = ap->scsi_host;
+
+	host->host_eh_scheduled = 0;
+}
+EXPORT_SYMBOL(ata_std_end_eh);
+
+/**
+ *	ata_port_schedule_eh - schedule error handling without a qc
+ *	@ap: ATA port to schedule EH for
+ *
+ *	Schedule error handling for @ap.  EH will kick in as soon as
+ *	all commands are drained.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_port_schedule_eh(struct ata_port *ap)
+{
+	/* see: ata_std_sched_eh, unless you know better */
+	ap->ops->sched_eh(ap);
+}
+
+static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
+{
+	struct ata_queued_cmd *qc;
+	int tag, nr_aborted = 0;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	/* we're gonna abort all commands, no need for fast drain */
+	ata_eh_set_pending(ap, 0);
+
+	/* include internal tag in iteration */
+	ata_qc_for_each_with_internal(ap, qc, tag) {
+		if (qc && (!link || qc->dev->link == link)) {
+			qc->flags |= ATA_QCFLAG_FAILED;
+			ata_qc_complete(qc);
+			nr_aborted++;
+		}
+	}
+
+	if (!nr_aborted)
+		ata_port_schedule_eh(ap);
+
+	return nr_aborted;
+}
+
+/**
+ *	ata_link_abort - abort all qc's on the link
+ *	@link: ATA link to abort qc's for
+ *
+ *	Abort all active qc's on @link and schedule EH.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of aborted qc's.
+ */
+int ata_link_abort(struct ata_link *link)
+{
+	return ata_do_link_abort(link->ap, link);
+}
+
+/**
+ *	ata_port_abort - abort all qc's on the port
+ *	@ap: ATA port to abort qc's for
+ *
+ *	Abort all active qc's of @ap and schedule EH.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of aborted qc's.
+ */
+int ata_port_abort(struct ata_port *ap)
+{
+	return ata_do_link_abort(ap, NULL);
+}
+
+/**
+ *	__ata_port_freeze - freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	This function is called when HSM violation or some other
+ *	condition disrupts normal operation of the port.  Frozen port
+ *	is not allowed to perform any operation until the port is
+ *	thawed, which usually follows a successful reset.
+ *
+ *	ap->ops->freeze() callback can be used for freezing the port
+ *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
+ *	port cannot be frozen hardware-wise, the interrupt handler
+ *	must ack and clear interrupts unconditionally while the port
+ *	is frozen.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void __ata_port_freeze(struct ata_port *ap)
+{
+	WARN_ON(!ap->ops->error_handler);
+
+	if (ap->ops->freeze)
+		ap->ops->freeze(ap);
+
+	ap->pflags |= ATA_PFLAG_FROZEN;
+
+	DPRINTK("ata%u port frozen\n", ap->print_id);
+}
+
+/**
+ *	ata_port_freeze - abort & freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	Abort and freeze @ap.  The freeze operation must be called
+ *	first, because some hardware requires special operations
+ *	before the taskfile registers are accessible.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of aborted commands.
+ */
+int ata_port_freeze(struct ata_port *ap)
+{
+	int nr_aborted;
+
+	WARN_ON(!ap->ops->error_handler);
+
+	__ata_port_freeze(ap);
+	nr_aborted = ata_port_abort(ap);
+
+	return nr_aborted;
+}
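+/*
+ * Illustrative use from an LLD interrupt handler (under ap->lock): on a
+ * fatal condition such as an HSM violation, abort everything and keep
+ * the port quiet until EH resets and thaws it ("fatal_error" and the
+ * surrounding handler are hypothetical):
+ *
+ *	if (fatal_error) {
+ *		qc->err_mask |= AC_ERR_HSM;
+ *		ata_port_freeze(ap);
+ *		return;
+ *	}
+ */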
+
+/**
+ *	sata_async_notification - SATA async notification handler
+ *	@ap: ATA port where async notification is received
+ *
+ *	Handler to be called when async notification via SDB FIS is
+ *	received.  This function schedules EH if necessary.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	1 if EH is scheduled, 0 otherwise.
+ */
+int sata_async_notification(struct ata_port *ap)
+{
+	u32 sntf;
+	int rc;
+
+	if (!(ap->flags & ATA_FLAG_AN))
+		return 0;
+
+	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+	if (rc == 0)
+		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+	if (!sata_pmp_attached(ap) || rc) {
+		/* PMP is not attached or SNTF is not available */
+		if (!sata_pmp_attached(ap)) {
+			/* PMP is not attached.  Check whether ATAPI
+			 * AN is configured.  If so, notify media
+			 * change.
+			 */
+			struct ata_device *dev = ap->link.device;
+
+			if ((dev->class == ATA_DEV_ATAPI) &&
+			    (dev->flags & ATA_DFLAG_AN))
+				ata_scsi_media_change_notify(dev);
+			return 0;
+		} else {
+			/* PMP is attached but SNTF is not available.
+			 * ATAPI async media change notification is
+			 * not used.  The PMP must be reporting PHY
+			 * status change, schedule EH.
+			 */
+			ata_port_schedule_eh(ap);
+			return 1;
+		}
+	} else {
+		/* PMP is attached and SNTF is available */
+		struct ata_link *link;
+
+		/* check and notify ATAPI AN */
+		ata_for_each_link(link, ap, EDGE) {
+			if (!(sntf & (1 << link->pmp)))
+				continue;
+
+			if ((link->device->class == ATA_DEV_ATAPI) &&
+			    (link->device->flags & ATA_DFLAG_AN))
+				ata_scsi_media_change_notify(link->device);
+		}
+
+		/* If PMP is reporting that PHY status of some
+		 * downstream ports has changed, schedule EH.
+		 */
+		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
+			ata_port_schedule_eh(ap);
+			return 1;
+		}
+
+		return 0;
+	}
+}
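+/*
+ * SNTF is a bitmap: bit N set means the device behind PMP fan-out port
+ * N sent a notification, while bit SATA_PMP_CTRL_PORT belongs to the
+ * PMP's own control port and indicates downstream PHY status change,
+ * which is why it escalates to full EH above.
+ */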
+
+/**
+ *	ata_eh_freeze_port - EH helper to freeze port
+ *	@ap: ATA port to freeze
+ *
+ *	Freeze @ap.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_freeze_port(struct ata_port *ap)
+{
+	unsigned long flags;
+
+	if (!ap->ops->error_handler)
+		return;
+
+	spin_lock_irqsave(ap->lock, flags);
+	__ata_port_freeze(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_thaw_port - EH helper to thaw port
+ *	@ap: ATA port to thaw
+ *
+ *	Thaw frozen port @ap.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_thaw_port(struct ata_port *ap)
+{
+	unsigned long flags;
+
+	if (!ap->ops->error_handler)
+		return;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->pflags &= ~ATA_PFLAG_FROZEN;
+
+	if (ap->ops->thaw)
+		ap->ops->thaw(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	DPRINTK("ata%u port thawed\n", ap->print_id);
+}
+
+static void ata_eh_scsidone(struct scsi_cmnd *scmd)
+{
+	/* nada */
+}
+
+static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+	qc->scsidone = ata_eh_scsidone;
+	__ata_qc_complete(qc);
+	WARN_ON(ata_tag_valid(qc->tag));
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+}
+
+/**
+ *	ata_eh_qc_complete - Complete an active ATA command from EH
+ *	@qc: Command to complete
+ *
+ *	Indicate to the mid and upper layers that an ATA command has
+ *	completed.  To be used from EH.
+ */
+void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	scmd->retries = scmd->allowed;
+	__ata_eh_qc_complete(qc);
+}
+
+/**
+ *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
+ *	@qc: Command to retry
+ *
+ *	Indicate to the mid and upper layers that an ATA command
+ *	should be retried.  To be used from EH.
+ *
+ *	SCSI midlayer limits the number of retries to scmd->allowed.
+ *	scmd->allowed is incremented for commands which get retried
+ *	due to unrelated failures (qc->err_mask is zero).
+ */
+void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	if (!qc->err_mask)
+		scmd->allowed++;
+	__ata_eh_qc_complete(qc);
+}
+
+/**
+ *	ata_dev_disable - disable ATA device
+ *	@dev: ATA device to disable
+ *
+ *	Disable @dev.
+ *
+ *	LOCKING:
+ *	EH context.
+ */
+void ata_dev_disable(struct ata_device *dev)
+{
+	if (!ata_dev_enabled(dev))
+		return;
+
+	if (ata_msg_drv(dev->link->ap))
+		ata_dev_warn(dev, "disabled\n");
+	ata_acpi_on_disable(dev);
+	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
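+	/* Per the ATA_DEV_* numbering, bumping the class turns an
+	 * enabled class into its *_UNSUP sibling (e.g. ATA_DEV_ATA ->
+	 * ATA_DEV_ATA_UNSUP), which ata_dev_enabled() treats as
+	 * disabled.
+	 */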
+	dev->class++;
+
+	/* From now till the next successful probe, ering is used to
+	 * track probe failures.  Clear accumulated device error info.
+	 */
+	ata_ering_clear(&dev->ering);
+}
+
+/**
+ *	ata_eh_detach_dev - detach ATA device
+ *	@dev: ATA device to detach
+ *
+ *	Detach @dev.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_detach_dev(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	unsigned long flags;
+
+	ata_dev_disable(dev);
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	dev->flags &= ~ATA_DFLAG_DETACH;
+
+	if (ata_scsi_offline_dev(dev)) {
+		dev->flags |= ATA_DFLAG_DETACHED;
+		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+	}
+
+	/* clear per-dev EH info */
+	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
+	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
+	ehc->saved_xfer_mode[dev->devno] = 0;
+	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_about_to_do - about to perform eh_action
+ *	@link: target ATA link
+ *	@dev: target ATA dev for per-dev action (can be NULL)
+ *	@action: action about to be performed
+ *
+ *	Called just before performing EH actions to clear related bits
+ *	in @link->eh_info such that eh actions are not unnecessarily
+ *	repeated.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+			unsigned int action)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_info *ehi = &link->eh_info;
+	struct ata_eh_context *ehc = &link->eh_context;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_eh_clear_action(link, dev, ehi, action);
+
+	/* About to take EH action, set RECOVERED.  Ignore actions on
+	 * slave links as master will do them again.
+	 */
+	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
+		ap->pflags |= ATA_PFLAG_RECOVERED;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_done - EH action complete
+ *	@link: ATA link for which EH actions are complete
+ *	@dev: target ATA dev for per-dev action (can be NULL)
+ *	@action: action just completed
+ *
+ *	Called right after performing EH actions to clear related bits
+ *	in @link->eh_context.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+		 unsigned int action)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+
+	ata_eh_clear_action(link, dev, &ehc->i, action);
+}
+
+/**
+ *	ata_err_string - convert err_mask to descriptive string
+ *	@err_mask: error mask to convert to string
+ *
+ *	Convert @err_mask to descriptive string.  Errors are
+ *	prioritized according to severity and only the most severe
+ *	error is reported.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Descriptive string for @err_mask
+ */
+static const char *ata_err_string(unsigned int err_mask)
+{
+	if (err_mask & AC_ERR_HOST_BUS)
+		return "host bus error";
+	if (err_mask & AC_ERR_ATA_BUS)
+		return "ATA bus error";
+	if (err_mask & AC_ERR_TIMEOUT)
+		return "timeout";
+	if (err_mask & AC_ERR_HSM)
+		return "HSM violation";
+	if (err_mask & AC_ERR_SYSTEM)
+		return "internal error";
+	if (err_mask & AC_ERR_MEDIA)
+		return "media error";
+	if (err_mask & AC_ERR_INVALID)
+		return "invalid argument";
+	if (err_mask & AC_ERR_DEV)
+		return "device error";
+	if (err_mask & AC_ERR_NCQ)
+		return "NCQ error";
+	if (err_mask & AC_ERR_NODEV_HINT)
+		return "Polling detection error";
+	return "unknown error";
+}
+
+/**
+ *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
+ *	@dev: Device to read log page 10h from
+ *	@tag: Resulting tag of the failed command
+ *	@tf: Resulting taskfile registers of the failed command
+ *
+ *	Read log page 10h to obtain NCQ error details and clear error
+ *	condition.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int ata_eh_read_log_10h(struct ata_device *dev,
+			       int *tag, struct ata_taskfile *tf)
+{
+	u8 *buf = dev->link->ap->sector_buf;
+	unsigned int err_mask;
+	u8 csum;
+	int i;
+
+	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
+	if (err_mask)
+		return -EIO;
+
+	csum = 0;
+	for (i = 0; i < ATA_SECT_SIZE; i++)
+		csum += buf[i];
+	if (csum)
+		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+			     csum);
+
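+	/* byte 0: bit 7 (NQ) set means no NCQ error is outstanding,
+	 * bits 4:0 carry the tag of the failed command
+	 */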
+	if (buf[0] & 0x80)
+		return -ENOENT;
+
+	*tag = buf[0] & 0x1f;
+
+	tf->command = buf[2];
+	tf->feature = buf[3];
+	tf->lbal = buf[4];
+	tf->lbam = buf[5];
+	tf->lbah = buf[6];
+	tf->device = buf[7];
+	tf->hob_lbal = buf[8];
+	tf->hob_lbam = buf[9];
+	tf->hob_lbah = buf[10];
+	tf->nsect = buf[12];
+	tf->hob_nsect = buf[13];
+	if (ata_id_has_ncq_autosense(dev->id))
+		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+
+	return 0;
+}
+
+/**
+ *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ *	@dev: target ATAPI device
+ *	@r_sense_key: out parameter for sense_key
+ *
+ *	Perform ATAPI TEST_UNIT_READY.
+ *
+ *	LOCKING:
+ *	EH context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+	if (err_mask == AC_ERR_DEV)
+		*r_sense_key = tf.feature >> 4;
+	return err_mask;
+}
+
+/**
+ *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
+ *	@qc: qc to perform REQUEST_SENSE_DATA_EXT to
+ *	@cmd: scsi command for which the sense code should be set
+ *
+ *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
+ *	SENSE.  This function is an EH helper.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_request_sense(struct ata_queued_cmd *qc,
+				 struct scsi_cmnd *cmd)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
+		ata_dev_warn(dev, "sense data available but port frozen\n");
+		return;
+	}
+
+	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
+		return;
+
+	if (!ata_id_sense_reporting_enabled(dev->id)) {
+		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
+		return;
+	}
+
+	DPRINTK("ATA request sense\n");
+
+	ata_tf_init(dev, &tf);
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+	tf.command = ATA_CMD_REQ_SENSE_DATA;
+	tf.protocol = ATA_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/* Ignore err_mask; ATA_ERR might be set */
+	if (tf.command & ATA_SENSE) {
+		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	} else {
+		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
+			     tf.command, err_mask);
+	}
+}
+
+/**
+ *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
+ *	@dev: device to perform REQUEST_SENSE to
+ *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+ *	@dfl_sense_key: default sense key to use
+ *
+ *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
+ *	SENSE.  This function is an EH helper.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure
+ */
+unsigned int atapi_eh_request_sense(struct ata_device *dev,
+					   u8 *sense_buf, u8 dfl_sense_key)
+{
+	u8 cdb[ATAPI_CDB_LEN] =
+		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
+	struct ata_port *ap = dev->link->ap;
+	struct ata_taskfile tf;
+
+	DPRINTK("ATAPI request sense\n");
+
+	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+
+	/* initialize sense_buf with the error register,
+	 * for the case where they are -not- overwritten
+	 */
+	sense_buf[0] = 0x70;
+	sense_buf[2] = dfl_sense_key;
+
+	/* some devices time out if garbage left in tf */
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+
+	/* is it pointless to prefer PIO for "safety reasons"? */
+	if (ap->flags & ATA_FLAG_PIO_DMA) {
+		tf.protocol = ATAPI_PROT_DMA;
+		tf.feature |= ATAPI_PKT_DMA;
+	} else {
+		tf.protocol = ATAPI_PROT_PIO;
+		tf.lbam = SCSI_SENSE_BUFFERSIZE;
+		tf.lbah = 0;
+	}
+
+	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
+}
+
+/**
+ *	ata_eh_analyze_serror - analyze SError for a failed port
+ *	@link: ATA link to analyze SError for
+ *
+ *	Analyze SError if available and further determine cause of
+ *	failure.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_analyze_serror(struct ata_link *link)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	u32 serror = ehc->i.serror;
+	unsigned int err_mask = 0, action = 0;
+	u32 hotplug_mask;
+
+	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_RESET;
+	}
+	if (serror & SERR_PROTOCOL) {
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_RESET;
+	}
+	if (serror & SERR_INTERNAL) {
+		err_mask |= AC_ERR_SYSTEM;
+		action |= ATA_EH_RESET;
+	}
+
+	/* Determine whether a hotplug event has occurred.  Both
+	 * SError.N/X are considered hotplug events for enabled or
+	 * host links.  For disabled PMP links, only N bit is
+	 * considered as X bit is left at 1 for link plugging.
+	 */
+	if (link->lpm_policy > ATA_LPM_MAX_POWER)
+		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
+	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
+		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
+	else
+		hotplug_mask = SERR_PHYRDY_CHG;
+
+	if (serror & hotplug_mask)
+		ata_ehi_hotplugged(&ehc->i);
+
+	ehc->i.err_mask |= err_mask;
+	ehc->i.action |= action;
+}
+
+/**
+ *	ata_eh_analyze_ncq_error - analyze NCQ error
+ *	@link: ATA link to analyze NCQ error for
+ *
+ *	Read log page 10h, determine the offending qc and acquire
+ *	error status TF.  For NCQ device errors, all LLDDs have to do
+ *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
+ *	care of the rest.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_eh_analyze_ncq_error(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev = link->device;
+	struct ata_queued_cmd *qc;
+	struct ata_taskfile tf;
+	int tag, rc;
+
+	/* if frozen, we can't do much */
+	if (ap->pflags & ATA_PFLAG_FROZEN)
+		return;
+
+	/* is it NCQ device error? */
+	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+		return;
+
+	/* has LLDD analyzed already? */
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_FAILED))
+			continue;
+
+		if (qc->err_mask)
+			return;
+	}
+
+	/* okay, this error is ours */
+	memset(&tf, 0, sizeof(tf));
+	rc = ata_eh_read_log_10h(dev, &tag, &tf);
+	if (rc) {
+		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+			     rc);
+		return;
+	}
+
+	if (!(link->sactive & (1 << tag))) {
+		ata_link_err(link, "log page 10h reported inactive tag %d\n",
+			     tag);
+		return;
+	}
+
+	/* we've got the perpetrator, condemn it */
+	qc = __ata_qc_from_tag(ap, tag);
+	memcpy(&qc->result_tf, &tf, sizeof(tf));
+	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+	if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+		char sense_key, asc, ascq;
+
+		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+		ascq = qc->result_tf.auxiliary & 0xff;
+		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
+		ata_scsi_set_sense_information(dev, qc->scsicmd,
+					       &qc->result_tf);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	}
+
+	ehc->i.err_mask &= ~AC_ERR_DEV;
+}
+
+/**
+ *	ata_eh_analyze_tf - analyze taskfile of a failed qc
+ *	@qc: qc to analyze
+ *	@tf: Taskfile registers to analyze
+ *
+ *	Analyze taskfile of @qc and further determine cause of
+ *	failure.  This function also requests ATAPI sense data if
+ *	available.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	Determined recovery action
+ */
+static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+				      const struct ata_taskfile *tf)
+{
+	unsigned int tmp, action = 0;
+	u8 stat = tf->command, err = tf->feature;
+
+	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
+		qc->err_mask |= AC_ERR_HSM;
+		return ATA_EH_RESET;
+	}
+
+	if (stat & (ATA_ERR | ATA_DF)) {
+		qc->err_mask |= AC_ERR_DEV;
+		/*
+		 * Sense data reporting does not work if the
+		 * device fault bit is set.
+		 */
+		if (stat & ATA_DF)
+			stat &= ~ATA_SENSE;
+	} else {
+		return 0;
+	}
+
+	switch (qc->dev->class) {
+	case ATA_DEV_ATA:
+	case ATA_DEV_ZAC:
+		if (stat & ATA_SENSE)
+			ata_eh_request_sense(qc, qc->scsicmd);
+		if (err & ATA_ICRC)
+			qc->err_mask |= AC_ERR_ATA_BUS;
+		if (err & (ATA_UNC | ATA_AMNF))
+			qc->err_mask |= AC_ERR_MEDIA;
+		if (err & ATA_IDNF)
+			qc->err_mask |= AC_ERR_INVALID;
+		break;
+
+	case ATA_DEV_ATAPI:
+		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+			tmp = atapi_eh_request_sense(qc->dev,
+						qc->scsicmd->sense_buffer,
+						qc->result_tf.feature >> 4);
+			if (!tmp)
+				qc->flags |= ATA_QCFLAG_SENSE_VALID;
+			else
+				qc->err_mask |= tmp;
+		}
+	}
+
+	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+		int ret = scsi_check_sense(qc->scsicmd);
+		/*
+		 * SUCCESS here means that the sense code could be
+		 * evaluated and should be passed to the upper layers
+		 * for correct evaluation.
+		 * FAILED means the sense code could not be interpreted
+		 * and the device would need to be reset.
+		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
+		 * command would need to be retried.
+		 */
+		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
+			qc->flags |= ATA_QCFLAG_RETRY;
+			qc->err_mask |= AC_ERR_OTHER;
+		} else if (ret != SUCCESS) {
+			qc->err_mask |= AC_ERR_HSM;
+		}
+	}
+	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
+		action |= ATA_EH_RESET;
+
+	return action;
+}
+
+static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
+				   int *xfer_ok)
+{
+	int base = 0;
+
+	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
+		*xfer_ok = 1;
+
+	if (!*xfer_ok)
+		base = ATA_ECAT_DUBIOUS_NONE;
+
+	if (err_mask & AC_ERR_ATA_BUS)
+		return base + ATA_ECAT_ATA_BUS;
+
+	if (err_mask & AC_ERR_TIMEOUT)
+		return base + ATA_ECAT_TOUT_HSM;
+
+	if (eflags & ATA_EFLAG_IS_IO) {
+		if (err_mask & AC_ERR_HSM)
+			return base + ATA_ECAT_TOUT_HSM;
+		if ((err_mask &
+		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+			return base + ATA_ECAT_UNK_DEV;
+	}
+
+	return 0;
+}
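+/*
+ * The DUBIOUS_* categories are the base categories offset by
+ * ATA_ECAT_DUBIOUS_NONE.  For example, a timeout seen before any
+ * transfer has been verified categorizes as ATA_ECAT_DUBIOUS_NONE +
+ * ATA_ECAT_TOUT_HSM == ATA_ECAT_DUBIOUS_TOUT_HSM.
+ */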
+
+struct speed_down_verdict_arg {
+	u64 since;
+	int xfer_ok;
+	int nr_errors[ATA_ECAT_NR];
+};
+
+static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	struct speed_down_verdict_arg *arg = void_arg;
+	int cat;
+
+	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
+		return -1;
+
+	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
+				      &arg->xfer_ok);
+	arg->nr_errors[cat]++;
+
+	return 0;
+}
+
+/**
+ *	ata_eh_speed_down_verdict - Determine speed down verdict
+ *	@dev: Device of interest
+ *
+ *	This function examines error ring of @dev and determines
+ *	whether NCQ needs to be turned off, transfer speed should be
+ *	stepped down, or falling back to PIO is necessary.
+ *
+ *	ECAT_ATA_BUS	: ATA_BUS error for any command
+ *
+ *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
+ *			  IO commands
+ *
+ *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
+ *
+ *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
+ *			  data transfer hasn't been verified.
+ *
+ *	Verdicts are
+ *
+ *	NCQ_OFF		: Turn off NCQ.
+ *
+ *	SPEED_DOWN	: Speed down transfer speed but don't fall back
+ *			  to PIO.
+ *
+ *	FALLBACK_TO_PIO	: Fall back to PIO.
+ *
+ *	Even if multiple verdicts are returned, only one action is
+ *	taken per error.  An action triggered by non-DUBIOUS errors
+ *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
+ *	This is to expedite speed down decisions right after device is
+ *	initially configured.
+ *
+ *	The following are speed down rules.  #1 and #2 deal with
+ *	DUBIOUS errors.
+ *
+ *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
+ *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
+ *
+ *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
+ *	   occurred during last 5 mins, NCQ_OFF.
+ *
+ *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
+ *	   occurred during last 5 mins, FALLBACK_TO_PIO.
+ *
+ *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
+ *	   during last 10 mins, NCQ_OFF.
+ *
+ *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
+ *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	OR of ATA_EH_SPDN_* flags.
+ */
+static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
+{
+	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
+	u64 j64 = get_jiffies_64();
+	struct speed_down_verdict_arg arg;
+	unsigned int verdict = 0;
+
+	/* scan past 5 mins of error history */
+	memset(&arg, 0, sizeof(arg));
+	arg.since = j64 - min(j64, j5mins);
+	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+
+	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
+	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
+		verdict |= ATA_EH_SPDN_SPEED_DOWN |
+			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
+
+	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
+	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
+		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
+
+	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
+	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
+		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
+
+	/* scan past 10 mins of error history */
+	memset(&arg, 0, sizeof(arg));
+	arg.since = j64 - min(j64, j10mins);
+	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+
+	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
+		verdict |= ATA_EH_SPDN_NCQ_OFF;
+
+	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
+	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
+	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
+		verdict |= ATA_EH_SPDN_SPEED_DOWN;
+
+	return verdict;
+}
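+/*
+ * Worked example: two DUBIOUS_TOUT_HSM errors within the last 5 minutes
+ * trip both rule #1 and rule #2 above, yielding a verdict of
+ * SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF | KEEP_ERRORS, with the error
+ * ring preserved for later decisions.
+ */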
+
+/**
+ *	ata_eh_speed_down - record error and speed down if necessary
+ *	@dev: Failed device
+ *	@eflags: mask of ATA_EFLAG_* flags
+ *	@err_mask: err_mask of the error
+ *
+ *	Record error and examine error history to determine whether
+ *	adjusting transmission speed is necessary.  It also sets
+ *	transmission limits appropriately if such adjustment is
+ *	necessary.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	Determined recovery action.
+ */
+static unsigned int ata_eh_speed_down(struct ata_device *dev,
+				unsigned int eflags, unsigned int err_mask)
+{
+	struct ata_link *link = ata_dev_phys_link(dev);
+	int xfer_ok = 0;
+	unsigned int verdict;
+	unsigned int action = 0;
+
+	/* don't bother if Cat-0 error */
+	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
+		return 0;
+
+	/* record error and determine whether speed down is necessary */
+	ata_ering_record(&dev->ering, eflags, err_mask);
+	verdict = ata_eh_speed_down_verdict(dev);
+
+	/* turn off NCQ? */
+	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
+	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
+			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
+		dev->flags |= ATA_DFLAG_NCQ_OFF;
+		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
+		goto done;
+	}
+
+	/* speed down? */
+	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
+		/* speed down SATA link speed if possible */
+		if (sata_down_spd_limit(link, 0) == 0) {
+			action |= ATA_EH_RESET;
+			goto done;
+		}
+
+		/* lower transfer mode */
+		if (dev->spdn_cnt < 2) {
+			static const int dma_dnxfer_sel[] =
+				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
+			static const int pio_dnxfer_sel[] =
+				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
+			int sel;
+
+			if (dev->xfer_shift != ATA_SHIFT_PIO)
+				sel = dma_dnxfer_sel[dev->spdn_cnt];
+			else
+				sel = pio_dnxfer_sel[dev->spdn_cnt];
+
+			dev->spdn_cnt++;
+
+			if (ata_down_xfermask_limit(dev, sel) == 0) {
+				action |= ATA_EH_RESET;
+				goto done;
+			}
+		}
+	}
+
+	/* Fall back to PIO?  Slowing down to PIO is meaningless for
+	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
+	 */
+	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
+	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
+	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
+		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
+			dev->spdn_cnt = 0;
+			action |= ATA_EH_RESET;
+			goto done;
+		}
+	}
+
+	return 0;
+ done:
+	/* device has been slowed down, blow error history */
+	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
+		ata_ering_clear(&dev->ering);
+	return action;
+}
+
+/**
+ *	ata_eh_worth_retry - analyze error and decide whether to retry
+ *	@qc: qc to possibly retry
+ *
+ *	Look at the cause of the error and decide if a retry
+ * 	might be useful or not.  We don't want to retry media errors
+ *	because the drive itself has probably already taken 10-30 seconds
+ *	doing its own internal retries before reporting the failure.
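+ *
+ *	For example, AC_ERR_MEDIA is never retried, commands from the
+ *	fs stack (ATA_QCFLAG_IO) are otherwise always retried, and a
+ *	bare AC_ERR_DEV on a non-fs command is not.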
+ */
+static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
+{
+	if (qc->err_mask & AC_ERR_MEDIA)
+		return 0;	/* don't retry media errors */
+	if (qc->flags & ATA_QCFLAG_IO)
+		return 1;	/* otherwise retry anything from fs stack */
+	if (qc->err_mask & AC_ERR_INVALID)
+		return 0;	/* don't retry these */
+	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
+}
+
+/**
+ *	ata_eh_quiet - check if we need to be quiet about a command error
+ *	@qc: qc to check
+ *
+ *	Look at the qc flags and its scsi command request flags to determine
+ *	if we need to be quiet about the command failure.
+ */
+static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
+{
+	if (qc->scsicmd &&
+	    qc->scsicmd->request->rq_flags & RQF_QUIET)
+		qc->flags |= ATA_QCFLAG_QUIET;
+	return qc->flags & ATA_QCFLAG_QUIET;
+}
+
+/**
+ *	ata_eh_link_autopsy - analyze error and determine recovery action
+ *	@link: host link to perform autopsy on
+ *
+ *	Analyze why @link failed and determine which recovery actions
+ *	are needed.  This function also sets more detailed AC_ERR_*
+ *	values and fills sense data for ATAPI CHECK SENSE.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_link_autopsy(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
+	struct ata_device *dev;
+	unsigned int all_err_mask = 0, eflags = 0;
+	int tag, nr_failed = 0, nr_quiet = 0;
+	u32 serror;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
+		return;
+
+	/* obtain and analyze SError */
+	rc = sata_scr_read(link, SCR_ERROR, &serror);
+	if (rc == 0) {
+		ehc->i.serror |= serror;
+		ata_eh_analyze_serror(link);
+	} else if (rc != -EOPNOTSUPP) {
+		/* SError read failed, force reset and probing */
+		ehc->i.probe_mask |= ATA_ALL_DEVICES;
+		ehc->i.action |= ATA_EH_RESET;
+		ehc->i.err_mask |= AC_ERR_OTHER;
+	}
+
+	/* analyze NCQ failure */
+	ata_eh_analyze_ncq_error(link);
+
+	/* any real error trumps AC_ERR_OTHER */
+	if (ehc->i.err_mask & ~AC_ERR_OTHER)
+		ehc->i.err_mask &= ~AC_ERR_OTHER;
+
+	all_err_mask |= ehc->i.err_mask;
+
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link)
+			continue;
+
+		/* inherit upper level err_mask */
+		qc->err_mask |= ehc->i.err_mask;
+
+		/* analyze TF */
+		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
+
+		/* DEV errors are probably spurious in case of ATA_BUS error */
+		if (qc->err_mask & AC_ERR_ATA_BUS)
+			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
+					  AC_ERR_INVALID);
+
+		/* any real error trumps unknown error */
+		if (qc->err_mask & ~AC_ERR_OTHER)
+			qc->err_mask &= ~AC_ERR_OTHER;
+
+		/*
+		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
+		 * layers will determine whether the command is worth retrying
+		 * based on the sense data and device class/type. Otherwise,
+		 * determine directly if the command is worth retrying using its
+		 * error mask and flags.
+		 */
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
+		else if (ata_eh_worth_retry(qc))
+			qc->flags |= ATA_QCFLAG_RETRY;
+
+		/* accumulate error info */
+		ehc->i.dev = qc->dev;
+		all_err_mask |= qc->err_mask;
+		if (qc->flags & ATA_QCFLAG_IO)
+			eflags |= ATA_EFLAG_IS_IO;
+		trace_ata_eh_link_autopsy_qc(qc);
+
+		/* Count quiet errors */
+		if (ata_eh_quiet(qc))
+			nr_quiet++;
+		nr_failed++;
+	}
+
+	/* If all failed commands requested silence, then be quiet */
+	if (nr_quiet == nr_failed)
+		ehc->i.flags |= ATA_EHI_QUIET;
+
+	/* enforce default EH actions */
+	if (ap->pflags & ATA_PFLAG_FROZEN ||
+	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
+		ehc->i.action |= ATA_EH_RESET;
+	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
+		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
+		ehc->i.action |= ATA_EH_REVALIDATE;
+
+	/* If we have offending qcs and the associated failed device,
+	 * perform per-dev EH action only on the offending device.
+	 */
+	if (ehc->i.dev) {
+		ehc->i.dev_action[ehc->i.dev->devno] |=
+			ehc->i.action & ATA_EH_PERDEV_MASK;
+		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
+	}
+
+	/* propagate timeout to host link */
+	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
+		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
+
+	/* record error and consider speeding down */
+	dev = ehc->i.dev;
+	if (!dev && (ata_link_max_devices(link) == 1 &&
+		     ata_dev_enabled(link->device)))
+		dev = link->device;
+
+	if (dev) {
+		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
+			eflags |= ATA_EFLAG_DUBIOUS_XFER;
+		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
+	}
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eh_autopsy - analyze error and determine recovery action
+ *	@ap: host port to perform autopsy on
+ *
+ *	Analyze all links of @ap and determine why they failed and
+ *	which recovery actions are needed.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_eh_autopsy(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	ata_for_each_link(link, ap, EDGE)
+		ata_eh_link_autopsy(link);
+
+	/* Handle the frigging slave link.  Autopsy is done similarly
+	 * but actions and flags are transferred over to the master
+	 * link and handled from there.
+	 */
+	if (ap->slave_link) {
+		struct ata_eh_context *mehc = &ap->link.eh_context;
+		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+		/* transfer control flags from master to slave */
+		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
+
+		/* perform autopsy on the slave link */
+		ata_eh_link_autopsy(ap->slave_link);
+
+		/* transfer actions from slave to master and clear slave */
+		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+		mehc->i.action		|= sehc->i.action;
+		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
+		mehc->i.flags		|= sehc->i.flags;
+		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+	}
+
+	/* Autopsy of fanout ports can affect host link autopsy.
+	 * Perform host link autopsy last.
+	 */
+	if (sata_pmp_attached(ap))
+		ata_eh_link_autopsy(&ap->link);
+}
+
+/**
+ *	ata_get_cmd_descript - get description for ATA command
+ *	@command: ATA command code to get description for
+ *
+ *	Return a textual description of the given command, or NULL if the
+ *	command is not known.
+ *
+ *	LOCKING:
+ *	None.
+ */
+const char *ata_get_cmd_descript(u8 command)
+{
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+	static const struct
+	{
+		u8 command;
+		const char *text;
+	} cmd_descr[] = {
+		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
+		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
+		{ ATA_CMD_STANDBY,		"STANDBY" },
+		{ ATA_CMD_IDLE,			"IDLE" },
+		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
+		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
+		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
+		{ ATA_CMD_NOP,			"NOP" },
+		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
+		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
+		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
+		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
+		{ ATA_CMD_SERVICE,		"SERVICE" },
+		{ ATA_CMD_READ,			"READ DMA" },
+		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
+		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
+		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
+		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
+		{ ATA_CMD_WRITE,		"WRITE DMA" },
+		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
+		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED" },
+		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
+		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
+		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
+		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
+		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
+		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
+		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
+		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
+		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
+		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
+		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
+		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
+		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
+		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
+		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
+		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
+		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
+		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
+		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
+		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
+		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
+		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
+		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
+		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
+		{ ATA_CMD_SLEEP,		"SLEEP" },
+		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
+		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
+		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
+		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
+		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
+		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
+		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
+		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
+		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
+		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
+		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
+		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
+		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
+		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
+		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
+		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
+		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
+		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
+		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
+		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
+		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
+		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
+		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
+		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
+		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
+		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
+		{ ATA_CMD_SMART,		"SMART" },
+		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
+		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
+		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
+		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
+		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
+		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
+		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
+		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
+		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
+		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
+		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
+		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
+		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
+		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
+		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
+		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
+		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
+		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
+		{ 0,				NULL } /* terminate list */
+	};
+
+	unsigned int i;
+	for (i = 0; cmd_descr[i].text; i++)
+		if (cmd_descr[i].command == command)
+			return cmd_descr[i].text;
+#endif
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
+
+/**
+ *	ata_eh_link_report - report error handling to user
+ *	@link: ATA link EH is going on
+ *
+ *	Report EH to user.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_eh_link_report(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
+	const char *frozen, *desc;
+	char tries_buf[6] = "";
+	int tag, nr_failed = 0;
+
+	if (ehc->i.flags & ATA_EHI_QUIET)
+		return;
+
+	desc = NULL;
+	if (ehc->i.desc[0] != '\0')
+		desc = ehc->i.desc;
+
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link ||
+		    ((qc->flags & ATA_QCFLAG_QUIET) &&
+		     qc->err_mask == AC_ERR_DEV))
+			continue;
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
+			continue;
+
+		nr_failed++;
+	}
+
+	if (!nr_failed && !ehc->i.err_mask)
+		return;
+
+	frozen = "";
+	if (ap->pflags & ATA_PFLAG_FROZEN)
+		frozen = " frozen";
+
+	if (ap->eh_tries < ATA_EH_MAX_TRIES)
+		snprintf(tries_buf, sizeof(tries_buf), " t%d",
+			 ap->eh_tries);
+
+	if (ehc->i.dev) {
+		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
+			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+			    ehc->i.err_mask, link->sactive, ehc->i.serror,
+			    ehc->i.action, frozen, tries_buf);
+		if (desc)
+			ata_dev_err(ehc->i.dev, "%s\n", desc);
+	} else {
+		ata_link_err(link, "exception Emask 0x%x "
+			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+			     ehc->i.err_mask, link->sactive, ehc->i.serror,
+			     ehc->i.action, frozen, tries_buf);
+		if (desc)
+			ata_link_err(link, "%s\n", desc);
+	}
+
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+	if (ehc->i.serror)
+		ata_link_err(link,
+		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
+		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
+		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
+		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
+		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
+		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
+		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
+		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
+		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
+		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
+		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
+		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
+		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
+		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
+		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
+		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
+		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
+		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+#endif
+
+	ata_qc_for_each_raw(ap, qc, tag) {
+		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
+		char data_buf[20] = "";
+		char cdb_buf[70] = "";
+
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
+			continue;
+
+		if (qc->dma_dir != DMA_NONE) {
+			static const char *dma_str[] = {
+				[DMA_BIDIRECTIONAL]	= "bidi",
+				[DMA_TO_DEVICE]		= "out",
+				[DMA_FROM_DEVICE]	= "in",
+			};
+			const char *prot_str = NULL;
+
+			switch (qc->tf.protocol) {
+			case ATA_PROT_UNKNOWN:
+				prot_str = "unknown";
+				break;
+			case ATA_PROT_NODATA:
+				prot_str = "nodata";
+				break;
+			case ATA_PROT_PIO:
+				prot_str = "pio";
+				break;
+			case ATA_PROT_DMA:
+				prot_str = "dma";
+				break;
+			case ATA_PROT_NCQ:
+				prot_str = "ncq dma";
+				break;
+			case ATA_PROT_NCQ_NODATA:
+				prot_str = "ncq nodata";
+				break;
+			case ATAPI_PROT_NODATA:
+				prot_str = "nodata";
+				break;
+			case ATAPI_PROT_PIO:
+				prot_str = "pio";
+				break;
+			case ATAPI_PROT_DMA:
+				prot_str = "dma";
+				break;
+			}
+			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
+				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
+		}
+
+		if (ata_is_atapi(qc->tf.protocol)) {
+			const u8 *cdb = qc->cdb;
+			size_t cdb_len = qc->dev->cdb_len;
+
+			if (qc->scsicmd) {
+				cdb = qc->scsicmd->cmnd;
+				cdb_len = qc->scsicmd->cmd_len;
+			}
+			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
+					      cdb, cdb_len);
+		} else {
+			const char *descr = ata_get_cmd_descript(cmd->command);
+			if (descr)
+				ata_dev_err(qc->dev, "failed command: %s\n",
+					    descr);
+		}
+
+		ata_dev_err(qc->dev,
+			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"tag %d%s\n         %s"
+			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"Emask 0x%x (%s)%s\n",
+			cmd->command, cmd->feature, cmd->nsect,
+			cmd->lbal, cmd->lbam, cmd->lbah,
+			cmd->hob_feature, cmd->hob_nsect,
+			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
+			cmd->device, qc->tag, data_buf, cdb_buf,
+			res->command, res->feature, res->nsect,
+			res->lbal, res->lbam, res->lbah,
+			res->hob_feature, res->hob_nsect,
+			res->hob_lbal, res->hob_lbam, res->hob_lbah,
+			res->device, qc->err_mask, ata_err_string(qc->err_mask),
+			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
+
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+				    ATA_SENSE | ATA_ERR)) {
+			if (res->command & ATA_BUSY)
+				ata_dev_err(qc->dev, "status: { Busy }\n");
+			else
+				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+				  res->command & ATA_DRDY ? "DRDY " : "",
+				  res->command & ATA_DF ? "DF " : "",
+				  res->command & ATA_DRQ ? "DRQ " : "",
+				  res->command & ATA_SENSE ? "SENSE " : "",
+				  res->command & ATA_ERR ? "ERR " : "");
+		}
+
+		if (cmd->command != ATA_CMD_PACKET &&
+		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+				     ATA_IDNF | ATA_ABORTED)))
+			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
+			  res->feature & ATA_ICRC ? "ICRC " : "",
+			  res->feature & ATA_UNC ? "UNC " : "",
+			  res->feature & ATA_AMNF ? "AMNF " : "",
+			  res->feature & ATA_IDNF ? "IDNF " : "",
+			  res->feature & ATA_ABORTED ? "ABRT " : "");
+#endif
+	}
+}
+
+/**
+ *	ata_eh_report - report error handling to user
+ *	@ap: ATA port to report EH about
+ *
+ *	Report EH to user.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_report(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	ata_for_each_link(link, ap, HOST_FIRST)
+		ata_eh_link_report(link);
+}
+
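+/*
+ * Invoke @reset on @link.  When @clear_classes is set, device classes
+ * are reset to ATA_DEV_UNKNOWN first so that stale classification
+ * results from a previous attempt cannot leak into this one.
+ */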
+static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
+			unsigned int *classes, unsigned long deadline,
+			bool clear_classes)
+{
+	struct ata_device *dev;
+
+	if (clear_classes)
+		ata_for_each_dev(dev, link, ALL)
+			classes[dev->devno] = ATA_DEV_UNKNOWN;
+
+	return reset(link, classes, deadline);
+}
+
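+/*
+ * Decide whether a follow-up softreset is needed after a hardreset.
+ * One is needed when the hardreset returned -EAGAIN (classification
+ * not available) or when the host link sits in front of a port
+ * multiplier, which must be probed with SRST.  Skipped when SRST is
+ * disallowed on the link or the link is offline.
+ */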
+static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
+{
+	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
+		return 0;
+	if (rc == -EAGAIN)
+		return 1;
+	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
+		return 1;
+	return 0;
+}
+
+int ata_eh_reset(struct ata_link *link, int classify,
+		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_link *slave = ap->slave_link;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
+	unsigned int *classes = ehc->classes;
+	unsigned int lflags = link->flags;
+	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
+	int max_tries = 0, try = 0;
+	struct ata_link *failed_link;
+	struct ata_device *dev;
+	unsigned long deadline, now;
+	ata_reset_fn_t reset;
+	unsigned long flags;
+	u32 sstatus;
+	int nr_unknown, rc;
+
+	/*
+	 * Prepare to reset
+	 */
+	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
+		max_tries++;
+	if (link->flags & ATA_LFLAG_RST_ONCE)
+		max_tries = 1;
+	if (link->flags & ATA_LFLAG_NO_HRST)
+		hardreset = NULL;
+	if (link->flags & ATA_LFLAG_NO_SRST)
+		softreset = NULL;
+
+	/* make sure each reset attempt is at least COOL_DOWN apart */
+	if (ehc->i.flags & ATA_EHI_DID_RESET) {
+		now = jiffies;
+		WARN_ON(time_after(ehc->last_reset, now));
+		deadline = ata_deadline(ehc->last_reset,
+					ATA_EH_RESET_COOL_DOWN);
+		if (time_before(now, deadline))
+			schedule_timeout_uninterruptible(deadline - now);
+	}
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags |= ATA_PFLAG_RESETTING;
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+
+	ata_for_each_dev(dev, link, ALL) {
+		/* If we issue an SRST then an ATA drive (not ATAPI)
+		 * may change configuration and be in PIO0 timing. If
+		 * we do a hard reset (or are coming from power on)
+		 * this is true for ATA or ATAPI. Until we've set a
+		 * suitable controller mode we should not touch the
+		 * bus as we may be talking too fast.
+		 */
+		dev->pio_mode = XFER_PIO_0;
+		dev->dma_mode = 0xff;
+
+		/* If the controller has a pio mode setup function
+		 * then use it to set the chipset to rights. Don't
+		 * touch the DMA setup as that will be dealt with when
+		 * configuring devices.
+		 */
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
+
+	/* prefer hardreset */
+	reset = NULL;
+	ehc->i.action &= ~ATA_EH_RESET;
+	if (hardreset) {
+		reset = hardreset;
+		ehc->i.action |= ATA_EH_HARDRESET;
+	} else if (softreset) {
+		reset = softreset;
+		ehc->i.action |= ATA_EH_SOFTRESET;
+	}
+
+	if (prereset) {
+		unsigned long deadline = ata_deadline(jiffies,
+						      ATA_EH_PRERESET_TIMEOUT);
+
+		if (slave) {
+			sehc->i.action &= ~ATA_EH_RESET;
+			sehc->i.action |= ehc->i.action;
+		}
+
+		rc = prereset(link, deadline);
+
+		/* If present, do prereset on slave link too.  Reset
+		 * is skipped iff both master and slave links report
+		 * -ENOENT or clear ATA_EH_RESET.
+		 */
+		if (slave && (rc == 0 || rc == -ENOENT)) {
+			int tmp;
+
+			tmp = prereset(slave, deadline);
+			if (tmp != -ENOENT)
+				rc = tmp;
+
+			ehc->i.action |= sehc->i.action;
+		}
+
+		if (rc) {
+			if (rc == -ENOENT) {
+				ata_link_dbg(link, "port disabled--ignoring\n");
+				ehc->i.action &= ~ATA_EH_RESET;
+
+				ata_for_each_dev(dev, link, ALL)
+					classes[dev->devno] = ATA_DEV_NONE;
+
+				rc = 0;
+			} else
+				ata_link_err(link,
+					     "prereset failed (errno=%d)\n",
+					     rc);
+			goto out;
+		}
+
+		/* prereset() might have cleared ATA_EH_RESET.  If so,
+		 * bang classes, thaw and return.
+		 */
+		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
+			ata_for_each_dev(dev, link, ALL)
+				classes[dev->devno] = ATA_DEV_NONE;
+			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
+			    ata_is_host_link(link))
+				ata_eh_thaw_port(ap);
+			rc = 0;
+			goto out;
+		}
+	}
+
+ retry:
+	/*
+	 * Perform reset
+	 */
+	if (ata_is_host_link(link))
+		ata_eh_freeze_port(ap);
+
+	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
+
+	if (reset) {
+		if (verbose)
+			ata_link_info(link, "%s resetting link\n",
+				      reset == softreset ? "soft" : "hard");
+
+		/* mark that this EH session started with reset */
+		ehc->last_reset = jiffies;
+		if (reset == hardreset)
+			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
+		else
+			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
+
+		rc = ata_do_reset(link, reset, classes, deadline, true);
+		if (rc && rc != -EAGAIN) {
+			failed_link = link;
+			goto fail;
+		}
+
+		/* hardreset slave link if existent */
+		if (slave && reset == hardreset) {
+			int tmp;
+
+			if (verbose)
+				ata_link_info(slave, "hard resetting link\n");
+
+			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+			tmp = ata_do_reset(slave, reset, classes, deadline,
+					   false);
+			switch (tmp) {
+			case -EAGAIN:
+				rc = -EAGAIN;
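+				/* fall through */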
+			case 0:
+				break;
+			default:
+				failed_link = slave;
+				rc = tmp;
+				goto fail;
+			}
+		}
+
+		/* perform follow-up SRST if necessary */
+		if (reset == hardreset &&
+		    ata_eh_followup_srst_needed(link, rc)) {
+			reset = softreset;
+
+			if (!reset) {
+				ata_link_err(link,
+	     "follow-up softreset required but no softreset available\n");
+				failed_link = link;
+				rc = -EINVAL;
+				goto fail;
+			}
+
+			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+			rc = ata_do_reset(link, reset, classes, deadline, true);
+			if (rc) {
+				failed_link = link;
+				goto fail;
+			}
+		}
+	} else {
+		if (verbose)
+			ata_link_info(link,
+	"no reset method available, skipping reset\n");
+		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
+			lflags |= ATA_LFLAG_ASSUME_ATA;
+	}
+
+	/*
+	 * Post-reset processing
+	 */
+	ata_for_each_dev(dev, link, ALL) {
+		/* After the reset, the device state is PIO 0 and the
+		 * controller state is undefined.  Reset also wakes up
+		 * drives from sleeping mode.
+		 */
+		dev->pio_mode = XFER_PIO_0;
+		dev->flags &= ~ATA_DFLAG_SLEEPING;
+
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
+			continue;
+
+		/* apply class override */
+		if (lflags & ATA_LFLAG_ASSUME_ATA)
+			classes[dev->devno] = ATA_DEV_ATA;
+		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
+			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
+	}
+
+	/* record current link speed */
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
+		link->sata_spd = (sstatus >> 4) & 0xf;
+	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
+		slave->sata_spd = (sstatus >> 4) & 0xf;
+
+	/* thaw the port */
+	if (ata_is_host_link(link))
+		ata_eh_thaw_port(ap);
+
+	/* postreset() should clear hardware SError.  Although SError
+	 * is cleared during link resume, clearing SError here is
+	 * necessary as some PHYs raise hotplug events after SRST.
+	 * This introduces a race condition where hotplug occurs between
+	 * reset and this point.  The race is mitigated by cross checking
+	 * link onlineness and classification result later.
+	 */
+	if (postreset) {
+		postreset(link, classes);
+		if (slave)
+			postreset(slave, classes);
+	}
+
+	/*
+	 * Some controllers can't be frozen very well and may set spurious
+	 * error conditions during reset.  Clear accumulated error
+	 * information and re-thaw the port if frozen.  As reset is the
+	 * final recovery action and we cross check link onlineness against
+	 * device classification later, no hotplug event is lost by this.
+	 */
+	spin_lock_irqsave(link->ap->lock, flags);
+	memset(&link->eh_info, 0, sizeof(link->eh_info));
+	if (slave)
+		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
+	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+	spin_unlock_irqrestore(link->ap->lock, flags);
+
+	if (ap->pflags & ATA_PFLAG_FROZEN)
+		ata_eh_thaw_port(ap);
+
+	/*
+	 * Make sure onlineness and classification result correspond.
+	 * Hotplug could have happened during reset and some
+	 * controllers fail to wait while a drive is spinning up after
+	 * being hotplugged causing misdetection.  By cross checking
+	 * link on/offlineness and classification result, those
+	 * conditions can be reliably detected and retried.
+	 */
+	nr_unknown = 0;
+	ata_for_each_dev(dev, link, ALL) {
+		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
+			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
+				ata_dev_dbg(dev, "link online but device misclassified\n");
+				classes[dev->devno] = ATA_DEV_NONE;
+				nr_unknown++;
+			}
+		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+			if (ata_class_enabled(classes[dev->devno]))
+				ata_dev_dbg(dev,
+					    "link offline, clearing class %d to NONE\n",
+					    classes[dev->devno]);
+			classes[dev->devno] = ATA_DEV_NONE;
+		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
+			ata_dev_dbg(dev,
+				    "link status unknown, clearing UNKNOWN to NONE\n");
+			classes[dev->devno] = ATA_DEV_NONE;
+		}
+	}
+
+	if (classify && nr_unknown) {
+		if (try < max_tries) {
+			ata_link_warn(link,
+				      "link online but %d devices misclassified, retrying\n",
+				      nr_unknown);
+			failed_link = link;
+			rc = -EAGAIN;
+			goto fail;
+		}
+		ata_link_warn(link,
+			      "link online but %d devices misclassified, "
+			      "device detection might fail\n", nr_unknown);
+	}
+
+	/* reset successful, schedule revalidation */
+	ata_eh_done(link, NULL, ATA_EH_RESET);
+	if (slave)
+		ata_eh_done(slave, NULL, ATA_EH_RESET);
+	ehc->last_reset = jiffies;		/* update to completion time */
+	ehc->i.action |= ATA_EH_REVALIDATE;
+	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
+
+	rc = 0;
+ out:
+	/* clear hotplug flag */
+	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+	if (slave)
+		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags &= ~ATA_PFLAG_RESETTING;
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc;
+
+ fail:
+	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
+	if (!ata_is_host_link(link) &&
+	    sata_scr_read(link, SCR_STATUS, &sstatus))
+		rc = -ERESTART;
+
+	if (try >= max_tries) {
+		/*
+		 * Thaw host port even if reset failed, so that the port
+		 * can be retried on the next phy event.  This risks
+		 * repeated EH runs but seems to be a better tradeoff than
+		 * shutting down a port after a botched hotplug attempt.
+		 */
+		if (ata_is_host_link(link))
+			ata_eh_thaw_port(ap);
+		goto out;
+	}
+
+	now = jiffies;
+	if (time_before(now, deadline)) {
+		unsigned long delta = deadline - now;
+
+		ata_link_warn(failed_link,
+			"reset failed (errno=%d), retrying in %u secs\n",
+			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
+
+		ata_eh_release(ap);
+		while (delta)
+			delta = schedule_timeout_uninterruptible(delta);
+		ata_eh_acquire(ap);
+	}
+
+	/*
+	 * While disks spin up behind a PMP, some controllers fail to send
+	 * SRST.  They need to be reset - as well as the PMP - before retrying.
+	 */
+	if (rc == -ERESTART) {
+		if (ata_is_host_link(link))
+			ata_eh_thaw_port(ap);
+		goto out;
+	}
+
+	if (try == max_tries - 1) {
+		sata_down_spd_limit(link, 0);
+		if (slave)
+			sata_down_spd_limit(slave, 0);
+	} else if (rc == -EPIPE)
+		sata_down_spd_limit(failed_link, 0);
+
+	if (hardreset)
+		reset = hardreset;
+	goto retry;
+}
+
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/*
+	 * This function can be thought of as an extended version of
+	 * ata_eh_about_to_do() specially crafted to accommodate the
+	 * requirements of ATA_EH_PARK handling. Since the EH thread
+	 * does not leave the do {} while () loop in ata_eh_recover as
+	 * long as the timeout for a park request to *one* device on
+	 * the port has not expired, and since we still want to pick
+	 * up park requests to other devices on the same port or
+	 * timeout updates for the same device, we have to pull
+	 * ATA_EH_PARK actions from eh_info into eh_context.i
+	 * ourselves at the beginning of each pass over the loop.
+	 *
+	 * Additionally, all write accesses to &ap->park_req_pending
+	 * through reinit_completion() (see below) or complete_all()
+	 * (see ata_scsi_park_store()) are protected by the host lock.
+	 * As a result, park_req_pending.done is guaranteed to be zero
+	 * on exit from this function, i.e. when ATA_EH_PARK actions
+	 * for *all* devices on port ap have been pulled into the
+	 * respective eh_context structs.  If, and only if,
+	 * park_req_pending.done is non-zero by the time we reach
+	 * wait_for_completion_timeout(), another ATA_EH_PARK action
+	 * has been scheduled for at least one of the devices on port
+	 * ap and we have to cycle over the do {} while () loop in
+	 * ata_eh_recover() again.
+	 */
+
+	spin_lock_irqsave(ap->lock, flags);
+	reinit_completion(&ap->park_req_pending);
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ALL) {
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			link->eh_context.i.dev_action[dev->devno] |=
+				ehi->dev_action[dev->devno] & ATA_EH_PARK;
+			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+		}
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+	if (park) {
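+		/*
+		 * IDLE IMMEDIATE with UNLOAD FEATURE (ATA8-ACS): feature
+		 * 0x44 requests a head unload, and lbal/lbam/lbah carry
+		 * the 0x4c/0x4e/0x55 signature.  On success the device is
+		 * expected to report 0xc4 in LBA low, which is checked
+		 * after command completion below.
+		 */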
+		ehc->unloaded_mask |= 1 << dev->devno;
+		tf.command = ATA_CMD_IDLEIMMEDIATE;
+		tf.feature = 0x44;
+		tf.lbal = 0x4c;
+		tf.lbam = 0x4e;
+		tf.lbah = 0x55;
+	} else {
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+		tf.command = ATA_CMD_CHK_POWER;
+	}
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol = ATA_PROT_NODATA;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (park && (err_mask || tf.lbal != 0xc4)) {
+		ata_dev_err(dev, "head unload failed!\n");
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+	}
+}
+
+static int ata_eh_revalidate_and_attach(struct ata_link *link,
+					struct ata_device **r_failed_dev)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev;
+	unsigned int new_mask = 0;
+	unsigned long flags;
+	int rc = 0;
+
+	DPRINTK("ENTER\n");
+
+	/* For PATA drive side cable detection to work, IDENTIFY must
+	 * be done backwards such that PDIAG- is released by the slave
+	 * device before the master device is identified.
+	 */
+	ata_for_each_dev(dev, link, ALL_REVERSE) {
+		unsigned int action = ata_eh_dev_action(dev);
+		unsigned int readid_flags = 0;
+
+		if (ehc->i.flags & ATA_EHI_DID_RESET)
+			readid_flags |= ATA_READID_POSTRESET;
+
+		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
+			WARN_ON(dev->class == ATA_DEV_PMP);
+
+			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+				rc = -EIO;
+				goto err;
+			}
+
+			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
+			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
+						readid_flags);
+			if (rc)
+				goto err;
+
+			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
+
+			/* Configuration may have changed, reconfigure
+			 * transfer mode.
+			 */
+			ehc->i.flags |= ATA_EHI_SETMODE;
+
+			/* schedule the scsi_rescan_device() here */
+			schedule_work(&(ap->scsi_rescan_task));
+		} else if (dev->class == ATA_DEV_UNKNOWN &&
+			   ehc->tries[dev->devno] &&
+			   ata_class_enabled(ehc->classes[dev->devno])) {
+			/* Temporarily set dev->class, it will be
+			 * permanently set once all configurations are
+			 * complete.  This is necessary because new
+			 * device configuration is done in two
+			 * separate loops.
+			 */
+			dev->class = ehc->classes[dev->devno];
+
+			if (dev->class == ATA_DEV_PMP)
+				rc = sata_pmp_attach(dev);
+			else
+				rc = ata_dev_read_id(dev, &dev->class,
+						     readid_flags, dev->id);
+
+			/* read_id might have changed class, store and reset */
+			ehc->classes[dev->devno] = dev->class;
+			dev->class = ATA_DEV_UNKNOWN;
+
+			switch (rc) {
+			case 0:
+				/* clear error info accumulated during probe */
+				ata_ering_clear(&dev->ering);
+				new_mask |= 1 << dev->devno;
+				break;
+			case -ENOENT:
+				/* IDENTIFY was issued to non-existent
+				 * device.  No need to reset.  Just
+				 * thaw and ignore the device.
+				 */
+				ata_eh_thaw_port(ap);
+				break;
+			default:
+				goto err;
+			}
+		}
+	}
+
+	/* PDIAG- should have been released, ask cable type if post-reset */
+	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
+		if (ap->ops->cable_detect)
+			ap->cbl = ap->ops->cable_detect(ap);
+		ata_force_cbl(ap);
+	}
+
+	/* Configure new devices forward such that user doesn't see
+	 * device detection messages backwards.
+	 */
+	ata_for_each_dev(dev, link, ALL) {
+		if (!(new_mask & (1 << dev->devno)))
+			continue;
+
+		dev->class = ehc->classes[dev->devno];
+
+		if (dev->class == ATA_DEV_PMP)
+			continue;
+
+		ehc->i.flags |= ATA_EHI_PRINTINFO;
+		rc = ata_dev_configure(dev);
+		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
+		if (rc) {
+			dev->class = ATA_DEV_UNKNOWN;
+			goto err;
+		}
+
+		spin_lock_irqsave(ap->lock, flags);
+		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* new device discovered, configure xfermode */
+		ehc->i.flags |= ATA_EHI_SETMODE;
+	}
+
+	return 0;
+
+ err:
+	*r_failed_dev = dev;
+	DPRINTK("EXIT rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+ *	@link: link on which timings will be programmed
+ *	@r_failed_dev: out parameter for failed device
+ *
+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+ *	ata_set_mode() fails, pointer to the failing device is
+ *	returned in @r_failed_dev.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+	int rc;
+
+	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
+	ata_for_each_dev(dev, link, ENABLED) {
+		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
+			struct ata_ering_entry *ent;
+
+			ent = ata_ering_top(&dev->ering);
+			if (ent)
+				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
+		}
+	}
+
+	/* has private set_mode? */
+	if (ap->ops->set_mode)
+		rc = ap->ops->set_mode(link, r_failed_dev);
+	else
+		rc = ata_do_set_mode(link, r_failed_dev);
+
+	/* if transfer mode has changed, set DUBIOUS_XFER on device */
+	ata_for_each_dev(dev, link, ENABLED) {
+		struct ata_eh_context *ehc = &link->eh_context;
+		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
+		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
+
+		if (dev->xfer_mode != saved_xfer_mode ||
+		    ata_ncq_enabled(dev) != saved_ncq)
+			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
+	}
+
+	return rc;
+}
+
+/**
+ *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
+ *	@dev: ATAPI device to clear UA for
+ *
+ *	Resets and other operations can make an ATAPI device raise
+ *	UNIT ATTENTION which causes the next operation to fail.  This
+ *	function clears UA.
+ *
+ *	LOCKING:
+ *	EH context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+	int i;
+
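+	/*
+	 * Resets and some other operations leave UNIT ATTENTION
+	 * pending, which fails the next command.  TEST UNIT READY
+	 * exposes the sense key and REQUEST SENSE acknowledges and
+	 * clears the condition; loop since several UAs may be queued.
+	 */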
+	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+		u8 *sense_buffer = dev->link->ap->sector_buf;
+		u8 sense_key = 0;
+		unsigned int err_mask;
+
+		err_mask = atapi_eh_tur(dev, &sense_key);
+		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+			ata_dev_warn(dev,
+				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
+				     err_mask);
+			return -EIO;
+		}
+
+		if (!err_mask || sense_key != UNIT_ATTENTION)
+			return 0;
+
+		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+		if (err_mask) {
+			ata_dev_warn(dev, "failed to clear "
+				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+			return -EIO;
+		}
+	}
+
+	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
+		     ATA_EH_UA_TRIES);
+
+	return 0;
+}
+
+/**
+ *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
+ *	@dev: ATA device which may need FLUSH retry
+ *
+ *	If @dev failed FLUSH, it needs to be reported to the upper
+ *	layers immediately, as it means that @dev failed to remap and
+ *	has already lost at least a sector; further FLUSH retries won't
+ *	make any difference to the lost sector.  However, if FLUSH
+ *	failed for some other reason, for example a transmission error,
+ *	FLUSH needs to be retried.
+ *
+ *	This function determines whether FLUSH failure retry is
+ *	necessary and performs it if so.
+ *
+ *	RETURNS:
+ *	0 if EH can continue, -errno if EH needs to be repeated.
+ */
+static int ata_eh_maybe_retry_flush(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_queued_cmd *qc;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+	int rc = 0;
+
+	/* did flush fail for this device? */
+	if (!ata_tag_valid(link->active_tag))
+		return 0;
+
+	qc = __ata_qc_from_tag(ap, link->active_tag);
+	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
+			       qc->tf.command != ATA_CMD_FLUSH))
+		return 0;
+
+	/* if the device failed it, it should be reported to upper layers */
+	if (qc->err_mask & AC_ERR_DEV)
+		return 0;
+
+	/* flush failed for some other reason, give it another shot */
+	ata_tf_init(dev, &tf);
+
+	tf.command = qc->tf.command;
+	tf.flags |= ATA_TFLAG_DEVICE;
+	tf.protocol = ATA_PROT_NODATA;
+
+	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
+		       tf.command, qc->err_mask);
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (!err_mask) {
+		/*
+		 * FLUSH is complete but there's no way to
+		 * successfully complete a failed command from EH.
+		 * Making sure retry is allowed at least once and
+		 * retrying it should do the trick - whatever was in
+		 * the cache is already on the platter and this won't
+		 * cause infinite loop.
+		 */
+		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
+	} else {
+		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
+			       err_mask);
+		rc = -EIO;
+
+		/* if device failed it, report it to upper layers */
+		if (err_mask & AC_ERR_DEV) {
+			qc->err_mask |= AC_ERR_DEV;
+			qc->result_tf = tf;
+			if (!(ap->pflags & ATA_PFLAG_FROZEN))
+				rc = 0;
+		}
+	}
+	return rc;
+}
+
+/**
+ *	ata_eh_set_lpm - configure SATA interface power management
+ *	@link: link to configure power management
+ *	@policy: the link power management policy
+ *	@r_failed_dev: out parameter for failed device
+ *
+ *	Enable SATA Interface power management.  This will enable
+ *	Device Interface Power Management (DIPM) for min_power and
+ *	medium_power_with_dipm policies, and then call driver specific
+ *	callbacks for enabling Host Initiated Power management.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			  struct ata_device **r_failed_dev)
+{
+	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+	enum ata_lpm_policy old_policy = link->lpm_policy;
+	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+	unsigned int err_mask;
+	int rc;
+
+	/* if the link or host doesn't do LPM, noop */
+	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+		return 0;
+
+	/*
+	 * DIPM is enabled only for ATA_LPM_MED_POWER_WITH_DIPM and
+	 * deeper policies as some devices misbehave when the host
+	 * NACKs transition to SLUMBER.  Order device and link
+	 * configurations such that the host always allows DIPM
+	 * requests.
+	 */
+	ata_for_each_dev(dev, link, ENABLED) {
+		bool hipm = ata_id_has_hipm(dev->id);
+		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+
+		/* find the first enabled and LPM enabled devices */
+		if (!link_dev)
+			link_dev = dev;
+
+		if (!lpm_dev && (hipm || dipm))
+			lpm_dev = dev;
+
+		hints &= ~ATA_LPM_EMPTY;
+		if (!hipm)
+			hints &= ~ATA_LPM_HIPM;
+
+		/* disable DIPM before changing link config */
+		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_DISABLE, SATA_DIPM);
+			if (err_mask && err_mask != AC_ERR_DEV) {
+				ata_dev_warn(dev,
+					     "failed to disable DIPM, Emask 0x%x\n",
+					     err_mask);
+				rc = -EIO;
+				goto fail;
+			}
+		}
+	}
+
+	if (ap) {
+		rc = ap->ops->set_lpm(link, policy, hints);
+		if (!rc && ap->slave_link)
+			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+	} else
+		rc = sata_pmp_set_lpm(link, policy, hints);
+
+	/*
+	 * Attribute link config failure to the first (LPM) enabled
+	 * device on the link.
+	 */
+	if (rc) {
+		if (rc == -EOPNOTSUPP) {
+			link->flags |= ATA_LFLAG_NO_LPM;
+			return 0;
+		}
+		dev = lpm_dev ? lpm_dev : link_dev;
+		goto fail;
+	}
+
+	/*
+	 * Low level driver acked the transition.  Issue DIPM command
+	 * with the new policy set.
+	 */
+	link->lpm_policy = policy;
+	if (ap && ap->slave_link)
+		ap->slave_link->lpm_policy = policy;
+
+	/* host config updated, enable DIPM if transitioning to MIN_POWER */
+	ata_for_each_dev(dev, link, ENABLED) {
+		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
+		    ata_id_has_dipm(dev->id)) {
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_ENABLE, SATA_DIPM);
+			if (err_mask && err_mask != AC_ERR_DEV) {
+				ata_dev_warn(dev,
+					"failed to enable DIPM, Emask 0x%x\n",
+					err_mask);
+				rc = -EIO;
+				goto fail;
+			}
+		}
+	}
+
+	link->last_lpm_change = jiffies;
+	link->flags |= ATA_LFLAG_CHANGED;
+
+	return 0;
+
+fail:
+	/* restore the old policy */
+	link->lpm_policy = old_policy;
+	if (ap && ap->slave_link)
+		ap->slave_link->lpm_policy = old_policy;
+
+	/* if no device or only one more chance is left, disable LPM */
+	if (!dev || ehc->tries[dev->devno] <= 2) {
+		ata_link_warn(link, "disabling LPM on the link\n");
+		link->flags |= ATA_LFLAG_NO_LPM;
+	}
+	if (r_failed_dev)
+		*r_failed_dev = dev;
+	return rc;
+}
+
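+/* Return the number of enabled devices on @link. */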
+int ata_link_nr_enabled(struct ata_link *link)
+{
+	struct ata_device *dev;
+	int cnt = 0;
+
+	ata_for_each_dev(dev, link, ENABLED)
+		cnt++;
+	return cnt;
+}
+
+static int ata_link_nr_vacant(struct ata_link *link)
+{
+	struct ata_device *dev;
+	int cnt = 0;
+
+	ata_for_each_dev(dev, link, ALL)
+		if (dev->class == ATA_DEV_UNKNOWN)
+			cnt++;
+	return cnt;
+}
+
+static int ata_eh_skip_recovery(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev;
+
+	/* skip disabled links */
+	if (link->flags & ATA_LFLAG_DISABLED)
+		return 1;
+
+	/* skip if explicitly requested */
+	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+		return 1;
+
+	/* thaw frozen port and recover failed devices */
+	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
+		return 0;
+
+	/* reset at least once if reset is requested */
+	if ((ehc->i.action & ATA_EH_RESET) &&
+	    !(ehc->i.flags & ATA_EHI_DID_RESET))
+		return 0;
+
+	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
+	ata_for_each_dev(dev, link, ALL) {
+		if (dev->class == ATA_DEV_UNKNOWN &&
+		    ehc->classes[dev->devno] != ATA_DEV_NONE)
+			return 0;
+	}
+
+	return 1;
+}
+
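+/*
+ * ering map callback: stop (-1) at entries older than the probe trial
+ * interval or marked ATA_EFLAG_OLD_ER; count everything newer as one
+ * probe trial.
+ */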
+static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
+	u64 now = get_jiffies_64();
+	int *trials = void_arg;
+
+	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+	    (ent->timestamp < now - min(now, interval)))
+		return -1;
+
+	(*trials)++;
+	return 0;
+}
+
+static int ata_eh_schedule_probe(struct ata_device *dev)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_link *link = ata_dev_phys_link(dev);
+	int trials = 0;
+
+	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
+	    (ehc->did_probe_mask & (1 << dev->devno)))
+		return 0;
+
+	ata_eh_detach_dev(dev);
+	ata_dev_init(dev);
+	ehc->did_probe_mask |= (1 << dev->devno);
+	ehc->i.action |= ATA_EH_RESET;
+	ehc->saved_xfer_mode[dev->devno] = 0;
+	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
+
+	/* the link may be in a deep sleep, wake it up */
+	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
+		if (ata_is_host_link(link))
+			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
+					       ATA_LPM_EMPTY);
+		else
+			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
+					 ATA_LPM_EMPTY);
+	}
+
+	/* Record and count probe trials on the ering.  The specific
+	 * error mask used is irrelevant.  Because a successful device
+	 * detection clears the ering, this count accumulates only if
+	 * there are consecutive failed probes.
+	 *
+	 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
+	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down to
+	 * 1.5Gbps.
+	 *
+	 * This is to work around cases where failed link speed
+	 * negotiation results in device misdetection leading to
+	 * infinite DEVXCHG or PHYRDY CHG events.
+	 */
+	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
+	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
+
+	if (trials > ATA_EH_PROBE_TRIALS)
+		sata_down_spd_limit(link, 1);
+
+	return 1;
+}
+
+static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+
+	/* -EAGAIN from EH routine indicates retry without prejudice.
+	 * The requester is responsible for ensuring forward progress.
+	 */
+	if (err != -EAGAIN)
+		ehc->tries[dev->devno]--;
+
+	switch (err) {
+	case -ENODEV:
+		/* device missing or wrong IDENTIFY data, schedule probing */
+		ehc->i.probe_mask |= (1 << dev->devno);
+		/* fall through */
+	case -EINVAL:
+		/* give it just one more chance */
+		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+		/* fall through */
+	case -EIO:
+		if (ehc->tries[dev->devno] == 1) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
+			if (dev->pio_mode > XFER_PIO_0)
+				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
+	}
+
+	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
+		/* disable device if it has used up all its chances */
+		ata_dev_disable(dev);
+
+		/* detach if offline */
+		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
+			ata_eh_detach_dev(dev);
+
+		/* schedule probe if necessary */
+		if (ata_eh_schedule_probe(dev)) {
+			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+			memset(ehc->cmd_timeout_idx[dev->devno], 0,
+			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
+		}
+
+		return 1;
+	} else {
+		ehc->i.action |= ATA_EH_RESET;
+		return 0;
+	}
+}
+
+/**
+ *	ata_eh_recover - recover host port after error
+ *	@ap: host port to recover
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *	@r_failed_link: out parameter for failed link
+ *
+ *	This is the alpha and omega, yin and yang, heart and soul of
+ *	libata exception handling.  On entry, actions required to
+ *	recover each link and hotplug requests are recorded in the
+ *	link's eh_context.  This function executes all the operations
+ *	with appropriate retries and fallbacks to resurrect failed
+ *	devices, detach goners and greet newcomers.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+		   ata_postreset_fn_t postreset,
+		   struct ata_link **r_failed_link)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	int rc, nr_fails;
+	unsigned long flags, deadline;
+
+	DPRINTK("ENTER\n");
+
+	/* prep for recovery */
+	ata_for_each_link(link, ap, EDGE) {
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		/* re-enable link? */
+		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
+			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
+			spin_lock_irqsave(ap->lock, flags);
+			link->flags &= ~ATA_LFLAG_DISABLED;
+			spin_unlock_irqrestore(ap->lock, flags);
+			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
+		}
+
+		ata_for_each_dev(dev, link, ALL) {
+			if (link->flags & ATA_LFLAG_NO_RETRY)
+				ehc->tries[dev->devno] = 1;
+			else
+				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+
+			/* collect port action mask recorded in dev actions */
+			ehc->i.action |= ehc->i.dev_action[dev->devno] &
+					 ~ATA_EH_PERDEV_MASK;
+			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
+
+			/* process hotplug request */
+			if (dev->flags & ATA_DFLAG_DETACH)
+				ata_eh_detach_dev(dev);
+
+			/* schedule probe if necessary */
+			if (!ata_dev_enabled(dev))
+				ata_eh_schedule_probe(dev);
+		}
+	}
+
+ retry:
+	rc = 0;
+
+	/* if UNLOADING, finish immediately */
+	if (ap->pflags & ATA_PFLAG_UNLOADING)
+		goto out;
+
+	/* prep for EH */
+	ata_for_each_link(link, ap, EDGE) {
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		/* skip EH if possible. */
+		if (ata_eh_skip_recovery(link))
+			ehc->i.action = 0;
+
+		ata_for_each_dev(dev, link, ALL)
+			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
+	}
+
+	/* reset */
+	ata_for_each_link(link, ap, EDGE) {
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		if (!(ehc->i.action & ATA_EH_RESET))
+			continue;
+
+		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
+				  prereset, softreset, hardreset, postreset);
+		if (rc) {
+			ata_link_err(link, "reset failed, giving up\n");
+			goto out;
+		}
+	}
+
+	do {
+		unsigned long now;
+
+		/*
+		 * clears ATA_EH_PARK in eh_info and resets
+		 * ap->park_req_pending
+		 */
+		ata_eh_pull_park_action(ap);
+
+		deadline = jiffies;
+		ata_for_each_link(link, ap, EDGE) {
+			ata_for_each_dev(dev, link, ALL) {
+				struct ata_eh_context *ehc = &link->eh_context;
+				unsigned long tmp;
+
+				if (dev->class != ATA_DEV_ATA &&
+				    dev->class != ATA_DEV_ZAC)
+					continue;
+				if (!(ehc->i.dev_action[dev->devno] &
+				      ATA_EH_PARK))
+					continue;
+				tmp = dev->unpark_deadline;
+				if (time_before(deadline, tmp))
+					deadline = tmp;
+				else if (time_before_eq(tmp, jiffies))
+					continue;
+				if (ehc->unloaded_mask & (1 << dev->devno))
+					continue;
+
+				ata_eh_park_issue_cmd(dev, 1);
+			}
+		}
+
+		now = jiffies;
+		if (time_before_eq(deadline, now))
+			break;
+
+		ata_eh_release(ap);
+		deadline = wait_for_completion_timeout(&ap->park_req_pending,
+						       deadline - now);
+		ata_eh_acquire(ap);
+	} while (deadline);
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ALL) {
+			if (!(link->eh_context.unloaded_mask &
+			      (1 << dev->devno)))
+				continue;
+
+			ata_eh_park_issue_cmd(dev, 0);
+			ata_eh_done(link, dev, ATA_EH_PARK);
+		}
+	}
+
+	/* the rest */
+	nr_fails = 0;
+	ata_for_each_link(link, ap, PMP_FIRST) {
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		if (sata_pmp_attached(ap) && ata_is_host_link(link))
+			goto config_lpm;
+
+		/* revalidate existing devices and attach new ones */
+		rc = ata_eh_revalidate_and_attach(link, &dev);
+		if (rc)
+			goto rest_fail;
+
+		/* if a PMP got attached, return; PMP EH will take care of it */
+		if (link->device->class == ATA_DEV_PMP) {
+			ehc->i.action = 0;
+			return 0;
+		}
+
+		/* configure transfer mode if necessary */
+		if (ehc->i.flags & ATA_EHI_SETMODE) {
+			rc = ata_set_mode(link, &dev);
+			if (rc)
+				goto rest_fail;
+			ehc->i.flags &= ~ATA_EHI_SETMODE;
+		}
+
+		/* If reset has been issued, clear UA to avoid
+		 * disrupting the current users of the device.
+		 */
+		if (ehc->i.flags & ATA_EHI_DID_RESET) {
+			ata_for_each_dev(dev, link, ALL) {
+				if (dev->class != ATA_DEV_ATAPI)
+					continue;
+				rc = atapi_eh_clear_ua(dev);
+				if (rc)
+					goto rest_fail;
+				if (zpodd_dev_enabled(dev))
+					zpodd_post_poweron(dev);
+			}
+		}
+
+		/* retry flush if necessary */
+		ata_for_each_dev(dev, link, ALL) {
+			if (dev->class != ATA_DEV_ATA &&
+			    dev->class != ATA_DEV_ZAC)
+				continue;
+			rc = ata_eh_maybe_retry_flush(dev);
+			if (rc)
+				goto rest_fail;
+		}
+
+	config_lpm:
+		/* configure link power saving */
+		if (link->lpm_policy != ap->target_lpm_policy) {
+			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+			if (rc)
+				goto rest_fail;
+		}
+
+		/* this link is okay now */
+		ehc->i.flags = 0;
+		continue;
+
+	rest_fail:
+		nr_fails++;
+		if (dev)
+			ata_eh_handle_dev_fail(dev, rc);
+
+		if (ap->pflags & ATA_PFLAG_FROZEN) {
+			/* PMP reset requires working host port.
+			 * Can't retry if it's frozen.
+			 */
+			if (sata_pmp_attached(ap))
+				goto out;
+			break;
+		}
+	}
+
+	if (nr_fails)
+		goto retry;
+
+ out:
+	if (rc && r_failed_link)
+		*r_failed_link = link;
+
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	ata_eh_finish - finish up EH
+ *	@ap: host port to finish EH for
+ *
+ *	Recovery is complete.  Clean up EH states and retry or finish
+ *	failed qcs.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_finish(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	int tag;
+
+	/* retry or finish qcs */
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_FAILED))
+			continue;
+
+		if (qc->err_mask) {
+			/* FIXME: Once EH migration is complete,
+			 * generate sense data in this function,
+			 * considering both err_mask and tf.
+			 */
+			if (qc->flags & ATA_QCFLAG_RETRY)
+				ata_eh_qc_retry(qc);
+			else
+				ata_eh_qc_complete(qc);
+		} else {
+			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+				ata_eh_qc_complete(qc);
+			} else {
+				/* feed zero TF to sense generation */
+				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
+				ata_eh_qc_retry(qc);
+			}
+		}
+	}
+
+	/* make sure nr_active_links is zero after EH */
+	WARN_ON(ap->nr_active_links);
+	ap->nr_active_links = 0;
+}
+
+/**
+ *	ata_do_eh - do standard error handling
+ *	@ap: host port to handle error for
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Perform standard error handling sequence.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+	       ata_postreset_fn_t postreset)
+{
+	struct ata_device *dev;
+	int rc;
+
+	ata_eh_autopsy(ap);
+	ata_eh_report(ap);
+
+	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
+			    NULL);
+	if (rc) {
+		ata_for_each_dev(dev, &ap->link, ALL)
+			ata_dev_disable(dev);
+	}
+
+	ata_eh_finish(ap);
+}
+
+/**
+ *	ata_std_error_handler - standard error handler
+ *	@ap: host port to handle error for
+ *
+ *	Standard error handler
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_std_error_handler(struct ata_port *ap)
+{
+	struct ata_port_operations *ops = ap->ops;
+	ata_reset_fn_t hardreset = ops->hardreset;
+
+	/* ignore built-in hardreset if SCR access is not available */
+	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
+		hardreset = NULL;
+
+	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
+}
+
+#ifdef CONFIG_PM
+/**
+ *	ata_eh_handle_port_suspend - perform port suspend operation
+ *	@ap: port to suspend
+ *
+ *	Suspend @ap.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{
+	unsigned long flags;
+	int rc = 0;
+	struct ata_device *dev;
+
+	/* are we suspending? */
+	spin_lock_irqsave(ap->lock, flags);
+	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+	    ap->pm_mesg.event & PM_EVENT_RESUME) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+
+	/*
+	 * If we have a ZPODD attached, check its zero
+	 * power ready status before the port is frozen.
+	 * Only needed for runtime suspend.
+	 */
+	if (PMSG_IS_AUTO(ap->pm_mesg)) {
+		ata_for_each_dev(dev, &ap->link, ENABLED) {
+			if (zpodd_dev_enabled(dev))
+				zpodd_on_suspend(dev);
+		}
+	}
+
+	/* tell ACPI we're suspending */
+	rc = ata_acpi_on_suspend(ap);
+	if (rc)
+		goto out;
+
+	/* suspend */
+	ata_eh_freeze_port(ap);
+
+	if (ap->ops->port_suspend)
+		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
+
+	ata_acpi_set_state(ap, ap->pm_mesg);
+ out:
+	/* update the flags */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
+	if (rc == 0)
+		ap->pflags |= ATA_PFLAG_SUSPENDED;
+	else if (ap->pflags & ATA_PFLAG_FROZEN)
+		ata_port_schedule_eh(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	ata_eh_handle_port_resume - perform port resume operation
+ *	@ap: port to resume
+ *
+ *	Resume @ap.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/* are we resuming? */
+	spin_lock_irqsave(ap->lock, flags);
+	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
+
+	/*
+	 * Error timestamps are in jiffies, which doesn't advance while
+	 * suspended, and PHY events during resume aren't uncommon.
+	 * When the two are combined, it can lead to unnecessary speed
+	 * downs if the machine is suspended and resumed repeatedly.
+	 * Clear error history.
+	 */
+	ata_for_each_link(link, ap, HOST_FIRST)
+		ata_for_each_dev(dev, link, ALL)
+			ata_ering_clear(&dev->ering);
+
+	ata_acpi_set_state(ap, ap->pm_mesg);
+
+	if (ap->ops->port_resume)
+		ap->ops->port_resume(ap);
+
+	/* tell ACPI that we're resuming */
+	ata_acpi_on_resume(ap);
+
+	/* update the flags */
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
new file mode 100644
index 0000000..2ae1799
--- /dev/null
+++ b/drivers/ata/libata-pmp.c
@@ -0,0 +1,1113 @@
+/*
+ * libata-pmp.c - libata port multiplier support
+ *
+ * Copyright (c) 2007  SUSE Linux Products GmbH
+ * Copyright (c) 2007  Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/libata.h>
+#include <linux/slab.h>
+#include "libata.h"
+#include "libata-transport.h"
+
+const struct ata_port_operations sata_pmp_port_ops = {
+	.inherits		= &sata_port_ops,
+	.pmp_prereset		= ata_std_prereset,
+	.pmp_hardreset		= sata_std_hardreset,
+	.pmp_postreset		= ata_std_postreset,
+	.error_handler		= sata_pmp_error_handler,
+};
+
+/**
+ *	sata_pmp_read - read PMP register
+ *	@link: link to read PMP register for
+ *	@reg: register to read
+ *	@r_val: resulting value
+ *
+ *	Read PMP register.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *pmp_dev = ap->link.device;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(pmp_dev, &tf);
+	tf.command = ATA_CMD_PMP_READ;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+	tf.feature = reg;
+	tf.device = link->pmp;
+
+	err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+				     SATA_PMP_RW_TIMEOUT);
+	if (err_mask)
+		return err_mask;
+
+	*r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
+	return 0;
+}
+
+/**
+ *	sata_pmp_write - write PMP register
+ *	@link: link to write PMP register for
+ *	@reg: register to write
+ *	@val: value to write
+ *
+ *	Write PMP register.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *pmp_dev = ap->link.device;
+	struct ata_taskfile tf;
+
+	ata_tf_init(pmp_dev, &tf);
+	tf.command = ATA_CMD_PMP_WRITE;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+	tf.feature = reg;
+	tf.device = link->pmp;
+	tf.nsect = val & 0xff;
+	tf.lbal = (val >> 8) & 0xff;
+	tf.lbam = (val >> 16) & 0xff;
+	tf.lbah = (val >> 24) & 0xff;
+
+	return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+				 SATA_PMP_RW_TIMEOUT);
+}
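+
+/*
+ * Editorial sketch, not part of the original file: a PMP register is a
+ * plain 32-bit value carried least-significant-byte-first in the nsect,
+ * lbal, lbam and lbah taskfile fields, as the two functions above show.
+ * Factored out, the pack/unpack helpers would look like this:
+ *
+ *	static inline u32 pmp_reg_unpack(const struct ata_taskfile *tf)
+ *	{
+ *		return tf->nsect | tf->lbal << 8 | tf->lbam << 16 |
+ *		       tf->lbah << 24;
+ *	}
+ *
+ *	static inline void pmp_reg_pack(struct ata_taskfile *tf, u32 val)
+ *	{
+ *		tf->nsect = val & 0xff;
+ *		tf->lbal = (val >> 8) & 0xff;
+ *		tf->lbam = (val >> 16) & 0xff;
+ *		tf->lbah = (val >> 24) & 0xff;
+ *	}
+ */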
+
+/**
+ *	sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP
+ *	@qc: ATA command in question
+ *
+ *	A host which has command switching PMP support cannot issue
+ *	commands to multiple links simultaneously.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+
+	if (ap->excl_link == NULL || ap->excl_link == link) {
+		if (ap->nr_active_links == 0 || ata_link_active(link)) {
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+			return ata_std_qc_defer(qc);
+		}
+
+		ap->excl_link = link;
+	}
+
+	return ATA_DEFER_PORT;
+}
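+
+/*
+ * Editorial note, not part of the original file: ignoring side effects,
+ * the decision above reduces to a small predicate over the port state.
+ * A standalone model with assumed names (it omits claiming ap->excl_link,
+ * setting ATA_QCFLAG_CLEAR_EXCL and the final ata_std_qc_defer() pass):
+ *
+ *	static int may_issue_now(int have_excl, int excl_is_us,
+ *				 int nr_active_links, int link_is_active)
+ *	{
+ *		if (have_excl && !excl_is_us)
+ *			return 0;	// another link owns the port
+ *		return nr_active_links == 0 || link_is_active;
+ *	}
+ */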
+
+/**
+ *	sata_pmp_scr_read - read PSCR
+ *	@link: ATA link to read PSCR for
+ *	@reg: PSCR to read
+ *	@r_val: resulting value
+ *
+ *	Read PSCR @reg into @r_val for @link, to be called from
+ *	ata_scr_read().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
+{
+	unsigned int err_mask;
+
+	if (reg > SATA_PMP_PSCR_CONTROL)
+		return -EINVAL;
+
+	err_mask = sata_pmp_read(link, reg, r_val);
+	if (err_mask) {
+		ata_link_warn(link, "failed to read SCR %d (Emask=0x%x)\n",
+			      reg, err_mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ *	sata_pmp_scr_write - write PSCR
+ *	@link: ATA link to write PSCR for
+ *	@reg: PSCR to write
+ *	@val: value to be written
+ *
+ *	Write @val to PSCR @reg for @link, to be called from
+ *	ata_scr_write() and ata_scr_write_flush().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+	unsigned int err_mask;
+
+	if (reg > SATA_PMP_PSCR_CONTROL)
+		return -EINVAL;
+
+	err_mask = sata_pmp_write(link, reg, val);
+	if (err_mask) {
+		ata_link_warn(link, "failed to write SCR %d (Emask=0x%x)\n",
+			      reg, err_mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ *	sata_pmp_set_lpm - configure LPM for a PMP link
+ *	@link: PMP link to configure LPM for
+ *	@policy: target LPM policy
+ *	@hints: LPM hints
+ *
+ *	Configure LPM for @link.  This function will contain any PMP
+ *	specific workarounds if necessary.
+ *
+ *	LOCKING:
+ *	EH context.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+		     unsigned hints)
+{
+	return sata_link_scr_lpm(link, policy, true);
+}
+
+/**
+ *	sata_pmp_read_gscr - read GSCR block of SATA PMP
+ *	@dev: PMP device
+ *	@gscr: buffer to read GSCR block into
+ *
+ *	Read selected PMP GSCRs from the PMP at @dev.  This will serve
+ *	as configuration and identification info for the PMP.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
+{
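+	/*
+	 * Editorial note: these indices correspond to the SATA_PMP_GSCR_*
+	 * constants in <linux/ata.h>: PROD_ID (0), REV (1), PORT_INFO (2),
+	 * ERROR (32), ERROR_EN (33), FEAT (64) and FEAT_EN (96).
+	 */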
+	static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
+		int reg = gscr_to_read[i];
+		unsigned int err_mask;
+
+		err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
+		if (err_mask) {
+			ata_dev_err(dev, "failed to read PMP GSCR[%d] (Emask=0x%x)\n",
+				    reg, err_mask);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static const char *sata_pmp_spec_rev_str(const u32 *gscr)
+{
+	u32 rev = gscr[SATA_PMP_GSCR_REV];
+
+	if (rev & (1 << 3))
+		return "1.2";
+	if (rev & (1 << 2))
+		return "1.1";
+	if (rev & (1 << 1))
+		return "1.0";
+	return "<unknown>";
+}
+
+#define PMP_GSCR_SII_POL 129
+
+static int sata_pmp_configure(struct ata_device *dev, int print_info)
+{
+	struct ata_port *ap = dev->link->ap;
+	u32 *gscr = dev->gscr;
+	u16 vendor = sata_pmp_gscr_vendor(gscr);
+	u16 devid = sata_pmp_gscr_devid(gscr);
+	unsigned int err_mask = 0;
+	const char *reason;
+	int nr_ports, rc;
+
+	nr_ports = sata_pmp_gscr_ports(gscr);
+
+	if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
+		rc = -EINVAL;
+		reason = "invalid nr_ports";
+		goto fail;
+	}
+
+	if ((ap->flags & ATA_FLAG_AN) &&
+	    (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
+		dev->flags |= ATA_DFLAG_AN;
+
+	/* monitor SERR_PHYRDY_CHG on fan-out ports */
+	err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
+				  SERR_PHYRDY_CHG);
+	if (err_mask) {
+		rc = -EIO;
+		reason = "failed to write GSCR_ERROR_EN";
+		goto fail;
+	}
+
+	/* Disable sending Early R_OK.
+	 * With "cached read" HDD testing and multiple ports busy on a SATA
+	 * host controller, a Sil3x26 PMP will very rarely drop a deferred
+	 * R_OK that was intended for the host. The symptom is that all
+	 * 5 drives under test time out, get reset, and recover.
+	 */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		u32 reg;
+
+		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "failed to read Sil3x26 Private Register";
+			goto fail;
+		}
+		reg &= ~0x1;
+		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "failed to write Sil3x26 Private Register";
+			goto fail;
+		}
+	}
+
+	if (print_info) {
+		ata_dev_info(dev, "Port Multiplier %s, "
+			     "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+			     sata_pmp_spec_rev_str(gscr), vendor, devid,
+			     sata_pmp_gscr_rev(gscr),
+			     nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+			     gscr[SATA_PMP_GSCR_FEAT]);
+
+		if (!(dev->flags & ATA_DFLAG_AN))
+			ata_dev_info(dev,
+				"Asynchronous notification not supported, "
+				"hotplug won't work on fan-out ports. Use warm-plug instead.\n");
+	}
+
+	return 0;
+
+ fail:
+	ata_dev_err(dev,
+		    "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+		    reason, err_mask);
+	return rc;
+}
+
+static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
+{
+	struct ata_link *pmp_link = ap->pmp_link;
+	int i, err;
+
+	if (!pmp_link) {
+		pmp_link = kcalloc(SATA_PMP_MAX_PORTS, sizeof(pmp_link[0]),
+				   GFP_NOIO);
+		if (!pmp_link)
+			return -ENOMEM;
+
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_link_init(ap, &pmp_link[i], i);
+
+		ap->pmp_link = pmp_link;
+
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
+			err = ata_tlink_add(&pmp_link[i]);
+			if (err)
+				goto err_tlink;
+		}
+	}
+
+	for (i = 0; i < nr_ports; i++) {
+		struct ata_link *link = &pmp_link[i];
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		link->flags = 0;
+		ehc->i.probe_mask |= ATA_ALL_DEVICES;
+		ehc->i.action |= ATA_EH_RESET;
+	}
+
+	return 0;
+  err_tlink:
+	while (--i >= 0)
+		ata_tlink_delete(&pmp_link[i]);
+	kfree(pmp_link);
+	ap->pmp_link = NULL;
+	return err;
+}
+
+static void sata_pmp_quirks(struct ata_port *ap)
+{
+	u32 *gscr = ap->link.device->gscr;
+	u16 vendor = sata_pmp_gscr_vendor(gscr);
+	u16 devid = sata_pmp_gscr_devid(gscr);
+	struct ata_link *link;
+
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* link reports offline after LPM */
+			link->flags |= ATA_LFLAG_NO_LPM;
+
+			/*
+			 * Class code report is unreliable and SRST times
+			 * out under certain configurations.
+			 */
+			if (link->pmp < 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+
+			/* port 5 is for the SEMB device and it doesn't like SRST */
+			if (link->pmp == 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && devid == 0x4723) {
+		/*
+		 * sil4723 quirks
+		 *
+		 * Link reports offline after LPM.  Class code report is
+		 * unreliable.  SIMG PMPs never got SRST reliable and the
+		 * config device at port 2 locks up on SRST.
+		 */
+		ata_for_each_link(link, ap, EDGE)
+			link->flags |= ATA_LFLAG_NO_LPM |
+				       ATA_LFLAG_NO_SRST |
+				       ATA_LFLAG_ASSUME_ATA;
+	} else if (vendor == 0x1095 && devid == 0x4726) {
+		/* sil4726 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* link reports offline after LPM */
+			link->flags |= ATA_LFLAG_NO_LPM;
+
+			/* Class code report is unreliable and SRST
+			 * times out under certain configurations.
+			 * Config device can be at port 0 or 5 and
+			 * locks up on SRST.
+			 */
+			if (link->pmp <= 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+
+			/* Port 6 is for the SEMB device, which doesn't
+			 * like SRST either.
+			 */
+			if (link->pmp == 6)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
+					devid == 0x5734 || devid == 0x5744)) {
+		/* sil5723/5744 quirks */
+
+		/* sil5723/5744 has either two or three downstream
+		 * ports depending on operation mode.  The last port
+		 * is empty if any actual IO device is attached, and is
+		 * otherwise occupied by a pseudo configuration device.
+		 * Don't try hard to recover it.
+		 */
+		ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+	} else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+		/*
+		 * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+		 * 0x0325: jmicron JMB394.
+		 */
+		ata_for_each_link(link, ap, EDGE) {
+			/* SRST breaks detection and disks get misclassified.
+			 * LPM is disabled to avoid potential problems.
+			 */
+			link->flags |= ATA_LFLAG_NO_LPM |
+				       ATA_LFLAG_NO_SRST |
+				       ATA_LFLAG_ASSUME_ATA;
+		}
+	} else if (vendor == 0x11ab && devid == 0x4140) {
+		/* Marvell 4140 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* port 4 is for the SEMB device and it doesn't like SRST */
+			if (link->pmp == 4)
+				link->flags |= ATA_LFLAG_DISABLED;
+		}
+	}
+}
+
+/**
+ *	sata_pmp_attach - attach a SATA PMP device
+ *	@dev: SATA PMP device to attach
+ *
+ *	Configure and attach SATA PMP device @dev.  This function is
+ *	also responsible for allocating and initializing PMP links.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_attach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+	struct ata_link *tlink;
+	int rc;
+
+	/* is it hanging off the right place? */
+	if (!sata_pmp_supported(ap)) {
+		ata_dev_err(dev, "host does not support Port Multiplier\n");
+		return -EINVAL;
+	}
+
+	if (!ata_is_host_link(link)) {
+		ata_dev_err(dev, "Port Multipliers cannot be nested\n");
+		return -EINVAL;
+	}
+
+	if (dev->devno) {
+		ata_dev_err(dev, "Port Multiplier must be the first device\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(link->pmp != 0);
+	link->pmp = SATA_PMP_CTRL_PORT;
+
+	/* read GSCR block */
+	rc = sata_pmp_read_gscr(dev, dev->gscr);
+	if (rc)
+		goto fail;
+
+	/* config PMP */
+	rc = sata_pmp_configure(dev, 1);
+	if (rc)
+		goto fail;
+
+	rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
+	if (rc) {
+		ata_dev_info(dev, "failed to initialize PMP links\n");
+		goto fail;
+	}
+
+	/* attach it */
+	spin_lock_irqsave(ap->lock, flags);
+	WARN_ON(ap->nr_pmp_links);
+	ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	sata_pmp_quirks(ap);
+
+	if (ap->ops->pmp_attach)
+		ap->ops->pmp_attach(ap);
+
+	ata_for_each_link(tlink, ap, EDGE)
+		sata_link_init_spd(tlink);
+
+	return 0;
+
+ fail:
+	link->pmp = 0;
+	return rc;
+}
+
+/**
+ *	sata_pmp_detach - detach a SATA PMP device
+ *	@dev: SATA PMP device to detach
+ *
+ *	Detach SATA PMP device @dev.  This function is also
+ *	responsible for deconfiguring PMP links.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void sata_pmp_detach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_link *tlink;
+	unsigned long flags;
+
+	ata_dev_info(dev, "Port Multiplier detaching\n");
+
+	WARN_ON(!ata_is_host_link(link) || dev->devno ||
+		link->pmp != SATA_PMP_CTRL_PORT);
+
+	if (ap->ops->pmp_detach)
+		ap->ops->pmp_detach(ap);
+
+	ata_for_each_link(tlink, ap, EDGE)
+		ata_eh_detach_dev(tlink->device);
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->nr_pmp_links = 0;
+	link->pmp = 0;
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ *	sata_pmp_same_pmp - does the new GSCR match the configured PMP?
+ *	@dev: PMP device to compare against
+ *	@new_gscr: GSCR block of the new device
+ *
+ *	Compare @new_gscr against @dev and determine whether @dev is
+ *	the PMP described by @new_gscr.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if @dev matches @new_gscr, 0 otherwise.
+ */
+static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
+{
+	const u32 *old_gscr = dev->gscr;
+	u16 old_vendor, new_vendor, old_devid, new_devid;
+	int old_nr_ports, new_nr_ports;
+
+	old_vendor = sata_pmp_gscr_vendor(old_gscr);
+	new_vendor = sata_pmp_gscr_vendor(new_gscr);
+	old_devid = sata_pmp_gscr_devid(old_gscr);
+	new_devid = sata_pmp_gscr_devid(new_gscr);
+	old_nr_ports = sata_pmp_gscr_ports(old_gscr);
+	new_nr_ports = sata_pmp_gscr_ports(new_gscr);
+
+	if (old_vendor != new_vendor) {
+		ata_dev_info(dev,
+			     "Port Multiplier vendor mismatch '0x%x' != '0x%x'\n",
+			     old_vendor, new_vendor);
+		return 0;
+	}
+
+	if (old_devid != new_devid) {
+		ata_dev_info(dev,
+			     "Port Multiplier device ID mismatch '0x%x' != '0x%x'\n",
+			     old_devid, new_devid);
+		return 0;
+	}
+
+	if (old_nr_ports != new_nr_ports) {
+		ata_dev_info(dev,
+			     "Port Multiplier nr_ports mismatch '0x%x' != '0x%x'\n",
+			     old_nr_ports, new_nr_ports);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ *	sata_pmp_revalidate - revalidate SATA PMP
+ *	@dev: PMP device to revalidate
+ *	@new_class: new class code
+ *
+ *	Re-read GSCR block and make sure @dev is still attached to the
+ *	port and properly configured.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	u32 *gscr = (void *)ap->sector_buf;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
+
+	if (!ata_dev_enabled(dev)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	/* wrong class? */
+	if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	/* read GSCR */
+	rc = sata_pmp_read_gscr(dev, gscr);
+	if (rc)
+		goto fail;
+
+	/* is the pmp still there? */
+	if (!sata_pmp_same_pmp(dev, gscr)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS);
+
+	rc = sata_pmp_configure(dev, 0);
+	if (rc)
+		goto fail;
+
+	ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
+
+	DPRINTK("EXIT, rc=0\n");
+	return 0;
+
+ fail:
+	ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	sata_pmp_revalidate_quick - revalidate SATA PMP quickly
+ *	@dev: PMP device to revalidate
+ *
+ *	Make sure the attached PMP is accessible.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate_quick(struct ata_device *dev)
+{
+	unsigned int err_mask;
+	u32 prod_id;
+
+	err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
+	if (err_mask) {
+		ata_dev_err(dev,
+			    "failed to read PMP product ID (Emask=0x%x)\n",
+			    err_mask);
+		return -EIO;
+	}
+
+	if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
+		ata_dev_err(dev, "PMP product ID mismatch\n");
+		/* something weird is going on, request full PMP recovery */
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ *	sata_pmp_eh_recover_pmp - recover PMP
+ *	@ap: ATA port PMP is attached to
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method
+ *	@hardreset: hardreset method
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Recover PMP attached to @ap.  Recovery procedure is somewhat
+ *	similar to that of ata_eh_recover() except that reset should
+ *	always be performed in hard->soft sequence and recovery
+ *	failure results in PMP detachment.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
+		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_link *link = &ap->link;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev = link->device;
+	int tries = ATA_EH_PMP_TRIES;
+	int detach = 0, rc = 0;
+	int reval_failed = 0;
+
+	DPRINTK("ENTER\n");
+
+	if (dev->flags & ATA_DFLAG_DETACH) {
+		detach = 1;
+		goto fail;
+	}
+
+ retry:
+	ehc->classes[0] = ATA_DEV_UNKNOWN;
+
+	if (ehc->i.action & ATA_EH_RESET) {
+		struct ata_link *tlink;
+
+		/* reset */
+		rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
+				  postreset);
+		if (rc) {
+			ata_link_err(link, "failed to reset PMP, giving up\n");
+			goto fail;
+		}
+
+		/* PMP is reset, SErrors cannot be trusted, scan all */
+		ata_for_each_link(tlink, ap, EDGE) {
+			struct ata_eh_context *ehc = &tlink->eh_context;
+
+			ehc->i.probe_mask |= ATA_ALL_DEVICES;
+			ehc->i.action |= ATA_EH_RESET;
+		}
+	}
+
+	/* If revalidation is requested, revalidate and reconfigure;
+	 * otherwise, do quick revalidation.
+	 */
+	if (ehc->i.action & ATA_EH_REVALIDATE)
+		rc = sata_pmp_revalidate(dev, ehc->classes[0]);
+	else
+		rc = sata_pmp_revalidate_quick(dev);
+
+	if (rc) {
+		tries--;
+
+		if (rc == -ENODEV) {
+			ehc->i.probe_mask |= ATA_ALL_DEVICES;
+			detach = 1;
+			/* give it just two more chances */
+			tries = min(tries, 2);
+		}
+
+		if (tries) {
+			/* consecutive revalidation failures? speed down */
+			if (reval_failed)
+				sata_down_spd_limit(link, 0);
+			else
+				reval_failed = 1;
+
+			ehc->i.action |= ATA_EH_RESET;
+			goto retry;
+		} else {
+			ata_dev_err(dev,
+				    "failed to recover PMP after %d tries, giving up\n",
+				    ATA_EH_PMP_TRIES);
+			goto fail;
+		}
+	}
+
+	/* okay, PMP resurrected */
+	ehc->i.flags = 0;
+
+	DPRINTK("EXIT, rc=0\n");
+	return 0;
+
+ fail:
+	sata_pmp_detach(dev);
+	if (detach)
+		ata_eh_detach_dev(dev);
+	else
+		ata_dev_disable(dev);
+
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
+{
+	struct ata_link *link;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_for_each_link(link, ap, EDGE) {
+		if (!(link->flags & ATA_LFLAG_DISABLED))
+			continue;
+
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* Some PMPs require a hardreset sequence to get
+		 * SError.N working.
+		 */
+		sata_link_hardreset(link, sata_deb_timing_normal,
+				ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK),
+				NULL, NULL);
+
+		/* unconditionally clear SError.N */
+		rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+		if (rc) {
+			ata_link_err(link,
+				     "failed to clear SError.N (errno=%d)\n",
+				     rc);
+			return rc;
+		}
+
+		spin_lock_irqsave(ap->lock, flags);
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return 0;
+}
+
+static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
+{
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+
+	if (link_tries[link->pmp] && --link_tries[link->pmp])
+		return 1;
+
+	/* disable this link */
+	if (!(link->flags & ATA_LFLAG_DISABLED)) {
+		ata_link_warn(link,
+			"failed to recover link after %d tries, disabling\n",
+			ATA_EH_PMP_LINK_TRIES);
+
+		spin_lock_irqsave(ap->lock, flags);
+		link->flags |= ATA_LFLAG_DISABLED;
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	ata_dev_disable(link->device);
+	link->eh_context.i.action = 0;
+
+	return 0;
+}
+
+/**
+ *	sata_pmp_eh_recover - recover PMP-enabled port
+ *	@ap: ATA port to recover
+ *
+ *	Drive EH recovery operation for PMP enabled port @ap.  This
+ *	function recovers host and PMP ports with proper retries and
+ *	fallbacks.  Actual recovery operations are performed using
+ *	ata_eh_recover() and sata_pmp_eh_recover_pmp().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover(struct ata_port *ap)
+{
+	struct ata_port_operations *ops = ap->ops;
+	int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
+	struct ata_link *pmp_link = &ap->link;
+	struct ata_device *pmp_dev = pmp_link->device;
+	struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
+	u32 *gscr = pmp_dev->gscr;
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned int err_mask;
+	u32 gscr_error, sntf;
+	int cnt, rc;
+
+	pmp_tries = ATA_EH_PMP_TRIES;
+	ata_for_each_link(link, ap, EDGE)
+		link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+ retry:
+	/* PMP attached? */
+	if (!sata_pmp_attached(ap)) {
+		rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
+				    ops->hardreset, ops->postreset, NULL);
+		if (rc) {
+			ata_for_each_dev(dev, &ap->link, ALL)
+				ata_dev_disable(dev);
+			return rc;
+		}
+
+		if (pmp_dev->class != ATA_DEV_PMP)
+			return 0;
+
+		/* new PMP online */
+		ata_for_each_link(link, ap, EDGE)
+			link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+		/* fall through */
+	}
+
+	/* recover pmp */
+	rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
+				     ops->hardreset, ops->postreset);
+	if (rc)
+		goto pmp_fail;
+
+	/* PHY event notification can disturb reset and other recovery
+	 * operations.  Turn it off.
+	 */
+	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
+		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			ata_link_warn(pmp_link,
+				"failed to disable NOTIFY (err_mask=0x%x)\n",
+				err_mask);
+			goto pmp_fail;
+		}
+	}
+
+	/* handle disabled links */
+	rc = sata_pmp_eh_handle_disabled_links(ap);
+	if (rc)
+		goto pmp_fail;
+
+	/* recover links */
+	rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
+			    ops->pmp_hardreset, ops->pmp_postreset, &link);
+	if (rc)
+		goto link_fail;
+
+	/* clear SNotification */
+	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+	if (rc == 0)
+		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+	/*
+	 * If LPM is active on any fan-out port, hotplug won't work.
+	 * Return with PHY event notification disabled.
+	 */
+	ata_for_each_link(link, ap, EDGE)
+		if (link->lpm_policy > ATA_LPM_MAX_POWER)
+			return 0;
+
+	/*
+	 * Connection status might have changed while resetting other
+	 * links, enable notification and check SATA_PMP_GSCR_ERROR
+	 * before returning.
+	 */
+
+	/* enable notification */
+	if (pmp_dev->flags & ATA_DFLAG_AN) {
+		gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			ata_dev_err(pmp_dev,
+				    "failed to write PMP_FEAT_EN (Emask=0x%x)\n",
+				    err_mask);
+			rc = -EIO;
+			goto pmp_fail;
+		}
+	}
+
+	/* check GSCR_ERROR */
+	err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
+	if (err_mask) {
+		ata_dev_err(pmp_dev,
+			    "failed to read PMP_GSCR_ERROR (Emask=0x%x)\n",
+			    err_mask);
+		rc = -EIO;
+		goto pmp_fail;
+	}
+
+	cnt = 0;
+	ata_for_each_link(link, ap, EDGE) {
+		if (!(gscr_error & (1 << link->pmp)))
+			continue;
+
+		if (sata_pmp_handle_link_fail(link, link_tries)) {
+			ata_ehi_hotplugged(&link->eh_context.i);
+			cnt++;
+		} else {
+			ata_link_warn(link,
+				"PHY status changed but maxed out on retries, giving up\n");
+			ata_link_warn(link,
+				"Manually issue scan to resume this link\n");
+		}
+	}
+
+	if (cnt) {
+		ata_port_info(ap,
+			"PMP SError.N set for some ports, repeating recovery\n");
+		goto retry;
+	}
+
+	return 0;
+
+ link_fail:
+	if (sata_pmp_handle_link_fail(link, link_tries)) {
+		pmp_ehc->i.action |= ATA_EH_RESET;
+		goto retry;
+	}
+
+	/* fall through */
+ pmp_fail:
+	/* Control always ends up here after detaching PMP.  Shut up
+	 * and return if we're unloading.
+	 */
+	if (ap->pflags & ATA_PFLAG_UNLOADING)
+		return rc;
+
+	if (!sata_pmp_attached(ap))
+		goto retry;
+
+	if (--pmp_tries) {
+		pmp_ehc->i.action |= ATA_EH_RESET;
+		goto retry;
+	}
+
+	ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n",
+		     ATA_EH_PMP_TRIES);
+	sata_pmp_detach(pmp_dev);
+	ata_dev_disable(pmp_dev);
+
+	return rc;
+}
+
+/**
+ *	sata_pmp_error_handler - do standard error handling for PMP-enabled host
+ *	@ap: host port to handle error for
+ *
+ *	Perform standard error handling sequence for PMP-enabled host
+ *	@ap.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void sata_pmp_error_handler(struct ata_port *ap)
+{
+	ata_eh_autopsy(ap);
+	ata_eh_report(ap);
+	sata_pmp_eh_recover(ap);
+	ata_eh_finish(ap);
+}
+
+EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
+EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
+EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
new file mode 100644
index 0000000..1984fc7
--- /dev/null
+++ b/drivers/ata/libata-scsi.c
@@ -0,0 +1,5150 @@
+/*
+ *  libata-scsi.c - helper library for ATA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available from
+ *  - http://www.t10.org/
+ *  - http://www.t13.org/
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <linux/libata.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+#include <asm/unaligned.h>
+#include <linux/ioprio.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+#define ATA_SCSI_RBUF_SIZE	4096
+
+static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
+static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
+
+typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
+
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
+					const struct scsi_device *scsidev);
+static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
+					    const struct scsi_device *scsidev);
+
+#define RW_RECOVERY_MPAGE 0x1
+#define RW_RECOVERY_MPAGE_LEN 12
+#define CACHE_MPAGE 0x8
+#define CACHE_MPAGE_LEN 20
+#define CONTROL_MPAGE 0xa
+#define CONTROL_MPAGE_LEN 12
+#define ALL_MPAGES 0x3f
+#define ALL_SUB_MPAGES 0xff
+
+
+static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
+	RW_RECOVERY_MPAGE,
+	RW_RECOVERY_MPAGE_LEN - 2,
+	(1 << 7),	/* AWRE */
+	0,		/* read retry count */
+	0, 0, 0, 0,
+	0,		/* write retry count */
+	0, 0, 0
+};
+
+static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
+	CACHE_MPAGE,
+	CACHE_MPAGE_LEN - 2,
+	0,		/* contains WCE, needs to be 0 for logic */
+	0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0,		/* contains DRA, needs to be 0 for logic */
+	0, 0, 0, 0, 0, 0, 0
+};
+
+static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
+	CONTROL_MPAGE,
+	CONTROL_MPAGE_LEN - 2,
+	2,	/* DSENSE=0, GLTSD=1 */
+	0,	/* [QAM+QERR may be 1, see 05-359r1] */
+	0, 0, 0, 0, 0xff, 0xff,
+	0, 30	/* extended self test time, see 05-359r1 */
+};
+
+static const char *ata_lpm_policy_names[] = {
+	[ATA_LPM_UNKNOWN]		= "max_performance",
+	[ATA_LPM_MAX_POWER]		= "max_performance",
+	[ATA_LPM_MED_POWER]		= "medium_power",
+	[ATA_LPM_MED_POWER_WITH_DIPM]	= "med_power_with_dipm",
+	[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
+	[ATA_LPM_MIN_POWER]		= "min_power",
+};
+
+static ssize_t ata_scsi_lpm_store(struct device *device,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(device);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	struct ata_link *link;
+	struct ata_device *dev;
+	enum ata_lpm_policy policy;
+	unsigned long flags;
+
+	/* UNKNOWN is internal state, iterate from MAX_POWER */
+	for (policy = ATA_LPM_MAX_POWER;
+	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
+		const char *name = ata_lpm_policy_names[policy];
+
+		if (strncmp(name, buf, strlen(name)) == 0)
+			break;
+	}
+	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
+		return -EINVAL;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			if (dev->horkage & ATA_HORKAGE_NOLPM) {
+				count = -EOPNOTSUPP;
+				goto out_unlock;
+			}
+		}
+	}
+
+	ap->target_lpm_policy = policy;
+	ata_port_schedule_eh(ap);
+out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+	return count;
+}
+
+static ssize_t ata_scsi_lpm_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+
+	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			ata_lpm_policy_names[ap->target_lpm_policy]);
+}
+DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+	    ata_scsi_lpm_show, ata_scsi_lpm_store);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
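+
+/*
+ * Editorial example, not part of the original file: user space selects a
+ * policy by writing one of the names above to the SCSI host's sysfs
+ * attribute.  A minimal sketch; host0 and the chosen policy are
+ * assumptions:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		FILE *f = fopen("/sys/class/scsi_host/host0/"
+ *				"link_power_management_policy", "w");
+ *
+ *		if (!f)
+ *			return 1;
+ *		fputs("med_power_with_dipm", f);
+ *		return fclose(f) != 0;	// write errors surface on close
+ *	}
+ */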
+
+static ssize_t ata_scsi_park_show(struct device *device,
+				  struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long now;
+	unsigned int uninitialized_var(msecs);
+	int rc = 0;
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irq(ap->lock);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	link = dev->link;
+	now = jiffies;
+	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
+	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
+	    time_after(dev->unpark_deadline, now))
+		msecs = jiffies_to_msecs(dev->unpark_deadline - now);
+	else
+		msecs = 0;
+
+unlock:
+	spin_unlock_irq(ap->lock);
+
+	return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+}
+
+static ssize_t ata_scsi_park_store(struct device *device,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	long int input;
+	unsigned long flags;
+	int rc;
+
+	rc = kstrtol(buf, 10, &input);
+	if (rc)
+		return rc;
+	if (input < -2)
+		return -EINVAL;
+	if (input > ATA_TMOUT_MAX_PARK) {
+		rc = -EOVERFLOW;
+		input = ATA_TMOUT_MAX_PARK;
+	}
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irqsave(ap->lock, flags);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (unlikely(!dev)) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+	if (dev->class != ATA_DEV_ATA &&
+	    dev->class != ATA_DEV_ZAC) {
+		rc = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	if (input >= 0) {
+		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+			rc = -EOPNOTSUPP;
+			goto unlock;
+		}
+
+		dev->unpark_deadline = ata_deadline(jiffies, input);
+		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
+		ata_port_schedule_eh(ap);
+		complete(&ap->park_req_pending);
+	} else {
+		switch (input) {
+		case -1:
+			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
+			break;
+		case -2:
+			dev->flags |= ATA_DFLAG_NO_UNLOAD;
+			break;
+		}
+	}
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc ? rc : len;
+}
+DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
+	    ata_scsi_park_show, ata_scsi_park_store);
+EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
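+
+/*
+ * Editorial example, not part of the original file: per the store handler
+ * above, writing a positive number of milliseconds to unload_heads parks
+ * the heads for that long, -1 clears ATA_DFLAG_NO_UNLOAD and -2 sets it.
+ * A minimal sketch; the sda path is an assumption:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		FILE *f = fopen("/sys/block/sda/device/unload_heads", "w");
+ *
+ *		if (!f)
+ *			return 1;
+ *		fprintf(f, "%d", 3000);	// park for roughly 3 seconds
+ *		return fclose(f) != 0;
+ *	}
+ */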
+
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	bool ncq_prio_enable;
+	int rc = 0;
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irq(ap->lock);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+	spin_unlock_irq(ap->lock);
+
+	return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+					 struct device_attribute *attr,
+					 const char *buf, size_t len)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	long int input;
+	int rc;
+
+	rc = kstrtol(buf, 10, &input);
+	if (rc)
+		return rc;
+	if ((input < 0) || (input > 1))
+		return -EINVAL;
+
+	ap = ata_shost_to_port(sdev->host);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (unlikely(!dev))
+		return -ENODEV;
+
+	spin_lock_irq(ap->lock);
+	if (input)
+		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+	else
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+	dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+	dev->link->eh_info.flags |= ATA_EHI_QUIET;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irq(ap->lock);
+
+	ata_port_wait_eh(ap);
+
+	if (input) {
+		spin_lock_irq(ap->lock);
+		if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+			dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+			rc = -EIO;
+		}
+		spin_unlock_irq(ap->lock);
+	}
+
+	return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
+void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
+			u8 sk, u8 asc, u8 ascq)
+{
+	bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+	if (!cmd)
+		return;
+
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+}
+
+void ata_scsi_set_sense_information(struct ata_device *dev,
+				    struct scsi_cmnd *cmd,
+				    const struct ata_taskfile *tf)
+{
+	u64 information;
+
+	if (!cmd)
+		return;
+
+	information = ata_tf_read_block(tf, dev);
+	if (information == U64_MAX)
+		return;
+
+	scsi_set_sense_information(cmd->sense_buffer,
+				   SCSI_SENSE_BUFFERSIZE, information);
+}
+
+static void ata_scsi_set_invalid_field(struct ata_device *dev,
+				       struct scsi_cmnd *cmd, u16 field, u8 bit)
+{
+	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in CDB" */
+	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				     field, bit, 1);
+}
+
+static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
+					   struct scsi_cmnd *cmd, u16 field)
+{
+	/* "Invalid field in parameter list" */
+	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
+	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				     field, 0xff, 0);
+}
+
+static ssize_t
+ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
+		return ap->ops->em_store(ap, buf, count);
+	return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+
+	if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
+		return ap->ops->em_show(ap, buf);
+	return -EINVAL;
+}
+DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
+		ata_scsi_em_message_show, ata_scsi_em_message_store);
+EXPORT_SYMBOL_GPL(dev_attr_em_message);
+
+static ssize_t
+ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+
+	return snprintf(buf, 23, "%d\n", ap->em_message_type);
+}
+DEVICE_ATTR(em_message_type, S_IRUGO,
+		  ata_scsi_em_message_type_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
+
+static ssize_t
+ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+	if (atadev && ap->ops->sw_activity_show &&
+	    (ap->flags & ATA_FLAG_SW_ACTIVITY))
+		return ap->ops->sw_activity_show(atadev, buf);
+	return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+	enum sw_activity val;
+	int rc;
+
+	if (atadev && ap->ops->sw_activity_store &&
+	    (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+		val = simple_strtoul(buf, NULL, 0);
+		switch (val) {
+		case OFF: case BLINK_ON: case BLINK_OFF:
+			rc = ap->ops->sw_activity_store(atadev, val);
+			if (!rc)
+				return count;
+			else
+				return rc;
+		}
+	}
+	return -EINVAL;
+}
+DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
+			ata_scsi_activity_store);
+EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+
+struct device_attribute *ata_common_sdev_attrs[] = {
+	&dev_attr_unload_heads,
+	&dev_attr_ncq_prio_enable,
+	NULL
+};
+EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
+
+/**
+ *	ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
+ *	@sdev: SCSI device for which BIOS geometry is to be determined
+ *	@bdev: block device associated with @sdev
+ *	@capacity: capacity of SCSI device
+ *	@geom: location to which geometry will be output
+ *
+ *	Generic bios head/sector/cylinder calculator
+ *	used by sd. Most BIOSes nowadays expect an XXX/255/63 (CHS)
+ *	mapping. Some situations may arise where the disk is not
+ *	bootable if this is not used.
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  We don't really care.
+ *
+ *	RETURNS:
+ *	Zero.
+ */
+int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+		       sector_t capacity, int geom[])
+{
+	geom[0] = 255;
+	geom[1] = 63;
+	sector_div(capacity, 255*63);
+	geom[2] = capacity;
+
+	return 0;
+}
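+
+/*
+ * Editorial example, not part of the original file: with 255 heads and
+ * 63 sectors per track the cylinder count is just capacity / (255 * 63).
+ * A quick check for a common 1 TB disk (the sector count is an assumed
+ * example value):
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned long long capacity = 1953525168ULL; // 512-byte sectors
+ *
+ *		// prints "CHS 121601/255/63"
+ *		printf("CHS %llu/255/63\n", capacity / (255 * 63));
+ *		return 0;
+ *	}
+ */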
+
+/**
+ *	ata_scsi_unlock_native_capacity - unlock native capacity
+ *	@sdev: SCSI device to adjust device capacity for
+ *
+ *	This function is called if a partition on @sdev extends beyond
+ *	the end of the device.  It requests EH to unlock HPA.
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  Might sleep.
+ */
+void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (dev && dev->n_sectors < dev->n_native_sectors) {
+		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
+		dev->link->eh_info.action |= ATA_EH_RESET;
+		ata_port_schedule_eh(ap);
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	ata_port_wait_eh(ap);
+}
+
+/**
+ *	ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
+ *	@ap: target port
+ *	@sdev: SCSI device to get identify data for
+ *	@arg: User buffer area for identify data
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  We don't really care.
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno on error.
+ */
+static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
+			    void __user *arg)
+{
+	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+	u16 __user *dst = arg;
+	char buf[40];
+
+	if (!dev)
+		return -ENOMSG;
+
+	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
+	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
+	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ *	ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
+ *	@scsidev: Device to which we are issuing command
+ *	@arg: User provided data for issuing command
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  We don't really care.
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno on error.
+ */
+int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
+{
+	int rc = 0;
+	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	u8 args[4], *argbuf = NULL;
+	int argsize = 0;
+	enum dma_data_direction data_dir;
+	struct scsi_sense_hdr sshdr;
+	int cmd_result;
+
+	if (arg == NULL)
+		return -EINVAL;
+
+	if (copy_from_user(args, arg, sizeof(args)))
+		return -EFAULT;
+
+	memset(sensebuf, 0, sizeof(sensebuf));
+	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+
+	if (args[3]) {
+		argsize = ATA_SECT_SIZE * args[3];
+		argbuf = kmalloc(argsize, GFP_KERNEL);
+		if (argbuf == NULL) {
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		scsi_cmd[1]  = (4 << 1); /* PIO Data-in */
+		scsi_cmd[2]  = 0x0e;     /* no off.line or cc, read from dev,
+					    block count in sector count field */
+		data_dir = DMA_FROM_DEVICE;
+	} else {
+		scsi_cmd[1]  = (3 << 1); /* Non-data */
+		scsi_cmd[2]  = 0x20;     /* cc but no off.line or data xfer */
+		data_dir = DMA_NONE;
+	}
+
+	scsi_cmd[0] = ATA_16;
+
+	scsi_cmd[4] = args[2];
+	if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
+		scsi_cmd[6]  = args[3];
+		scsi_cmd[8]  = args[1];
+		scsi_cmd[10] = 0x4f;
+		scsi_cmd[12] = 0xc2;
+	} else {
+		scsi_cmd[6]  = args[1];
+	}
+	scsi_cmd[14] = args[0];
+
+	/* Good values for timeout and retries?  Values below
+	   from scsi_ioctl_send_command() for default case... */
+	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
+				  sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
+
+	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+		u8 *desc = sensebuf + 8;
+		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+		/* If we set cc then ATA pass-through will cause a
+		 * check condition even if no error. Filter that. */
+		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+			if (sshdr.sense_key == RECOVERED_ERROR &&
+			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
+				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+		}
+
+		/* Send userspace a few ATA registers (same as drivers/ide) */
+		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
+		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
+			args[0] = desc[13];	/* status */
+			args[1] = desc[3];	/* error */
+			args[2] = desc[5];	/* sector count (0:7) */
+			if (copy_to_user(arg, args, sizeof(args)))
+				rc = -EFAULT;
+		}
+	}
+
+	if (cmd_result) {
+		rc = -EIO;
+		goto error;
+	}
+
+	if (argbuf &&
+	    copy_to_user(arg + sizeof(args), argbuf, argsize))
+		rc = -EFAULT;
+error:
+	kfree(argbuf);
+	return rc;
+}
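+
+/*
+ * Editorial example, not part of the original file: the classic caller of
+ * this path is HDIO_DRIVE_CMD with args[] = { command, nsect, feature,
+ * number of data sectors }, e.g. IDENTIFY DEVICE (0xec) returning one
+ * 512-byte sector.  A minimal sketch, to be run as root; the device path
+ * is an assumption:
+ *
+ *	#include <fcntl.h>
+ *	#include <linux/hdreg.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned char args[4 + 512] = { 0xec, 0, 0, 1 };
+ *		int fd = open("/dev/sda", O_RDONLY);
+ *
+ *		if (fd < 0 || ioctl(fd, HDIO_DRIVE_CMD, args))
+ *			return 1;
+ *		// model number: identify words 27-46, bytes pairwise swapped
+ *		for (int i = 54; i < 94; i += 2)
+ *			printf("%c%c", args[4 + i + 1], args[4 + i]);
+ *		printf("\n");
+ *		return 0;
+ *	}
+ */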
+
+/**
+ *	ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
+ *	@scsidev: Device to which we are issuing command
+ *	@arg: User provided data for issuing command
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  We don't really care.
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno on error.
+ */
+int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
+{
+	int rc = 0;
+	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	u8 args[7];
+	struct scsi_sense_hdr sshdr;
+	int cmd_result;
+
+	if (arg == NULL)
+		return -EINVAL;
+
+	if (copy_from_user(args, arg, sizeof(args)))
+		return -EFAULT;
+
+	memset(sensebuf, 0, sizeof(sensebuf));
+	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+	scsi_cmd[0]  = ATA_16;
+	scsi_cmd[1]  = (3 << 1); /* Non-data */
+	scsi_cmd[2]  = 0x20;     /* cc but no off.line or data xfer */
+	scsi_cmd[4]  = args[1];
+	scsi_cmd[6]  = args[2];
+	scsi_cmd[8]  = args[3];
+	scsi_cmd[10] = args[4];
+	scsi_cmd[12] = args[5];
+	scsi_cmd[13] = args[6] & 0x4f;
+	scsi_cmd[14] = args[0];
+
+	/* Good values for timeout and retries?  Values below
+	   from scsi_ioctl_send_command() for default case... */
+	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
+				sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
+
+	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+		u8 *desc = sensebuf + 8;
+		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+		/* If we set cc then ATA pass-through will cause a
+		 * check condition even if no error. Filter that. */
+		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+			if (sshdr.sense_key == RECOVERED_ERROR &&
+			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
+				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+		}
+
+		/* Send userspace ATA registers */
+		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
+				desc[0] == 0x09) {/* code is "ATA Descriptor" */
+			args[0] = desc[13];	/* status */
+			args[1] = desc[3];	/* error */
+			args[2] = desc[5];	/* sector count (0:7) */
+			args[3] = desc[7];	/* lbal */
+			args[4] = desc[9];	/* lbam */
+			args[5] = desc[11];	/* lbah */
+			args[6] = desc[12];	/* select */
+			if (copy_to_user(arg, args, sizeof(args)))
+				rc = -EFAULT;
+		}
+	}
+
+	if (cmd_result) {
+		rc = -EIO;
+		goto error;
+	}
+
+ error:
+	return rc;
+}
+
+static int ata_ioc32(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_PIO_DMA)
+		return 1;
+	if (ap->pflags & ATA_PFLAG_PIO32)
+		return 1;
+	return 0;
+}
+
+int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
+		     int cmd, void __user *arg)
+{
+	unsigned long val;
+	int rc = -EINVAL;
+	unsigned long flags;
+
+	switch (cmd) {
+	case HDIO_GET_32BIT:
+		spin_lock_irqsave(ap->lock, flags);
+		val = ata_ioc32(ap);
+		spin_unlock_irqrestore(ap->lock, flags);
+		return put_user(val, (unsigned long __user *)arg);
+
+	case HDIO_SET_32BIT:
+		val = (unsigned long) arg;
+		rc = 0;
+		spin_lock_irqsave(ap->lock, flags);
+		if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
+			if (val)
+				ap->pflags |= ATA_PFLAG_PIO32;
+			else
+				ap->pflags &= ~ATA_PFLAG_PIO32;
+		} else {
+			if (val != ata_ioc32(ap))
+				rc = -EINVAL;
+		}
+		spin_unlock_irqrestore(ap->lock, flags);
+		return rc;
+
+	case HDIO_GET_IDENTITY:
+		return ata_get_identity(ap, scsidev, arg);
+
+	case HDIO_DRIVE_CMD:
+		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		return ata_cmd_ioctl(scsidev, arg);
+
+	case HDIO_DRIVE_TASK:
+		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+			return -EACCES;
+		return ata_task_ioctl(scsidev, arg);
+
+	default:
+		rc = -ENOTTY;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
+
+int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
+{
+	return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
+				scsidev, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+
+/**
+ *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
+ *	@dev: ATA device to which the new command is attached
+ *	@cmd: SCSI command that originated this ATA command
+ *
+ *	Obtain a reference to an unused ata_queued_cmd structure,
+ *	which is the basic libata structure representing a single
+ *	ATA command sent to the hardware.
+ *
+ *	If a command was available, fill in the SCSI-specific
+ *	portions of the structure with information on the
+ *	current command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Command allocated, or %NULL if none available.
+ */
+static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+					      struct scsi_cmnd *cmd)
+{
+	struct ata_queued_cmd *qc;
+
+	qc = ata_qc_new_init(dev, cmd->request->tag);
+	if (qc) {
+		qc->scsicmd = cmd;
+		qc->scsidone = cmd->scsi_done;
+
+		qc->sg = scsi_sglist(cmd);
+		qc->n_elem = scsi_sg_count(cmd);
+
+		if (cmd->request->rq_flags & RQF_QUIET)
+			qc->flags |= ATA_QCFLAG_QUIET;
+	} else {
+		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+		cmd->scsi_done(cmd);
+	}
+
+	return qc;
+}
+
+static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+
+	qc->extrabytes = scmd->request->extra_len;
+	qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
+}
+
+/**
+ *	ata_dump_status - user friendly display of error info
+ *	@id: id of the port in question
+ *	@tf: ptr to filled out taskfile
+ *
+ *	Decode and dump the ATA error/status registers for the user so
+ *	that they have some idea what really happened at the non
+ *	make-believe layer.
+ *
+ *	LOCKING:
+ *	inherited from caller
+ */
+static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
+{
+	u8 stat = tf->command, err = tf->feature;
+
+	pr_warn("ata%u: status=0x%02x { ", id, stat);
+	if (stat & ATA_BUSY) {
+		pr_cont("Busy }\n");	/* Data is not valid in this case */
+	} else {
+		if (stat & ATA_DRDY)	pr_cont("DriveReady ");
+		if (stat & ATA_DF)	pr_cont("DeviceFault ");
+		if (stat & ATA_DSC)	pr_cont("SeekComplete ");
+		if (stat & ATA_DRQ)	pr_cont("DataRequest ");
+		if (stat & ATA_CORR)	pr_cont("CorrectedError ");
+		if (stat & ATA_SENSE)	pr_cont("Sense ");
+		if (stat & ATA_ERR)	pr_cont("Error ");
+		pr_cont("}\n");
+
+		if (err) {
+			pr_warn("ata%u: error=0x%02x { ", id, err);
+			if (err & ATA_ABORTED)	pr_cont("DriveStatusError ");
+			if (err & ATA_ICRC) {
+				if (err & ATA_ABORTED)
+						pr_cont("BadCRC ");
+				else		pr_cont("Sector ");
+			}
+			if (err & ATA_UNC)	pr_cont("UncorrectableError ");
+			if (err & ATA_IDNF)	pr_cont("SectorIdNotFound ");
+			if (err & ATA_TRK0NF)	pr_cont("TrackZeroNotFound ");
+			if (err & ATA_AMNF)	pr_cont("AddrMarkNotFound ");
+			pr_cont("}\n");
+		}
+	}
+}
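+
+/*
+ * Example of the resulting output (illustrative values): for
+ * status 0x51 (DRDY | DSC | ERR) and error 0x04 (ABRT), the
+ * function above prints:
+ *
+ *	ata1: status=0x51 { DriveReady SeekComplete Error }
+ *	ata1: error=0x04 { DriveStatusError }
+ */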
+
+/**
+ *	ata_to_sense_error - convert ATA error to SCSI error
+ *	@id: ATA device number
+ *	@drv_stat: value contained in ATA status register
+ *	@drv_err: value contained in ATA error register
+ *	@sk: the sense key we'll fill out
+ *	@asc: the additional sense code we'll fill out
+ *	@ascq: the additional sense code qualifier we'll fill out
+ *	@verbose: be verbose
+ *
+ *	Converts an ATA error into a SCSI error.  Fill out pointers to
+ *	SK, ASC, and ASCQ bytes for later use in fixed or descriptor
+ *	format sense blocks.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
+			       u8 *asc, u8 *ascq, int verbose)
+{
+	int i;
+
+	/* Based on the 3ware driver translation table */
+	static const unsigned char sense_table[][4] = {
+		/* BBD|ECC|ID|MAR */
+		{0xd1,		ABORTED_COMMAND, 0x00, 0x00},
+			// Device busy                  Aborted command
+		/* BBD|ECC|ID */
+		{0xd0,		ABORTED_COMMAND, 0x00, 0x00},
+			// Device busy                  Aborted command
+		/* ECC|MC|MARK */
+		{0x61,		HARDWARE_ERROR, 0x00, 0x00},
+			// Device fault                 Hardware error
+		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
+		{0x84,		ABORTED_COMMAND, 0x47, 0x00},
+			// Data CRC error               SCSI parity error
+		/* MC|ID|ABRT|TRK0|MARK */
+		{0x37,		NOT_READY, 0x04, 0x00},
+			// Unit offline                 Not ready
+		/* MCR|MARK */
+		{0x09,		NOT_READY, 0x04, 0x00},
+			// Unrecovered disk error       Not ready
+		/*  Bad address mark */
+		{0x01,		MEDIUM_ERROR, 0x13, 0x00},
+			// Address mark not found for data field
+		/* TRK0 - Track 0 not found */
+		{0x02,		HARDWARE_ERROR, 0x00, 0x00},
+			// Hardware error
+		/* Abort: 0x04 is not translated here, see below */
+		/* Media change request */
+		{0x08,		NOT_READY, 0x04, 0x00},
+			// FIXME: faking offline
+		/* SRV/IDNF - ID not found */
+		{0x10,		ILLEGAL_REQUEST, 0x21, 0x00},
+			// Logical address out of range
+		/* MC - Media Changed */
+		{0x20,		UNIT_ATTENTION, 0x28, 0x00},
+			// Not ready to ready change, medium may have changed
+		/* ECC - Uncorrectable ECC error */
+		{0x40,		MEDIUM_ERROR, 0x11, 0x04},
+			// Unrecovered read error
+		/* BBD - block marked bad */
+		{0x80,		MEDIUM_ERROR, 0x11, 0x04},
+			// Block marked bad	Medium error, unrecovered read error
+		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+	};
+	static const unsigned char stat_table[][4] = {
+		/* Must be first because BUSY means no other bits valid */
+		{0x80,		ABORTED_COMMAND, 0x47, 0x00},
+		// Busy, fake parity for now
+		{0x40,		ILLEGAL_REQUEST, 0x21, 0x04},
+		// Device ready, unaligned write command
+		{0x20,		HARDWARE_ERROR,  0x44, 0x00},
+		// Device fault, internal target failure
+		{0x08,		ABORTED_COMMAND, 0x47, 0x00},
+		// Timed out in xfer, fake parity for now
+		{0x04,		RECOVERED_ERROR, 0x11, 0x00},
+		// Recovered ECC error	  Medium error, recovered
+		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+	};
+
+	/*
+	 *	Is this an error we can process/parse
+	 */
+	if (drv_stat & ATA_BUSY) {
+		drv_err = 0;	/* Ignore the err bits, they're invalid */
+	}
+
+	if (drv_err) {
+		/* Look for drv_err */
+		for (i = 0; sense_table[i][0] != 0xFF; i++) {
+			/* Look for best matches first */
+			if ((sense_table[i][0] & drv_err) ==
+			    sense_table[i][0]) {
+				*sk = sense_table[i][1];
+				*asc = sense_table[i][2];
+				*ascq = sense_table[i][3];
+				goto translate_done;
+			}
+		}
+	}
+
+	/*
+	 * Fall back to interpreting status bits.  Note that if the drv_err
+	 * has only the ABRT bit set, we decode drv_stat.  ABRT by itself
+	 * is not descriptive enough.
+	 */
+	for (i = 0; stat_table[i][0] != 0xFF; i++) {
+		if (stat_table[i][0] & drv_stat) {
+			*sk = stat_table[i][1];
+			*asc = stat_table[i][2];
+			*ascq = stat_table[i][3];
+			goto translate_done;
+		}
+	}
+
+	/*
+	 * We need a sensible error return here, which is tricky, and one
+	 * that won't cause people to do things like return a disk wrongly.
+	 */
+	*sk = ABORTED_COMMAND;
+	*asc = 0x00;
+	*ascq = 0x00;
+
+ translate_done:
+	if (verbose)
+		pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
+		       id, drv_stat, drv_err, *sk, *asc, *ascq);
+	return;
+}
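+
+/*
+ * Worked example (illustrative values): status 0x51 with error 0x40
+ * (UNC) matches the 0x40 sense_table entry above, so the command is
+ * completed with SK/ASC/ASCQ = MEDIUM ERROR/0x11/0x04, i.e.
+ * "unrecovered read error".
+ */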
+
+/*
+ *	ata_gen_passthru_sense - Generate check condition sense block.
+ *	@qc: Command that completed.
+ *
+ *	This function is specific to the ATA descriptor format sense
+ *	block specified for the ATA pass through commands.  Regardless
+ *	of whether the command errored or not, a sense block is
+ *	returned and all taskfile registers are copied into it.  If
+ *	there was no error, the request came from an ATA PASS-THROUGH
+ *	command, so we use the following sense data:
+ *	sk = RECOVERED ERROR
+ *	asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	struct ata_taskfile *tf = &qc->result_tf;
+	unsigned char *sb = cmd->sense_buffer;
+	unsigned char *desc = sb + 8;
+	int verbose = qc->ap->ops->error_handler == NULL;
+	u8 sense_key, asc, ascq;
+
+	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	/*
+	 * Use ata_to_sense_error() to map status register bits
+	 * onto sense key, asc & ascq.
+	 */
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+				   &sense_key, &asc, &ascq, verbose);
+		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
+	} else {
+		/*
+		 * ATA PASS-THROUGH INFORMATION AVAILABLE
+		 * Always in descriptor format sense.
+		 */
+		scsi_build_sense_buffer(1, cmd->sense_buffer,
+					RECOVERED_ERROR, 0, 0x1D);
+	}
+
+	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+		u8 len;
+
+		/* descriptor format */
+		len = sb[7];
+		desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+		if (!desc) {
+			if (SCSI_SENSE_BUFFERSIZE < len + 14)
+				return;
+			sb[7] = len + 14;
+			desc = sb + 8 + len;
+		}
+		desc[0] = 9;
+		desc[1] = 12;
+		/*
+		 * Copy registers into sense buffer.
+		 */
+		desc[2] = 0x00;
+		desc[3] = tf->feature;	/* == error reg */
+		desc[5] = tf->nsect;
+		desc[7] = tf->lbal;
+		desc[9] = tf->lbam;
+		desc[11] = tf->lbah;
+		desc[12] = tf->device;
+		desc[13] = tf->command; /* == status reg */
+
+		/*
+		 * Fill in Extend bit, and the high order bytes
+		 * if applicable.
+		 */
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			desc[2] |= 0x01;
+			desc[4] = tf->hob_nsect;
+			desc[6] = tf->hob_lbal;
+			desc[8] = tf->hob_lbam;
+			desc[10] = tf->hob_lbah;
+		}
+	} else {
+		/* Fixed sense format */
+		desc[0] = tf->feature;
+		desc[1] = tf->command; /* status */
+		desc[2] = tf->device;
+		desc[3] = tf->nsect;
+		desc[7] = 0;
+		if (tf->flags & ATA_TFLAG_LBA48)  {
+			desc[8] |= 0x80;
+			if (tf->hob_nsect)
+				desc[8] |= 0x40;
+			if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+				desc[8] |= 0x20;
+		}
+		desc[9] = tf->lbal;
+		desc[10] = tf->lbam;
+		desc[11] = tf->lbah;
+	}
+}
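+
+/*
+ * Resulting sense layout (illustrative, non-LBA48 case, assuming the
+ * ATA descriptor lands at the start of the descriptor area at sb[8]):
+ *
+ *	sb[8]  = 0x09 (descriptor code)    sb[9]  = 0x0c (add. length)
+ *	sb[11] = error                     sb[13] = count (7:0)
+ *	sb[15] = LBA (7:0)                 sb[17] = LBA (15:8)
+ *	sb[19] = LBA (23:16)               sb[20] = device
+ *	sb[21] = status
+ */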
+
+/**
+ *	ata_gen_ata_sense - generate a SCSI fixed sense block
+ *	@qc: Command that we are erroring out
+ *
+ *	Generate sense block for a failed ATA command @qc.  Descriptor
+ *	format is used to accommodate LBA48 block address.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
+{
+	struct ata_device *dev = qc->dev;
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	struct ata_taskfile *tf = &qc->result_tf;
+	unsigned char *sb = cmd->sense_buffer;
+	int verbose = qc->ap->ops->error_handler == NULL;
+	u64 block;
+	u8 sense_key, asc, ascq;
+
+	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	if (ata_dev_disabled(dev)) {
+		/* Device disabled after error recovery */
+		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
+		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
+		return;
+	}
+	/* Use ata_to_sense_error() to map status register bits
+	 * onto sense key, asc & ascq.
+	 */
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+				   &sense_key, &asc, &ascq, verbose);
+		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
+	} else {
+		/* Could not decode error */
+		ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
+			     tf->command, qc->err_mask);
+		ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+		return;
+	}
+
+	block = ata_tf_read_block(&qc->result_tf, dev);
+	if (block == U64_MAX)
+		return;
+
+	scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
+}
+
+static void ata_scsi_sdev_config(struct scsi_device *sdev)
+{
+	sdev->use_10_for_rw = 1;
+	sdev->use_10_for_ms = 1;
+	sdev->no_write_same = 1;
+
+	/* Schedule policy is determined by ->qc_defer() callback and
+	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
+	 * prevent SCSI midlayer from automatically deferring
+	 * requests.
+	 */
+	sdev->max_device_blocked = 1;
+}
+
+/**
+ *	atapi_drain_needed - Check whether data transfer may overflow
+ *	@rq: request to be checked
+ *
+ *	ATAPI commands which transfer variable length data to host
+ *	might overflow due to application error or hardware bug.  This
+ *	function checks whether overflow should be drained and ignored
+ *	for @rq.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if draining may be needed; otherwise, 0.
+ */
+static int atapi_drain_needed(struct request *rq)
+{
+	if (likely(!blk_rq_is_passthrough(rq)))
+		return 0;
+
+	if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
+		return 0;
+
+	return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
+}
+
+static int ata_scsi_dev_config(struct scsi_device *sdev,
+			       struct ata_device *dev)
+{
+	struct request_queue *q = sdev->request_queue;
+
+	if (!ata_id_has_unload(dev->id))
+		dev->flags |= ATA_DFLAG_NO_UNLOAD;
+
+	/* configure max sectors */
+	blk_queue_max_hw_sectors(q, dev->max_sectors);
+
+	if (dev->class == ATA_DEV_ATAPI) {
+		void *buf;
+
+		sdev->sector_size = ATA_SECT_SIZE;
+
+		/* set DMA padding */
+		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+
+		/* configure draining */
+		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
+		if (!buf) {
+			ata_dev_err(dev, "drain buffer allocation failed\n");
+			return -ENOMEM;
+		}
+
+		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
+	} else {
+		sdev->sector_size = ata_id_logical_sector_size(dev->id);
+		sdev->manage_start_stop = 1;
+	}
+
+	/*
+	 * ata_pio_sectors() expects buffer for each sector to not cross
+	 * page boundary.  Enforce it by requiring buffers to be sector
+	 * aligned, which works iff sector_size is not larger than
+	 * PAGE_SIZE.  ATAPI devices also need the alignment as
+	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+	 */
+	if (sdev->sector_size > PAGE_SIZE)
+		ata_dev_warn(dev,
+			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+			sdev->sector_size);
+
+	blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+
+	if (dev->flags & ATA_DFLAG_AN)
+		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
+
+	if (dev->flags & ATA_DFLAG_NCQ) {
+		int depth;
+
+		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
+		depth = min(ATA_MAX_QUEUE, depth);
+		scsi_change_queue_depth(sdev, depth);
+	}
+
+	blk_queue_flush_queueable(q, false);
+
+	if (dev->flags & ATA_DFLAG_TRUSTED)
+		sdev->security_supported = 1;
+
+	dev->sdev = sdev;
+	return 0;
+}
+
+/**
+ *	ata_scsi_slave_config - Set SCSI device attributes
+ *	@sdev: SCSI device to examine
+ *
+ *	This is called before we actually start reading
+ *	and writing to the device, to configure certain
+ *	SCSI mid-layer behaviors.
+ *
+ *	LOCKING:
+ *	Defined by SCSI layer.  We don't really care.
+ */
+
+int ata_scsi_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+	int rc = 0;
+
+	ata_scsi_sdev_config(sdev);
+
+	if (dev)
+		rc = ata_scsi_dev_config(sdev, dev);
+
+	return rc;
+}
+
+/**
+ *	ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ *	@sdev: SCSI device to be destroyed
+ *
+ *	@sdev is about to be destroyed for hot/warm unplugging.  If
+ *	this unplugging was initiated by libata as indicated by NULL
+ *	dev->sdev, this function doesn't have to do anything.
+ *	Otherwise, SCSI layer initiated warm-unplug is in progress.
+ *	Clear dev->sdev, schedule the device for ATA detach and invoke
+ *	EH.
+ *
+ *	LOCKING:
+ *	Defined by SCSI layer.  We don't really care.
+ */
+void ata_scsi_slave_destroy(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct request_queue *q = sdev->request_queue;
+	unsigned long flags;
+	struct ata_device *dev;
+
+	if (!ap->ops->error_handler)
+		return;
+
+	spin_lock_irqsave(ap->lock, flags);
+	dev = __ata_scsi_find_dev(ap, sdev);
+	if (dev && dev->sdev) {
+		/* SCSI device already in CANCEL state, no need to offline it */
+		dev->sdev = NULL;
+		dev->flags |= ATA_DFLAG_DETACH;
+		ata_port_schedule_eh(ap);
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	kfree(q->dma_drain_buffer);
+	q->dma_drain_buffer = NULL;
+	q->dma_drain_size = 0;
+}
+
+/**
+ *	__ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ *	@ap: ATA port of the device whose queue depth is being changed
+ *	@sdev: SCSI device to configure queue depth for
+ *	@queue_depth: new queue depth
+ *
+ *	libsas and libata have different approaches for associating a sdev to
+ *	its ata_port; this helper implements the logic common to both.
+ */
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+			     int queue_depth)
+{
+	struct ata_device *dev;
+	unsigned long flags;
+
+	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+		return sdev->queue_depth;
+
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev || !ata_dev_enabled(dev))
+		return sdev->queue_depth;
+
+	/* NCQ enabled? */
+	spin_lock_irqsave(ap->lock, flags);
+	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
+		dev->flags |= ATA_DFLAG_NCQ_OFF;
+		queue_depth = 1;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* limit and apply queue depth */
+	queue_depth = min(queue_depth, sdev->host->can_queue);
+	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+	queue_depth = min(queue_depth, ATA_MAX_QUEUE);
+
+	if (sdev->queue_depth == queue_depth)
+		return -EINVAL;
+
+	return scsi_change_queue_depth(sdev, queue_depth);
+}
+
+/**
+ *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ *	@sdev: SCSI device to configure queue depth for
+ *	@queue_depth: new queue depth
+ *
+ *	This is libata standard hostt->change_queue_depth callback.
+ *	SCSI will call into this callback when user tries to set queue
+ *	depth via sysfs.
+ *
+ *	LOCKING:
+ *	SCSI layer (we don't care)
+ *
+ *	RETURNS:
+ *	Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+	return __ata_change_queue_depth(ap, sdev, queue_depth);
+}
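+
+/*
+ * Illustrative example (device name assumed): writing the sysfs
+ * queue_depth attribute of a libata disk,
+ *
+ *	echo 31 > /sys/block/sda/device/queue_depth
+ *
+ * lands in ata_scsi_change_queue_depth(), and the helper above clamps
+ * the value against host->can_queue, the IDENTIFY queue depth and
+ * ATA_MAX_QUEUE before applying it via scsi_change_queue_depth().
+ */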
+
+/**
+ *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
+ *	@qc: Storage for translated ATA taskfile
+ *
+ *	Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
+ *	(to start). Perhaps these commands should be preceded by
+ *	CHECK POWER MODE to see what power mode the device is already in.
+ *	[See SAT revision 5 at www.t10.org]
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_taskfile *tf = &qc->tf;
+	const u8 *cdb = scmd->cmnd;
+	u16 fp;
+	u8 bp = 0xff;
+
+	if (scmd->cmd_len < 5) {
+		fp = 4;
+		goto invalid_fld;
+	}
+
+	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf->protocol = ATA_PROT_NODATA;
+	if (cdb[1] & 0x1) {
+		;	/* ignore IMMED bit, violates sat-r05 */
+	}
+	if (cdb[4] & 0x2) {
+		fp = 4;
+		bp = 1;
+		goto invalid_fld;       /* LOEJ bit set not supported */
+	}
+	if (((cdb[4] >> 4) & 0xf) != 0) {
+		fp = 4;
+		bp = 3;
+		goto invalid_fld;       /* power conditions not supported */
+	}
+
+	if (cdb[4] & 0x1) {
+		tf->nsect = 1;	/* 1 sector, lba=0 */
+
+		if (qc->dev->flags & ATA_DFLAG_LBA) {
+			tf->flags |= ATA_TFLAG_LBA;
+
+			tf->lbah = 0x0;
+			tf->lbam = 0x0;
+			tf->lbal = 0x0;
+			tf->device |= ATA_LBA;
+		} else {
+			/* CHS */
+			tf->lbal = 0x1; /* sect */
+			tf->lbam = 0x0; /* cyl low */
+			tf->lbah = 0x0; /* cyl high */
+		}
+
+		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
+	} else {
+		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
+		 * or S5) causing some drives to spin up and down again.
+		 */
+		if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+		    system_state == SYSTEM_POWER_OFF)
+			goto skip;
+
+		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+		     system_entering_hibernation())
+			goto skip;
+
+		/* Issue ATA STANDBY IMMEDIATE command */
+		tf->command = ATA_CMD_STANDBYNOW1;
+	}
+
+	/*
+	 * Standby and Idle condition timers could be implemented but that
+	 * would require libata to implement the Power condition mode page
+	 * and allow the user to change it. Changing mode pages requires
+	 * MODE SELECT to be implemented.
+	 */
+
+	return 0;
+
+ invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
+	return 1;
+ skip:
+	scmd->result = SAM_STAT_GOOD;
+	return 1;
+}
+
+
+/**
+ *	ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
+ *	@qc: Storage for translated ATA taskfile
+ *
+ *	Sets up an ATA taskfile to issue FLUSH CACHE or
+ *	FLUSH CACHE EXT.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+
+	tf->flags |= ATA_TFLAG_DEVICE;
+	tf->protocol = ATA_PROT_NODATA;
+
+	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
+		tf->command = ATA_CMD_FLUSH_EXT;
+	else
+		tf->command = ATA_CMD_FLUSH;
+
+	/* flush is critical for IO integrity, consider it an IO command */
+	qc->flags |= ATA_QCFLAG_IO;
+
+	return 0;
+}
+
+/**
+ *	scsi_6_lba_len - Get LBA and transfer length
+ *	@cdb: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 6-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len;
+
+	VPRINTK("six-byte command\n");
+
+	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
+	lba |= ((u64)cdb[2]) << 8;
+	lba |= ((u64)cdb[3]);
+
+	len = cdb[4];
+
+	*plba = lba;
+	*plen = len;
+}
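+
+/*
+ * Worked example (illustrative CDB): for a READ(6) CDB of
+ * 08 01 23 45 08 00, the decoding above yields *plba = 0x12345 and
+ * *plen = 8.  A zero cdb[4] would yield *plen = 0, which the caller
+ * (ata_scsi_rw_xlat()) interprets as 256 blocks.
+ */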
+
+/**
+ *	scsi_10_lba_len - Get LBA and transfer length
+ *	@cdb: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 10-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len = 0;
+
+	VPRINTK("ten-byte command\n");
+
+	lba |= ((u64)cdb[2]) << 24;
+	lba |= ((u64)cdb[3]) << 16;
+	lba |= ((u64)cdb[4]) << 8;
+	lba |= ((u64)cdb[5]);
+
+	len |= ((u32)cdb[7]) << 8;
+	len |= ((u32)cdb[8]);
+
+	*plba = lba;
+	*plen = len;
+}
+
+/**
+ *	scsi_16_lba_len - Get LBA and transfer length
+ *	@cdb: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 16-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len = 0;
+
+	VPRINTK("sixteen-byte command\n");
+
+	lba |= ((u64)cdb[2]) << 56;
+	lba |= ((u64)cdb[3]) << 48;
+	lba |= ((u64)cdb[4]) << 40;
+	lba |= ((u64)cdb[5]) << 32;
+	lba |= ((u64)cdb[6]) << 24;
+	lba |= ((u64)cdb[7]) << 16;
+	lba |= ((u64)cdb[8]) << 8;
+	lba |= ((u64)cdb[9]);
+
+	len |= ((u32)cdb[10]) << 24;
+	len |= ((u32)cdb[11]) << 16;
+	len |= ((u32)cdb[12]) << 8;
+	len |= ((u32)cdb[13]);
+
+	*plba = lba;
+	*plen = len;
+}
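+
+/*
+ * Worked example (illustrative CDB): a READ(16) CDB with LBA bytes
+ * 00 00 00 01 00 00 00 00 (cdb[2..9]) and length bytes 00 00 00 10
+ * (cdb[10..13]) decodes to *plba = 0x100000000 and *plen = 16 -- an
+ * address that is only reachable with LBA48 commands.
+ */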
+
+/**
+ *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
+ *	@qc: Storage for translated ATA taskfile
+ *
+ *	Converts SCSI VERIFY command to an ATA READ VERIFY command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+	u64 dev_sectors = qc->dev->n_sectors;
+	const u8 *cdb = scmd->cmnd;
+	u64 block;
+	u32 n_block;
+	u16 fp;
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->protocol = ATA_PROT_NODATA;
+
+	if (cdb[0] == VERIFY) {
+		if (scmd->cmd_len < 10) {
+			fp = 9;
+			goto invalid_fld;
+		}
+		scsi_10_lba_len(cdb, &block, &n_block);
+	} else if (cdb[0] == VERIFY_16) {
+		if (scmd->cmd_len < 16) {
+			fp = 15;
+			goto invalid_fld;
+		}
+		scsi_16_lba_len(cdb, &block, &n_block);
+	} else {
+		fp = 0;
+		goto invalid_fld;
+	}
+
+	if (!n_block)
+		goto nothing_to_do;
+	if (block >= dev_sectors)
+		goto out_of_range;
+	if ((block + n_block) > dev_sectors)
+		goto out_of_range;
+
+	if (dev->flags & ATA_DFLAG_LBA) {
+		tf->flags |= ATA_TFLAG_LBA;
+
+		if (lba_28_ok(block, n_block)) {
+			/* use LBA28 */
+			tf->command = ATA_CMD_VERIFY;
+			tf->device |= (block >> 24) & 0xf;
+		} else if (lba_48_ok(block, n_block)) {
+			if (!(dev->flags & ATA_DFLAG_LBA48))
+				goto out_of_range;
+
+			/* use LBA48 */
+			tf->flags |= ATA_TFLAG_LBA48;
+			tf->command = ATA_CMD_VERIFY_EXT;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else
+			/* request too large even for LBA48 */
+			goto out_of_range;
+
+		tf->nsect = n_block & 0xff;
+
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
+	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		if (!lba_28_ok(block, n_block))
+			goto out_of_range;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit.
+		   Cylinder: 0-65535
+		   Head: 0-15
+		   Sector: 1-255*/
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			goto out_of_range;
+
+		tf->command = ATA_CMD_VERIFY;
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+
+invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
+	return 1;
+
+out_of_range:
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	/* "Logical Block Address out of range" */
+	return 1;
+
+nothing_to_do:
+	scmd->result = SAM_STAT_GOOD;
+	return 1;
+}
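+
+/*
+ * Worked CHS example (illustrative geometry: 16 heads, 63 sectors
+ * per track): for block 4000 the conversion above gives
+ * track = 4000 / 63 = 63, cyl = 63 / 16 = 3, head = 63 % 16 = 15 and
+ * sect = 4000 % 63 + 1 = 32, all within the cylinder/head/sector
+ * limits checked before issuing ATA_CMD_VERIFY.
+ */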
+
+/**
+ *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
+ *	@qc: Storage for translated ATA taskfile
+ *
+ *	Converts any of six SCSI read/write commands into the
+ *	ATA counterpart, including starting sector (LBA),
+ *	sector count, and taking into account the device's LBA48
+ *	support.
+ *
+ *	Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
+ *	%WRITE_16 are currently supported.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	struct request *rq = scmd->request;
+	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+	unsigned int tf_flags = 0;
+	u64 block;
+	u32 n_block;
+	int rc;
+	u16 fp = 0;
+
+	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
+		tf_flags |= ATA_TFLAG_WRITE;
+
+	/* Calculate the SCSI LBA, transfer length and FUA. */
+	switch (cdb[0]) {
+	case READ_10:
+	case WRITE_10:
+		if (unlikely(scmd->cmd_len < 10)) {
+			fp = 9;
+			goto invalid_fld;
+		}
+		scsi_10_lba_len(cdb, &block, &n_block);
+		if (cdb[1] & (1 << 3))
+			tf_flags |= ATA_TFLAG_FUA;
+		break;
+	case READ_6:
+	case WRITE_6:
+		if (unlikely(scmd->cmd_len < 6)) {
+			fp = 5;
+			goto invalid_fld;
+		}
+		scsi_6_lba_len(cdb, &block, &n_block);
+
+		/* for 6-byte r/w commands, transfer length 0
+		 * means 256 blocks of data, not 0 block.
+		 */
+		if (!n_block)
+			n_block = 256;
+		break;
+	case READ_16:
+	case WRITE_16:
+		if (unlikely(scmd->cmd_len < 16)) {
+			fp = 15;
+			goto invalid_fld;
+		}
+		scsi_16_lba_len(cdb, &block, &n_block);
+		if (cdb[1] & (1 << 3))
+			tf_flags |= ATA_TFLAG_FUA;
+		break;
+	default:
+		DPRINTK("no-byte command\n");
+		fp = 0;
+		goto invalid_fld;
+	}
+
+	/* Check and compose ATA command */
+	if (!n_block)
+		/* For 10-byte and 16-byte SCSI R/W commands, transfer
+		 * length 0 means transfer 0 block of data.
+		 * However, for ATA R/W commands, sector count 0 means
+		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
+		 *
+		 * WARNING: one or two older ATA drives treat 0 as 0...
+		 */
+		goto nothing_to_do;
+
+	qc->flags |= ATA_QCFLAG_IO;
+	qc->nbytes = n_block * scmd->device->sector_size;
+
+	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
+			     qc->hw_tag, class);
+
+	if (likely(rc == 0))
+		return 0;
+
+	if (rc == -ERANGE)
+		goto out_of_range;
+	/* treat all other errors as -EINVAL, fall through */
+invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
+	return 1;
+
+out_of_range:
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	/* "Logical Block Address out of range" */
+	return 1;
+
+nothing_to_do:
+	scmd->result = SAM_STAT_GOOD;
+	return 1;
+}
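+
+/*
+ * Illustrative translation (assumes an LBA48-capable device): a
+ * WRITE(10) CDB of 2a 08 00 00 10 00 00 00 08 00 has the FUA bit
+ * (cdb[1] bit 3) set, so ata_build_rw_tf() is asked for a write of
+ * 8 blocks at LBA 0x1000 with ATA_TFLAG_WRITE | ATA_TFLAG_FUA.
+ */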
+
+static void ata_qc_done(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	void (*done)(struct scsi_cmnd *) = qc->scsidone;
+
+	ata_qc_free(qc);
+	done(cmd);
+}
+
+static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	u8 *cdb = cmd->cmnd;
+	int need_sense = (qc->err_mask != 0);
+
+	/* For ATA pass thru (SAT) commands, generate a sense block if
+	 * user mandated it or if there's an error.  Note that if we
+	 * generate sense data because the user forced us to [CK_COND = 1], a check
+	 * condition is generated and the ATA register values are returned
+	 * whether the command completed successfully or not. If there
+	 * was no error, we use the following sense data:
+	 * sk = RECOVERED ERROR
+	 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+	 */
+	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
+	    ((cdb[2] & 0x20) || need_sense))
+		ata_gen_passthru_sense(qc);
+	else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+		cmd->result = SAM_STAT_CHECK_CONDITION;
+	else if (need_sense)
+		ata_gen_ata_sense(qc);
+	else
+		cmd->result = SAM_STAT_GOOD;
+
+	if (need_sense && !ap->ops->error_handler)
+		ata_dump_status(ap->print_id, &qc->result_tf);
+
+	ata_qc_done(qc);
+}
+
+/**
+ *	ata_scsi_translate - Translate then issue SCSI command to ATA device
+ *	@dev: ATA device to which the command is addressed
+ *	@cmd: SCSI command to execute
+ *	@xlat_func: Actor which translates @cmd to an ATA taskfile
+ *
+ *	Our ->queuecommand() function has decided that the SCSI
+ *	command issued can be directly translated into an ATA
+ *	command, rather than handled internally.
+ *
+ *	This function sets up an ata_queued_cmd structure for the
+ *	SCSI command, and sends that ata_queued_cmd to the hardware.
+ *
+ *	The xlat_func argument (actor) returns 0 if ready to execute
+ *	ATA command, else 1 to finish translation. If 1 is returned
+ *	then cmd->result (and possibly cmd->sense_buffer) are assumed
+ *	to be set reflecting an error condition or clean (early)
+ *	termination.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY
+ *	if the command needs to be deferred.
+ */
+static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+			      ata_xlat_func_t xlat_func)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_queued_cmd *qc;
+	int rc;
+
+	VPRINTK("ENTER\n");
+
+	qc = ata_scsi_qc_new(dev, cmd);
+	if (!qc)
+		goto err_mem;
+
+	/* data is present; dma-map it */
+	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+	    cmd->sc_data_direction == DMA_TO_DEVICE) {
+		if (unlikely(scsi_bufflen(cmd) < 1)) {
+			ata_dev_warn(dev, "WARNING: zero len r/w req\n");
+			goto err_did;
+		}
+
+		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
+
+		qc->dma_dir = cmd->sc_data_direction;
+	}
+
+	qc->complete_fn = ata_scsi_qc_complete;
+
+	if (xlat_func(qc))
+		goto early_finish;
+
+	if (ap->ops->qc_defer) {
+		if ((rc = ap->ops->qc_defer(qc)))
+			goto defer;
+	}
+
+	/* select device, send command to hardware */
+	ata_qc_issue(qc);
+
+	VPRINTK("EXIT\n");
+	return 0;
+
+early_finish:
+	ata_qc_free(qc);
+	cmd->scsi_done(cmd);
+	DPRINTK("EXIT - early finish (good or error)\n");
+	return 0;
+
+err_did:
+	ata_qc_free(qc);
+	cmd->result = (DID_ERROR << 16);
+	cmd->scsi_done(cmd);
+err_mem:
+	DPRINTK("EXIT - internal\n");
+	return 0;
+
+defer:
+	ata_qc_free(qc);
+	DPRINTK("EXIT - defer\n");
+	if (rc == ATA_DEFER_LINK)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else
+		return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+struct ata_scsi_args {
+	struct ata_device	*dev;
+	u16			*id;
+	struct scsi_cmnd	*cmd;
+};
+
+/**
+ *	ata_scsi_rbuf_get - Map response buffer.
+ *	@cmd: SCSI command containing buffer to be mapped.
+ *	@copy_in: copy the command's scatterlist contents into the buffer
+ *	@flags: unsigned long variable to store irq enable status
+ *
+ *	Prepare buffer for simulated SCSI commands.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(ata_scsi_rbuf_lock) on success
+ *
+ *	RETURNS:
+ *	Pointer to response buffer.
+ */
+static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
+			       unsigned long *flags)
+{
+	spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
+
+	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
+	if (copy_in)
+		sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+				  ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+	return ata_scsi_rbuf;
+}
+
+/**
+ *	ata_scsi_rbuf_put - Unmap response buffer.
+ *	@cmd: SCSI command containing buffer to be unmapped.
+ *	@copy_out: copy out result
+ *	@flags: @flags passed to ata_scsi_rbuf_get()
+ *
+ *	Returns rbuf buffer.  The result is copied to @cmd's buffer if
+ *	@copy_out is true.
+ *
+ *	LOCKING:
+ *	Unlocks ata_scsi_rbuf_lock.
+ */
+static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
+				     unsigned long *flags)
+{
+	if (copy_out)
+		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
+}
+
+/**
+ *	ata_scsi_rbuf_fill - wrapper for SCSI command simulators
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@actor: Callback hook for desired SCSI command simulator
+ *
+ *	Takes care of the hard work of simulating a SCSI command...
+ *	Mapping the response buffer, calling the command's handler,
+ *	and handling the handler's return value.  This return value
+ *	indicates whether the handler wishes the SCSI command to be
+ *	completed successfully (0), or not (in which case cmd->result
+ *	and sense buffer are assumed to be set).
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+		unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+{
+	u8 *rbuf;
+	unsigned int rc;
+	struct scsi_cmnd *cmd = args->cmd;
+	unsigned long flags;
+
+	rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
+	rc = actor(args, rbuf);
+	ata_scsi_rbuf_put(cmd, rc == 0, &flags);
+
+	if (rc == 0)
+		cmd->result = SAM_STAT_GOOD;
+}
+
+/**
+ *	ata_scsiop_inq_std - Simulate INQUIRY command
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Returns standard device identification data associated
+ *	with non-VPD INQUIRY command output.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+{
+	static const u8 versions[] = {
+		0x00,
+		0x60,	/* SAM-3 (no version claimed) */
+
+		0x03,
+		0x20,	/* SBC-2 (no version claimed) */
+
+		0x03,
+		0x00	/* SPC-3 (no version claimed) */
+	};
+	static const u8 versions_zbc[] = {
+		0x00,
+		0xA0,	/* SAM-5 (no version claimed) */
+
+		0x06,
+		0x00,	/* SBC-4 (no version claimed) */
+
+		0x05,
+		0xC0,	/* SPC-5 (no version claimed) */
+
+		0x60,
+		0x24,   /* ZBC r05 */
+	};
+
+	u8 hdr[] = {
+		TYPE_DISK,
+		0,
+		0x5,	/* claim SPC-3 version compatibility */
+		2,
+		95 - 4,
+		0,
+		0,
+		2
+	};
+
+	VPRINTK("ENTER\n");
+
+	/* set scsi removable (RMB) bit per ata bit, or if the
+	 * AHCI port says it's external (Hotplug-capable, eSATA).
+	 */
+	if (ata_id_removable(args->id) ||
+	    (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+		hdr[1] |= (1 << 7);
+
+	if (args->dev->class == ATA_DEV_ZAC) {
+		hdr[0] = TYPE_ZBC;
+		hdr[2] = 0x7; /* claim SPC-5 version compatibility */
+	}
+
+	memcpy(rbuf, hdr, sizeof(hdr));
+	memcpy(&rbuf[8], "ATA     ", 8);
+	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+
+	/* From SAT, use last 2 words from fw rev unless they are spaces */
+	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+	if (strncmp(&rbuf[32], "    ", 4) == 0)
+		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+
+	if (rbuf[32] == 0 || rbuf[32] == ' ')
+		memcpy(&rbuf[32], "n/a ", 4);
+
+	if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
+		memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
+	else
+		memcpy(rbuf + 58, versions, sizeof(versions));
+
+	return 0;
+}
+
+/**
+ *	ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Returns list of inquiry VPD pages available.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+{
+	int num_pages;
+	static const u8 pages[] = {
+		0x00,	/* page 0x00, this page */
+		0x80,	/* page 0x80, unit serial no page */
+		0x83,	/* page 0x83, device ident page */
+		0x89,	/* page 0x89, ata info page */
+		0xb0,	/* page 0xb0, block limits page */
+		0xb1,	/* page 0xb1, block device characteristics page */
+		0xb2,	/* page 0xb2, thin provisioning page */
+		0xb6,	/* page 0xb6, zoned block device characteristics */
+	};
+
+	num_pages = sizeof(pages);
+	if (!(args->dev->flags & ATA_DFLAG_ZAC))
+		num_pages--;
+	rbuf[3] = num_pages;	/* number of supported VPD pages */
+	memcpy(rbuf + 4, pages, num_pages);
+	return 0;
+}
+
+/**
+ *	ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Returns ATA device serial number.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+{
+	static const u8 hdr[] = {
+		0,
+		0x80,			/* this page code */
+		0,
+		ATA_ID_SERNO_LEN,	/* page len */
+	};
+
+	memcpy(rbuf, hdr, sizeof(hdr));
+	ata_id_string(args->id, (unsigned char *) &rbuf[4],
+		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+	return 0;
+}
+
+/**
+ *	ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Yields two logical unit device identification designators:
+ *	 - vendor specific ASCII containing the ATA serial number
+ *	 - SAT defined "t10 vendor id based" containing ASCII vendor
+ *	   name ("ATA     "), model and serial numbers.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+{
+	const int sat_model_serial_desc_len = 68;
+	int num;
+
+	rbuf[1] = 0x83;			/* this page code */
+	num = 4;
+
+	/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
+	rbuf[num + 0] = 2;
+	rbuf[num + 3] = ATA_ID_SERNO_LEN;
+	num += 4;
+	ata_id_string(args->id, (unsigned char *) rbuf + num,
+		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+	num += ATA_ID_SERNO_LEN;
+
+	/* SAT defined lu model and serial numbers descriptor */
+	/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
+	rbuf[num + 0] = 2;
+	rbuf[num + 1] = 1;
+	rbuf[num + 3] = sat_model_serial_desc_len;
+	num += 4;
+	memcpy(rbuf + num, "ATA     ", 8);
+	num += 8;
+	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+		      ATA_ID_PROD_LEN);
+	num += ATA_ID_PROD_LEN;
+	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+		      ATA_ID_SERNO_LEN);
+	num += ATA_ID_SERNO_LEN;
+
+	if (ata_id_has_wwn(args->id)) {
+		/* SAT defined lu world wide name */
+		/* piv=0, assoc=lu, code_set=binary, designator=NAA */
+		rbuf[num + 0] = 1;
+		rbuf[num + 1] = 3;
+		rbuf[num + 3] = ATA_ID_WWN_LEN;
+		num += 4;
+		ata_id_string(args->id, (unsigned char *) rbuf + num,
+			      ATA_ID_WWN, ATA_ID_WWN_LEN);
+		num += ATA_ID_WWN_LEN;
+	}
+	rbuf[3] = num - 4;    /* page len (assume less than 256 bytes) */
+	return 0;
+}
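+
+/*
+ * Resulting page layout (illustrative): the function above emits, in
+ * order, a vendor-specific ASCII designator holding the 20-byte ATA
+ * serial number, a 68-byte T10 vendor ID designator ("ATA     " +
+ * model + serial), and, only if the drive reports a WWN, an 8-byte
+ * binary NAA designator.
+ */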
+
+/**
+ *	ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Yields SAT-specified ATA VPD page.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+{
+	struct ata_taskfile tf;
+
+	memset(&tf, 0, sizeof(tf));
+
+	rbuf[1] = 0x89;			/* our page code */
+	rbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
+	rbuf[3] = (0x238 & 0xff);
+
+	memcpy(&rbuf[8], "linux   ", 8);
+	memcpy(&rbuf[16], "libata          ", 16);
+	memcpy(&rbuf[32], DRV_VERSION, 4);
+
+	/* we don't store the ATA device signature, so we fake it */
+
+	tf.command = ATA_DRDY;		/* really, this is Status reg */
+	tf.lbal = 0x1;
+	tf.nsect = 0x1;
+
+	ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);	/* TODO: PMP? */
+	rbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */
+
+	rbuf[56] = ATA_CMD_ID_ATA;
+
+	memcpy(&rbuf[60], &args->id[0], 512);
+	return 0;
+}
+
+static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+{
+	u16 min_io_sectors;
+
+	rbuf[1] = 0xb0;
+	rbuf[3] = 0x3c;		/* required VPD size with unmap support */
+
+	/*
+	 * Optimal transfer length granularity.
+	 *
+	 * This is always one physical block, but for disks with a smaller
+	 * logical than physical sector size we need to figure out what the
+	 * latter is.
+	 */
+	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
+	put_unaligned_be16(min_io_sectors, &rbuf[6]);
+
+	/*
+	 * Optimal unmap granularity.
+	 *
+	 * The ATA spec doesn't even know about a granularity or alignment
+	 * for the TRIM command.  We can leave out most of the unmap related
+	 * VPD page entries, but we have to specify a granularity to signal
+	 * that we support some form of unmap - in this case via WRITE SAME
+	 * with the unmap bit set.
+	 */
+	if (ata_id_has_trim(args->id)) {
+		put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
+		put_unaligned_be32(1, &rbuf[28]);
+	}
+
+	return 0;
+}
+
+static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+{
+	int form_factor = ata_id_form_factor(args->id);
+	int media_rotation_rate = ata_id_rotation_rate(args->id);
+	u8 zoned = ata_id_zoned_cap(args->id);
+
+	rbuf[1] = 0xb1;
+	rbuf[3] = 0x3c;
+	rbuf[4] = media_rotation_rate >> 8;
+	rbuf[5] = media_rotation_rate;
+	rbuf[7] = form_factor;
+	if (zoned)
+		rbuf[8] = (zoned << 4);
+
+	return 0;
+}
+
+static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+{
+	/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
+	rbuf[1] = 0xb2;
+	rbuf[3] = 0x4;
+	rbuf[5] = 1 << 6;	/* TPWS */
+
+	return 0;
+}
+
+static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+{
+	/*
+	 * zbc-r05 SCSI Zoned Block device characteristics VPD page
+	 */
+	rbuf[1] = 0xb6;
+	rbuf[3] = 0x3C;
+
+	/*
+	 * URSWRZ bit is only meaningful for host-managed ZAC drives
+	 */
+	if (args->dev->zac_zoned_cap & 1)
+		rbuf[4] |= 1;
+	put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
+	put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
+	put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+
+	return 0;
+}
+
+/**
+ *	modecpy - Prepare response for MODE SENSE
+ *	@dest: output buffer
+ *	@src: data being copied
+ *	@n: length of mode page
+ *	@changeable: whether changeable parameters are requested
+ *
+ *	Generate a generic MODE SENSE page for either current or changeable
+ *	parameters.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
+{
+	if (changeable) {
+		memcpy(dest, src, 2);
+		memset(dest + 2, 0, n - 2);
+	} else {
+		memcpy(dest, src, n);
+	}
+}
+
+/**
+ *	ata_msense_caching - Simulate MODE SENSE caching info page
+ *	@id: device IDENTIFY data
+ *	@buf: output buffer
+ *	@changeable: whether changeable parameters are requested
+ *
+ *	Generate a caching info page, which conditionally indicates
+ *	write caching to the SCSI layer, depending on device
+ *	capabilities.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
+{
+	modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
+	if (changeable) {
+		buf[2] |= (1 << 2);	/* ata_mselect_caching() */
+	} else {
+		buf[2] |= (ata_id_wcache_enabled(id) << 2);	/* write cache enable */
+		buf[12] |= (!ata_id_rahead_enabled(id) << 5);	/* disable read ahead */
+	}
+	return sizeof(def_cache_mpage);
+}
+
+/**
+ *	ata_msense_control - Simulate MODE SENSE control mode page
+ *	@dev: ATA device of interest
+ *	@buf: output buffer
+ *	@changeable: whether changeable parameters are requested
+ *
+ *	Generate a generic MODE SENSE control mode page.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
+					bool changeable)
+{
+	modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
+	if (changeable) {
+		buf[2] |= (1 << 2);	/* ata_mselect_control() */
+	} else {
+		bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+		buf[2] |= (d_sense << 2);	/* descriptor format sense data */
+	}
+	return sizeof(def_control_mpage);
+}
+
+/**
+ *	ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
+ *	@buf: output buffer
+ *	@changeable: whether changeable parameters are requested
+ *
+ *	Generate a generic MODE SENSE r/w error recovery page.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
+{
+	modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
+		changeable);
+	return sizeof(def_rw_recovery_mpage);
+}
+
+/*
+ * We can turn this into a real blacklist if it's needed, for now just
+ * blacklist any Maxtor BANC1G10 revision firmware
+ */
+static int ata_dev_supports_fua(u16 *id)
+{
+	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
+
+	if (!libata_fua)
+		return 0;
+	if (!ata_id_has_fua(id))
+		return 0;
+
+	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
+
+	if (strcmp(model, "Maxtor"))
+		return 1;
+	if (strcmp(fw, "BANC1G10"))
+		return 1;
+
+	return 0; /* blacklisted */
+}
+
+/**
+ *	ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Simulate MODE SENSE commands. Assume this is invoked for direct
+ *	access devices (e.g. disks) only. There should be no block
+ *	descriptor for other device types.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+{
+	struct ata_device *dev = args->dev;
+	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+	static const u8 sat_blk_desc[] = {
+		0, 0, 0, 0,	/* number of blocks: sat unspecified */
+		0,
+		0, 0x2, 0x0	/* block length: 512 bytes */
+	};
+	u8 pg, spg;
+	unsigned int ebd, page_control, six_byte;
+	u8 dpofua, bp = 0xff;
+	u16 fp;
+
+	VPRINTK("ENTER\n");
+
+	six_byte = (scsicmd[0] == MODE_SENSE);
+	ebd = !(scsicmd[1] & 0x8);      /* dbd bit inverted == ebd */
+	/*
+	 * LLBA bit in msense(10) ignored (compliant)
+	 */
+
+	page_control = scsicmd[2] >> 6;
+	switch (page_control) {
+	case 0: /* current */
+	case 1: /* changeable */
+	case 2: /* defaults */
+		break;  /* supported */
+	case 3: /* saved */
+		goto saving_not_supp;
+	default:
+		fp = 2;
+		bp = 6;
+		goto invalid_fld;
+	}
+
+	if (six_byte)
+		p += 4 + (ebd ? 8 : 0);
+	else
+		p += 8 + (ebd ? 8 : 0);
+
+	pg = scsicmd[2] & 0x3f;
+	spg = scsicmd[3];
+	/*
+	 * No mode subpages supported (yet) but asking for _all_
+	 * subpages may be valid
+	 */
+	if (spg && (spg != ALL_SUB_MPAGES)) {
+		fp = 3;
+		goto invalid_fld;
+	}
+
+	switch(pg) {
+	case RW_RECOVERY_MPAGE:
+		p += ata_msense_rw_recovery(p, page_control == 1);
+		break;
+
+	case CACHE_MPAGE:
+		p += ata_msense_caching(args->id, p, page_control == 1);
+		break;
+
+	case CONTROL_MPAGE:
+		p += ata_msense_control(args->dev, p, page_control == 1);
+		break;
+
+	case ALL_MPAGES:
+		p += ata_msense_rw_recovery(p, page_control == 1);
+		p += ata_msense_caching(args->id, p, page_control == 1);
+		p += ata_msense_control(args->dev, p, page_control == 1);
+		break;
+
+	default:		/* invalid page code */
+		fp = 2;
+		goto invalid_fld;
+	}
+
+	dpofua = 0;
+	if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
+	    (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
+		dpofua = 1 << 4;
+
+	if (six_byte) {
+		rbuf[0] = p - rbuf - 1;
+		rbuf[2] |= dpofua;
+		if (ebd) {
+			rbuf[3] = sizeof(sat_blk_desc);
+			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
+		}
+	} else {
+		unsigned int output_len = p - rbuf - 2;
+
+		rbuf[0] = output_len >> 8;
+		rbuf[1] = output_len;
+		rbuf[3] |= dpofua;
+		if (ebd) {
+			rbuf[7] = sizeof(sat_blk_desc);
+			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+		}
+	}
+	return 0;
+
+invalid_fld:
+	ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
+	return 1;
+
+saving_not_supp:
+	ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+	 /* "Saving parameters not supported" */
+	return 1;
+}
+
+/**
+ *	ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Simulate READ CAPACITY commands.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+{
+	struct ata_device *dev = args->dev;
+	u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
+	u32 sector_size; /* logical sector size in bytes */
+	u8 log2_per_phys;
+	u16 lowest_aligned;
+
+	sector_size = ata_id_logical_sector_size(dev->id);
+	log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
+	lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
+
+	VPRINTK("ENTER\n");
+
+	if (args->cmd->cmnd[0] == READ_CAPACITY) {
+		if (last_lba >= 0xffffffffULL)
+			last_lba = 0xffffffff;
+
+		/* sector count, 32-bit */
+		rbuf[0] = last_lba >> (8 * 3);
+		rbuf[1] = last_lba >> (8 * 2);
+		rbuf[2] = last_lba >> (8 * 1);
+		rbuf[3] = last_lba;
+
+		/* sector size */
+		rbuf[4] = sector_size >> (8 * 3);
+		rbuf[5] = sector_size >> (8 * 2);
+		rbuf[6] = sector_size >> (8 * 1);
+		rbuf[7] = sector_size;
+	} else {
+		/* sector count, 64-bit */
+		rbuf[0] = last_lba >> (8 * 7);
+		rbuf[1] = last_lba >> (8 * 6);
+		rbuf[2] = last_lba >> (8 * 5);
+		rbuf[3] = last_lba >> (8 * 4);
+		rbuf[4] = last_lba >> (8 * 3);
+		rbuf[5] = last_lba >> (8 * 2);
+		rbuf[6] = last_lba >> (8 * 1);
+		rbuf[7] = last_lba;
+
+		/* sector size */
+		rbuf[ 8] = sector_size >> (8 * 3);
+		rbuf[ 9] = sector_size >> (8 * 2);
+		rbuf[10] = sector_size >> (8 * 1);
+		rbuf[11] = sector_size;
+
+		rbuf[12] = 0;
+		rbuf[13] = log2_per_phys;
+		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+		rbuf[15] = lowest_aligned;
+
+		if (ata_id_has_trim(args->id) &&
+		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+			rbuf[14] |= 0x80; /* LBPME */
+
+			if (ata_id_has_zero_after_trim(args->id) &&
+			    dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+				ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+				rbuf[14] |= 0x40; /* LBPRZ */
+			}
+		}
+		if (ata_id_zoned_cap(args->id) ||
+		    args->dev->class == ATA_DEV_ZAC)
+			rbuf[12] = (1 << 4); /* RC_BASIS */
+	}
+	return 0;
+}
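+
+/*
+ * Worked example (illustrative drive): a 512e disk (512-byte logical,
+ * 4096-byte physical sectors) has log2_per_phys = 3, so the
+ * READ CAPACITY(16) reply above carries sector_size = 512 in bytes
+ * 8-11 and a "logical blocks per physical block exponent" of 3 in
+ * byte 13.
+ */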
+
+/**
+ *	ata_scsiop_report_luns - Simulate REPORT LUNS command
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Simulate REPORT LUNS command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+{
+	VPRINTK("ENTER\n");
+	rbuf[3] = 8;	/* just one lun, LUN 0, size 8 bytes */
+
+	return 0;
+}
+
+static void atapi_sense_complete(struct ata_queued_cmd *qc)
+{
+	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
+		/* FIXME: not quite right; we don't want the
+		 * translation of taskfile registers into
+		 * sense descriptors, since that's only
+		 * correct for ATA, not ATAPI
+		 */
+		ata_gen_passthru_sense(qc);
+	}
+
+	ata_qc_done(qc);
+}
+
+/* is it pointless to prefer PIO for "safety reasons"? */
+static inline int ata_pio_use_silly(struct ata_port *ap)
+{
+	return (ap->flags & ATA_FLAG_PIO_DMA);
+}
+
+static void atapi_request_sense(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scsi_cmnd *cmd = qc->scsicmd;
+
+	DPRINTK("ATAPI request sense\n");
+
+	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+#ifdef CONFIG_ATA_SFF
+	if (ap->ops->sff_tf_read)
+		ap->ops->sff_tf_read(ap, &qc->tf);
+#endif
+
+	/* fill these in, for the case where they are -not- overwritten */
+	cmd->sense_buffer[0] = 0x70;
+	cmd->sense_buffer[2] = qc->tf.feature >> 4;
+
+	ata_qc_reinit(qc);
+
+	/* setup sg table and init transfer direction */
+	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+	ata_sg_init(qc, &qc->sgent, 1);
+	qc->dma_dir = DMA_FROM_DEVICE;
+
+	memset(&qc->cdb, 0, qc->dev->cdb_len);
+	qc->cdb[0] = REQUEST_SENSE;
+	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.command = ATA_CMD_PACKET;
+
+	if (ata_pio_use_silly(ap)) {
+		qc->tf.protocol = ATAPI_PROT_DMA;
+		qc->tf.feature |= ATAPI_PKT_DMA;
+	} else {
+		qc->tf.protocol = ATAPI_PROT_PIO;
+		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
+		qc->tf.lbah = 0;
+	}
+	qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+	qc->complete_fn = atapi_sense_complete;
+
+	ata_qc_issue(qc);
+
+	DPRINTK("EXIT\n");
+}
+
+/*
+ * ATAPI devices typically report zero for their SCSI version, and sometimes
+ * deviate from the spec WRT response data format.  If SCSI version is
+ * reported as zero like normal, then we make the following fixups:
+ *   1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
+ *	modern device.
+ *   2) Ensure response data format / ATAPI information are always correct.
+ */
+static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
+{
+	u8 buf[4];
+
+	sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
+	if (buf[2] == 0) {
+		buf[2] = 0x5;
+		buf[3] = 0x32;
+	}
+	sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
+}
+
+static void atapi_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	unsigned int err_mask = qc->err_mask;
+
+	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
+
+	/* handle completion from new EH */
+	if (unlikely(qc->ap->ops->error_handler &&
+		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+
+		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+			/* FIXME: not quite right; we don't want the
+			 * translation of taskfile registers into
+			 * sense descriptors, since that's only
+			 * correct for ATA, not ATAPI
+			 */
+			ata_gen_passthru_sense(qc);
+		}
+
+		/* SCSI EH automatically locks door if sdev->locked is
+		 * set.  Sometimes door lock request continues to
+		 * fail, for example, when no media is present.  This
+		 * creates a loop - SCSI EH issues door lock which
+		 * fails and gets invoked again to acquire sense data
+		 * for the failed command.
+		 *
+		 * If door lock fails, always clear sdev->locked to
+		 * avoid this infinite loop.
+		 *
+		 * This may happen before SCSI scan is complete.  Make
+		 * sure qc->dev->sdev isn't NULL before dereferencing.
+		 */
+		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
+			qc->dev->sdev->locked = 0;
+
+		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
+		ata_qc_done(qc);
+		return;
+	}
+
+	/* successful completion or old EH failure path */
+	if (unlikely(err_mask & AC_ERR_DEV)) {
+		cmd->result = SAM_STAT_CHECK_CONDITION;
+		atapi_request_sense(qc);
+		return;
+	} else if (unlikely(err_mask)) {
+		/* FIXME: not quite right; we don't want the
+		 * translation of taskfile registers into
+		 * sense descriptors, since that's only
+		 * correct for ATA, not ATAPI
+		 */
+		ata_gen_passthru_sense(qc);
+	} else {
+		if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
+			atapi_fixup_inquiry(cmd);
+		cmd->result = SAM_STAT_GOOD;
+	}
+
+	ata_qc_done(qc);
+}
+
+/**
+ *	atapi_xlat - Initialize PACKET taskfile
+ *	@qc: command structure to be initialized
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on failure.
+ */
+static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_device *dev = qc->dev;
+	int nodata = (scmd->sc_data_direction == DMA_NONE);
+	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
+	unsigned int nbytes;
+
+	memset(qc->cdb, 0, dev->cdb_len);
+	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
+
+	qc->complete_fn = atapi_qc_complete;
+
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+		qc->tf.flags |= ATA_TFLAG_WRITE;
+		DPRINTK("direction: write\n");
+	}
+
+	qc->tf.command = ATA_CMD_PACKET;
+	ata_qc_set_pc_nbytes(qc);
+
+	/* check whether ATAPI DMA is safe */
+	if (!nodata && !using_pio && atapi_check_dma(qc))
+		using_pio = 1;
+
+	/* Some controller variants snoop this value for Packet
+	 * transfers to do state machine and FIFO management.  Thus we
+	 * want to set it properly, even for DMA where it is
+	 * effectively meaningless.
+	 */
+	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
+
+	/* Most ATAPI devices which honor transfer chunk size don't
+	 * behave according to the spec when an odd chunk size which
+	 * matches the transfer length is specified, i.e. when the number
+	 * of bytes to transfer is 2n+1.  According to the spec, the
+	 * device should indicate that 2n+1 bytes are going to be
+	 * transferred and then transfer 2n+2 bytes where the last byte
+	 * is padding.
+	 *
+	 * In practice, this doesn't happen.  ATAPI devices first
+	 * indicate and transfer 2n bytes and then indicate and
+	 * transfer 2 bytes where the last byte is padding.
+	 *
+	 * This inconsistency confuses several controllers which
+	 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
+	 * These controllers use actual number of transferred bytes to
+	 * update DMA poitner and transfer of 4n+2 bytes make those
+	 * controller push DMA pointer by 4n+4 bytes because SATA data
+	 * FISes are aligned to 4 bytes.  This causes data corruption
+	 * and buffer overrun.
+	 *
+	 * Always setting nbytes to even number solves this problem
+	 * because then ATAPI devices don't have to split data at 2n
+	 * boundaries.
+	 */
+	if (nbytes & 0x1)
+		nbytes++;
+
+	qc->tf.lbam = (nbytes & 0xFF);
+	qc->tf.lbah = (nbytes >> 8);
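+	/* Example: a 513-byte request is rounded up to nbytes = 514 =
+	 * 0x0202 above, so the device sees lbam = 0x02 and lbah = 0x02
+	 * as its byte count limit.
+	 */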
+
+	if (nodata)
+		qc->tf.protocol = ATAPI_PROT_NODATA;
+	else if (using_pio)
+		qc->tf.protocol = ATAPI_PROT_PIO;
+	else {
+		/* DMA data xfer */
+		qc->tf.protocol = ATAPI_PROT_DMA;
+		qc->tf.feature |= ATAPI_PKT_DMA;
+
+		if ((dev->flags & ATA_DFLAG_DMADIR) &&
+		    (scmd->sc_data_direction != DMA_TO_DEVICE))
+			/* some SATA bridges need us to indicate data xfer direction */
+			qc->tf.feature |= ATAPI_DMADIR;
+	}
+
+	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
+	   as ATAPI tape drives don't get this right otherwise */
+	return 0;
+}
+
+static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+{
+	if (!sata_pmp_attached(ap)) {
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
+			return &ap->link.device[devno];
+	} else {
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
+			return &ap->pmp_link[devno].device[0];
+	}
+
+	return NULL;
+}
+
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
+					      const struct scsi_device *scsidev)
+{
+	int devno;
+
+	/* skip commands not addressed to targets we simulate */
+	if (!sata_pmp_attached(ap)) {
+		if (unlikely(scsidev->channel || scsidev->lun))
+			return NULL;
+		devno = scsidev->id;
+	} else {
+		if (unlikely(scsidev->id || scsidev->lun))
+			return NULL;
+		devno = scsidev->channel;
+	}
+
+	return ata_find_dev(ap, devno);
+}
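+
+/*
+ * Mapping example: without a port multiplier, SCSI id 1 on channel 0
+ * addresses link device 1 (the slave); with a PMP attached, SCSI
+ * channel 3 addresses device 0 behind PMP link 3 instead.
+ */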
+
+/**
+ *	ata_scsi_find_dev - lookup ata_device from scsi_cmnd
+ *	@ap: ATA port to which the device is attached
+ *	@scsidev: SCSI device from which we derive the ATA device
+ *
+ *	Given various information provided in struct scsi_cmnd,
+ *	map that onto an ATA bus, and using that mapping
+ *	determine which ata_device is associated with the
+ *	SCSI command to be sent.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Associated ATA device, or %NULL if not found.
+ */
+static struct ata_device *
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
+{
+	struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
+
+	if (unlikely(!dev || !ata_dev_enabled(dev)))
+		return NULL;
+
+	return dev;
+}
+
+/*
+ *	ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
+ *	@byte1: Byte 1 from pass-thru CDB.
+ *
+ *	RETURNS:
+ *	ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
+ */
+static u8
+ata_scsi_map_proto(u8 byte1)
+{
+	switch((byte1 & 0x1e) >> 1) {
+	case 3:		/* Non-data */
+		return ATA_PROT_NODATA;
+
+	case 6:		/* DMA */
+	case 10:	/* UDMA Data-in */
+	case 11:	/* UDMA Data-Out */
+		return ATA_PROT_DMA;
+
+	case 4:		/* PIO Data-in */
+	case 5:		/* PIO Data-out */
+		return ATA_PROT_PIO;
+
+	case 12:	/* FPDMA */
+		return ATA_PROT_NCQ;
+
+	case 0:		/* Hard Reset */
+	case 1:		/* SRST */
+	case 8:		/* Device Diagnostic */
+	case 9:		/* Device Reset */
+	case 7:		/* DMA Queued */
+	case 15:	/* Return Response Info */
+	default:	/* Reserved */
+		break;
+	}
+
+	return ATA_PROT_UNKNOWN;
+}
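+
+/*
+ * Worked example: CDB byte 1 = 0x0d gives (0x0d & 0x1e) >> 1 == 6
+ * (DMA), so ata_scsi_map_proto() returns ATA_PROT_DMA; byte 1 = 0x08
+ * decodes to 4 (PIO Data-in) and yields ATA_PROT_PIO.
+ */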
+
+/**
+ *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
+ *	@qc: command structure to be initialized
+ *
+ *	Handles either 12, 16, or 32-byte versions of the CDB.
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on failure.
+ */
+static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &(qc->tf);
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_device *dev = qc->dev;
+	const u8 *cdb = scmd->cmnd;
+	u16 fp;
+	u16 cdb_offset = 0;
+
+	/* A 7Fh variable-length CDB means an ATA pass-thru(32) */
+	if (cdb[0] == VARIABLE_LENGTH_CMD)
+		cdb_offset = 9;
+
+	tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]);
+	if (tf->protocol == ATA_PROT_UNKNOWN) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
+		tf->protocol = ATA_PROT_NCQ_NODATA;
+
+	/* enable LBA */
+	tf->flags |= ATA_TFLAG_LBA;
+
+	/*
+	 * 12 and 16 byte CDBs use different offsets to
+	 * provide the various register values.
+	 */
+	if (cdb[0] == ATA_16) {
+		/*
+		 * 16-byte CDB - may contain extended commands.
+		 *
+		 * If that is the case, copy the upper byte register values.
+		 */
+		if (cdb[1] & 0x01) {
+			tf->hob_feature = cdb[3];
+			tf->hob_nsect = cdb[5];
+			tf->hob_lbal = cdb[7];
+			tf->hob_lbam = cdb[9];
+			tf->hob_lbah = cdb[11];
+			tf->flags |= ATA_TFLAG_LBA48;
+		} else
+			tf->flags &= ~ATA_TFLAG_LBA48;
+
+		/*
+		 * Always copy low byte, device and command registers.
+		 */
+		tf->feature = cdb[4];
+		tf->nsect = cdb[6];
+		tf->lbal = cdb[8];
+		tf->lbam = cdb[10];
+		tf->lbah = cdb[12];
+		tf->device = cdb[13];
+		tf->command = cdb[14];
+	} else if (cdb[0] == ATA_12) {
+		/*
+		 * 12-byte CDB - incapable of extended commands.
+		 */
+		tf->flags &= ~ATA_TFLAG_LBA48;
+
+		tf->feature = cdb[3];
+		tf->nsect = cdb[4];
+		tf->lbal = cdb[5];
+		tf->lbam = cdb[6];
+		tf->lbah = cdb[7];
+		tf->device = cdb[8];
+		tf->command = cdb[9];
+	} else {
+		/*
+		 * 32-byte CDB - may contain extended command fields.
+		 *
+		 * If that is the case, copy the upper byte register values.
+		 */
+		if (cdb[10] & 0x01) {
+			tf->hob_feature = cdb[20];
+			tf->hob_nsect = cdb[22];
+			tf->hob_lbal = cdb[16];
+			tf->hob_lbam = cdb[15];
+			tf->hob_lbah = cdb[14];
+			tf->flags |= ATA_TFLAG_LBA48;
+		} else
+			tf->flags &= ~ATA_TFLAG_LBA48;
+
+		tf->feature = cdb[21];
+		tf->nsect = cdb[23];
+		tf->lbal = cdb[19];
+		tf->lbam = cdb[18];
+		tf->lbah = cdb[17];
+		tf->device = cdb[24];
+		tf->command = cdb[25];
+		tf->auxiliary = get_unaligned_be32(&cdb[28]);
+	}
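+	/* Example: an ATA_16 pass-thru of READ DMA EXT carries
+	 * cdb[0] = 0x85, cdb[1] = 0x0d (DMA protocol, extend bit set),
+	 * cdb[14] = 0x25, with the 48-bit LBA split across the
+	 * hob_lba* bytes (cdb[7]/[9]/[11]) and low lba* bytes
+	 * (cdb[8]/[10]/[12]) copied above.
+	 */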
+
+	/* For NCQ commands copy the tag value */
+	if (ata_is_ncq(tf->protocol))
+		tf->nsect = qc->hw_tag << 3;
+
+	/* enforce correct master/slave bit */
+	tf->device = dev->devno ?
+		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
+
+	switch (tf->command) {
+	/* READ/WRITE LONG use a non-standard sect_size */
+	case ATA_CMD_READ_LONG:
+	case ATA_CMD_READ_LONG_ONCE:
+	case ATA_CMD_WRITE_LONG:
+	case ATA_CMD_WRITE_LONG_ONCE:
+		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
+			fp = 1;
+			goto invalid_fld;
+		}
+		qc->sect_size = scsi_bufflen(scmd);
+		break;
+
+	/* commands using reported Logical Block size (e.g. 512 or 4K) */
+	case ATA_CMD_CFA_WRITE_NE:
+	case ATA_CMD_CFA_TRANS_SECT:
+	case ATA_CMD_CFA_WRITE_MULT_NE:
+	/* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
+	case ATA_CMD_READ:
+	case ATA_CMD_READ_EXT:
+	case ATA_CMD_READ_QUEUED:
+	/* XXX: case ATA_CMD_READ_QUEUED_EXT: */
+	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_READ_MULTI:
+	case ATA_CMD_READ_MULTI_EXT:
+	case ATA_CMD_PIO_READ:
+	case ATA_CMD_PIO_READ_EXT:
+	case ATA_CMD_READ_STREAM_DMA_EXT:
+	case ATA_CMD_READ_STREAM_EXT:
+	case ATA_CMD_VERIFY:
+	case ATA_CMD_VERIFY_EXT:
+	case ATA_CMD_WRITE:
+	case ATA_CMD_WRITE_EXT:
+	case ATA_CMD_WRITE_FUA_EXT:
+	case ATA_CMD_WRITE_QUEUED:
+	case ATA_CMD_WRITE_QUEUED_FUA_EXT:
+	case ATA_CMD_FPDMA_WRITE:
+	case ATA_CMD_WRITE_MULTI:
+	case ATA_CMD_WRITE_MULTI_EXT:
+	case ATA_CMD_WRITE_MULTI_FUA_EXT:
+	case ATA_CMD_PIO_WRITE:
+	case ATA_CMD_PIO_WRITE_EXT:
+	case ATA_CMD_WRITE_STREAM_DMA_EXT:
+	case ATA_CMD_WRITE_STREAM_EXT:
+		qc->sect_size = scmd->device->sector_size;
+		break;
+
+	/* Everything else uses 512 byte "sectors" */
+	default:
+		qc->sect_size = ATA_SECT_SIZE;
+	}
+
+	/*
+	 * Set flags so that all registers will be written, pass on
+	 * write indication (used for PIO/DMA setup), result TF is
+	 * copied back and we don't whine too much about its failure.
+	 */
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	if (scmd->sc_data_direction == DMA_TO_DEVICE)
+		tf->flags |= ATA_TFLAG_WRITE;
+
+	qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+
+	/*
+	 * Set transfer length.
+	 *
+	 * TODO: find out if we need to do more here to
+	 *       cover scatter/gather case.
+	 */
+	ata_qc_set_pc_nbytes(qc);
+
+	/* We may not issue DMA commands if no DMA mode is set */
+	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	/* We may not issue NCQ commands to devices not supporting NCQ */
+	if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	/* sanity check for pio multi commands */
+	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	if (is_multi_taskfile(tf)) {
+		unsigned int multi_count = 1 << (cdb[1] >> 5);
+
+		/* compare the passed through multi_count
+		 * with the cached multi_count of libata
+		 */
+		if (multi_count != dev->multi_count)
+			ata_dev_warn(dev, "invalid multi_count %u ignored\n",
+				     multi_count);
+	}
+
+	/*
+	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
+	 * SET_FEATURES - XFER MODE must be preceded/succeeded
+	 * by an update to hardware-specific registers for each
+	 * controller (i.e. the reason for ->set_piomode(),
+	 * ->set_dmamode(), and ->post_set_mode() hooks).
+	 */
+	if (tf->command == ATA_CMD_SET_FEATURES &&
+	    tf->feature == SETFEATURES_XFER) {
+		fp = (cdb[0] == ATA_16) ? 4 : 3;
+		goto invalid_fld;
+	}
+
+	/*
+	 * Filter TPM commands by default. These provide an
+	 * essentially uncontrolled encrypted "back door" between
+	 * applications and the disk. Set libata.allow_tpm=1 if you
+	 * have a real reason for wanting to use them. This ensures
+	 * that installed software cannot easily mess stuff up without
+	 * user intent. DVR type users will probably ship with this enabled
+	 * for movie content management.
+	 *
+	 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
+	 * for this and should do in future but that it is not sufficient as
+	 * DCS is an optional feature set. Thus we also do the software filter
+	 * so that we comply with the TC consortium stated goal that the user
+	 * can turn off TC features of their system.
+	 */
+	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
+		fp = (cdb[0] == ATA_16) ? 14 : 9;
+		goto invalid_fld;
+	}
+
+	return 0;
+
+ invalid_fld:
+	ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
+	return 1;
+}
+
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Up to 64 entries of the format:
+ *   63:48 Range Length
+ *   47:0  LBA
+ *
+ *  A Range Length of 0 is ignored.
+ *  LBAs should be in sorted order and must not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+					u64 sector, u32 count)
+{
+	struct scsi_device *sdp = cmd->device;
+	size_t len = sdp->sector_size;
+	size_t r;
+	__le64 *buf;
+	u32 i = 0;
+	unsigned long flags;
+
+	WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+	if (len > ATA_SCSI_RBUF_SIZE)
+		len = ATA_SCSI_RBUF_SIZE;
+
+	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+	buf = ((void *)ata_scsi_rbuf);
+	memset(buf, 0, len);
+	while (i < trmax) {
+		u64 entry = sector |
+			((u64)(count > 0xffff ? 0xffff : count) << 48);
+		buf[i++] = __cpu_to_le64(entry);
+		if (count <= 0xffff)
+			break;
+		count -= 0xffff;
+		sector += 0xffff;
+	}
+	r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+	return r;
+}
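+
+/*
+ * Worked example: for sector = 1000 and count = 98304 (0x18000) the
+ * loop above emits two descriptors:
+ *   entry 0 = (0xffff << 48) | 1000   (65535 sectors from LBA 1000)
+ *   entry 1 = (0x8001 << 48) | 66535  (32769 sectors from LBA 66535)
+ */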
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to an ATA DSM TRIM command;
+ * only the UNMAP flavour is accepted:
+ *
+ *   - When the UNMAP flag is set, translate to DSM TRIM
+ *   - When clear, reject the command (SCT Write Same is not
+ *     translated here)
+ */
+static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct scsi_device *sdp = scmd->device;
+	size_t len = sdp->sector_size;
+	struct ata_device *dev = qc->dev;
+	const u8 *cdb = scmd->cmnd;
+	u64 block;
+	u32 n_block;
+	const u32 trmax = len >> 3;
+	u32 size;
+	u16 fp;
+	u8 bp = 0xff;
+	u8 unmap = cdb[1] & 0x8;
+
+	/* we may not issue DMA commands if no DMA mode is set */
+	if (unlikely(!dev->dma_mode))
+		goto invalid_opcode;
+
+	/*
+	 * We only allow sending this command through the block layer,
+	 * as it modifies the DATA OUT buffer, which would corrupt user
+	 * memory for SG_IO commands.
+	 */
+	if (unlikely(blk_rq_is_passthrough(scmd->request)))
+		goto invalid_opcode;
+
+	if (unlikely(scmd->cmd_len < 16)) {
+		fp = 15;
+		goto invalid_fld;
+	}
+	scsi_16_lba_len(cdb, &block, &n_block);
+
+	if (!unmap ||
+	    (dev->horkage & ATA_HORKAGE_NOTRIM) ||
+	    !ata_id_has_trim(dev->id)) {
+		fp = 1;
+		bp = 3;
+		goto invalid_fld;
+	}
+	/* If the request is too large the cmd is invalid */
+	if (n_block > 0xffff * trmax) {
+		fp = 2;
+		goto invalid_fld;
+	}
+
+	/*
+	 * WRITE SAME always has a sector-sized buffer as payload, so
+	 * this should never be a multiple-entry S/G list.
+	 */
+	if (!scsi_sg_count(scmd))
+		goto invalid_param_len;
+
+	/*
+	 * The copied size must match the sector size in bytes.
+	 * For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count)
+	 * is defined as the number of 512-byte blocks to be transferred.
+	 */
+
+	size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+	if (size != len)
+		goto invalid_param_len;
+
+	if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+		/* Newer devices support queued TRIM commands */
+		tf->protocol = ATA_PROT_NCQ;
+		tf->command = ATA_CMD_FPDMA_SEND;
+		tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+		tf->nsect = qc->hw_tag << 3;
+		tf->hob_feature = (size / 512) >> 8;
+		tf->feature = size / 512;
+
+		tf->auxiliary = 1;
+	} else {
+		tf->protocol = ATA_PROT_DMA;
+		tf->hob_feature = 0;
+		tf->feature = ATA_DSM_TRIM;
+		tf->hob_nsect = (size / 512) >> 8;
+		tf->nsect = size / 512;
+		tf->command = ATA_CMD_DSM;
+	}
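+	/* In both branches above the payload length travels as a count
+	 * of 512-byte blocks: e.g. a 4096-byte descriptor buffer gives
+	 * a 16-bit count of 8, split across feature/hob_feature (NCQ)
+	 * or nsect/hob_nsect (DSM).
+	 */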
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
+		     ATA_TFLAG_WRITE;
+
+	ata_qc_set_pc_nbytes(qc);
+
+	return 0;
+
+invalid_fld:
+	ata_scsi_set_invalid_field(dev, scmd, fp, bp);
+	return 1;
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+invalid_opcode:
+	/* "Invalid command operation code" */
+	ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
+	return 1;
+}
+
+/**
+ *	ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ *	@args: device MAINTENANCE_IN data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ *	Yields a subset to satisfy scsi_report_opcode()
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+	struct ata_device *dev = args->dev;
+	u8 *cdb = args->cmd->cmnd;
+	u8 supported = 0;
+	unsigned int err = 0;
+
+	if (cdb[2] != 1) {
+		ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+		err = 2;
+		goto out;
+	}
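+	/* A "supported" value of 3 below means "supported in
+	 * conformance with a SCSI standard" in the REPORT SUPPORTED
+	 * OPERATION CODES reply.
+	 */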
+	switch (cdb[3]) {
+	case INQUIRY:
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_CAPACITY:
+	case SERVICE_ACTION_IN_16:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+	case SYNCHRONIZE_CACHE:
+	case REZERO_UNIT:
+	case SEEK_6:
+	case SEEK_10:
+	case TEST_UNIT_READY:
+	case SEND_DIAGNOSTIC:
+	case MAINTENANCE_IN:
+	case READ_6:
+	case READ_10:
+	case READ_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_16:
+	case ATA_12:
+	case ATA_16:
+	case VERIFY:
+	case VERIFY_16:
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+	case START_STOP:
+		supported = 3;
+		break;
+	case ZBC_IN:
+	case ZBC_OUT:
+		if (ata_id_zoned_cap(dev->id) ||
+		    dev->class == ATA_DEV_ZAC)
+			supported = 3;
+		break;
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		if (dev->flags & ATA_DFLAG_TRUSTED)
+			supported = 3;
+		break;
+	default:
+		break;
+	}
+out:
+	rbuf[1] = supported; /* supported */
+	return err;
+}
+
+/**
+ *	ata_scsi_report_zones_complete - convert ATA output
+ *	@qc: command structure returning the data
+ *
+ *	Convert T-13 little-endian field representation into
+ *	T-10 big-endian field representation.
+ *	What a mess.
+ */
+static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int bytes = 0;
+
+	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+	local_irq_save(flags);
+	while (sg_miter_next(&miter)) {
+		unsigned int offset = 0;
+
+		if (bytes == 0) {
+			char *hdr;
+			u32 list_length;
+			u64 max_lba, opt_lba;
+			u16 same;
+
+			/* Swizzle header */
+			hdr = miter.addr;
+			list_length = get_unaligned_le32(&hdr[0]);
+			same = get_unaligned_le16(&hdr[4]);
+			max_lba = get_unaligned_le64(&hdr[8]);
+			opt_lba = get_unaligned_le64(&hdr[16]);
+			put_unaligned_be32(list_length, &hdr[0]);
+			hdr[4] = same & 0xf;
+			put_unaligned_be64(max_lba, &hdr[8]);
+			put_unaligned_be64(opt_lba, &hdr[16]);
+			offset += 64;
+			bytes += 64;
+		}
+		while (offset < miter.length) {
+			char *rec;
+			u8 cond, type, non_seq, reset;
+			u64 size, start, wp;
+
+			/* Swizzle zone descriptor */
+			rec = miter.addr + offset;
+			type = rec[0] & 0xf;
+			cond = (rec[1] >> 4) & 0xf;
+			non_seq = (rec[1] & 2);
+			reset = (rec[1] & 1);
+			size = get_unaligned_le64(&rec[8]);
+			start = get_unaligned_le64(&rec[16]);
+			wp = get_unaligned_le64(&rec[24]);
+			rec[0] = type;
+			rec[1] = (cond << 4) | non_seq | reset;
+			put_unaligned_be64(size, &rec[8]);
+			put_unaligned_be64(start, &rec[16]);
+			put_unaligned_be64(wp, &rec[24]);
+			WARN_ON(offset + 64 > miter.length);
+			offset += 64;
+			bytes += 64;
+		}
+	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
+
+	ata_scsi_qc_complete(qc);
+}
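+
+/*
+ * Byte-order example: a maximum LBA of 0x0123456789ab arrives from the
+ * device little-endian as ab 89 67 45 23 01 00 00 and is rewritten
+ * big-endian as 00 00 01 23 45 67 89 ab for the SCSI REPORT ZONES
+ * header.
+ */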
+
+static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	u16 sect, fp = (u16)-1;
+	u8 sa, options, bp = 0xff;
+	u64 block;
+	u32 n_block;
+
+	if (unlikely(scmd->cmd_len < 16)) {
+		ata_dev_warn(qc->dev, "invalid cdb length %d\n",
+			     scmd->cmd_len);
+		fp = 15;
+		goto invalid_fld;
+	}
+	scsi_16_lba_len(cdb, &block, &n_block);
+	if (n_block != scsi_bufflen(scmd)) {
+		ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
+			     n_block, scsi_bufflen(scmd));
+		goto invalid_param_len;
+	}
+	sa = cdb[1] & 0x1f;
+	if (sa != ZI_REPORT_ZONES) {
+		ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
+		fp = 1;
+		goto invalid_fld;
+	}
+	/*
+	 * ZAC allows only for transfers in 512 byte blocks,
+	 * and uses a 16 bit value for the transfer count.
+	 */
+	if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
+		ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
+		goto invalid_param_len;
+	}
+	sect = n_block / 512;
+	options = cdb[14] & 0xbf;
+
+	if (ata_ncq_enabled(qc->dev) &&
+	    ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
+		tf->protocol = ATA_PROT_NCQ;
+		tf->command = ATA_CMD_FPDMA_RECV;
+		tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
+		tf->nsect = qc->hw_tag << 3;
+		tf->feature = sect & 0xff;
+		tf->hob_feature = (sect >> 8) & 0xff;
+		tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
+	} else {
+		tf->command = ATA_CMD_ZAC_MGMT_IN;
+		tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+		tf->protocol = ATA_PROT_DMA;
+		tf->hob_feature = options;
+		tf->hob_nsect = (sect >> 8) & 0xff;
+		tf->nsect = sect & 0xff;
+	}
+	tf->device = ATA_LBA;
+	tf->lbah = (block >> 16) & 0xff;
+	tf->lbam = (block >> 8) & 0xff;
+	tf->lbal = block & 0xff;
+	tf->hob_lbah = (block >> 40) & 0xff;
+	tf->hob_lbam = (block >> 32) & 0xff;
+	tf->hob_lbal = (block >> 24) & 0xff;
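+	/* Example: block = 0x123456789abc splits into lbal/lbam/lbah =
+	 * bc/9a/78 and hob_lbal/hob_lbam/hob_lbah = 56/34/12.
+	 */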
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
+
+	ata_qc_set_pc_nbytes(qc);
+
+	qc->complete_fn = ata_scsi_report_zones_complete;
+
+	return 0;
+
+invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
+	return 1;
+
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+}
+
+static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	struct ata_device *dev = qc->dev;
+	const u8 *cdb = scmd->cmnd;
+	u8 all, sa;
+	u64 block;
+	u32 n_block;
+	u16 fp = (u16)-1;
+
+	if (unlikely(scmd->cmd_len < 16)) {
+		fp = 15;
+		goto invalid_fld;
+	}
+
+	sa = cdb[1] & 0x1f;
+	if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
+	    (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
+		fp = 1;
+		goto invalid_fld;
+	}
+
+	scsi_16_lba_len(cdb, &block, &n_block);
+	if (n_block) {
+		/*
+		 * ZAC MANAGEMENT OUT doesn't define any length
+		 */
+		goto invalid_param_len;
+	}
+
+	all = cdb[14] & 0x1;
+	if (all) {
+		/*
+		 * Ignore the block address (zone ID) as defined by ZBC.
+		 */
+		block = 0;
+	} else if (block >= dev->n_sectors) {
+		/*
+		 * Block must be a valid zone ID (a zone start LBA).
+		 */
+		fp = 2;
+		goto invalid_fld;
+	}
+
+	if (ata_ncq_enabled(qc->dev) &&
+	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
+		tf->protocol = ATA_PROT_NCQ_NODATA;
+		tf->command = ATA_CMD_NCQ_NON_DATA;
+		tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
+		tf->nsect = qc->hw_tag << 3;
+		tf->auxiliary = sa | ((u16)all << 8);
+	} else {
+		tf->protocol = ATA_PROT_NODATA;
+		tf->command = ATA_CMD_ZAC_MGMT_OUT;
+		tf->feature = sa;
+		tf->hob_feature = all;
+	}
+	tf->lbah = (block >> 16) & 0xff;
+	tf->lbam = (block >> 8) & 0xff;
+	tf->lbal = block & 0xff;
+	tf->hob_lbah = (block >> 40) & 0xff;
+	tf->hob_lbam = (block >> 32) & 0xff;
+	tf->hob_lbal = (block >> 24) & 0xff;
+	tf->device = ATA_LBA;
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+
+	return 0;
+
+ invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
+	return 1;
+invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+}
+
+/**
+ *	ata_mselect_caching - Simulate MODE SELECT for caching info page
+ *	@qc: Storage for translated ATA taskfile
+ *	@buf: input buffer
+ *	@len: number of valid bytes in the input buffer
+ *	@fp: out parameter for the failed field on error
+ *
+ *	Prepare a taskfile to modify caching information for the device.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static int ata_mselect_caching(struct ata_queued_cmd *qc,
+			       const u8 *buf, int len, u16 *fp)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+	u8 mpage[CACHE_MPAGE_LEN];
+	u8 wce;
+	int i;
+
+	/*
+	 * The first two bytes of def_cache_mpage are a header, so offsets
+	 * in mpage are off by 2 compared to buf.  Same for len.
+	 */
+
+	if (len != CACHE_MPAGE_LEN - 2) {
+		if (len < CACHE_MPAGE_LEN - 2)
+			*fp = len;
+		else
+			*fp = CACHE_MPAGE_LEN - 2;
+		return -EINVAL;
+	}
+
+	wce = buf[0] & (1 << 2);
+
+	/*
+	 * Check that read-only bits are not modified.
+	 */
+	ata_msense_caching(dev->id, mpage, false);
+	for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
+		if (i == 0)
+			continue;
+		if (mpage[i + 2] != buf[i]) {
+			*fp = i;
+			return -EINVAL;
+		}
+	}
+
+	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf->protocol = ATA_PROT_NODATA;
+	tf->nsect = 0;
+	tf->command = ATA_CMD_SET_FEATURES;
+	tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF;
+	return 0;
+}
+
+/**
+ *	ata_mselect_control - Simulate MODE SELECT for control page
+ *	@qc: Storage for translated ATA taskfile
+ *	@buf: input buffer
+ *	@len: number of valid bytes in the input buffer
+ *	@fp: out parameter for the failed field on error
+ *
+ *	Apply control mode page changes (currently only D_SENSE) directly
+ *	to the device state; no ATA taskfile is generated for this page.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static int ata_mselect_control(struct ata_queued_cmd *qc,
+			       const u8 *buf, int len, u16 *fp)
+{
+	struct ata_device *dev = qc->dev;
+	u8 mpage[CONTROL_MPAGE_LEN];
+	u8 d_sense;
+	int i;
+
+	/*
+	 * The first two bytes of def_control_mpage are a header, so offsets
+	 * in mpage are off by 2 compared to buf.  Same for len.
+	 */
+
+	if (len != CONTROL_MPAGE_LEN - 2) {
+		if (len < CONTROL_MPAGE_LEN - 2)
+			*fp = len;
+		else
+			*fp = CONTROL_MPAGE_LEN - 2;
+		return -EINVAL;
+	}
+
+	d_sense = buf[0] & (1 << 2);
+
+	/*
+	 * Check that read-only bits are not modified.
+	 */
+	ata_msense_control(dev, mpage, false);
+	for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
+		if (i == 0)
+			continue;
+		if (mpage[2 + i] != buf[i]) {
+			*fp = i;
+			return -EINVAL;
+		}
+	}
+	if (d_sense & (1 << 2))
+		dev->flags |= ATA_DFLAG_D_SENSE;
+	else
+		dev->flags &= ~ATA_DFLAG_D_SENSE;
+	return 0;
+}
+
+/**
+ *	ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
+ *	@qc: Storage for translated ATA taskfile
+ *
+ *	Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
+ *	Assume this is invoked for direct access devices (e.g. disks) only.
+ *	There should be no block descriptor for other device types.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	const u8 *p;
+	u8 pg, spg;
+	unsigned six_byte, pg_len, hdr_len, bd_len;
+	int len;
+	u16 fp = (u16)-1;
+	u8 bp = 0xff;
+
+	VPRINTK("ENTER\n");
+
+	six_byte = (cdb[0] == MODE_SELECT);
+	if (six_byte) {
+		if (scmd->cmd_len < 5) {
+			fp = 4;
+			goto invalid_fld;
+		}
+
+		len = cdb[4];
+		hdr_len = 4;
+	} else {
+		if (scmd->cmd_len < 9) {
+			fp = 8;
+			goto invalid_fld;
+		}
+
+		len = (cdb[7] << 8) + cdb[8];
+		hdr_len = 8;
+	}
+
+	/* We only support PF=1, SP=0.  */
+	if ((cdb[1] & 0x11) != 0x10) {
+		fp = 1;
+		bp = (cdb[1] & 0x01) ? 1 : 5;
+		goto invalid_fld;
+	}
+
+	/* Test early for possible overrun.  */
+	if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
+		goto invalid_param_len;
+
+	p = page_address(sg_page(scsi_sglist(scmd)));
+
+	/* Move past header and block descriptors.  */
+	if (len < hdr_len)
+		goto invalid_param_len;
+
+	if (six_byte)
+		bd_len = p[3];
+	else
+		bd_len = (p[6] << 8) + p[7];
+
+	len -= hdr_len;
+	p += hdr_len;
+	if (len < bd_len)
+		goto invalid_param_len;
+	if (bd_len != 0 && bd_len != 8) {
+		fp = (six_byte) ? 3 : 6;
+		fp += bd_len + hdr_len;
+		goto invalid_param;
+	}
+
+	len -= bd_len;
+	p += bd_len;
+	if (len == 0)
+		goto skip;
+
+	/* Parse both possible formats for the mode page headers.  */
+	pg = p[0] & 0x3f;
+	if (p[0] & 0x40) {
+		if (len < 4)
+			goto invalid_param_len;
+
+		spg = p[1];
+		pg_len = (p[2] << 8) | p[3];
+		p += 4;
+		len -= 4;
+	} else {
+		if (len < 2)
+			goto invalid_param_len;
+
+		spg = 0;
+		pg_len = p[1];
+		p += 2;
+		len -= 2;
+	}
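+	/* Example: a page_0-format payload starting 08 12 ... selects
+	 * page 0x08 (CACHE_MPAGE) with spg = 0 and pg_len = 0x12; a
+	 * SUB_PAGE-format header sets bit 0x40 in byte 0 and carries a
+	 * 16-bit page length instead.
+	 */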
+
+	/*
+	 * No mode subpages supported (yet) but asking for _all_
+	 * subpages may be valid
+	 */
+	if (spg && (spg != ALL_SUB_MPAGES)) {
+		fp = (p[0] & 0x40) ? 1 : 0;
+		fp += hdr_len + bd_len;
+		goto invalid_param;
+	}
+	if (pg_len > len)
+		goto invalid_param_len;
+
+	switch (pg) {
+	case CACHE_MPAGE:
+		if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
+			fp += hdr_len + bd_len;
+			goto invalid_param;
+		}
+		break;
+	case CONTROL_MPAGE:
+		if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
+			fp += hdr_len + bd_len;
+			goto invalid_param;
+		} else {
+			goto skip; /* No ATA command to send */
+		}
+		break;
+	default:		/* invalid page code */
+		fp = bd_len + hdr_len;
+		goto invalid_param;
+	}
+
+	/*
+	 * Only one page has changeable data, so we only support setting one
+	 * page at a time.
+	 */
+	if (len > pg_len)
+		goto invalid_param;
+
+	return 0;
+
+ invalid_fld:
+	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
+	return 1;
+
+ invalid_param:
+	ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
+	return 1;
+
+ invalid_param_len:
+	/* "Parameter list length error" */
+	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+	return 1;
+
+ skip:
+	scmd->result = SAM_STAT_GOOD;
+	return 1;
+}
+
+static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma)
+{
+	if (len == 0)
+		return ATA_CMD_TRUSTED_NONDATA;
+	else if (send)
+		return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND;
+	else
+		return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV;
+}
+
+static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	struct ata_taskfile *tf = &qc->tf;
+	u8 secp = cdb[1];
+	bool send = (cdb[0] == SECURITY_PROTOCOL_OUT);
+	u16 spsp = get_unaligned_be16(&cdb[2]);
+	u32 len = get_unaligned_be32(&cdb[6]);
+	bool dma = !(qc->dev->flags & ATA_DFLAG_PIO);
+
+	/*
+	 * We don't support the ATA "security" protocol.
+	 */
+	if (secp == 0xef) {
+		ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0);
+		return 1;
+	}
+
+	if (cdb[4] & 7) { /* INC_512 */
+		if (len > 0xffff) {
+			ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
+			return 1;
+		}
+	} else {
+		if (len > 0x01fffe00) {
+			ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
+			return 1;
+		}
+
+		/* convert to the sector-based ATA addressing */
+		len = (len + 511) / 512;
+	}
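+	/* Example: with INC_512 clear, a 1025-byte allocation length
+	 * becomes (1025 + 511) / 512 = 3 sectors on the wire.
+	 */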
+
+	tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO;
+	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA;
+	if (send)
+		tf->flags |= ATA_TFLAG_WRITE;
+	tf->command = ata_scsi_trusted_op(len, send, dma);
+	tf->feature = secp;
+	tf->lbam = spsp & 0xff;
+	tf->lbah = spsp >> 8;
+
+	if (len) {
+		tf->nsect = len & 0xff;
+		tf->lbal = len >> 8;
+	} else {
+		if (!send)
+			tf->lbah = (1 << 7);
+	}
+
+	ata_qc_set_pc_nbytes(qc);
+	return 0;
+}
+
+/**
+ *	ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
+ *	@qc: Command to be translated
+ *
+ *	Translate a SCSI variable length CDB to specified commands.
+ *	It checks a service action value in CDB to call corresponding handler.
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on failure
+ *
+ */
+static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	const u16 sa = get_unaligned_be16(&cdb[8]);
+
+	/*
+	 * If the service action represents an ATA pass-thru(32) command,
+	 * pass it to the ata_scsi_pass_thru() handler.
+	 */
+	if (sa == ATA_32)
+		return ata_scsi_pass_thru(qc);
+
+	/* unsupported service action */
+	return 1;
+}
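+
+/*
+ * Illustration: a SAT ATA PASS-THROUGH(32) CDB starts with 7Fh and
+ * carries its service action in bytes 8-9; when that 16-bit value
+ * equals ATA_32 the command is routed to ata_scsi_pass_thru() above.
+ */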
+
+/**
+ *	ata_get_xlat_func - check if SCSI to ATA translation is possible
+ *	@dev: ATA device
+ *	@cmd: SCSI command opcode to consider
+ *
+ *	Look up the SCSI command given, and determine whether the
+ *	SCSI command is to be translated or simulated.
+ *
+ *	RETURNS:
+ *	Pointer to translation function if possible, %NULL if not.
+ */
+
+static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
+{
+	switch (cmd) {
+	case READ_6:
+	case READ_10:
+	case READ_16:
+
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_16:
+		return ata_scsi_rw_xlat;
+
+	case WRITE_SAME_16:
+		return ata_scsi_write_same_xlat;
+
+	case SYNCHRONIZE_CACHE:
+		if (ata_try_flush_cache(dev))
+			return ata_scsi_flush_xlat;
+		break;
+
+	case VERIFY:
+	case VERIFY_16:
+		return ata_scsi_verify_xlat;
+
+	case ATA_12:
+	case ATA_16:
+		return ata_scsi_pass_thru;
+
+	case VARIABLE_LENGTH_CMD:
+		return ata_scsi_var_len_cdb_xlat;
+
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+		return ata_scsi_mode_select_xlat;
+
+	case ZBC_IN:
+		return ata_scsi_zbc_in_xlat;
+
+	case ZBC_OUT:
+		return ata_scsi_zbc_out_xlat;
+
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		if (!(dev->flags & ATA_DFLAG_TRUSTED))
+			break;
+		return ata_scsi_security_inout_xlat;
+
+	case START_STOP:
+		return ata_scsi_start_stop_xlat;
+	}
+
+	return NULL;
+}
+
+/**
+ *	ata_scsi_dump_cdb - dump SCSI command contents to dmesg
+ *	@ap: ATA port to which the command was being sent
+ *	@cmd: SCSI command to dump
+ *
+ *	Prints the contents of a SCSI command via printk().
+ */
+
+static inline void ata_scsi_dump_cdb(struct ata_port *ap,
+				     struct scsi_cmnd *cmd)
+{
+#ifdef ATA_VERBOSE_DEBUG
+	struct scsi_device *scsidev = cmd->device;
+
+	VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
+		ap->print_id,
+		scsidev->channel, scsidev->id, scsidev->lun,
+		cmd->cmnd);
+#endif
+}
+
+static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
+				      struct ata_device *dev)
+{
+	u8 scsi_op = scmd->cmnd[0];
+	ata_xlat_func_t xlat_func;
+	int rc = 0;
+
+	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
+		if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+			goto bad_cdb_len;
+
+		xlat_func = ata_get_xlat_func(dev, scsi_op);
+	} else {
+		if (unlikely(!scmd->cmd_len))
+			goto bad_cdb_len;
+
+		xlat_func = NULL;
+		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+			/* relay SCSI command to ATAPI device */
+			int len = COMMAND_SIZE(scsi_op);
+			if (unlikely(len > scmd->cmd_len ||
+				     len > dev->cdb_len ||
+				     scmd->cmd_len > ATAPI_CDB_LEN))
+				goto bad_cdb_len;
+
+			xlat_func = atapi_xlat;
+		} else {
+			/* ATA_16 passthru, treat as an ATA command */
+			if (unlikely(scmd->cmd_len > 16))
+				goto bad_cdb_len;
+
+			xlat_func = ata_get_xlat_func(dev, scsi_op);
+		}
+	}
+
+	if (xlat_func)
+		rc = ata_scsi_translate(dev, scmd, xlat_func);
+	else
+		ata_scsi_simulate(dev, scmd);
+
+	return rc;
+
+ bad_cdb_len:
+	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
+		scmd->cmd_len, scsi_op, dev->cdb_len);
+	scmd->result = DID_ERROR << 16;
+	scmd->scsi_done(scmd);
+	return 0;
+}
+
+/**
+ *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
+ *	@shost: SCSI host of command to be sent
+ *	@cmd: SCSI command to be sent
+ *
+ *	In some cases, this function translates SCSI commands into
+ *	ATA taskfiles, and queues the taskfiles to be sent to
+ *	hardware.  In other cases, this function simulates a
+ *	SCSI device by evaluating and responding to certain
+ *	SCSI commands.  This creates the overall effect of
+ *	ATA and ATAPI devices appearing as SCSI devices.
+ *
+ *	LOCKING:
+ *	ATA host lock
+ *
+ *	RETURNS:
+ *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ *	0 otherwise.
+ */
+int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+	struct ata_port *ap;
+	struct ata_device *dev;
+	struct scsi_device *scsidev = cmd->device;
+	int rc = 0;
+	unsigned long irq_flags;
+
+	ap = ata_shost_to_port(shost);
+
+	spin_lock_irqsave(ap->lock, irq_flags);
+
+	ata_scsi_dump_cdb(ap, cmd);
+
+	dev = ata_scsi_find_dev(ap, scsidev);
+	if (likely(dev))
+		rc = __ata_scsi_queuecmd(cmd, dev);
+	else {
+		cmd->result = (DID_BAD_TARGET << 16);
+		cmd->scsi_done(cmd);
+	}
+
+	spin_unlock_irqrestore(ap->lock, irq_flags);
+
+	return rc;
+}
+
+/**
+ *	ata_scsi_simulate - simulate SCSI command on ATA device
+ *	@dev: the target device
+ *	@cmd: SCSI command being sent to device.
+ *
+ *	Interprets and directly executes a select list of SCSI commands
+ *	that can be handled internally.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+{
+	struct ata_scsi_args args;
+	const u8 *scsicmd = cmd->cmnd;
+	u8 tmp8;
+
+	args.dev = dev;
+	args.id = dev->id;
+	args.cmd = cmd;
+
+	switch(scsicmd[0]) {
+	case INQUIRY:
+		if (scsicmd[1] & 2)		   /* is CmdDt set?  */
+			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+		else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
+		else switch (scsicmd[2]) {
+		case 0x00:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
+			break;
+		case 0x80:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
+			break;
+		case 0x83:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
+			break;
+		case 0x89:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
+			break;
+		case 0xb0:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
+			break;
+		case 0xb1:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
+			break;
+		case 0xb2:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
+			break;
+		case 0xb6:
+			if (dev->flags & ATA_DFLAG_ZAC) {
+				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
+				break;
+			}
+			/* Fallthrough */
+		default:
+			ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+			break;
+		}
+		break;
+
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+		break;
+
+	case READ_CAPACITY:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+		break;
+
+	case SERVICE_ACTION_IN_16:
+		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
+			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+		else
+			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+		break;
+
+	case REPORT_LUNS:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+		break;
+
+	case REQUEST_SENSE:
+		ata_scsi_set_sense(dev, cmd, 0, 0, 0);
+		cmd->result = (DRIVER_SENSE << 24);
+		break;
+
+	/* if we reach this, then writeback caching is disabled,
+	 * turning this into a no-op.
+	 */
+	case SYNCHRONIZE_CACHE:
+		/* fall through */
+
+	/* no-op's, complete with success */
+	case REZERO_UNIT:
+	case SEEK_6:
+	case SEEK_10:
+	case TEST_UNIT_READY:
+		break;
+
+	case SEND_DIAGNOSTIC:
+		tmp8 = scsicmd[1] & ~(1 << 3);
+		if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
+			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+		break;
+
+	case MAINTENANCE_IN:
+		if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+			ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+		else
+			ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+		break;
+
+	/* all other commands */
+	default:
+		ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+		/* "Invalid command operation code" */
+		break;
+	}
+
+	cmd->scsi_done(cmd);
+}
+
+int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+{
+	int i, rc;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct Scsi_Host *shost;
+
+		rc = -ENOMEM;
+		shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
+		if (!shost)
+			goto err_alloc;
+
+		shost->eh_noresume = 1;
+		*(struct ata_port **)&shost->hostdata[0] = ap;
+		ap->scsi_host = shost;
+
+		shost->transportt = ata_scsi_transport_template;
+		shost->unique_id = ap->print_id;
+		shost->max_id = 16;
+		shost->max_lun = 1;
+		shost->max_channel = 1;
+		shost->max_cmd_len = 32;
+
+		/* Schedule policy is determined by ->qc_defer()
+		 * callback and it needs to see every deferred qc.
+		 * Set host_blocked to 1 to prevent SCSI midlayer from
+		 * automatically deferring requests.
+		 */
+		shost->max_host_blocked = 1;
+
+		rc = scsi_add_host_with_dma(ap->scsi_host,
+						&ap->tdev, ap->host->dev);
+		if (rc)
+			goto err_add;
+	}
+
+	return 0;
+
+ err_add:
+	scsi_host_put(host->ports[i]->scsi_host);
+ err_alloc:
+	while (--i >= 0) {
+		struct Scsi_Host *shost = host->ports[i]->scsi_host;
+
+		scsi_remove_host(shost);
+		scsi_host_put(shost);
+	}
+	return rc;
+}
+
+void ata_scsi_scan_host(struct ata_port *ap, int sync)
+{
+	int tries = 5;
+	struct ata_device *last_failed_dev = NULL;
+	struct ata_link *link;
+	struct ata_device *dev;
+
+ repeat:
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			struct scsi_device *sdev;
+			int channel = 0, id = 0;
+
+			if (dev->sdev)
+				continue;
+
+			if (ata_is_host_link(link))
+				id = dev->devno;
+			else
+				channel = link->pmp;
+
+			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
+						 NULL);
+			if (!IS_ERR(sdev)) {
+				dev->sdev = sdev;
+				scsi_device_put(sdev);
+			} else {
+				dev->sdev = NULL;
+			}
+		}
+	}
+
+	/* If we scanned while EH was in progress or allocation
+	 * failure occurred, scan would have failed silently.  Check
+	 * whether all devices are attached.
+	 */
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			if (!dev->sdev)
+				goto exit_loop;
+		}
+	}
+ exit_loop:
+	if (!link)
+		return;
+
+	/* we're missing some SCSI devices */
+	if (sync) {
+		/* If the caller requested a synchronous scan and we've
+		 * made any progress, sleep briefly and repeat.
+		 */
+		if (dev != last_failed_dev) {
+			msleep(100);
+			last_failed_dev = dev;
+			goto repeat;
+		}
+
+		/* We might be failing to detect the boot device, so give
+		 * it a few more chances.
+		 */
+		if (--tries) {
+			msleep(100);
+			goto repeat;
+		}
+
+		ata_port_err(ap,
+			     "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
+	}
+
+	queue_delayed_work(system_long_wq, &ap->hotplug_task,
+			   round_jiffies_relative(HZ));
+}
+
+/**
+ *	ata_scsi_offline_dev - offline attached SCSI device
+ *	@dev: ATA device to offline attached SCSI device for
+ *
+ *	This function is called from ata_eh_hotplug() and is responsible
+ *	for taking the SCSI device attached to @dev offline.  It is
+ *	called with the host lock held, which protects dev->sdev
+ *	against clearing.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	1 if attached SCSI device exists, 0 otherwise.
+ */
+int ata_scsi_offline_dev(struct ata_device *dev)
+{
+	if (dev->sdev) {
+		scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *	ata_scsi_remove_dev - remove attached SCSI device
+ *	@dev: ATA device to remove attached SCSI device for
+ *
+ *	This function is called from ata_eh_scsi_hotplug() and is
+ *	responsible for removing the SCSI device attached to @dev.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_scsi_remove_dev(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct scsi_device *sdev;
+	unsigned long flags;
+
+	/* Alas, we need to grab scan_mutex to ensure SCSI device
+	 * state doesn't change underneath us and thus
+	 * scsi_device_get() always succeeds.  The mutex locking can
+	 * be removed if there is __scsi_device_get() interface which
+	 * increments reference counts regardless of device state.
+	 */
+	mutex_lock(&ap->scsi_host->scan_mutex);
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* clearing dev->sdev is protected by host lock */
+	sdev = dev->sdev;
+	dev->sdev = NULL;
+
+	if (sdev) {
+		/* If user initiated unplug races with us, sdev can go
+		 * away underneath us after the host lock and
+		 * scan_mutex are released.  Hold onto it.
+		 */
+		if (scsi_device_get(sdev) == 0) {
+			/* The following ensures the attached sdev is
+			 * offline on return from ata_scsi_offline_dev()
+			 * regardless of whether it wins or loses the
+			 * race against this function.
+			 */
+			scsi_device_set_state(sdev, SDEV_OFFLINE);
+		} else {
+			WARN_ON(1);
+			sdev = NULL;
+		}
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	mutex_unlock(&ap->scsi_host->scan_mutex);
+
+	if (sdev) {
+		ata_dev_info(dev, "detaching (SCSI %s)\n",
+			     dev_name(&sdev->sdev_gendev));
+
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+}
+
+static void ata_scsi_handle_link_detach(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ALL) {
+		unsigned long flags;
+
+		if (!(dev->flags & ATA_DFLAG_DETACHED))
+			continue;
+
+		spin_lock_irqsave(ap->lock, flags);
+		dev->flags &= ~ATA_DFLAG_DETACHED;
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		if (zpodd_dev_enabled(dev))
+			zpodd_exit(dev);
+
+		ata_scsi_remove_dev(dev);
+	}
+}
+
+/**
+ *	ata_scsi_media_change_notify - send media change event
+ *	@dev: Pointer to the disk device with media change event
+ *
+ *	Tell the block layer to send a media change notification
+ *	event.
+ *
+ * 	LOCKING:
+ * 	spin_lock_irqsave(host lock)
+ */
+void ata_scsi_media_change_notify(struct ata_device *dev)
+{
+	if (dev->sdev)
+		sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
+				     GFP_ATOMIC);
+}
+
+/**
+ *	ata_scsi_hotplug - SCSI part of hotplug
+ *	@work: Pointer to ATA port to perform SCSI hotplug on
+ *
+ *	Perform SCSI part of hotplug.  It's executed from a separate
+ *	workqueue after EH completes.  This is necessary because SCSI
+ *	hot plugging requires working EH, and hot unplugging is
+ *	synchronized against hot plugging with a mutex.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_scsi_hotplug(struct work_struct *work)
+{
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
+	int i;
+
+	if (ap->pflags & ATA_PFLAG_UNLOADING) {
+		DPRINTK("ENTER/EXIT - unloading\n");
+		return;
+	}
+
+	/*
+	 * XXX - UGLY HACK
+	 *
+	 * The block layer suspend/resume path is fundamentally broken due
+	 * to freezable kthreads and workqueues and may deadlock if a block
+	 * device gets removed while resume is in progress.  I don't know
+	 * what the solution is short of removing freezable kthreads and
+	 * workqueues altogether.
+	 *
+	 * The following is an ugly hack to avoid kicking off device
+	 * removal while freezer is active.  This is a joke but does avoid
+	 * this particular deadlock scenario.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+	 * http://marc.info/?l=linux-kernel&m=138695698516487
+	 */
+#ifdef CONFIG_FREEZER
+	while (pm_freezing)
+		msleep(10);
+#endif
+
+	DPRINTK("ENTER\n");
+	mutex_lock(&ap->scsi_scan_mutex);
+
+	/* Unplug detached devices.  We cannot use link iterator here
+	 * because PMP links have to be scanned even if PMP is
+	 * currently not attached.  Iterate manually.
+	 */
+	ata_scsi_handle_link_detach(&ap->link);
+	if (ap->pmp_link)
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_scsi_handle_link_detach(&ap->pmp_link[i]);
+
+	/* scan for new ones */
+	ata_scsi_scan_host(ap, 0);
+
+	mutex_unlock(&ap->scsi_scan_mutex);
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_scsi_user_scan - indication for user-initiated bus scan
+ *	@shost: SCSI host to scan
+ *	@channel: Channel to scan
+ *	@id: ID to scan
+ *	@lun: LUN to scan
+ *
+ *	This function is called when the user explicitly requests a bus
+ *	scan.  Set the probe pending flag and invoke EH.
+ *
+ *	LOCKING:
+ *	SCSI layer (we don't care)
+ *
+ *	RETURNS:
+ *	Zero.
+ */
+int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+		       unsigned int id, u64 lun)
+{
+	struct ata_port *ap = ata_shost_to_port(shost);
+	unsigned long flags;
+	int devno, rc = 0;
+
+	if (!ap->ops->error_handler)
+		return -EOPNOTSUPP;
+
+	if (lun != SCAN_WILD_CARD && lun)
+		return -EINVAL;
+
+	if (!sata_pmp_attached(ap)) {
+		if (channel != SCAN_WILD_CARD && channel)
+			return -EINVAL;
+		devno = id;
+	} else {
+		if (id != SCAN_WILD_CARD && id)
+			return -EINVAL;
+		devno = channel;
+	}
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	if (devno == SCAN_WILD_CARD) {
+		struct ata_link *link;
+
+		ata_for_each_link(link, ap, EDGE) {
+			struct ata_eh_info *ehi = &link->eh_info;
+			ehi->probe_mask |= ATA_ALL_DEVICES;
+			ehi->action |= ATA_EH_RESET;
+		}
+	} else {
+		struct ata_device *dev = ata_find_dev(ap, devno);
+
+		if (dev) {
+			struct ata_eh_info *ehi = &dev->link->eh_info;
+			ehi->probe_mask |= 1 << dev->devno;
+			ehi->action |= ATA_EH_RESET;
+		} else
+			rc = -EINVAL;
+	}
+
+	if (rc == 0) {
+		ata_port_schedule_eh(ap);
+		spin_unlock_irqrestore(ap->lock, flags);
+		ata_port_wait_eh(ap);
+	} else
+		spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc;
+}
+
+/**
+ *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
+ *	@work: Pointer to ATA port to perform scsi_rescan_device()
+ *
+ *	After ATA pass-thru (SAT) commands are executed successfully,
+ *	libata needs to propagate the changes to the SCSI layer.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_scsi_dev_rescan(struct work_struct *work)
+{
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	mutex_lock(&ap->scsi_scan_mutex);
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_for_each_link(link, ap, EDGE) {
+		ata_for_each_dev(dev, link, ENABLED) {
+			struct scsi_device *sdev = dev->sdev;
+
+			if (!sdev)
+				continue;
+			if (scsi_device_get(sdev))
+				continue;
+
+			spin_unlock_irqrestore(ap->lock, flags);
+			scsi_rescan_device(&(sdev->sdev_gendev));
+			scsi_device_put(sdev);
+			spin_lock_irqsave(ap->lock, flags);
+		}
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	mutex_unlock(&ap->scsi_scan_mutex);
+}
+
+/**
+ *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
+ *	@host: ATA host container for all SAS ports
+ *	@port_info: Information from low-level host driver
+ *	@shost: SCSI host that the scsi device is attached to
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	ata_port pointer on success / NULL on failure.
+ */
+
+struct ata_port *ata_sas_port_alloc(struct ata_host *host,
+				    struct ata_port_info *port_info,
+				    struct Scsi_Host *shost)
+{
+	struct ata_port *ap;
+
+	ap = ata_port_alloc(host);
+	if (!ap)
+		return NULL;
+
+	ap->port_no = 0;
+	ap->lock = &host->lock;
+	ap->pio_mask = port_info->pio_mask;
+	ap->mwdma_mask = port_info->mwdma_mask;
+	ap->udma_mask = port_info->udma_mask;
+	ap->flags |= port_info->flags;
+	ap->ops = port_info->port_ops;
+	ap->cbl = ATA_CBL_SATA;
+
+	return ap;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
+
+/**
+ *	ata_sas_port_start - Set port up for dma.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.
+ *
+ *	May be used as the port_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+int ata_sas_port_start(struct ata_port *ap)
+{
+	/*
+	 * the port is marked as frozen at allocation time, but if we don't
+	 * have new eh, we won't thaw it
+	 */
+	if (!ap->ops->error_handler)
+		ap->pflags &= ~ATA_PFLAG_FROZEN;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_start);
+
+/**
+ *	ata_sas_port_stop - Undo ata_sas_port_start()
+ *	@ap: Port to shut down
+ *
+ *	May be used as the port_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_sas_port_stop(struct ata_port *ap)
+{
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_stop);
+
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * For batch scheduling of probes for SAS-attached ATA devices; assumes
+ * the port has already been through ata_sas_port_init().
+ */
+void ata_sas_async_probe(struct ata_port *ap)
+{
+	__ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
+
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+	return ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
+/**
+ *	ata_sas_port_init - Initialize a SATA device
+ *	@ap: SATA port to initialize
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+
+int ata_sas_port_init(struct ata_port *ap)
+{
+	int rc = ap->ops->port_start(ap);
+
+	if (rc)
+		return rc;
+	ap->print_id = atomic_inc_return(&ata_print_id);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_init);
+
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+	return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+	ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
+/**
+ *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
+ *	@ap: SATA port to destroy
+ *
+ */
+
+void ata_sas_port_destroy(struct ata_port *ap)
+{
+	if (ap->ops->port_stop)
+		ap->ops->port_stop(ap);
+	kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
+
+/**
+ *	ata_sas_slave_configure - Default slave_config routine for libata devices
+ *	@sdev: SCSI device to configure
+ *	@ap: ATA port to which SCSI device is attached
+ *
+ *	RETURNS:
+ *	Zero.
+ */
+
+int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+{
+	ata_scsi_sdev_config(sdev);
+	ata_scsi_dev_config(sdev, ap->link.device);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+
+/**
+ *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
+ *	@cmd: SCSI command to be sent
+ *	@ap:	ATA port to which the command is being sent
+ *
+ *	RETURNS:
+ *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ *	0 otherwise.
+ */
+
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
+{
+	int rc = 0;
+
+	ata_scsi_dump_cdb(ap, cmd);
+
+	if (likely(ata_dev_enabled(ap->link.device)))
+		rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+	else {
+		cmd->result = (DID_BAD_TARGET << 16);
+		cmd->scsi_done(cmd);
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
+
+int ata_sas_allocate_tag(struct ata_port *ap)
+{
+	unsigned int max_queue = ap->host->n_tags;
+	unsigned int i, tag;
+
+	for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
+		tag = tag < max_queue ? tag : 0;
+
+		/* the last tag is reserved for the internal command. */
+		if (ata_tag_internal(tag))
+			continue;
+
+		if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
+			ap->sas_last_tag = tag;
+			return tag;
+		}
+	}
+	return -1;
+}
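+
+/*
+ * Example of the wrap-around search above: with n_tags = 32 and
+ * sas_last_tag = 30, the scan tries tag 31, then wraps to 0, 1, ...
+ * until test_and_set_bit() finds a free slot, returning -1 only when
+ * every usable tag is taken.
+ */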
+
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
+{
+	clear_bit(tag, &ap->sas_tag_allocated);
+}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
new file mode 100644
index 0000000..c5ea0fc
--- /dev/null
+++ b/drivers/ata/libata-sff.c
@@ -0,0 +1,3290 @@
+/*
+ *  libata-sff.c - helper library for PCI IDE BMDMA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2006 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/highmem.h>
+
+#include "libata.h"
+
+static struct workqueue_struct *ata_sff_wq;
+
+const struct ata_port_operations ata_sff_port_ops = {
+	.inherits		= &ata_base_port_ops,
+
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= ata_sff_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+
+	.freeze			= ata_sff_freeze,
+	.thaw			= ata_sff_thaw,
+	.prereset		= ata_sff_prereset,
+	.softreset		= ata_sff_softreset,
+	.hardreset		= sata_sff_hardreset,
+	.postreset		= ata_sff_postreset,
+	.error_handler		= ata_sff_error_handler,
+
+	.sff_dev_select		= ata_sff_dev_select,
+	.sff_check_status	= ata_sff_check_status,
+	.sff_tf_load		= ata_sff_tf_load,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_exec_command	= ata_sff_exec_command,
+	.sff_data_xfer		= ata_sff_data_xfer,
+	.sff_drain_fifo		= ata_sff_drain_fifo,
+
+	.lost_interrupt		= ata_sff_lost_interrupt,
+};
+EXPORT_SYMBOL_GPL(ata_sff_port_ops);
+
+/**
+ *	ata_sff_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and returns its value. This also clears pending interrupts
+ *	from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_sff_check_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.status_addr);
+}
+EXPORT_SYMBOL_GPL(ata_sff_check_status);
+
+/**
+ *	ata_sff_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile alternate status register for
+ *	currently-selected device and returns its value.
+ *
+ *	Note: may NOT be used as the sff_check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_sff_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		return ap->ops->sff_check_altstatus(ap);
+
+	return ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_irq_status - Check if the device is busy
+ *	@ap: port where the device is
+ *
+ *	Determine if the port is currently busy. Uses altstatus
+ *	if available in order to avoid clearing shared IRQ status
+ *	when finding an IRQ source. Fortunately for us, devices
+ *	without a ctl register don't share interrupt lines.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+	u8 status;
+
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		status = ata_sff_altstatus(ap);
+		/* Not us: We are busy */
+		if (status & ATA_BUSY)
+			return status;
+	}
+	/* Clear INTRQ latch */
+	status = ap->ops->sff_check_status(ap);
+	return status;
+}
+
+/**
+ *	ata_sff_sync - Flush writes
+ *	@ap: Port to wait for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		ap->ops->sff_check_altstatus(ap);
+	else if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_pause		-	Flush writes and wait 400ns
+ *	@ap: Port to pause for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_sync(ap);
+	ndelay(400);
+}
+EXPORT_SYMBOL_GPL(ata_sff_pause);
+
+/**
+ *	ata_sff_dma_pause	-	Pause before commencing DMA
+ *	@ap: Port to pause for.
+ *
+ *	Perform I/O fencing and ensure sufficient cycle delays occur
+ *	for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		/* An altstatus read will cause the needed delay without
+		   messing up the IRQ status */
+		ata_sff_altstatus(ap);
+		return;
+	}
+	/* There are no DMA controllers without ctl. BUG here to ensure
+	   we never violate the HDMA1:0 transition timing and risk
+	   corruption. */
+	BUG();
+}
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+
+/**
+ *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@tmout_pat: impatience timeout in msecs
+ *	@tmout: overall timeout in msecs
+ *
+ *	Sleep until ATA Status register bit BSY clears,
+ *	or a timeout occurs.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_busy_sleep(struct ata_port *ap,
+		       unsigned long tmout_pat, unsigned long tmout)
+{
+	unsigned long timer_start, timeout;
+	u8 status;
+
+	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
+	timer_start = jiffies;
+	timeout = ata_deadline(timer_start, tmout_pat);
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
+		ata_msleep(ap, 50);
+		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
+	}
+
+	if (status != 0xff && (status & ATA_BUSY))
+		ata_port_warn(ap,
+			      "port is slow to respond, please be patient (Status 0x%x)\n",
+			      status);
+
+	timeout = ata_deadline(timer_start, tmout);
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
+		ata_msleep(ap, 50);
+		status = ap->ops->sff_check_status(ap);
+	}
+
+	if (status == 0xff)
+		return -ENODEV;
+
+	if (status & ATA_BUSY) {
+		ata_port_err(ap,
+			     "port failed to respond (%lu secs, Status 0x%x)\n",
+			     DIV_ROUND_UP(tmout, 1000), status);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
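+
+/*
+ * Usage sketch (hypothetical caller; the timeouts are illustrative):
+ * warn after 100ms of BSY, give up entirely after 30 seconds.
+ *
+ *	if (ata_sff_busy_sleep(ap, 100, 30000))
+ *		return -EIO;
+ */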
+
+static int ata_sff_check_ready(struct ata_link *link)
+{
+	u8 status = link->ap->ops->sff_check_status(link->ap);
+
+	return ata_check_ready(status);
+}
+
+/**
+ *	ata_sff_wait_ready - sleep until BSY clears, or timeout
+ *	@link: SFF link to wait ready status for
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Sleep until ATA Status register bit BSY clears, or timeout
+ *	occurs.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
+{
+	return ata_wait_ready(link, deadline, ata_sff_check_ready);
+}
+EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
+
+/**
+ *	ata_sff_set_devctl - Write device control reg
+ *	@ap: port where the device is
+ *	@ctl: value to write
+ *
+ *	Writes ATA taskfile device control register.
+ *
+ *	Note: may NOT be used as the sff_set_devctl() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	if (ap->ops->sff_set_devctl)
+		ap->ops->sff_set_devctl(ap, ctl);
+	else
+		iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
+ *	ata_sff_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.  Works with both PIO and MMIO.
+ *
+ *	May be used as the dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	iowrite8(tmp, ap->ioaddr.device_addr);
+	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
+}
+EXPORT_SYMBOL_GPL(ata_sff_dev_select);
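+
+/*
+ * For reference, the wire values implied by ATA_DEVICE_OBS and
+ * ATA_DEV1: device 0 is selected by writing 0xa0 to the Device
+ * register, device 1 by writing 0xb0.
+ */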
+
+/**
+ *	ata_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *	@wait: non-zero to wait for Status register BSY bit to clear
+ *	@can_sleep: non-zero if context allows sleeping
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.
+ *
+ *	This is a high-level version of ata_sff_dev_select(), which
+ *	additionally provides the services of inserting the proper
+ *	pauses and status polling, where needed.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static void ata_dev_select(struct ata_port *ap, unsigned int device,
+			   unsigned int wait, unsigned int can_sleep)
+{
+	if (ata_msg_probe(ap))
+		ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
+			      device, wait);
+
+	if (wait)
+		ata_wait_idle(ap);
+
+	ap->ops->sff_dev_select(ap, device);
+
+	if (wait) {
+		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
+			ata_msleep(ap, 150);
+		ata_wait_idle(ap);
+	}
+}
+
+/**
+ *	ata_sff_irq_on - Enable interrupts on a port.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Enable interrupts on a legacy IDE device using MMIO or PIO,
+ *	wait for idle, clear any pending interrupts.
+ *
+ *	Note: may NOT be used as the sff_irq_on() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_irq_on(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	if (ap->ops->sff_irq_on) {
+		ap->ops->sff_irq_on(ap);
+		return;
+	}
+
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
+		ata_sff_set_devctl(ap, ap->ctl);
+	ata_wait_idle(ap);
+
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sff_irq_on);
+
+/**
+ *	ata_sff_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		if (ioaddr->ctl_addr)
+			iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		WARN_ON_ONCE(!ioaddr->ctl_addr);
+		iowrite8(tf->hob_feature, ioaddr->feature_addr);
+		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
+		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
+		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
+		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		iowrite8(tf->feature, ioaddr->feature_addr);
+		iowrite8(tf->nsect, ioaddr->nsect_addr);
+		iowrite8(tf->lbal, ioaddr->lbal_addr);
+		iowrite8(tf->lbam, ioaddr->lbam_addr);
+		iowrite8(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		iowrite8(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sff_tf_load);
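+
+/*
+ * LBA48 ordering sketch (the LBA value is illustrative): for LBA
+ * 0x0123456789ab the first pass writes the hob (upper) half, the
+ * second pass writes the lower half to the same registers:
+ *
+ *	tf.hob_lbah = 0x01; tf.hob_lbam = 0x23; tf.hob_lbal = 0x45;
+ *	tf.lbah     = 0x67; tf.lbam     = 0x89; tf.lbal     = 0xab;
+ */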
+
+/**
+ *	ata_sff_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf. Assumes the device has a fully SFF compliant task file
+ *	layout and behaviour. If you device does not (eg has a different
+ *	layout and behaviour. If your device does not (e.g. has a different
+ *	status method), then you will need to provide a replacement tf_read.
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_sff_check_status(ap);
+	tf->feature = ioread8(ioaddr->error_addr);
+	tf->nsect = ioread8(ioaddr->nsect_addr);
+	tf->lbal = ioread8(ioaddr->lbal_addr);
+	tf->lbam = ioread8(ioaddr->lbam_addr);
+	tf->lbah = ioread8(ioaddr->lbah_addr);
+	tf->device = ioread8(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		if (likely(ioaddr->ctl_addr)) {
+			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+			tf->hob_feature = ioread8(ioaddr->error_addr);
+			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+			iowrite8(tf->ctl, ioaddr->ctl_addr);
+			ap->last_ctl = tf->ctl;
+		} else
+			WARN_ON_ONCE(1);
+	}
+}
+EXPORT_SYMBOL_GPL(ata_sff_tf_read);
+
+/**
+ *	ata_sff_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues ATA command, with proper synchronization with interrupt
+ *	handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	ata_sff_pause(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sff_exec_command);
+
+/**
+ *	ata_tf_to_host - issue ATA taskfile to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues ATA taskfile register set to ATA host controller,
+ *	with proper synchronization with interrupt handler and
+ *	other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static inline void ata_tf_to_host(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	ap->ops->sff_tf_load(ap, tf);
+	ap->ops->sff_exec_command(ap, tf);
+}
+
+/**
+ *	ata_sff_data_xfer - Transfer data by PIO
+ *	@qc: queued command
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
+			       unsigned int buflen, int rw)
+{
+	struct ata_port *ap = qc->dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ)
+		ioread16_rep(data_addr, buf, words);
+	else
+		iowrite16_rep(data_addr, buf, words);
+
+	/* Transfer trailing byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned char pad[2] = { };
+
+		/* Point buf to the tail of buffer */
+		buf += buflen - 1;
+
+		/*
+		 * Use io*16_rep() accessors here as well to avoid pointlessly
+		 * swapping bytes to and from on the big endian machines...
+		 */
+		if (rw == READ) {
+			ioread16_rep(data_addr, pad, 1);
+			*buf = pad[0];
+		} else {
+			pad[0] = *buf;
+			iowrite16_rep(data_addr, pad, 1);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
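+
+/*
+ * Worked example (hypothetical sizes): a 513-byte transfer issues 256
+ * 16-bit transfers for the aligned part plus one padded 16-bit
+ * transfer for the tail byte, and reports 514 bytes consumed.
+ */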
+
+/**
+ *	ata_sff_data_xfer32 - Transfer data by PIO
+ *	@qc: queued command
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO using 32bit
+ *	I/O operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+
+unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
+			       unsigned int buflen, int rw)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 2;
+	int slop = buflen & 3;
+
+	if (!(ap->pflags & ATA_PFLAG_PIO32))
+		return ata_sff_data_xfer(qc, buf, buflen, rw);
+
+	/* Transfer multiple of 4 bytes */
+	if (rw == READ)
+		ioread32_rep(data_addr, buf, words);
+	else
+		iowrite32_rep(data_addr, buf, words);
+
+	/* Transfer trailing bytes, if any */
+	if (unlikely(slop)) {
+		unsigned char pad[4] = { };
+
+		/* Point buf to the tail of buffer */
+		buf += buflen - slop;
+
+		/*
+		 * Use io*_rep() accessors here as well to avoid pointlessly
+		 * swapping bytes to and from on the big endian machines...
+		 */
+		if (rw == READ) {
+			if (slop < 3)
+				ioread16_rep(data_addr, pad, 1);
+			else
+				ioread32_rep(data_addr, pad, 1);
+			memcpy(buf, pad, slop);
+		} else {
+			memcpy(pad, buf, slop);
+			if (slop < 3)
+				iowrite16_rep(data_addr, pad, 1);
+			else
+				iowrite32_rep(data_addr, pad, 1);
+		}
+	}
+	return (buflen + 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
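+
+/*
+ * Tail handling above, spelled out: a slop of 1 or 2 bytes moves as a
+ * single 16-bit transfer, a slop of 3 bytes as a single 32-bit
+ * transfer; either way the return value is @buflen rounded up to the
+ * next even byte count.
+ */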
+
+/**
+ *	ata_pio_sector - Transfer a sector of data.
+ *	@qc: Command on going
+ *
+ *	Transfer qc->sect_size bytes of data from/to the ATA device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void ata_pio_sector(struct ata_queued_cmd *qc)
+{
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned int offset;
+	unsigned char *buf;
+
+	if (qc->curbytes == qc->nbytes - qc->sect_size)
+		ap->hsm_task_state = HSM_ST_LAST;
+
+	page = sg_page(qc->cursg);
+	offset = qc->cursg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer */
+	buf = kmap_atomic(page);
+	ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
+	kunmap_atomic(buf);
+
+	if (!do_write && !PageSlab(page))
+		flush_dcache_page(page);
+
+	qc->curbytes += qc->sect_size;
+	qc->cursg_ofs += qc->sect_size;
+
+	if (qc->cursg_ofs == qc->cursg->length) {
+		qc->cursg = sg_next(qc->cursg);
+		qc->cursg_ofs = 0;
+	}
+}
+
+/**
+ *	ata_pio_sectors - Transfer one or many sectors.
+ *	@qc: Command on going
+ *
+ *	Transfer one or many sectors of data from/to the
+ *	ATA device for the DRQ request.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
+
+		WARN_ON_ONCE(qc->dev->multi_count == 0);
+
+		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
+			    qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+
+	ata_sff_sync(qc->ap); /* flush */
+}
+
+/**
+ *	atapi_send_cdb - Write CDB bytes to hardware
+ *	@ap: Port to which ATAPI device is attached.
+ *	@qc: Taskfile currently active
+ *
+ *	When device has indicated its readiness to accept
+ *	a CDB, this function is called.  Send the CDB.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	WARN_ON_ONCE(qc->dev->cdb_len < 12);
+
+	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
+	ata_sff_sync(ap);
+	/* FIXME: If the CDB is for DMA do we need to do the transition delay
+	   or is bmdma_start guaranteed to do it ? */
+	switch (qc->tf.protocol) {
+	case ATAPI_PROT_PIO:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATAPI_PROT_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+#ifdef CONFIG_ATA_BMDMA
+	case ATAPI_PROT_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+#endif /* CONFIG_ATA_BMDMA */
+	default:
+		BUG();
+	}
+}
+
+/**
+ *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *	@bytes: number of bytes
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	struct ata_eh_info *ehi = &dev->link->eh_info;
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned char *buf;
+	unsigned int offset, count, consumed;
+
+next_sg:
+	sg = qc->cursg;
+	if (unlikely(!sg)) {
+		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
+				  "buf=%u cur=%u bytes=%u",
+				  qc->nbytes, qc->curbytes, bytes);
+		return -1;
+	}
+
+	page = sg_page(sg);
+	offset = sg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	/* don't overrun current sg */
+	count = min(sg->length - qc->cursg_ofs, bytes);
+
+	/* don't cross page boundaries */
+	count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer */
+	buf = kmap_atomic(page);
+	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
+	kunmap_atomic(buf);
+
+	bytes -= min(bytes, consumed);
+	qc->curbytes += count;
+	qc->cursg_ofs += count;
+
+	if (qc->cursg_ofs == sg->length) {
+		qc->cursg = sg_next(qc->cursg);
+		qc->cursg_ofs = 0;
+	}
+
+	/*
+	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
+	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
+	 * check correctly as it doesn't know if it is the last request being
+	 * made. Somebody should implement a proper sanity check.
+	 */
+	if (bytes)
+		goto next_sg;
+	return 0;
+}
+
+/**
+ *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	struct ata_eh_info *ehi = &dev->link->eh_info;
+	unsigned int ireason, bc_lo, bc_hi, bytes;
+	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	/* Abuse qc->result_tf for temp storage of intermediate TF
+	 * here to save some kernel stack usage.
+	 * For normal completion, qc->result_tf is not relevant. For
+	 * error, qc->result_tf is later overwritten by ata_qc_complete().
+	 * So, the correctness of qc->result_tf is not affected.
+	 */
+	ap->ops->sff_tf_read(ap, &qc->result_tf);
+	ireason = qc->result_tf.nsect;
+	bc_lo = qc->result_tf.lbam;
+	bc_hi = qc->result_tf.lbah;
+	bytes = (bc_hi << 8) | bc_lo;
+
+	/* shall be cleared to zero, indicating xfer of data */
+	if (unlikely(ireason & ATAPI_COD))
+		goto atapi_check;
+
+	/* make sure transfer direction matches expected */
+	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
+	if (unlikely(do_write != i_write))
+		goto atapi_check;
+
+	if (unlikely(!bytes))
+		goto atapi_check;
+
+	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
+
+	if (unlikely(__atapi_pio_bytes(qc, bytes)))
+		goto err_out;
+	ata_sff_sync(ap); /* flush */
+
+	return;
+
+ atapi_check:
+	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
+			  ireason, bytes);
+ err_out:
+	qc->err_mask |= AC_ERR_HSM;
+	ap->hsm_task_state = HSM_ST_ERR;
+}
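+
+/*
+ * For reference, the ireason bits tested above: ATAPI_COD (bit 0) is
+ * 1 for a command packet and 0 for data, so it must be clear here;
+ * ATAPI_IO (bit 1) is 1 for device-to-host and 0 for host-to-device,
+ * and is matched against the direction the taskfile expects.
+ */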
+
+/**
+ *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
+ *
+ *	RETURNS:
+ *	1 if ok in workqueue, 0 otherwise.
+ */
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+						struct ata_queued_cmd *qc)
+{
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		return 1;
+
+	if (ap->hsm_task_state == HSM_ST_FIRST) {
+		if (qc->tf.protocol == ATA_PROT_PIO &&
+		   (qc->tf.flags & ATA_TFLAG_WRITE))
+		    return 1;
+
+		if (ata_is_atapi(qc->tf.protocol) &&
+		   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_hsm_qc_complete - finish a qc running on standard HSM
+ *	@qc: Command to complete
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	Finish @qc which is running on standard HSM.
+ *
+ *	LOCKING:
+ *	If @in_wq is zero, spin_lock_irqsave(host lock).
+ *	Otherwise, none on entry and grabs host lock.
+ */
+static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (ap->ops->error_handler) {
+		if (in_wq) {
+			/* EH might have kicked in while host lock is
+			 * released.
+			 */
+			qc = ata_qc_from_tag(ap, qc->tag);
+			if (qc) {
+				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
+					ata_sff_irq_on(ap);
+					ata_qc_complete(qc);
+				} else
+					ata_port_freeze(ap);
+			}
+		} else {
+			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+				ata_qc_complete(qc);
+			else
+				ata_port_freeze(ap);
+		}
+	} else {
+		if (in_wq) {
+			ata_sff_irq_on(ap);
+			ata_qc_complete(qc);
+		} else
+			ata_qc_complete(qc);
+	}
+}
+
+/**
+ *	ata_sff_hsm_move - move the HSM to the next state.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
+ *	@status: current device status
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	RETURNS:
+ *	1 when poll next status needed, 0 otherwise.
+ */
+int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+		     u8 status, int in_wq)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_eh_info *ehi = &link->eh_info;
+	int poll_next;
+
+	lockdep_assert_held(ap->lock);
+
+	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+	/* Make sure ata_sff_qc_issue() does not throw things
+	 * like DMA polling into the workqueue. Notice that
+	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
+	 */
+	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
+
+fsm_start:
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Send first data block or PACKET CDB */
+
+		/* If polling, we will stay in the work queue after
+		 * sending the data. Otherwise, interrupt handler
+		 * takes over after sending the data.
+		 */
+		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+		/* check device status */
+		if (unlikely((status & ATA_DRQ) == 0)) {
+			/* handle BSY=0, DRQ=0 as error */
+			if (likely(status & (ATA_ERR | ATA_DF)))
+				/* device stops HSM for abort/error */
+				qc->err_mask |= AC_ERR_DEV;
+			else {
+				/* HSM violation. Let EH handle this */
+				ata_ehi_push_desc(ehi,
+					"ST_FIRST: !(DRQ|ERR|DF)");
+				qc->err_mask |= AC_ERR_HSM;
+			}
+
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* Device should not ask for data transfer (DRQ=1)
+		 * when it finds something wrong.
+		 * We ignore DRQ here and stop the HSM by
+		 * changing hsm_task_state to HSM_ST_ERR and
+		 * let the EH abort the command or reset the device.
+		 */
+		if (unlikely(status & (ATA_ERR | ATA_DF))) {
+			/* Some ATAPI tape drives forget to clear the ERR bit
+			 * when doing the next command (mostly request sense).
+			 * We ignore ERR here to workaround and proceed sending
+			 * the CDB.
+			 */
+			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+				ata_ehi_push_desc(ehi, "ST_FIRST: "
+					"DRQ=1 with device error, "
+					"dev_stat 0x%X", status);
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+		}
+
+		if (qc->tf.protocol == ATA_PROT_PIO) {
+			/* PIO data out protocol.
+			 * send first data block.
+			 */
+
+			/* ata_pio_sectors() might change the state
+			 * to HSM_ST_LAST. so, the state is changed here
+			 * before ata_pio_sectors().
+			 */
+			ap->hsm_task_state = HSM_ST;
+			ata_pio_sectors(qc);
+		} else
+			/* send CDB */
+			atapi_send_cdb(ap, qc);
+
+		/* if polling, ata_sff_pio_task() handles the rest.
+		 * otherwise, interrupt handler takes over from here.
+		 */
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATAPI_PROT_PIO) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* No more data to transfer or device error.
+				 * Device error will be tagged in HSM_ST_LAST.
+				 */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			/* Device should not ask for data transfer (DRQ=1)
+			 * when it finds something wrong.
+			 * We ignore DRQ here and stop the HSM by
+			 * changing hsm_task_state to HSM_ST_ERR and
+			 * let the EH abort the command or reset the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				ata_ehi_push_desc(ehi, "ST-ATAPI: "
+					"DRQ=1 with device error, "
+					"dev_stat 0x%X", status);
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				if (likely(status & (ATA_ERR | ATA_DF))) {
+					/* device stops HSM for abort/error */
+					qc->err_mask |= AC_ERR_DEV;
+
+					/* If diagnostic failed and this is
+					 * IDENTIFY, it's likely a phantom
+					 * device.  Mark hint.
+					 */
+					if (qc->dev->horkage &
+					    ATA_HORKAGE_DIAGNOSTIC)
+						qc->err_mask |=
+							AC_ERR_NODEV_HINT;
+				} else {
+					/* HSM violation. Let EH handle this.
+					 * Phantom devices also trigger this
+					 * condition.  Mark hint.
+					 */
+					ata_ehi_push_desc(ehi, "ST-ATA: "
+						"DRQ=0 without device error, "
+						"dev_stat 0x%X", status);
+					qc->err_mask |= AC_ERR_HSM |
+							AC_ERR_NODEV_HINT;
+				}
+
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			/* For PIO reads, some devices may ask for
+			 * data transfer (DRQ=1) along with ERR=1.
+			 * We respect DRQ here and transfer one
+			 * block of junk data before changing the
+			 * hsm_task_state to HSM_ST_ERR.
+			 *
+			 * For PIO writes, ERR=1 DRQ=1 doesn't make
+			 * sense since the data block has been
+			 * transferred to the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				/* data might be corrupted */
+				qc->err_mask |= AC_ERR_DEV;
+
+				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+					ata_pio_sectors(qc);
+					status = ata_wait_idle(ap);
+				}
+
+				if (status & (ATA_BUSY | ATA_DRQ)) {
+					ata_ehi_push_desc(ehi, "ST-ATA: "
+						"BUSY|DRQ persists on ERR|DF, "
+						"dev_stat 0x%X", status);
+					qc->err_mask |= AC_ERR_HSM;
+				}
+
+				/* There are oddball controllers with
+				 * status register stuck at 0x7f and
+				 * lbal/m/h at zero which makes it
+				 * pass all other presence detection
+				 * mechanisms we have.  Set NODEV_HINT
+				 * for it.  Kernel bz#7241.
+				 */
+				if (status == 0x7f)
+					qc->err_mask |= AC_ERR_NODEV_HINT;
+
+				/* ata_pio_sectors() might change the
+				 * state to HSM_ST_LAST. so, the state
+				 * is changed after ata_pio_sectors().
+				 */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				status = ata_wait_idle(ap);
+				goto fsm_start;
+			}
+		}
+
+		poll_next = 1;
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(!ata_ok(status))) {
+			qc->err_mask |= __ac_err_mask(status);
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
+			ap->print_id, qc->dev->devno, status);
+
+		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		ata_hsm_qc_complete(qc, in_wq);
+
+		poll_next = 0;
+		break;
+
+	case HSM_ST_ERR:
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		ata_hsm_qc_complete(qc, in_wq);
+
+		poll_next = 0;
+		break;
+	default:
+		poll_next = 0;
+		WARN(true, "ata%d: SFF host state machine in invalid state %d",
+		     ap->print_id, ap->hsm_task_state);
+	}
+
+	return poll_next;
+}
+EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
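+
+/*
+ * For orientation, the error-free flows through the state machine
+ * above (a sketch; the entry state is chosen by ata_sff_qc_issue()):
+ *
+ *	non-data:      HSM_ST_LAST -> HSM_ST_IDLE
+ *	PIO data-in:   HSM_ST -> ... -> HSM_ST_LAST -> HSM_ST_IDLE
+ *	PIO data-out:  HSM_ST_FIRST -> HSM_ST -> ... -> HSM_ST_LAST
+ *	               -> HSM_ST_IDLE
+ *	ATAPI:         HSM_ST_FIRST (send CDB), then HSM_ST for PIO or
+ *	               HSM_ST_LAST for nodata/DMA, ending in HSM_ST_IDLE
+ *
+ * Any failure detours through HSM_ST_ERR, which completes the qc and
+ * likewise lands in HSM_ST_IDLE.
+ */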
+
+void ata_sff_queue_work(struct work_struct *work)
+{
+	queue_work(ata_sff_wq, work);
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_work);
+
+void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+	queue_delayed_work(ata_sff_wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
+
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
+{
+	struct ata_port *ap = link->ap;
+
+	WARN_ON((ap->sff_pio_task_link != NULL) &&
+		(ap->sff_pio_task_link != link));
+	ap->sff_pio_task_link = link;
+
+	/* may fail if ata_sff_flush_pio_task() in progress */
+	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+	DPRINTK("ENTER\n");
+
+	cancel_delayed_work_sync(&ap->sff_pio_task);
+
+	/*
+	 * We wanna reset the HSM state to IDLE.  If we do so without
+	 * grabbing the port lock, critical sections protected by it which
+	 * expect the HSM state to stay stable may get surprised.  For
+	 * example, we may set IDLE in between the time
+	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+	 */
+	spin_lock_irq(ap->lock);
+	ap->hsm_task_state = HSM_ST_IDLE;
+	spin_unlock_irq(ap->lock);
+
+	ap->sff_pio_task_link = NULL;
+
+	if (ata_msg_ctl(ap))
+		ata_port_dbg(ap, "%s: EXIT\n", __func__);
+}
+
+static void ata_sff_pio_task(struct work_struct *work)
+{
+	struct ata_port *ap =
+		container_of(work, struct ata_port, sff_pio_task.work);
+	struct ata_link *link = ap->sff_pio_task_link;
+	struct ata_queued_cmd *qc;
+	u8 status;
+	int poll_next;
+
+	spin_lock_irq(ap->lock);
+
+	BUG_ON(ap->sff_pio_task_link == NULL);
+	/* qc can be NULL if timeout occurred */
+	qc = ata_qc_from_tag(ap, link->active_tag);
+	if (!qc) {
+		ap->sff_pio_task_link = NULL;
+		goto out_unlock;
+	}
+
+fsm_start:
+	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, queue delayed work.
+	 */
+	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		spin_unlock_irq(ap->lock);
+		ata_msleep(ap, 2);
+		spin_lock_irq(ap->lock);
+
+		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+			goto out_unlock;
+		}
+	}
+
+	/*
+	 * hsm_move() may trigger another command to be processed.
+	 * clean the link beforehand.
+	 */
+	ap->sff_pio_task_link = NULL;
+	/* move the HSM */
+	poll_next = ata_sff_hsm_move(ap, qc, status, 1);
+
+	/* another command or interrupt handler
+	 * may be running at this point.
+	 */
+	if (poll_next)
+		goto fsm_start;
+out_unlock:
+	spin_unlock_irq(ap->lock);
+}
+
+/**
+ *	ata_sff_qc_issue - issue taskfile to a SFF controller
+ *	@qc: command to issue to device
+ *
+ *	This function issues a PIO or NODATA command to a SFF
+ *	controller.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
+
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING)
+		qc->tf.flags |= ATA_TFLAG_POLLING;
+
+	/* select the device */
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	/* start the command */
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_sff_queue_pio_task(link, 0);
+
+		break;
+
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			ata_sff_queue_pio_task(link, 0);
+
+			/* always send first data block using the
+			 * ata_sff_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				ata_sff_queue_pio_task(link, 0);
+
+			/* if polling, ata_sff_pio_task() handles the
+			 * rest.  otherwise, interrupt handler takes
+			 * over from here.
+			 */
+		}
+
+		break;
+
+	case ATAPI_PROT_PIO:
+	case ATAPI_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
+		ata_tf_to_host(ap, &qc->tf);
+
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			ata_sff_queue_pio_task(link, 0);
+		break;
+
+	default:
+		return AC_ERR_SYSTEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
+
+/**
+ *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
+ *	@qc: qc to fill result TF for
+ *
+ *	@qc is finished and result TF needs to be filled.  Fill it
+ *	using ->sff_tf_read.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	true indicating that result TF is successfully filled.
+ */
+bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
+	return true;
+}
+EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
+
+static unsigned int ata_sff_idle_irq(struct ata_port *ap)
+{
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ap->ops->sff_check_status(ap);
+		if (ap->ops->sff_irq_clear)
+			ap->ops->sff_irq_clear(ap);
+		ata_port_warn(ap, "irq trap\n");
+		return 1;
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+static unsigned int __ata_sff_port_intr(struct ata_port *ap,
+					struct ata_queued_cmd *qc,
+					bool hsmv_on_idle)
+{
+	u8 status;
+
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag is set only for ATAPI devices, so there is no
+		 * need to check ata_is_atapi(qc->tf.protocol) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			return ata_sff_idle_irq(ap);
+		break;
+	case HSM_ST_IDLE:
+		return ata_sff_idle_irq(ap);
+	default:
+		break;
+	}
+
+	/* check main status, clearing INTRQ if needed */
+	status = ata_sff_irq_status(ap);
+	if (status & ATA_BUSY) {
+		if (hsmv_on_idle) {
+			/* BMDMA engine is already stopped, we're screwed */
+			qc->err_mask |= AC_ERR_HSM;
+			ap->hsm_task_state = HSM_ST_ERR;
+		} else
+			return ata_sff_idle_irq(ap);
+	}
+
+	/* clear irq events */
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
+
+	ata_sff_hsm_move(ap, qc, status, 0);
+
+	return 1;	/* irq handled */
+}
+
+/**
+ *	ata_sff_port_intr - Handle SFF port interrupt
+ *	@ap: Port on which interrupt arrived (possibly...)
+ *	@qc: Taskfile currently active in engine
+ *
+ *	Handle port interrupt for given queued command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	One if interrupt was handled, zero if not (shared irq).
+ */
+unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	return __ata_sff_port_intr(ap, qc, false);
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_intr);
+
+static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
+	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
+{
+	struct ata_host *host = dev_instance;
+	bool retried = false;
+	unsigned int i;
+	unsigned int handled, idle, polling;
+	unsigned long flags;
+
+	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+	spin_lock_irqsave(&host->lock, flags);
+
+retry:
+	handled = idle = polling = 0;
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_queued_cmd *qc;
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc) {
+			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+				handled |= port_intr(ap, qc);
+			else
+				polling |= 1 << i;
+		} else
+			idle |= 1 << i;
+	}
+
+	/*
+	 * If no port was expecting an IRQ but the controller is actually
+	 * asserting the IRQ line, a "nobody cared" IRQ storm will ensue.
+	 * Check IRQ pending status if available and clear the spurious IRQ.
+	 */
+	if (!handled && !retried) {
+		bool retry = false;
+
+		for (i = 0; i < host->n_ports; i++) {
+			struct ata_port *ap = host->ports[i];
+
+			if (polling & (1 << i))
+				continue;
+
+			if (!ap->ops->sff_irq_check ||
+			    !ap->ops->sff_irq_check(ap))
+				continue;
+
+			if (idle & (1 << i)) {
+				ap->ops->sff_check_status(ap);
+				if (ap->ops->sff_irq_clear)
+					ap->ops->sff_irq_clear(ap);
+			} else {
+				/* clear INTRQ and check if BUSY cleared */
+				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
+					retry |= true;
+				/*
+				 * With command in flight, we can't do
+				 * sff_irq_clear() w/o racing with completion.
+				 */
+			}
+		}
+
+		if (retry) {
+			retried = true;
+			goto retry;
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+/**
+ *	ata_sff_interrupt - Default SFF ATA host interrupt handler
+ *	@irq: irq line (unused)
+ *	@dev_instance: pointer to our ata_host information structure
+ *
+ *	Default interrupt handler for PCI IDE devices.  Calls
+ *	ata_sff_port_intr() for each port that is not disabled.
+ *
+ *	LOCKING:
+ *	Obtains host lock during operation.
+ *
+ *	RETURNS:
+ *	IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
+}
+EXPORT_SYMBOL_GPL(ata_sff_interrupt);
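+
+/*
+ * Typical hookup from a driver's probe path (a sketch; "sht" stands in
+ * for the driver's scsi_host_template and host setup is assumed done):
+ *
+ *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ *			       IRQF_SHARED, &sht);
+ */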
+
+/**
+ *	ata_sff_lost_interrupt	-	Check for an apparent lost interrupt
+ *	@ap: port that appears to have timed out
+ *
+ *	Called from the libata error handlers when the core code suspects
+ *	an interrupt has been lost. If it has been, complete anything we
+ *	can and then return. The interface must support altstatus for
+ *	this faster recovery to occur.
+ *
+ *	Locking:
+ *	Caller holds host lock
+ */
+
+void ata_sff_lost_interrupt(struct ata_port *ap)
+{
+	u8 status;
+	struct ata_queued_cmd *qc;
+
+	/* Only one outstanding command per SFF channel */
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	/* We cannot lose an interrupt on a non-existent or polled command */
+	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
+		return;
+	/* See if the controller thinks it is still busy - if so the command
+	   isn't a lost IRQ but is still in progress */
+	status = ata_sff_altstatus(ap);
+	if (status & ATA_BUSY)
+		return;
+
+	/* There was a command running, we are no longer busy and we have
+	   no interrupt. */
+	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
+								status);
+	/* Run the host interrupt logic as if the interrupt had not been
+	   lost */
+	ata_sff_port_intr(ap, qc);
+}
+EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
+
+/**
+ *	ata_sff_freeze - Freeze SFF controller port
+ *	@ap: port to freeze
+ *
+ *	Freeze SFF controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_freeze(struct ata_port *ap)
+{
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
+		ata_sff_set_devctl(ap, ap->ctl);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ap->ops->sff_check_status(ap);
+
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sff_freeze);
+
+/**
+ *	ata_sff_thaw - Thaw SFF controller port
+ *	@ap: port to thaw
+ *
+ *	Thaw SFF controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_sff_thaw(struct ata_port *ap)
+{
+	/* clear & re-enable interrupts */
+	ap->ops->sff_check_status(ap);
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
+	ata_sff_irq_on(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sff_thaw);
+
+/**
+ *	ata_sff_prereset - prepare SFF link for reset
+ *	@link: SFF link to be reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SFF link @link is about to be reset.  Initialize it.  It first
+ *	calls ata_std_prereset() and waits for !BSY if the port is
+ *	being softreset.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	int rc;
+
+	rc = ata_std_prereset(link, deadline);
+	if (rc)
+		return rc;
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	/* wait for !BSY if we don't know that no device is attached */
+	if (!ata_link_offline(link)) {
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc && rc != -ENODEV) {
+			ata_link_warn(link,
+				      "device not ready (errno=%d), forcing hardreset\n",
+				      rc);
+			ehc->i.action |= ATA_EH_HARDRESET;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sff_prereset);
+
+/**
+ *	ata_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->sff_dev_select(ap, device);
+
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
+
+	iowrite8(0xaa, ioaddr->nsect_addr);
+	iowrite8(0x55, ioaddr->lbal_addr);
+
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
+
+	nsect = ioread8(ioaddr->nsect_addr);
+	lbal = ioread8(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_sff_dev_classify - Parse returned ATA device signature
+ *	@dev: ATA device to classify (starting at zero)
+ *	@present: device seems present
+ *	@r_err: Value of error register on completion
+ *
+ *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
+ *	an ATA/ATAPI-defined set of values is placed in the ATA
+ *	shadow registers, indicating the results of device detection
+ *	and diagnostics.
+ *
+ *	Select the ATA device, and read the values from the ATA shadow
+ *	registers.  Then parse according to the Error register value,
+ *	and the spec-defined values examined by ata_dev_classify().
+ *
+ *	LOCKING:
+ *	caller.
+ *
+ *	RETURNS:
+ *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
+ */
+unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
+				  u8 *r_err)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_taskfile tf;
+	unsigned int class;
+	u8 err;
+
+	ap->ops->sff_dev_select(ap, dev->devno);
+
+	memset(&tf, 0, sizeof(tf));
+
+	ap->ops->sff_tf_read(ap, &tf);
+	err = tf.feature;
+	if (r_err)
+		*r_err = err;
+
+	/* see if device passed diags: continue and warn later */
+	if (err == 0)
+		/* diagnostic fail : do nothing _YET_ */
+		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+	else if (err == 1)
+		/* do nothing */ ;
+	else if ((dev->devno == 0) && (err == 0x81))
+		/* do nothing */ ;
+	else
+		return ATA_DEV_NONE;
+
+	/* determine if device is ATA or ATAPI */
+	class = ata_dev_classify(&tf);
+
+	if (class == ATA_DEV_UNKNOWN) {
+		/* If the device failed diagnostic, it's likely to
+		 * have reported incorrect device signature too.
+		 * Assume ATA device if the device seems present but
+		 * device signature is invalid with diagnostic
+		 * failure.
+		 */
+		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+			class = ATA_DEV_ATA;
+		else
+			class = ATA_DEV_NONE;
+	} else if ((class == ATA_DEV_ATA) &&
+		   (ap->ops->sff_check_status(ap) == 0))
+		class = ATA_DEV_NONE;
+
+	return class;
+}
+EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
+
+/**
+ *	ata_sff_wait_after_reset - wait for devices to become ready after reset
+ *	@link: SFF link which is just reset
+ *	@devmask: mask of present devices
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Wait for devices attached to SFF @link to become ready after
+ *	reset.  This includes a preceding 150ms wait to avoid accessing
+ *	the TF status register too early.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -ENODEV if some or all of devices in @devmask
+ *	don't seem to exist.  -errno on other errors.
+ */
+int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
+			     unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	int rc, ret = 0;
+
+	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+	/* always check readiness of the master device */
+	rc = ata_sff_wait_ready(link, deadline);
+	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
+	 * and TF status is 0xff, bail out on it too.
+	 */
+	if (rc)
+		return rc;
+
+	/* if device 1 was found in ata_devchk, wait for register
+	 * access briefly, then wait for BSY to clear.
+	 */
+	if (dev1) {
+		int i;
+
+		ap->ops->sff_dev_select(ap, 1);
+
+		/* Wait for register access.  Some ATAPI devices fail
+		 * to set nsect/lbal after reset, so don't waste too
+		 * much time on it.  We're gonna wait for !BSY anyway.
+		 */
+		for (i = 0; i < 2; i++) {
+			u8 nsect, lbal;
+
+			nsect = ioread8(ioaddr->nsect_addr);
+			lbal = ioread8(ioaddr->lbal_addr);
+			if ((nsect == 1) && (lbal == 1))
+				break;
+			ata_msleep(ap, 50);	/* give drive a breather */
+		}
+
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
+	}
+
+	/* is all this really necessary? */
+	ap->ops->sff_dev_select(ap, 0);
+	if (dev1)
+		ap->ops->sff_dev_select(ap, 1);
+	if (dev0)
+		ap->ops->sff_dev_select(ap, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
+
+static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+			     unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+	if (ap->ioaddr.ctl_addr) {
+		/* software reset; causes dev0 to be selected */
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = ap->ctl;
+	}
+
+	/* wait for the port to become ready */
+	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+/**
+ *	ata_sff_softreset - reset host port via ATA SRST
+ *	@link: ATA link to reset
+ *	@classes: resulting classes of attached devices
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset host port using ATA SRST.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
+		      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	unsigned int devmask = 0;
+	int rc;
+	u8 err;
+
+	DPRINTK("ENTER\n");
+
+	/* determine if device 0/1 are present */
+	if (ata_devchk(ap, 0))
+		devmask |= (1 << 0);
+	if (slave_possible && ata_devchk(ap, 1))
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->sff_dev_select(ap, 0);
+
+	/* issue bus reset */
+	DPRINTK("about to softreset, devmask=%x\n", devmask);
+	rc = ata_bus_softreset(ap, devmask, deadline);
+	/* if the link is occupied, -ENODEV is an error as well */
+	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&link->device[0],
+					  devmask & (1 << 0), &err);
+	if (slave_possible && err != 0x81)
+		classes[1] = ata_sff_dev_classify(&link->device[1],
+						  devmask & (1 << 1), &err);
+
+	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sff_softreset);
+
+/**
+ *	sata_sff_hardreset - reset host port via SATA phy reset
+ *	@link: link to reset
+ *	@class: resulting class of attached device
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SATA phy-reset host port using DET bits of SControl register,
+ *	wait for !BSY and classify the attached device.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+		       unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
+	bool online;
+	int rc;
+
+	rc = sata_link_hardreset(link, timing, deadline, &online,
+				 ata_sff_check_ready);
+	if (online)
+		*class = ata_sff_dev_classify(link->device, 1, NULL);
+
+	DPRINTK("EXIT, class=%u\n", *class);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(sata_sff_hardreset);
+
+/**
+ *	ata_sff_postreset - SFF postreset callback
+ *	@link: the target SFF ata_link
+ *	@classes: classes of attached devices
+ *
+ *	This function is invoked after a successful reset.  It first
+ *	calls ata_std_postreset() and performs SFF specific postreset
+ *	processing.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
+{
+	struct ata_port *ap = link->ap;
+
+	ata_std_postreset(link, classes);
+
+	/* is double-select really necessary? */
+	if (classes[0] != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 1);
+	if (classes[1] != ATA_DEV_NONE)
+		ap->ops->sff_dev_select(ap, 0);
+
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		DPRINTK("EXIT, no device\n");
+		return;
+	}
+
+	/* set up device control */
+	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
+		ata_sff_set_devctl(ap, ap->ctl);
+		ap->last_ctl = ap->ctl;
+	}
+}
+EXPORT_SYMBOL_GPL(ata_sff_postreset);
+
+/**
+ *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
+ *	@qc: command
+ *
+ *	Drain the FIFO and device of any stuck data following a command
+ *	that failed to complete. In some cases this is necessary before a
+ *	reset will recover the device.
+ *
+ */
+
+void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
+{
+	int count;
+	struct ata_port *ap;
+
+	/* We only need to flush incoming data when a command was running */
+	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+		return;
+
+	ap = qc->ap;
+	/* Drain up to 64K of data before we give up this recovery method */
+	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
+						&& count < 65536; count += 2)
+		ioread16(ap->ioaddr.data_addr);
+
+	/* Can become DEBUG later */
+	if (count)
+		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+
+}
+EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
+
+/**
+ *	ata_sff_error_handler - Stock error handler for SFF controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for SFF controller.  It can handle both
+ *	PATA and SATA controllers.  Many controllers should be able to
+ *	use this EH as-is or with some added handling before and
+ *	after.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_sff_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t softreset = ap->ops->softreset;
+	ata_reset_fn_t hardreset = ap->ops->hardreset;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+
+	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/*
+	 * We *MUST* do FIFO draining before we issue a reset as
+	 * several devices helpfully clear their internal state and
+	 * will lock solid if we touch the data port post reset. Pass
+	 * qc in case anyone wants to do different PIO/DMA recovery or
+	 * has per command fixups
+	 */
+	if (ap->ops->sff_drain_fifo)
+		ap->ops->sff_drain_fifo(qc);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* ignore built-in hardresets if SCR access is not available */
+	if ((hardreset == sata_std_hardreset ||
+	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
+		hardreset = NULL;
+
+	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
+		  ap->ops->postreset);
+}
+EXPORT_SYMBOL_GPL(ata_sff_error_handler);
+
+/**
+ *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
+ *	@ioaddr: IO address structure to be initialized
+ *
+ *	Utility function which initializes data_addr, error_addr,
+ *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
+ *	device_addr, status_addr, and command_addr to standard offsets
+ *	relative to cmd_addr.
+ *
+ *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ */
+void ata_sff_std_ports(struct ata_ioports *ioaddr)
+{
+	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
+EXPORT_SYMBOL_GPL(ata_sff_std_ports);
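+
+/*
+ * Usage sketch (hypothetical driver; "base" is an ioremapped command
+ * block with the ctl register at the legacy +0x206 offset):
+ *
+ *	ap->ioaddr.cmd_addr = base;
+ *	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr = base + 0x206;
+ *	ata_sff_std_ports(&ap->ioaddr);
+ */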
+
+#ifdef CONFIG_PCI
+
+static int ata_resources_present(struct pci_dev *pdev, int port)
+{
+	int i;
+
+	/* Check the PCI resources for this channel are enabled */
+	port = port * 2;
+	for (i = 0; i < 2; i++) {
+		if (pci_resource_start(pdev, port + i) == 0 ||
+		    pci_resource_len(pdev, port + i) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
+ *	@host: target ATA host
+ *
+ *	Acquire native PCI ATA resources for @host and initialize the
+ *	first two ports of @host accordingly.  Ports marked dummy are
+ *	skipped and allocation failure makes the port dummy.
+ *
+ *	Note that native PCI resources are valid even for legacy hosts
+ *	as we fix up pdev resources array early in boot, so this
+ *	function can be used for both native and legacy SFF hosts.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 if at least one port is initialized, -ENODEV if no port is
+ *	available.
+ */
+int ata_pci_sff_init_host(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	unsigned int mask = 0;
+	int i, rc;
+
+	/* request, iomap BARs and init port addresses accordingly */
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		int base = i * 2;
+		void __iomem * const *iomap;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		/* Discard disabled ports.  Some controllers show
+		 * their unused channels this way.  Disabled ports are
+		 * made dummy.
+		 */
+		if (!ata_resources_present(pdev, i)) {
+			ap->ops = &ata_dummy_port_ops;
+			continue;
+		}
+
+		rc = pcim_iomap_regions(pdev, 0x3 << base,
+					dev_driver_string(gdev));
+		if (rc) {
+			dev_warn(gdev,
+				 "failed to request/iomap BARs for port %d (errno=%d)\n",
+				 i, rc);
+			if (rc == -EBUSY)
+				pcim_pin_device(pdev);
+			ap->ops = &ata_dummy_port_ops;
+			continue;
+		}
+		host->iomap = iomap = pcim_iomap_table(pdev);
+
+		ap->ioaddr.cmd_addr = iomap[base];
+		ap->ioaddr.altstatus_addr =
+		ap->ioaddr.ctl_addr = (void __iomem *)
+			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
+		ata_sff_std_ports(&ap->ioaddr);
+
+		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, base),
+			(unsigned long long)pci_resource_start(pdev, base + 1));
+
+		mask |= 1 << i;
+	}
+
+	if (!mask) {
+		dev_err(gdev, "no available native port\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
+
+/**
+ *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
+ *	@pdev: target PCI device
+ *	@ppi: array of port_info, must be enough for two ports
+ *	@r_host: out argument for the initialized ATA host
+ *
+ *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
+ *	all PCI resources and initialize it accordingly in one go.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_pci_sff_prepare_host(struct pci_dev *pdev,
+			     const struct ata_port_info * const *ppi,
+			     struct ata_host **r_host)
+{
+	struct ata_host *host;
+	int rc;
+
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate ATA host\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	rc = ata_pci_sff_init_host(host);
+	if (rc)
+		goto err_out;
+
+	devres_remove_group(&pdev->dev, NULL);
+	*r_host = host;
+	return 0;
+
+err_out:
+	devres_release_group(&pdev->dev, NULL);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
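+
+/*
+ * Hedged usage sketch: drivers that need to touch the host between
+ * preparation and activation typically pair this helper with
+ * ata_pci_sff_activate_host(); "ppi", "sht" and "hpriv" below are
+ * assumed to be defined by the caller:
+ *
+ *	struct ata_host *host;
+ *	int rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ *	if (rc)
+ *		return rc;
+ *	host->private_data = hpriv;
+ *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
+ */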
+
+/**
+ *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
+ *	@host: target SFF ATA host
+ *	@irq_handler: irq_handler used when requesting IRQ(s)
+ *	@sht: scsi_host_template to use when registering the host
+ *
+ *	This is the counterpart of ata_host_activate() for SFF ATA
+ *	hosts.  This separate helper is necessary because SFF hosts
+ *	use two separate interrupts in legacy mode.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_pci_sff_activate_host(struct ata_host *host,
+			      irq_handler_t irq_handler,
+			      struct scsi_host_template *sht)
+{
+	struct device *dev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	const char *drv_name = dev_driver_string(host->dev);
+	int legacy_mode = 0, rc;
+
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+
+	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		u8 tmp8, mask = 0;
+
+		/*
+		 * ATA spec says we should use legacy mode when one
+		 * port is in legacy mode, but disabled ports on some
+		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
+		 * on which the secondary port is not wired, so
+		 * ignore ports that are marked as 'dummy' during
+		 * this check
+		 */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		if (!ata_port_is_dummy(host->ports[0]))
+			mask |= (1 << 0);
+		if (!ata_port_is_dummy(host->ports[1]))
+			mask |= (1 << 2);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = 1;
+	}
+
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!legacy_mode && pdev->irq) {
+		int i;
+
+		rc = devm_request_irq(dev, pdev->irq, irq_handler,
+				      IRQF_SHARED, drv_name, host);
+		if (rc)
+			goto out;
+
+		for (i = 0; i < 2; i++) {
+			if (ata_port_is_dummy(host->ports[i]))
+				continue;
+			ata_port_desc(host->ports[i], "irq %d", pdev->irq);
+		}
+	} else if (legacy_mode) {
+		if (!ata_port_is_dummy(host->ports[0])) {
+			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
+					      irq_handler, IRQF_SHARED,
+					      drv_name, host);
+			if (rc)
+				goto out;
+
+			ata_port_desc(host->ports[0], "irq %d",
+				      ATA_PRIMARY_IRQ(pdev));
+		}
+
+		if (!ata_port_is_dummy(host->ports[1])) {
+			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
+					      irq_handler, IRQF_SHARED,
+					      drv_name, host);
+			if (rc)
+				goto out;
+
+			ata_port_desc(host->ports[1], "irq %d",
+				      ATA_SECONDARY_IRQ(pdev));
+		}
+	}
+
+	rc = ata_host_register(host, sht);
+out:
+	if (rc == 0)
+		devres_remove_group(dev, NULL);
+	else
+		devres_release_group(dev, NULL);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+
+static const struct ata_port_info *ata_sff_find_valid_pi(
+					const struct ata_port_info * const *ppi)
+{
+	int i;
+
+	/* look up the first valid port_info */
+	for (i = 0; i < 2 && ppi[i]; i++)
+		if (ppi[i]->port_ops != &ata_dummy_port_ops)
+			return ppi[i];
+
+	return NULL;
+}
+
+static int ata_pci_init_one(struct pci_dev *pdev,
+		const struct ata_port_info * const *ppi,
+		struct scsi_host_template *sht, void *host_priv,
+		int hflags, bool bmdma)
+{
+	struct device *dev = &pdev->dev;
+	const struct ata_port_info *pi;
+	struct ata_host *host = NULL;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	pi = ata_sff_find_valid_pi(ppi);
+	if (!pi) {
+		dev_err(&pdev->dev, "no valid port_info specified\n");
+		return -EINVAL;
+	}
+
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		goto out;
+
+#ifdef CONFIG_ATA_BMDMA
+	if (bmdma)
+		/* prepare and activate BMDMA host */
+		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	else
+#endif
+		/* prepare and activate SFF host */
+		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	if (rc)
+		goto out;
+	host->private_data = host_priv;
+	host->flags |= hflags;
+
+#ifdef CONFIG_ATA_BMDMA
+	if (bmdma) {
+		pci_set_master(pdev);
+		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+	} else
+#endif
+		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
+out:
+	if (rc == 0)
+		devres_remove_group(&pdev->dev, NULL);
+	else
+		devres_release_group(&pdev->dev, NULL);
+
+	return rc;
+}
+
+/**
+ *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
+ *	@pdev: Controller to be initialized
+ *	@ppi: array of port_info, must be enough for two ports
+ *	@sht: scsi_host_template to use when registering the host
+ *	@host_priv: host private_data
+ *	@hflag: host flags
+ *
+ *	This is a helper function which can be called from a driver's
+ *	xxx_init_one() probe function if the hardware uses traditional
+ *	IDE taskfile registers and is PIO only.
+ *
+ *	ASSUMPTION:
+ *	Nobody makes a single channel controller that appears solely as
+ *	the secondary legacy port on PCI.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno-based value on error.
+ */
+int ata_pci_sff_init_one(struct pci_dev *pdev,
+		 const struct ata_port_info * const *ppi,
+		 struct scsi_host_template *sht, void *host_priv, int hflag)
+{
+	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
+}
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
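+
+/*
+ * Minimal illustrative probe for a PIO-only controller; foo_port_info
+ * and foo_sht are hypothetical driver symbols, not part of libata:
+ *
+ *	static int foo_init_one(struct pci_dev *pdev,
+ *				const struct pci_device_id *id)
+ *	{
+ *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
+ *
+ *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
+ *	}
+ */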
+
+#endif /* CONFIG_PCI */
+
+/*
+ *	BMDMA support
+ */
+
+#ifdef CONFIG_ATA_BMDMA
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	.qc_prep		= ata_bmdma_qc_prep,
+	.qc_issue		= ata_bmdma_qc_issue,
+
+	.sff_irq_clear		= ata_bmdma_irq_clear,
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+
+	.port_start		= ata_bmdma_port_start,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.port_start		= ata_bmdma_port_start32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
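+
+/*
+ * Drivers normally do not use these ops tables directly but inherit
+ * from them, overriding only what their hardware needs.  A sketch,
+ * with foo_set_piomode as a hypothetical driver callback:
+ *
+ *	static struct ata_port_operations foo_port_ops = {
+ *		.inherits	= &ata_bmdma_port_ops,
+ *		.set_piomode	= foo_set_piomode,
+ *	};
+ */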
+
+/**
+ *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd = ap->bmdma_prd;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			prd[pi].addr = cpu_to_le32(addr);
+			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
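+
+/*
+ * Worked example of the 64K boundary split above: a 10KB segment at
+ * bus address 0x1ff000 has offset 0xf000 within its 64K page, so it
+ * is emitted as two PRD entries, (0x1ff000, 0x1000) and
+ * (0x200000, 0x1800).
+ */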
+
+/**
+ *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command. Perform the fill
+ *	so that we avoid writing any length 64K records for
+ *	controllers that don't follow the spec.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd = ap->bmdma_prd;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len, blen;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			blen = len & 0xffff;
+			prd[pi].addr = cpu_to_le32(addr);
+			if (blen == 0) {
+				/* Some PATA chipsets like the CS5530 can't
+				   cope with 0x0000 meaning 64K as the spec
+				   says */
+				prd[pi].flags_len = cpu_to_le32(0x8000);
+				blen = 0x8000;
+				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+			}
+			prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ *	ata_bmdma_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission, using the "dumb" PRD fill
+ *	that avoids full 64K entries for controllers that cannot handle
+ *	a zero length field.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
+/**
+ *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
+ *	@qc: command to issue to device
+ *
+ *	This function issues a PIO, NODATA or DMA command to a
+ *	SFF/BMDMA controller.  PIO and NODATA are handled by
+ *	ata_sff_qc_issue().
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
+
+	/* defer PIO handling to sff_qc_issue */
+	if (!ata_is_dma(qc->tf.protocol))
+		return ata_sff_qc_issue(qc);
+
+	/* select the device */
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	/* start the command */
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+
+	case ATAPI_PROT_DMA:
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			ata_sff_queue_pio_task(link, 0);
+		break;
+
+	default:
+		WARN_ON(1);
+		return AC_ERR_SYSTEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
+
+/**
+ *	ata_bmdma_port_intr - Handle BMDMA port interrupt
+ *	@ap: Port on which interrupt arrived (possibly...)
+ *	@qc: Taskfile currently active in engine
+ *
+ *	Handle port interrupt for given queued command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	One if interrupt was handled, zero if not (shared irq).
+ */
+unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u8 host_stat = 0;
+	bool bmdma_stopped = false;
+	unsigned int handled;
+
+	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
+		/* check status of DMA engine */
+		host_stat = ap->ops->bmdma_status(ap);
+		VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+
+		/* if it's not our irq... */
+		if (!(host_stat & ATA_DMA_INTR))
+			return ata_sff_idle_irq(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+		bmdma_stopped = true;
+
+		if (unlikely(host_stat & ATA_DMA_ERR)) {
+			/* error when transferring data to/from memory */
+			qc->err_mask |= AC_ERR_HOST_BUS;
+			ap->hsm_task_state = HSM_ST_ERR;
+		}
+	}
+
+	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
+
+	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+	return handled;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
+
+/**
+ *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
+ *	@irq: irq line (unused)
+ *	@dev_instance: pointer to our ata_host information structure
+ *
+ *	Default interrupt handler for PCI IDE devices.  Calls
+ *	ata_bmdma_port_intr() for each port that is not disabled.
+ *
+ *	LOCKING:
+ *	Obtains host lock during operation.
+ *
+ *	RETURNS:
+ *	IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
+{
+	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
+
+/**
+ *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for BMDMA controller.  It can handle both
+ *	PATA and SATA controllers.  Most BMDMA controllers should be
+ *	able to use this EH as-is or with some added handling before
+ *	and after.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	bool thaw = false;
+
+	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(ap->lock, flags);
+
+	if (qc && ata_is_dma(qc->tf.protocol)) {
+		u8 host_stat;
+
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out.  As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = true;
+		}
+
+		ap->ops->bmdma_stop(qc);
+
+		/* if we're gonna thaw, make sure IRQ is clear */
+		if (thaw) {
+			ap->ops->sff_check_status(ap);
+			if (ap->ops->sff_irq_clear)
+				ap->ops->sff_irq_clear(ap);
+		}
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		spin_lock_irqsave(ap->lock, flags);
+		ap->ops->bmdma_stop(qc);
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
+ *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Clear interrupt and error flags in DMA status register.
+ *
+ *	May be used as the irq_clear() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+
+	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+
+/**
+ *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
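+	/* note: ATA_DMA_WR tells the controller to write to memory,
+	 * i.e. a device-to-host transfer, so it is set for reads;
+	 * hence the inverted test below
+	 */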
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue an ioread8() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * expects, so it is best not to add a readb() without first
+	 * checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 *
+	 * FIXME: The posting of this write means I/O starts are
+	 * unnecessarily delayed for MMIO
+	 */
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+
+/**
+ *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ *	@qc: Command we are ending DMA for
+ *
+ *	Clears the ATA_DMA_START flag in the dma control register
+ *
+ *	May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	/* clear start/stop bit */
+	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+		 mmio + ATA_DMA_CMD);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_sff_dma_pause(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+
+/**
+ *	ata_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Read and return BMDMA status register.
+ *
+ *	May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+
+
+/**
+ *	ata_bmdma_port_start - Set port up for bmdma.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.  Allocates space for PRD table.
+ *
+ *	May be used as the port_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+int ata_bmdma_port_start(struct ata_port *ap)
+{
+	if (ap->mwdma_mask || ap->udma_mask) {
+		ap->bmdma_prd =
+			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
+					    &ap->bmdma_prd_dma, GFP_KERNEL);
+		if (!ap->bmdma_prd)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
+
+/**
+ *	ata_bmdma_port_start32 - Set port up for dma.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.  Enables 32bit PIO and allocates space for PRD
+ *	table.
+ *
+ *	May be used as the port_start() entry in ata_port_operations for
+ *	devices that are capable of 32bit PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+int ata_bmdma_port_start32(struct ata_port *ap)
+{
+	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+	return ata_bmdma_port_start(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
+
+#ifdef CONFIG_PCI
+
+/**
+ *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
+ *	@pdev: PCI device
+ *
+ *	Some PCI ATA devices report simplex mode but in fact can be told to
+ *	enter non-simplex mode. This implements the necessary logic to
+ *	perform the task on such devices. Calling it on other devices will
+ *	have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
+{
+	unsigned long bmdma = pci_resource_start(pdev, 4);
+	u8 simplex;
+
+	if (bmdma == 0)
+		return -ENOENT;
+
+	simplex = inb(bmdma + 0x02);
+	outb(simplex & 0x60, bmdma + 0x02);
+	simplex = inb(bmdma + 0x02);
+	if (simplex & 0x80)
+		return -EOPNOTSUPP;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+
+static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
+{
+	int i;
+
+	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
+
+	for (i = 0; i < 2; i++) {
+		host->ports[i]->mwdma_mask = 0;
+		host->ports[i]->udma_mask = 0;
+	}
+}
+
+/**
+ *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
+ *	@host: target ATA host
+ *
+ *	Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+void ata_pci_bmdma_init(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
+
+	/* No BAR4 allocation: No DMA */
+	if (pci_resource_start(pdev, 4) == 0) {
+		ata_bmdma_nodma(host, "BAR4 is zero");
+		return;
+	}
+
+	/*
+	 * Some controllers require BMDMA region to be initialized
+	 * even if DMA is not in use to clear IRQ status via
+	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
+	 * regardless of dma masks.
+	 */
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		ata_bmdma_nodma(host, "failed to set dma mask");
+	if (!rc) {
+		rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+		if (rc)
+			ata_bmdma_nodma(host,
+					"failed to set consistent dma mask");
+	}
+
+	/* request and iomap DMA region */
+	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+	if (rc) {
+		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
+		return;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ap->ioaddr.bmdma_addr = bmdma;
+		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+		    (ioread8(bmdma + 2) & 0x80))
+			host->flags |= ATA_HOST_SIMPLEX;
+
+		ata_port_desc(ap, "bmdma 0x%llx",
+		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+	}
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+
+/**
+ *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
+ *	@pdev: target PCI device
+ *	@ppi: array of port_info, must be enough for two ports
+ *	@r_host: out argument for the initialized ATA host
+ *
+ *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
+ *	resources and initialize it accordingly in one go.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+			       const struct ata_port_info * const * ppi,
+			       struct ata_host **r_host)
+{
+	int rc;
+
+	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+	if (rc)
+		return rc;
+
+	ata_pci_bmdma_init(*r_host);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ *	@pdev: Controller to be initialized
+ *	@ppi: array of port_info, must be enough for two ports
+ *	@sht: scsi_host_template to use when registering the host
+ *	@host_priv: host private_data
+ *	@hflags: host flags
+ *
+ *	This function is similar to ata_pci_sff_init_one() but also
+ *	takes care of BMDMA initialization.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno-based value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+			   const struct ata_port_info * const * ppi,
+			   struct scsi_host_template *sht, void *host_priv,
+			   int hflags)
+{
+	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
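+
+/*
+ * Usage mirrors ata_pci_sff_init_one(): a BMDMA-capable driver's probe
+ * typically reduces to a single call, with ppi and foo_sht being the
+ * driver's own (hypothetical) definitions:
+ *
+ *	return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
+ */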
+
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
+ *	@ap: Port to initialize
+ *
+ *	Called on port allocation to initialize SFF/BMDMA specific
+ *	fields.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_sff_port_init(struct ata_port *ap)
+{
+	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
+	ap->ctl = ATA_DEVCTL_OBS;
+	ap->last_ctl = 0xFF;
+}
+
+int __init ata_sff_init(void)
+{
+	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+	if (!ata_sff_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ata_sff_exit(void)
+{
+	destroy_workqueue(ata_sff_wq);
+}
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
new file mode 100644
index 0000000..f8c550d
--- /dev/null
+++ b/drivers/ata/libata-trace.c
@@ -0,0 +1,223 @@
+/*
+ * libata-trace.c - trace functions for libata
+ *
+ * Copyright 2015 Hannes Reinecke
+ * Copyright 2015 SUSE Linux GmbH
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/trace_seq.h>
+#include <trace/events/libata.h>
+
+const char *
+libata_trace_parse_status(struct trace_seq *p, unsigned char status)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "{ ");
+	if (status & ATA_BUSY)
+		trace_seq_printf(p, "BUSY ");
+	if (status & ATA_DRDY)
+		trace_seq_printf(p, "DRDY ");
+	if (status & ATA_DF)
+		trace_seq_printf(p, "DF ");
+	if (status & ATA_DSC)
+		trace_seq_printf(p, "DSC ");
+	if (status & ATA_DRQ)
+		trace_seq_printf(p, "DRQ ");
+	if (status & ATA_CORR)
+		trace_seq_printf(p, "CORR ");
+	if (status & ATA_SENSE)
+		trace_seq_printf(p, "SENSE ");
+	if (status & ATA_ERR)
+		trace_seq_printf(p, "ERR ");
+	trace_seq_putc(p, '}');
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
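+
+/*
+ * Example (illustrative): for status 0x58, i.e. DRDY | DSC | DRQ,
+ * this appends "{ DRDY DSC DRQ }" to the trace buffer.
+ */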
+
+const char *
+libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "%x", eh_action);
+	if (eh_action) {
+		trace_seq_printf(p, "{ ");
+		if (eh_action & ATA_EH_REVALIDATE)
+			trace_seq_printf(p, "REVALIDATE ");
+		if ((eh_action & (ATA_EH_SOFTRESET | ATA_EH_HARDRESET)) ==
+		    (ATA_EH_SOFTRESET | ATA_EH_HARDRESET))
+			trace_seq_printf(p, "RESET ");
+		else if (eh_action & ATA_EH_SOFTRESET)
+			trace_seq_printf(p, "SOFTRESET ");
+		else if (eh_action & ATA_EH_HARDRESET)
+			trace_seq_printf(p, "HARDRESET ");
+		if (eh_action & ATA_EH_ENABLE_LINK)
+			trace_seq_printf(p, "ENABLE_LINK ");
+		if (eh_action & ATA_EH_PARK)
+			trace_seq_printf(p, "PARK ");
+		trace_seq_putc(p, '}');
+	}
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+const char *
+libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "%x", eh_err_mask);
+	if (eh_err_mask) {
+		trace_seq_printf(p, "{ ");
+		if (eh_err_mask & AC_ERR_DEV)
+			trace_seq_printf(p, "DEV ");
+		if (eh_err_mask & AC_ERR_HSM)
+			trace_seq_printf(p, "HSM ");
+		if (eh_err_mask & AC_ERR_TIMEOUT)
+			trace_seq_printf(p, "TIMEOUT ");
+		if (eh_err_mask & AC_ERR_MEDIA)
+			trace_seq_printf(p, "MEDIA ");
+		if (eh_err_mask & AC_ERR_ATA_BUS)
+			trace_seq_printf(p, "ATA_BUS ");
+		if (eh_err_mask & AC_ERR_HOST_BUS)
+			trace_seq_printf(p, "HOST_BUS ");
+		if (eh_err_mask & AC_ERR_SYSTEM)
+			trace_seq_printf(p, "SYSTEM ");
+		if (eh_err_mask & AC_ERR_INVALID)
+			trace_seq_printf(p, "INVALID ");
+		if (eh_err_mask & AC_ERR_OTHER)
+			trace_seq_printf(p, "OTHER ");
+		if (eh_err_mask & AC_ERR_NODEV_HINT)
+			trace_seq_printf(p, "NODEV_HINT ");
+		if (eh_err_mask & AC_ERR_NCQ)
+			trace_seq_printf(p, "NCQ ");
+		trace_seq_putc(p, '}');
+	}
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+const char *
+libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "%x", qc_flags);
+	if (qc_flags) {
+		trace_seq_printf(p, "{ ");
+		if (qc_flags & ATA_QCFLAG_ACTIVE)
+			trace_seq_printf(p, "ACTIVE ");
+		if (qc_flags & ATA_QCFLAG_DMAMAP)
+			trace_seq_printf(p, "DMAMAP ");
+		if (qc_flags & ATA_QCFLAG_IO)
+			trace_seq_printf(p, "IO ");
+		if (qc_flags & ATA_QCFLAG_RESULT_TF)
+			trace_seq_printf(p, "RESULT_TF ");
+		if (qc_flags & ATA_QCFLAG_CLEAR_EXCL)
+			trace_seq_printf(p, "CLEAR_EXCL ");
+		if (qc_flags & ATA_QCFLAG_QUIET)
+			trace_seq_printf(p, "QUIET ");
+		if (qc_flags & ATA_QCFLAG_RETRY)
+			trace_seq_printf(p, "RETRY ");
+		if (qc_flags & ATA_QCFLAG_FAILED)
+			trace_seq_printf(p, "FAILED ");
+		if (qc_flags & ATA_QCFLAG_SENSE_VALID)
+			trace_seq_printf(p, "SENSE_VALID ");
+		if (qc_flags & ATA_QCFLAG_EH_SCHEDULED)
+			trace_seq_printf(p, "EH_SCHEDULED ");
+		trace_seq_putc(p, '}');
+	}
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+const char *
+libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
+			  unsigned char feature, unsigned char hob_nsect)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	switch (cmd) {
+	case ATA_CMD_FPDMA_RECV:
+		switch (hob_nsect & 0x5f) {
+		case ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT:
+			trace_seq_printf(p, " READ_LOG_DMA_EXT");
+			break;
+		case ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN:
+			trace_seq_printf(p, " ZAC_MGMT_IN");
+			break;
+		}
+		break;
+	case ATA_CMD_FPDMA_SEND:
+		switch (hob_nsect & 0x5f) {
+		case ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT:
+			trace_seq_printf(p, " WRITE_LOG_DMA_EXT");
+			break;
+		case ATA_SUBCMD_FPDMA_SEND_DSM:
+			trace_seq_printf(p, " DATASET_MANAGEMENT");
+			break;
+		}
+		break;
+	case ATA_CMD_NCQ_NON_DATA:
+		switch (feature) {
+		case ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE:
+			trace_seq_printf(p, " ABORT_QUEUE");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES:
+			trace_seq_printf(p, " SET_FEATURES");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT:
+			trace_seq_printf(p, " ZERO_EXT");
+			break;
+		case ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT:
+			trace_seq_printf(p, " ZAC_MGMT_OUT");
+			break;
+		}
+		break;
+	case ATA_CMD_ZAC_MGMT_IN:
+		switch (feature) {
+		case ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES:
+			trace_seq_printf(p, " REPORT_ZONES");
+			break;
+		}
+		break;
+	case ATA_CMD_ZAC_MGMT_OUT:
+		switch (feature) {
+		case ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE:
+			trace_seq_printf(p, " CLOSE_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE:
+			trace_seq_printf(p, " FINISH_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE:
+			trace_seq_printf(p, " OPEN_ZONE");
+			break;
+		case ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER:
+			trace_seq_printf(p, " RESET_WRITE_POINTER");
+			break;
+		}
+		break;
+	}
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
new file mode 100644
index 0000000..a0b0b4d
--- /dev/null
+++ b/drivers/ata/libata-transport.c
@@ -0,0 +1,811 @@
+/*
+ *  Copyright 2008 ioogle, Inc.  All rights reserved.
+ *	Released under GPL v2.
+ *
+ * Libata transport class.
+ *
+ * The ATA transport class contains common code to deal with ATA HBAs,
+ * an approximated representation of ATA topologies in the driver model,
+ * and various sysfs attributes to expose these topologies and management
+ * interfaces to user-space.
+ *
+ * There are 3 objects defined in this class:
+ * - ata_port
+ * - ata_link
+ * - ata_device
+ * Each port has a link object. Each link can have up to two devices for PATA
+ * and generally one for SATA.
+ * If there is a SATA port multiplier (PMP), 15 additional ata_link objects
+ * are created.
+ *
+ * These objects are created when the ata host is initialized and when a PMP is
+ * found. They are removed only when the HBA is removed, and are cleaned up
+ * before the error handler runs.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <scsi/scsi_transport.h>
+#include <linux/libata.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+#define ATA_PORT_ATTRS		3
+#define ATA_LINK_ATTRS		3
+#define ATA_DEV_ATTRS		9
+
+struct scsi_transport_template;
+struct scsi_transport_template *ata_scsi_transport_template;
+
+struct ata_internal {
+	struct scsi_transport_template t;
+
+	struct device_attribute private_port_attrs[ATA_PORT_ATTRS];
+	struct device_attribute private_link_attrs[ATA_LINK_ATTRS];
+	struct device_attribute private_dev_attrs[ATA_DEV_ATTRS];
+
+	struct transport_container link_attr_cont;
+	struct transport_container dev_attr_cont;
+
+	/*
+	 * The array of null terminated pointers to attributes
+	 * needed by scsi_sysfs.c
+	 */
+	struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1];
+	struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1];
+	struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1];
+};
+#define to_ata_internal(tmpl)	container_of(tmpl, struct ata_internal, t)
+
+
+#define tdev_to_device(d)					\
+	container_of((d), struct ata_device, tdev)
+#define transport_class_to_dev(dev)				\
+	tdev_to_device((dev)->parent)
+
+#define tdev_to_link(d)						\
+	container_of((d), struct ata_link, tdev)
+#define transport_class_to_link(dev)				\
+	tdev_to_link((dev)->parent)
+
+#define tdev_to_port(d)						\
+	container_of((d), struct ata_port, tdev)
+#define transport_class_to_port(dev)				\
+	tdev_to_port((dev)->parent)
+
+
+/* Device objects are always created with link objects */
+static int ata_tdev_add(struct ata_device *dev);
+static void ata_tdev_delete(struct ata_device *dev);
+
+
+/*
+ * Hack to allow attributes of the same name in different objects.
+ */
+#define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
+	struct device_attribute device_attr_##_prefix##_##_name = \
+	__ATTR(_name,_mode,_show,_store)
+
+#define ata_bitfield_name_match(title, table)			\
+static ssize_t							\
+get_ata_##title##_names(u32 table_key, char *buf)		\
+{								\
+	char *prefix = "";					\
+	ssize_t len = 0;					\
+	int i;							\
+								\
+	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
+		if (table[i].value & table_key) {		\
+			len += sprintf(buf + len, "%s%s",	\
+				prefix, table[i].name);		\
+			prefix = ", ";				\
+		}						\
+	}							\
+	len += sprintf(buf + len, "\n");			\
+	return len;						\
+}
+
+#define ata_bitfield_name_search(title, table)			\
+static ssize_t							\
+get_ata_##title##_names(u32 table_key, char *buf)		\
+{								\
+	ssize_t len = 0;					\
+	int i;							\
+								\
+	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
+		if (table[i].value == table_key) {		\
+			len += sprintf(buf + len, "%s",		\
+				table[i].name);			\
+			break;					\
+		}						\
+	}							\
+	len += sprintf(buf + len, "\n");			\
+	return len;						\
+}
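+
+/*
+ * Note the distinction: _match prints every name whose bit is set in
+ * the key (for bitmask fields such as the error mask), while _search
+ * prints only the entry whose value equals the key exactly (for
+ * enumerations such as the device class).
+ */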
+
+static struct {
+	u32		value;
+	char		*name;
+} ata_class_names[] = {
+	{ ATA_DEV_UNKNOWN,		"unknown" },
+	{ ATA_DEV_ATA,			"ata" },
+	{ ATA_DEV_ATA_UNSUP,		"ata" },
+	{ ATA_DEV_ATAPI,		"atapi" },
+	{ ATA_DEV_ATAPI_UNSUP,		"atapi" },
+	{ ATA_DEV_PMP,			"pmp" },
+	{ ATA_DEV_PMP_UNSUP,		"pmp" },
+	{ ATA_DEV_SEMB,			"semb" },
+	{ ATA_DEV_SEMB_UNSUP,		"semb" },
+	{ ATA_DEV_ZAC,			"zac" },
+	{ ATA_DEV_NONE,			"none" }
+};
+ata_bitfield_name_search(class, ata_class_names)
+
+
+static struct {
+	u32		value;
+	char		*name;
+} ata_err_names[] = {
+	{ AC_ERR_DEV,			"DeviceError" },
+	{ AC_ERR_HSM,			"HostStateMachineError" },
+	{ AC_ERR_TIMEOUT,		"Timeout" },
+	{ AC_ERR_MEDIA,			"MediaError" },
+	{ AC_ERR_ATA_BUS,		"BusError" },
+	{ AC_ERR_HOST_BUS,		"HostBusError" },
+	{ AC_ERR_SYSTEM,		"SystemError" },
+	{ AC_ERR_INVALID,		"InvalidArg" },
+	{ AC_ERR_OTHER,			"Unknown" },
+	{ AC_ERR_NODEV_HINT,		"NoDeviceHint" },
+	{ AC_ERR_NCQ,		 	"NCQError" }
+};
+ata_bitfield_name_match(err, ata_err_names)
+
+static struct {
+	u32		value;
+	char		*name;
+} ata_xfer_names[] = {
+	{ XFER_UDMA_7,			"XFER_UDMA_7" },
+	{ XFER_UDMA_6,			"XFER_UDMA_6" },
+	{ XFER_UDMA_5,			"XFER_UDMA_5" },
+	{ XFER_UDMA_4,			"XFER_UDMA_4" },
+	{ XFER_UDMA_3,			"XFER_UDMA_3" },
+	{ XFER_UDMA_2,			"XFER_UDMA_2" },
+	{ XFER_UDMA_1,			"XFER_UDMA_1" },
+	{ XFER_UDMA_0,			"XFER_UDMA_0" },
+	{ XFER_MW_DMA_4,		"XFER_MW_DMA_4" },
+	{ XFER_MW_DMA_3,		"XFER_MW_DMA_3" },
+	{ XFER_MW_DMA_2,		"XFER_MW_DMA_2" },
+	{ XFER_MW_DMA_1,		"XFER_MW_DMA_1" },
+	{ XFER_MW_DMA_0,		"XFER_MW_DMA_0" },
+	{ XFER_SW_DMA_2,		"XFER_SW_DMA_2" },
+	{ XFER_SW_DMA_1,		"XFER_SW_DMA_1" },
+	{ XFER_SW_DMA_0,		"XFER_SW_DMA_0" },
+	{ XFER_PIO_6,			"XFER_PIO_6" },
+	{ XFER_PIO_5,			"XFER_PIO_5" },
+	{ XFER_PIO_4,			"XFER_PIO_4" },
+	{ XFER_PIO_3,			"XFER_PIO_3" },
+	{ XFER_PIO_2,			"XFER_PIO_2" },
+	{ XFER_PIO_1,			"XFER_PIO_1" },
+	{ XFER_PIO_0,			"XFER_PIO_0" },
+	{ XFER_PIO_SLOW,		"XFER_PIO_SLOW" }
+};
+ata_bitfield_name_match(xfer,ata_xfer_names)
+
+/*
+ * ATA Port attributes
+ */
+#define ata_port_show_simple(field, name, format_string, cast)		\
+static ssize_t								\
+show_ata_port_##name(struct device *dev,				\
+		     struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_port *ap = transport_class_to_port(dev);		\
+									\
+	return snprintf(buf, 20, format_string, cast ap->field);	\
+}
+
+#define ata_port_simple_attr(field, name, format_string, type)		\
+	ata_port_show_simple(field, name, format_string, (type))	\
+static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL)
+
+ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int);
+ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long);
+ata_port_simple_attr(local_port_no, port_no, "%u\n", unsigned int);
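+
+/*
+ * Each attribute above becomes a read-only sysfs file on the port's
+ * transport object, e.g. /sys/class/ata_port/ataX/port_no (the exact
+ * path depends on the sysfs layout of the running kernel).
+ */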
+
+static DECLARE_TRANSPORT_CLASS(ata_port_class,
+			       "ata_port", NULL, NULL, NULL);
+
+static void ata_tport_release(struct device *dev)
+{
+	struct ata_port *ap = tdev_to_port(dev);
+	ata_host_put(ap->host);
+}
+
+/**
+ * ata_is_port --  check if a struct device represents an ATA port
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA port, %0 else
+ */
+static int ata_is_port(const struct device *dev)
+{
+	return dev->release == ata_tport_release;
+}
+
+static int ata_tport_match(struct attribute_container *cont,
+			   struct device *dev)
+{
+	if (!ata_is_port(dev))
+		return 0;
+	return &ata_scsi_transport_template->host_attrs.ac == cont;
+}
+
+/**
+ * ata_tport_delete  --  remove ATA PORT
+ * @ap:		ATA PORT to remove
+ *
+ * Removes the specified ATA PORT and the associated link as well.
+ */
+void ata_tport_delete(struct ata_port *ap)
+{
+	struct device *dev = &ap->tdev;
+
+	ata_tlink_delete(&ap->link);
+
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
+	put_device(dev);
+}
+
+/** ata_tport_add - initialize a transport ATA port structure
+ *
+ * @parent:	parent device
+ * @ap:		existing ata_port structure
+ *
+ * Initialize an ATA port structure for sysfs.  It will be added to the device
+ * tree below the device specified by @parent which could be a PCI device.
+ *
+ * Returns %0 on success
+ */
+int ata_tport_add(struct device *parent,
+		  struct ata_port *ap)
+{
+	int error;
+	struct device *dev = &ap->tdev;
+
+	device_initialize(dev);
+	dev->type = &ata_port_type;
+
+	dev->parent = parent;
+	ata_host_get(ap->host);
+	dev->release = ata_tport_release;
+	dev_set_name(dev, "ata%d", ap->print_id);
+	transport_setup_device(dev);
+	ata_acpi_bind_port(ap);
+	error = device_add(dev);
+	if (error) {
+		goto tport_err;
+	}
+
+	device_enable_async_suspend(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_forbid(dev);
+
+	transport_add_device(dev);
+	transport_configure_device(dev);
+
+	error = ata_tlink_add(&ap->link);
+	if (error) {
+		goto tport_link_err;
+	}
+	return 0;
+
+ tport_link_err:
+	transport_remove_device(dev);
+	device_del(dev);
+
+ tport_err:
+	transport_destroy_device(dev);
+	put_device(dev);
+	ata_host_put(ap->host);
+	return error;
+}
+
+
+/*
+ * ATA link attributes
+ */
+static int noop(int x) { return x; }
+
+#define ata_link_show_linkspeed(field, format)			        \
+static ssize_t								\
+show_ata_link_##field(struct device *dev,				\
+		      struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_link *link = transport_class_to_link(dev);		\
+									\
+	return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
+}
+
+#define ata_link_linkspeed_attr(field, format)				\
+	ata_link_show_linkspeed(field, format)				\
+static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
+
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
+
+
+static DECLARE_TRANSPORT_CLASS(ata_link_class,
+		"ata_link", NULL, NULL, NULL);
+
+static void ata_tlink_release(struct device *dev)
+{
+}
+
+/**
+ * ata_is_link --  check if a struct device represents an ATA link
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA link, %0 else
+ */
+static int ata_is_link(const struct device *dev)
+{
+	return dev->release == ata_tlink_release;
+}
+
+static int ata_tlink_match(struct attribute_container *cont,
+			   struct device *dev)
+{
+	struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+	if (!ata_is_link(dev))
+		return 0;
+	return &i->link_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tlink_delete  --  remove ATA LINK
+ * @link:	ATA LINK to remove
+ *
+ * Removes the specified ATA LINK and the associated ATA device(s) as well.
+ */
+void ata_tlink_delete(struct ata_link *link)
+{
+	struct device *dev = &link->tdev;
+	struct ata_device *ata_dev;
+
+	ata_for_each_dev(ata_dev, link, ALL) {
+		ata_tdev_delete(ata_dev);
+	}
+
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
+	put_device(dev);
+}
+
+/**
+ * ata_tlink_add  --  initialize a transport ATA link structure
+ * @link:	allocated ata_link structure.
+ *
+ * Initialize an ATA LINK structure for sysfs.  It will be added in the
+ * device tree below the ATA PORT it belongs to.
+ *
+ * Returns %0 on success
+ */
+int ata_tlink_add(struct ata_link *link)
+{
+	struct device *dev = &link->tdev;
+	struct ata_port *ap = link->ap;
+	struct ata_device *ata_dev;
+	int error;
+
+	device_initialize(dev);
+	dev->parent = &ap->tdev;
+	dev->release = ata_tlink_release;
+	if (ata_is_host_link(link))
+		dev_set_name(dev, "link%d", ap->print_id);
+	else
+		dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
+
+	transport_setup_device(dev);
+
+	error = device_add(dev);
+	if (error) {
+		goto tlink_err;
+	}
+
+	transport_add_device(dev);
+	transport_configure_device(dev);
+
+	ata_for_each_dev(ata_dev, link, ALL) {
+		error = ata_tdev_add(ata_dev);
+		if (error) {
+			goto tlink_dev_err;
+		}
+	}
+	return 0;
+  tlink_dev_err:
+	while (--ata_dev >= link->device) {
+		ata_tdev_delete(ata_dev);
+	}
+	transport_remove_device(dev);
+	device_del(dev);
+  tlink_err:
+	transport_destroy_device(dev);
+	put_device(dev);
+	return error;
+}
+
+/*
+ * ATA device attributes
+ */
+
+#define ata_dev_show_class(title, field)				\
+static ssize_t								\
+show_ata_dev_##field(struct device *dev,				\
+		     struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_device *ata_dev = transport_class_to_dev(dev);	\
+									\
+	return get_ata_##title##_names(ata_dev->field, buf);		\
+}
+
+#define ata_dev_attr(title, field)					\
+	ata_dev_show_class(title, field)				\
+static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL)
+
+ata_dev_attr(class, class);
+ata_dev_attr(xfer, pio_mode);
+ata_dev_attr(xfer, dma_mode);
+ata_dev_attr(xfer, xfer_mode);
+
+
+#define ata_dev_show_simple(field, format_string, cast)		\
+static ssize_t								\
+show_ata_dev_##field(struct device *dev,				\
+		     struct device_attribute *attr, char *buf)		\
+{									\
+	struct ata_device *ata_dev = transport_class_to_dev(dev);	\
+									\
+	return snprintf(buf, 20, format_string, cast ata_dev->field);	\
+}
+
+#define ata_dev_simple_attr(field, format_string, type)	\
+	ata_dev_show_simple(field, format_string, (type))	\
+static DEVICE_ATTR(field, S_IRUGO, 			\
+		   show_ata_dev_##field, NULL)
+
+ata_dev_simple_attr(spdn_cnt, "%d\n", int);
+
+struct ata_show_ering_arg {
+	char* buf;
+	int written;
+};
+
+static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
+{
+	struct ata_show_ering_arg* arg = void_arg;
+	u64 seconds;
+	u32 rem;
+
+	seconds = div_u64_rem(ent->timestamp, HZ, &rem);
+	arg->written += sprintf(arg->buf + arg->written,
+			        "[%5llu.%09lu]", seconds,
+				rem * NSEC_PER_SEC / HZ);
+	arg->written += get_ata_err_names(ent->err_mask,
+					  arg->buf + arg->written);
+	return 0;
+}
+
+static ssize_t
+show_ata_dev_ering(struct device *dev,
+		   struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	struct ata_show_ering_arg arg = { buf, 0 };
+
+	ata_ering_map(&ata_dev->ering, ata_show_ering, &arg);
+	return arg.written;
+}
+
+
+static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL);
+
+static ssize_t
+show_ata_dev_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	int written = 0, i = 0;
+
+	if (ata_dev->class == ATA_DEV_PMP)
+		return 0;
+	for (i = 0; i < ATA_ID_WORDS; i++) {
+		written += snprintf(buf + written, 20, "%04x%c",
+				    ata_dev->id[i],
+				    ((i + 1) & 7) ? ' ' : '\n');
+	}
+	return written;
+}
+
+static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL);
+
+static ssize_t
+show_ata_dev_gscr(struct device *dev,
+		  struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	int written = 0, i = 0;
+
+	if (ata_dev->class != ATA_DEV_PMP)
+		return 0;
+	for (i = 0; i < SATA_PMP_GSCR_DWORDS; i++) {
+		written += snprintf(buf + written, 20, "%08x%c",
+				    ata_dev->gscr[i],
+				    ((i + 1) & 3) ? ' ' : '\n');
+	}
+	if (SATA_PMP_GSCR_DWORDS & 3)
+		buf[written-1] = '\n';
+	return written;
+}
+
+static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
+
+static ssize_t
+show_ata_dev_trim(struct device *dev,
+		  struct device_attribute *attr, char *buf)
+{
+	struct ata_device *ata_dev = transport_class_to_dev(dev);
+	const char *mode;
+
+	if (!ata_id_has_trim(ata_dev->id))
+		mode = "unsupported";
+	else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+		mode = "forced_unsupported";
+	else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
+		mode = "forced_unqueued";
+	else if (ata_fpdma_dsm_supported(ata_dev))
+		mode = "queued";
+	else
+		mode = "unqueued";
+
+	return snprintf(buf, 20, "%s\n", mode);
+}
+
+static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
+
+static DECLARE_TRANSPORT_CLASS(ata_dev_class,
+			       "ata_device", NULL, NULL, NULL);
+
+static void ata_tdev_release(struct device *dev)
+{
+}
+
+/**
+ * ata_is_ata_dev  --  check if a struct device represents an ATA device
+ * @dev:	device to check
+ *
+ * Returns:
+ *	%1 if the device represents an ATA device, %0 else
+ */
+static int ata_is_ata_dev(const struct device *dev)
+{
+	return dev->release == ata_tdev_release;
+}
+
+static int ata_tdev_match(struct attribute_container *cont,
+			  struct device *dev)
+{
+	struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+	if (!ata_is_ata_dev(dev))
+		return 0;
+	return &i->dev_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tdev_free  --  free an ATA device
+ * @dev:	ATA device to free
+ *
+ * Frees the specified ATA device transport object.
+ *
+ * Note:
+ *   This function must only be called on a device that has not
+ *   successfully been added using ata_tdev_add().
+ */
+static void ata_tdev_free(struct ata_device *dev)
+{
+	transport_destroy_device(&dev->tdev);
+	put_device(&dev->tdev);
+}
+
+/**
+ * ata_tdev_delete  --  remove ATA device
+ * @ata_dev:	ATA device to remove
+ *
+ * Removes the specified ATA device.
+ */
+static void ata_tdev_delete(struct ata_device *ata_dev)
+{
+	struct device *dev = &ata_dev->tdev;
+
+	transport_remove_device(dev);
+	device_del(dev);
+	ata_tdev_free(ata_dev);
+}
+
+
+/**
+ * ata_tdev_add  --  initialize a transport ATA device structure.
+ * @ata_dev:	ata_dev structure.
+ *
+ * Initialize an ATA device structure for sysfs.  It will be added in the
+ * device tree below the ATA LINK device it belongs to.
+ *
+ * Returns %0 on success
+ */
+static int ata_tdev_add(struct ata_device *ata_dev)
+{
+	struct device *dev = &ata_dev->tdev;
+	struct ata_link *link = ata_dev->link;
+	struct ata_port *ap = link->ap;
+	int error;
+
+	device_initialize(dev);
+	dev->parent = &link->tdev;
+	dev->release = ata_tdev_release;
+	if (ata_is_host_link(link))
+		dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
+	else
+		dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
+
+	transport_setup_device(dev);
+	ata_acpi_bind_dev(ata_dev);
+	error = device_add(dev);
+	if (error) {
+		ata_tdev_free(ata_dev);
+		return error;
+	}
+
+	transport_add_device(dev);
+	transport_configure_device(dev);
+	return 0;
+}
+
+
+/*
+ * Setup / Teardown code
+ */
+
+#define SETUP_TEMPLATE(attrb, field, perm, test)			\
+	i->private_##attrb[count] = dev_attr_##field;		       	\
+	i->private_##attrb[count].attr.mode = perm;			\
+	i->attrb[count] = &i->private_##attrb[count];			\
+	if (test)							\
+		count++
+
+#define SETUP_LINK_ATTRIBUTE(field)					\
+	SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1)
+
+#define SETUP_PORT_ATTRIBUTE(field)					\
+	SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
+
+#define SETUP_DEV_ATTRIBUTE(field)					\
+	SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1)
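+
+/*
+ * For illustration, SETUP_PORT_ATTRIBUTE(nr_pmp_links) expands to:
+ * copy dev_attr_nr_pmp_links into the private array, force its mode
+ * to S_IRUGO, publish a pointer to it in i->port_attrs[] and advance
+ * count.
+ */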
+
+/**
+ * ata_attach_transport  --  instantiate ATA transport template
+ */
+struct scsi_transport_template *ata_attach_transport(void)
+{
+	struct ata_internal *i;
+	int count;
+
+	i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL);
+	if (!i)
+		return NULL;
+
+	i->t.eh_strategy_handler	= ata_scsi_error;
+	i->t.user_scan			= ata_scsi_user_scan;
+
+	i->t.host_attrs.ac.attrs = &i->port_attrs[0];
+	i->t.host_attrs.ac.class = &ata_port_class.class;
+	i->t.host_attrs.ac.match = ata_tport_match;
+	transport_container_register(&i->t.host_attrs);
+
+	i->link_attr_cont.ac.class = &ata_link_class.class;
+	i->link_attr_cont.ac.attrs = &i->link_attrs[0];
+	i->link_attr_cont.ac.match = ata_tlink_match;
+	transport_container_register(&i->link_attr_cont);
+
+	i->dev_attr_cont.ac.class = &ata_dev_class.class;
+	i->dev_attr_cont.ac.attrs = &i->dev_attrs[0];
+	i->dev_attr_cont.ac.match = ata_tdev_match;
+	transport_container_register(&i->dev_attr_cont);
+
+	count = 0;
+	SETUP_PORT_ATTRIBUTE(nr_pmp_links);
+	SETUP_PORT_ATTRIBUTE(idle_irq);
+	SETUP_PORT_ATTRIBUTE(port_no);
+	BUG_ON(count > ATA_PORT_ATTRS);
+	i->port_attrs[count] = NULL;
+
+	count = 0;
+	SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit);
+	SETUP_LINK_ATTRIBUTE(sata_spd_limit);
+	SETUP_LINK_ATTRIBUTE(sata_spd);
+	BUG_ON(count > ATA_LINK_ATTRS);
+	i->link_attrs[count] = NULL;
+
+	count = 0;
+	SETUP_DEV_ATTRIBUTE(class);
+	SETUP_DEV_ATTRIBUTE(pio_mode);
+	SETUP_DEV_ATTRIBUTE(dma_mode);
+	SETUP_DEV_ATTRIBUTE(xfer_mode);
+	SETUP_DEV_ATTRIBUTE(spdn_cnt);
+	SETUP_DEV_ATTRIBUTE(ering);
+	SETUP_DEV_ATTRIBUTE(id);
+	SETUP_DEV_ATTRIBUTE(gscr);
+	SETUP_DEV_ATTRIBUTE(trim);
+	BUG_ON(count > ATA_DEV_ATTRS);
+	i->dev_attrs[count] = NULL;
+
+	return &i->t;
+}
+
+/**
+ * ata_release_transport  --  release ATA transport template instance
+ * @t:		transport template instance
+ */
+void ata_release_transport(struct scsi_transport_template *t)
+{
+	struct ata_internal *i = to_ata_internal(t);
+
+	transport_container_unregister(&i->t.host_attrs);
+	transport_container_unregister(&i->link_attr_cont);
+	transport_container_unregister(&i->dev_attr_cont);
+
+	kfree(i);
+}
+
+__init int libata_transport_init(void)
+{
+	int error;
+
+	error = transport_class_register(&ata_link_class);
+	if (error)
+		goto out_unregister_transport;
+	error = transport_class_register(&ata_port_class);
+	if (error)
+		goto out_unregister_link;
+	error = transport_class_register(&ata_dev_class);
+	if (error)
+		goto out_unregister_port;
+	return 0;
+
+ out_unregister_port:
+	transport_class_unregister(&ata_port_class);
+ out_unregister_link:
+	transport_class_unregister(&ata_link_class);
+ out_unregister_transport:
+	return error;
+
+}
+
+void __exit libata_transport_exit(void)
+{
+	transport_class_unregister(&ata_link_class);
+	transport_class_unregister(&ata_port_class);
+	transport_class_unregister(&ata_dev_class);
+}
diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h
new file mode 100644
index 0000000..08a57fb
--- /dev/null
+++ b/drivers/ata/libata-transport.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LIBATA_TRANSPORT_H
+#define _LIBATA_TRANSPORT_H
+
+extern struct scsi_transport_template *ata_scsi_transport_template;
+
+int ata_tlink_add(struct ata_link *link);
+void ata_tlink_delete(struct ata_link *link);
+
+int ata_tport_add(struct device *parent, struct ata_port *ap);
+void ata_tport_delete(struct ata_port *ap);
+
+struct scsi_transport_template *ata_attach_transport(void);
+void ata_release_transport(struct scsi_transport_template *t);
+
+__init int libata_transport_init(void);
+void __exit libata_transport_exit(void);
+#endif
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
new file mode 100644
index 0000000..b3ed8f9
--- /dev/null
+++ b/drivers/ata/libata-zpodd.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/libata.h>
+#include <linux/cdrom.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/pm_qos.h>
+#include <scsi/scsi_device.h>
+
+#include "libata.h"
+
+static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */
+module_param(zpodd_poweroff_delay, int, 0644);
+MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds");
+
+enum odd_mech_type {
+	ODD_MECH_TYPE_SLOT,
+	ODD_MECH_TYPE_DRAWER,
+	ODD_MECH_TYPE_UNSUPPORTED,
+};
+
+struct zpodd {
+	enum odd_mech_type	mech_type; /* init during probe, RO afterwards */
+	struct ata_device	*dev;
+
+	/* The following fields are synchronized by PM core. */
+	bool			from_notify; /* resumed as a result of
+					      * acpi wake notification */
+	bool			zp_ready; /* ZP ready state */
+	unsigned long		last_ready; /* last ZP ready timestamp */
+	bool			zp_sampled; /* ZP ready state sampled */
+	bool			powered_off; /* ODD is powered off
+					      *	during suspend */
+};
+
+static int eject_tray(struct ata_device *dev)
+{
+	struct ata_taskfile tf;
+	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_START_STOP_UNIT,
+		0, 0, 0,
+		0x02,     /* LoEj */
+		0, 0, 0, 0, 0, 0, 0,
+	};
+
+	ata_tf_init(dev, &tf);
+	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_NODATA;
+
+	return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+}
+
+/* Per the spec, only slot type and drawer type ODD can be supported */
+static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+{
+	char buf[16];
+	unsigned int ret;
+	struct rm_feature_desc *desc = (void *)(buf + 8);
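+	/*
+	 * GET CONFIGURATION returns an 8-byte feature header followed by
+	 * the feature descriptors, hence the descriptor sits at buf + 8.
+	 */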
+	struct ata_taskfile tf;
+	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+			2,      /* only 1 feature descriptor requested */
+			0, 3,   /* 3, removable medium feature */
+			0, 0, 0,/* reserved */
+			0, sizeof(buf),
+			0, 0, 0,
+	};
+
+	ata_tf_init(dev, &tf);
+	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.command = ATA_CMD_PACKET;
+	tf.protocol = ATAPI_PROT_PIO;
+	tf.lbam = sizeof(buf);
+
+	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+				buf, sizeof(buf), 0);
+	if (ret)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+
+	if (be16_to_cpu(desc->feature_code) != 3)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+
+	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+		return ODD_MECH_TYPE_SLOT;
+	else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+		return ODD_MECH_TYPE_DRAWER;
+	else
+		return ODD_MECH_TYPE_UNSUPPORTED;
+}
+
+/* Test if ODD is zero power ready by sense code */
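+/*
+ * Fixed-format sense data layout relied on below (sketch): byte 0
+ * carries the response code (0x70/0x71), byte 7 the additional sense
+ * length, and bytes 12/13 the ASC/ASCQ pair.
+ */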
+static bool zpready(struct ata_device *dev)
+{
+	u8 sense_key, *sense_buf;
+	unsigned int ret, asc, ascq, add_len;
+	struct zpodd *zpodd = dev->zpodd;
+
+	ret = atapi_eh_tur(dev, &sense_key);
+
+	if (!ret || sense_key != NOT_READY)
+		return false;
+
+	sense_buf = dev->link->ap->sector_buf;
+	ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
+	if (ret)
+		return false;
+
+	/* sense valid */
+	if ((sense_buf[0] & 0x7f) != 0x70)
+		return false;
+
+	add_len = sense_buf[7];
+	/* has asc and ascq */
+	if (add_len < 6)
+		return false;
+
+	asc = sense_buf[12];
+	ascq = sense_buf[13];
+
+	if (zpodd->mech_type == ODD_MECH_TYPE_SLOT)
+		/* no media inside */
+		return asc == 0x3a;
+	else
+		/* no media inside and door closed */
+		return asc == 0x3a && ascq == 0x01;
+}
+
+/*
+ * Update the zpodd->zp_ready field. This field will only be set
+ * if the ODD has stayed in ZP ready state for zpodd_poweroff_delay
+ * time, and will be used to decide if power off is allowed. If it
+ * is set, it will be cleared during resume from powered off state.
+ */
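+/*
+ * Worked example with the default zpodd_poweroff_delay of 30 seconds:
+ * the first suspend that finds the ODD ZP ready only records the
+ * timestamp; zp_ready is then set by the first later suspend that
+ * still finds the ODD ZP ready at least 30 seconds after that.
+ */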
+void zpodd_on_suspend(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+	unsigned long expires;
+
+	if (!zpready(dev)) {
+		zpodd->zp_sampled = false;
+		zpodd->zp_ready = false;
+		return;
+	}
+
+	if (!zpodd->zp_sampled) {
+		zpodd->zp_sampled = true;
+		zpodd->last_ready = jiffies;
+		return;
+	}
+
+	expires = zpodd->last_ready +
+		  msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+	if (time_before(jiffies, expires))
+		return;
+
+	zpodd->zp_ready = true;
+}
+
+bool zpodd_zpready(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+	return zpodd->zp_ready;
+}
+
+/*
+ * Enable runtime wake capability through ACPI and set the powered_off
+ * flag; the flag is consulted during resume to decide which recovery
+ * operations need to be taken.
+ *
+ * Media polling also needs to be silenced, so that it doesn't bring the
+ * ODD back to full power state every few seconds.
+ */
+void zpodd_enable_run_wake(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+
+	sdev_disable_disk_events(dev->sdev);
+
+	zpodd->powered_off = true;
+	acpi_pm_set_device_wakeup(&dev->tdev, true);
+}
+
+/* Disable runtime wake capability if it is enabled */
+void zpodd_disable_run_wake(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+
+	if (zpodd->powered_off)
+		acpi_pm_set_device_wakeup(&dev->tdev, false);
+}
+
+/*
+ * Post power-on processing after the ODD has been recovered. If the
+ * ODD wasn't powered off during suspend, this function does nothing.
+ *
+ * For a drawer type ODD that was powered on because the user pressed
+ * the eject button, the tray needs to be ejected. This can only be
+ * done after the ODD has been recovered, i.e. the link is initialized
+ * and the device is able to process a NON_DATA PIO command, as the
+ * eject is itself a command the ODD has to process.
+ *
+ * The from_notify flag, set in the wake notification handler
+ * zpodd_wake_dev, indicates whether the power on was triggered by the
+ * user's action.
+ *
+ * For both types of ODD, several fields need to be reset.
+ */
+void zpodd_post_poweron(struct ata_device *dev)
+{
+	struct zpodd *zpodd = dev->zpodd;
+
+	if (!zpodd->powered_off)
+		return;
+
+	zpodd->powered_off = false;
+
+	if (zpodd->from_notify) {
+		zpodd->from_notify = false;
+		if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER)
+			eject_tray(dev);
+	}
+
+	zpodd->zp_sampled = false;
+	zpodd->zp_ready = false;
+
+	sdev_enable_disk_events(dev->sdev);
+}
+
+static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context)
+{
+	struct ata_device *ata_dev = context;
+	struct zpodd *zpodd = ata_dev->zpodd;
+	struct device *dev = &ata_dev->sdev->sdev_gendev;
+
+	if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) {
+		zpodd->from_notify = true;
+		pm_runtime_resume(dev);
+	}
+}
+
+static void ata_acpi_add_pm_notifier(struct ata_device *dev)
+{
+	acpi_handle handle = ata_dev_acpi_handle(dev);
+	acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+				    zpodd_wake_dev, dev);
+}
+
+static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
+{
+	acpi_handle handle = ata_dev_acpi_handle(dev);
+	acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
+}
+
+void zpodd_init(struct ata_device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(&dev->tdev);
+	enum odd_mech_type mech_type;
+	struct zpodd *zpodd;
+
+	if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev))
+		return;
+
+	mech_type = zpodd_get_mech_type(dev);
+	if (mech_type == ODD_MECH_TYPE_UNSUPPORTED)
+		return;
+
+	zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL);
+	if (!zpodd)
+		return;
+
+	zpodd->mech_type = mech_type;
+
+	ata_acpi_add_pm_notifier(dev);
+	zpodd->dev = dev;
+	dev->zpodd = zpodd;
+	dev_pm_qos_expose_flags(&dev->tdev, 0);
+}
+
+void zpodd_exit(struct ata_device *dev)
+{
+	ata_acpi_remove_pm_notifier(dev);
+	kfree(dev->zpodd);
+	dev->zpodd = NULL;
+}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
new file mode 100644
index 0000000..f953cb4
--- /dev/null
+++ b/drivers/ata/libata.h
@@ -0,0 +1,255 @@
+/*
+ *  libata.h - helper library for ATA
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ */
+
+#ifndef __LIBATA_H__
+#define __LIBATA_H__
+
+#define DRV_NAME	"libata"
+#define DRV_VERSION	"3.00"	/* must be exactly four chars */
+
+/* libata-core.c */
+enum {
+	/* flags for ata_dev_read_id() */
+	ATA_READID_POSTRESET	= (1 << 0), /* reading ID after reset */
+
+	/* selector for ata_down_xfermask_limit() */
+	ATA_DNXFER_PIO		= 0,	/* speed down PIO */
+	ATA_DNXFER_DMA		= 1,	/* speed down DMA */
+	ATA_DNXFER_40C		= 2,	/* apply 40c cable limit */
+	ATA_DNXFER_FORCE_PIO	= 3,	/* force PIO */
+	ATA_DNXFER_FORCE_PIO0	= 4,	/* force PIO0 */
+
+	ATA_DNXFER_QUIET	= (1 << 31),
+};
+
+extern atomic_t ata_print_id;
+extern int atapi_passthru16;
+extern int libata_fua;
+extern int libata_noacpi;
+extern int libata_allow_tpm;
+extern const struct device_type ata_port_type;
+extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+extern void ata_force_cbl(struct ata_port *ap);
+extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
+extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
+extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+			   u64 block, u32 n_block, unsigned int tf_flags,
+			   unsigned int tag, int class);
+extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
+			     struct ata_device *dev);
+extern unsigned ata_exec_internal(struct ata_device *dev,
+				  struct ata_taskfile *tf, const u8 *cdb,
+				  int dma_dir, void *buf, unsigned int buflen,
+				  unsigned long timeout);
+extern unsigned ata_exec_internal_sg(struct ata_device *dev,
+				     struct ata_taskfile *tf, const u8 *cdb,
+				     int dma_dir, struct scatterlist *sg,
+				     unsigned int n_elem, unsigned long timeout);
+extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+			  int (*check_ready)(struct ata_link *link));
+extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+			   unsigned int flags, u16 *id);
+extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
+extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+			      unsigned int readid_flags);
+extern int ata_dev_configure(struct ata_device *dev);
+extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
+extern unsigned int ata_dev_set_feature(struct ata_device *dev,
+					u8 enable, u8 feature);
+extern void ata_qc_free(struct ata_queued_cmd *qc);
+extern void ata_qc_issue(struct ata_queued_cmd *qc);
+extern void __ata_qc_complete(struct ata_queued_cmd *qc);
+extern int atapi_check_dma(struct ata_queued_cmd *qc);
+extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
+extern bool ata_phys_link_online(struct ata_link *link);
+extern bool ata_phys_link_offline(struct ata_link *link);
+extern void ata_dev_init(struct ata_device *dev);
+extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
+extern int sata_link_init_spd(struct ata_link *link);
+extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
+extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern const char *sata_spd_string(unsigned int spd);
+extern int ata_port_probe(struct ata_port *ap);
+extern void __ata_port_probe(struct ata_port *ap);
+extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+				      u8 page, void *buf, unsigned int sectors);
+
+#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+
+/* libata-acpi.c */
+#ifdef CONFIG_ATA_ACPI
+extern unsigned int ata_acpi_gtf_filter;
+extern void ata_acpi_dissociate(struct ata_host *host);
+extern int ata_acpi_on_suspend(struct ata_port *ap);
+extern void ata_acpi_on_resume(struct ata_port *ap);
+extern int ata_acpi_on_devcfg(struct ata_device *dev);
+extern void ata_acpi_on_disable(struct ata_device *dev);
+extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
+extern void ata_acpi_bind_port(struct ata_port *ap);
+extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
+#else
+static inline void ata_acpi_dissociate(struct ata_host *host) { }
+static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
+static inline void ata_acpi_on_resume(struct ata_port *ap) { }
+static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
+static inline void ata_acpi_on_disable(struct ata_device *dev) { }
+static inline void ata_acpi_set_state(struct ata_port *ap,
+				      pm_message_t state) { }
+static inline void ata_acpi_bind_port(struct ata_port *ap) {}
+static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+#endif
+
+/* libata-scsi.c */
+extern int ata_scsi_add_hosts(struct ata_host *host,
+			      struct scsi_host_template *sht);
+extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
+extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_set_sense(struct ata_device *dev,
+			       struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
+extern void ata_scsi_set_sense_information(struct ata_device *dev,
+					   struct scsi_cmnd *cmd,
+					   const struct ata_taskfile *tf);
+extern void ata_scsi_media_change_notify(struct ata_device *dev);
+extern void ata_scsi_hotplug(struct work_struct *work);
+extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
+extern int ata_bus_probe(struct ata_port *ap);
+extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+			      unsigned int id, u64 lun);
+int ata_sas_allocate_tag(struct ata_port *ap);
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
+
+
+/* libata-eh.c */
+extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
+extern void ata_eh_acquire(struct ata_port *ap);
+extern void ata_eh_release(struct ata_port *ap);
+extern void ata_scsi_error(struct Scsi_Host *host);
+extern void ata_eh_fastdrain_timerfn(struct timer_list *t);
+extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
+extern void ata_dev_disable(struct ata_device *dev);
+extern void ata_eh_detach_dev(struct ata_device *dev);
+extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+			       unsigned int action);
+extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+			unsigned int action);
+extern void ata_eh_autopsy(struct ata_port *ap);
+const char *ata_get_cmd_descript(u8 command);
+extern void ata_eh_report(struct ata_port *ap);
+extern int ata_eh_reset(struct ata_link *link, int classify,
+			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
+extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+			  ata_postreset_fn_t postreset,
+			  struct ata_link **r_failed_disk);
+extern void ata_eh_finish(struct ata_port *ap);
+extern int ata_ering_map(struct ata_ering *ering,
+			 int (*map_fn)(struct ata_ering_entry *, void *),
+			 void *arg);
+extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
+extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
+					   u8 *sense_buf, u8 dfl_sense_key);
+
+/* libata-pmp.c */
+#ifdef CONFIG_SATA_PMP
+extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+			    unsigned hints);
+extern int sata_pmp_attach(struct ata_device *dev);
+#else /* CONFIG_SATA_PMP */
+static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+	return -EINVAL;
+}
+
+static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+	return -EINVAL;
+}
+
+static inline int sata_pmp_set_lpm(struct ata_link *link,
+				   enum ata_lpm_policy policy, unsigned hints)
+{
+	return -EINVAL;
+}
+
+static inline int sata_pmp_attach(struct ata_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_SATA_PMP */
+
+/* libata-sff.c */
+#ifdef CONFIG_ATA_SFF
+extern void ata_sff_flush_pio_task(struct ata_port *ap);
+extern void ata_sff_port_init(struct ata_port *ap);
+extern int ata_sff_init(void);
+extern void ata_sff_exit(void);
+#else /* CONFIG_ATA_SFF */
+static inline void ata_sff_flush_pio_task(struct ata_port *ap)
+{ }
+static inline void ata_sff_port_init(struct ata_port *ap)
+{ }
+static inline int ata_sff_init(void)
+{ return 0; }
+static inline void ata_sff_exit(void)
+{ }
+#endif /* CONFIG_ATA_SFF */
+
+/* libata-zpodd.c */
+#ifdef CONFIG_SATA_ZPODD
+void zpodd_init(struct ata_device *dev);
+void zpodd_exit(struct ata_device *dev);
+static inline bool zpodd_dev_enabled(struct ata_device *dev)
+{
+	return dev->zpodd != NULL;
+}
+void zpodd_on_suspend(struct ata_device *dev);
+bool zpodd_zpready(struct ata_device *dev);
+void zpodd_enable_run_wake(struct ata_device *dev);
+void zpodd_disable_run_wake(struct ata_device *dev);
+void zpodd_post_poweron(struct ata_device *dev);
+#else /* CONFIG_SATA_ZPODD */
+static inline void zpodd_init(struct ata_device *dev) {}
+static inline void zpodd_exit(struct ata_device *dev) {}
+static inline bool zpodd_dev_enabled(struct ata_device *dev) { return false; }
+static inline void zpodd_on_suspend(struct ata_device *dev) {}
+static inline bool zpodd_zpready(struct ata_device *dev) { return false; }
+static inline void zpodd_enable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_disable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_post_poweron(struct ata_device *dev) {}
+#endif /* CONFIG_SATA_ZPODD */
+
+#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
new file mode 100644
index 0000000..b70fce2
--- /dev/null
+++ b/drivers/ata/pata_acpi.c
@@ -0,0 +1,280 @@
+/*
+ *	ACPI PATA driver
+ *
+ *	(c) 2007 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/acpi.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME	"pata_acpi"
+#define DRV_VERSION	"0.2.3"
+
+struct pata_acpi {
+	struct ata_acpi_gtm gtm;
+	void *last;
+	unsigned long mask[2];
+};
+
+/**
+ *	pacpi_pre_reset	-	check for 40/80 pin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pata_acpi *acpi = ap->private_data;
+	if (ACPI_HANDLE(&ap->tdev) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
+		return -ENODEV;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	pacpi_cable_detect	-	cable type detection
+ *	@ap: port to detect
+ *
+ *	Perform device specific cable detection
+ */
+
+static int pacpi_cable_detect(struct ata_port *ap)
+{
+	struct pata_acpi *acpi = ap->private_data;
+
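+	/*
+	 * 0xF8 << ATA_SHIFT_UDMA covers UDMA3 and above, the modes that
+	 * require an 80-wire cable.
+	 */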
+	if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	pacpi_discover_modes	-	filter non ACPI modes
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Try the modes available and see which ones the ACPI method will
+ *	set up sensibly. From this we get a mask of ACPI modes we can use.
+ */
+
+static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pata_acpi *acpi = ap->private_data;
+	struct ata_acpi_gtm probe;
+	unsigned int xfer_mask;
+
+	probe = acpi->gtm;
+
+	ata_acpi_gtm(ap, &probe);
+
+	xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
+
+	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+		ap->cbl = ATA_CBL_PATA80;
+
+	return xfer_mask;
+}
+
+/**
+ *	pacpi_mode_filter	-	mode filter for ACPI
+ *	@adev: device
+ *	@mask: mask of valid modes
+ *
+ *	Filter the valid mode list according to our own specific rules, in
+ *	this case the list of discovered valid modes obtained by ACPI probing
+ */
+
+static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	struct pata_acpi *acpi = adev->link->ap->private_data;
+	return mask & acpi->mask[adev->devno];
+}
+
+/**
+ *	pacpi_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ */
+
+static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int unit = adev->devno;
+	struct pata_acpi *acpi = ap->private_data;
+	const struct ata_timing *t;
+
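+	/*
+	 * GTM flags bit 4 set means the channel has independent per-drive
+	 * timings; when clear, both drives share the drive-0 timing slot
+	 * (our reading of the ACPI _GTM definition, stated as an assumption).
+	 */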
+	if (!(acpi->gtm.flags & 0x10))
+		unit = 0;
+
+	/* Now stuff the ns values into the structure */
+	t = ata_timing_find_mode(adev->pio_mode);
+	acpi->gtm.drive[unit].pio = t->cycle;
+	ata_acpi_stm(ap, &acpi->gtm);
+	/* See what mode we actually got */
+	ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ *	pacpi_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ */
+
+static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	int unit = adev->devno;
+	struct pata_acpi *acpi = ap->private_data;
+	const struct ata_timing *t;
+
+	if (!(acpi->gtm.flags & 0x10))
+		unit = 0;
+
+	/* Now stuff the ns values into the structure */
+	t = ata_timing_find_mode(adev->dma_mode);
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		acpi->gtm.drive[unit].dma = t->udma;
+		acpi->gtm.flags |= (1 << (2 * unit));
+	} else {
+		acpi->gtm.drive[unit].dma = t->cycle;
+		acpi->gtm.flags &= ~(1 << (2 * unit));
+	}
+	ata_acpi_stm(ap, &acpi->gtm);
+	/* See what mode we actually got */
+	ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ *	pacpi_qc_issue	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.
+ */
+
+static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct pata_acpi *acpi = ap->private_data;
+
+	if (acpi->gtm.flags & 0x10)
+		return ata_bmdma_qc_issue(qc);
+
+	if (adev != acpi->last) {
+		pacpi_set_piomode(ap, adev);
+		if (ata_dma_enabled(adev))
+			pacpi_set_dmamode(ap, adev);
+		acpi->last = adev;
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+/**
+ *	pacpi_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	Use the port_start hook to maintain private control structures
+ */
+
+static int pacpi_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct pata_acpi *acpi;
+
+	if (ACPI_HANDLE(&ap->tdev) == NULL)
+		return -ENODEV;
+
+	acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
+	if (ap->private_data == NULL)
+		return -ENOMEM;
+	acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
+	acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
+	return ata_bmdma_port_start(ap);
+}
+
+static struct scsi_host_template pacpi_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pacpi_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= pacpi_qc_issue,
+	.cable_detect		= pacpi_cable_detect,
+	.mode_filter		= pacpi_mode_filter,
+	.set_piomode		= pacpi_set_piomode,
+	.set_dmamode		= pacpi_set_dmamode,
+	.prereset		= pacpi_pre_reset,
+	.port_start		= pacpi_port_start,
+};
+
+/**
+ *	pacpi_init_one - Register ACPI ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@id: Entry in pacpi_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA6,
+
+		.port_ops	= &pacpi_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(pdev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(pdev);
+	}
+	return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
+}
+
+static const struct pci_device_id pacpi_pci_tbl[] = {
+	{ PCI_ANY_ID,		PCI_ANY_ID,			   PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pacpi_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pacpi_pci_tbl,
+	.probe			= pacpi_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(pacpi_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
new file mode 100644
index 0000000..0b122f9
--- /dev/null
+++ b/drivers/ata/pata_ali.c
@@ -0,0 +1,652 @@
+/*
+ * pata_ali.c 	- ALI 15x3 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *
+ * based in part upon
+ * linux/drivers/ide/pci/alim15x3.c		Version 0.17	2003/01/02
+ *
+ *  Copyright (C) 1998-2000 Michel Aubry, Maintainer
+ *  Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
+ *  Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
+ *
+ *  Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
+ *  May be copied or modified under the terms of the GNU General Public License
+ *  Copyright (C) 2002 Alan Cox <alan@redhat.com>
+ *  ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
+ *
+ *  Documentation
+ *	Chipset documentation available under NDA only
+ *
+ *  TODO/CHECK
+ *	Cannot have ATAPI on both master & slave for rev < c2 (???) but
+ *	otherwise should do atapi DMA (For now for old we do PIO only for
+ *	otherwise should do ATAPI DMA (for now we do PIO only for ATAPI
+ *	on the old revisions)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_ali"
+#define DRV_VERSION "0.7.8"
+
+static int ali_atapi_dma = 0;
+module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
+MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
+
+static struct pci_dev *ali_isa_bridge;
+
+/*
+ *	Cable special cases
+ */
+
+static const struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "HP Pavilion N5430",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
+		},
+	},
+	{
+		.ident = "Toshiba Satellite S1800-814",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
+		},
+	},
+	{ }
+};
+
+static int ali_cable_override(struct pci_dev *pdev)
+{
+	/* Fujitsu P2000 */
+	if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
+		return 1;
+	/* Mitac 8317 (Winbook-A) and relatives */
+	if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
+		return 1;
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+	return 0;
+}
+
+/**
+ *	ali_c2_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection for C2 and later revisions
+ */
+
+static int ali_c2_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+
+	/* Certain laptops use short but suitable cables and don't
+	   implement the detect logic */
+
+	if (ali_cable_override(pdev))
+		return ATA_CBL_PATA40_SHORT;
+
+	/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
+	   Bit set for 40 pin */
+	pci_read_config_byte(pdev, 0x4A, &ata66);
+	if (ata66 & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	ali_20_filter		-	filter for earlier ALI DMA
+ *	@adev: attached device
+ *	@mask: proposed transfer modes
+ *
+ *	Ensure that we do not do DMA on CD devices. We may be able to
+ *	fix that later on. Also ensure we do not do UDMA on WDC drives.
+ */
+
+static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
+{
+	char model_num[ATA_ID_PROD_LEN + 1];
+	/* No DMA on anything but a disk for now */
+	if (adev->class != ATA_DEV_ATA)
+		mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+	if (strstr(model_num, "WDC"))
+		return mask & ~ATA_MASK_UDMA;
+	return mask;
+}
+
+/**
+ *	ali_fifo_control	-	FIFO manager
+ *	@ap: ALi channel to control
+ *	@adev: device for FIFO control
+ *	@on: FIFO nibble to program: 0x00 for off, 0x05/0x08 for on
+ *
+ *	Enable or disable the FIFO on a given device. Because of the way the
+ *	ALi FIFO works it provides a boost on ATA disk but can be confused by
+ *	ATAPI and we must therefore manage it.
+ */
+
+static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio_fifo = 0x54 + ap->port_no;
+	u8 fifo;
+	int shift = 4 * adev->devno;
+
+	/* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to
+	   0x00. Not all the docs agree but the behaviour we now use is the
+	   one stated in the BIOS Programming Guide */
+
+	pci_read_config_byte(pdev, pio_fifo, &fifo);
+	fifo &= ~(0x0F << shift);
+	fifo |= (on << shift);
+	pci_write_config_byte(pdev, pio_fifo, fifo);
+}
+
+/**
+ *	ali_program_modes	-	load mode registers
+ *	@ap: ALi channel to load
+ *	@adev: Device the timing is for
+ *	@t: timing data
+ *	@ultra: UDMA timing or zero for off
+ *
+ *	Loads the timing registers for cmd/data and disables UDMA if
+ *	ultra is zero. If ultra is set then load and enable the UDMA
+ *	timing but do not touch the command/data timing.
+ */
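+/*
+ * Register map example (sketch): for the secondary channel, slave
+ * device (port_no = 1, devno = 1) this computes cas = 0x5C, cbt = 0x5D,
+ * drwt = 0x5F and udmat = 0x57, with the UDMA nibble in bits 7:4.
+ */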
+
+static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int cas = 0x58 + 4 * ap->port_no;	/* Address setup timing */
+	int cbt = 0x59 + 4 * ap->port_no;	/* 8bit command timing */
+	int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
+	int udmat = 0x56 + ap->port_no;	/* UDMA timing */
+	int shift = 4 * adev->devno;
+	u8 udma;
+
+	if (t != NULL) {
+		t->setup = clamp_val(t->setup, 1, 8) & 7;
+		t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+		t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+		t->active = clamp_val(t->active, 1, 8) & 7;
+		t->recover = clamp_val(t->recover, 1, 16) & 15;
+
+		pci_write_config_byte(pdev, cas, t->setup);
+		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
+		pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
+	}
+
+	/* Set up the UDMA enable */
+	pci_read_config_byte(pdev, udmat, &udma);
+	udma &= ~(0x0F << shift);
+	udma |= ultra << shift;
+	pci_write_config_byte(pdev, udmat, udma);
+}
+
+/**
+ *	ali_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the ALi registers for PIO mode.
+ */
+
+static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	struct ata_timing t;
+	unsigned long T =  1000000000 / 33333;	/* PCI clock based */
+
+	ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
+	if (pair) {
+		struct ata_timing p;
+		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+		if (pair->dma_mode) {
+			ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+		}
+	}
+
+	/* PIO FIFO is only permitted on ATA disk */
+	if (adev->class != ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x00);
+	ali_program_modes(ap, adev, &t, 0);
+	if (adev->class == ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x05);
+}
+
+/**
+ *	ali_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the ALi registers for DMA mode.
+ */
+
+static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
+	struct ata_device *pair = ata_dev_pair(adev);
+	struct ata_timing t;
+	unsigned long T =  1000000000 / 33333;	/* PCI clock based */
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (adev->class == ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x08);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
+		if (adev->dma_mode >= XFER_UDMA_3) {
+			u8 reg4b;
+			pci_read_config_byte(pdev, 0x4B, &reg4b);
+			reg4b |= 1;
+			pci_write_config_byte(pdev, 0x4B, reg4b);
+		}
+	} else {
+		ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
+		if (pair) {
+			struct ata_timing p;
+			ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+			if (pair->dma_mode) {
+				ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+				ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+			}
+		}
+		ali_program_modes(ap, adev, &t, 0);
+	}
+}
+
+/**
+ *	ali_warn_atapi_dma	-	Warn about ATAPI DMA disablement
+ *	@adev: Device
+ *
+ *	Whine about ATAPI DMA disablement if @adev is an ATAPI device.
+ *	Can be used as ->dev_config.
+ */
+
+static void ali_warn_atapi_dma(struct ata_device *adev)
+{
+	struct ata_eh_context *ehc = &adev->link->eh_context;
+	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+
+	if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
+		ata_dev_warn(adev,
+			     "WARNING: ATAPI DMA disabled for reliability issues.  It can be enabled\n");
+		ata_dev_warn(adev,
+			     "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
+	}
+}
+
+/**
+ *	ali_lock_sectors	-	Keep older devices to 255 sector mode
+ *	@adev: Device
+ *
+ *	Called during the bus probe for each device that is found. We use
+ *	this call to lock the sector count of the device to 255 or less on
+ *	older ALi controllers. If we didn't do this then large I/Os would
+ *	require LBA48 commands, which the older ALi requires be issued by
+ *	slower PIO methods.
+ */
+
+static void ali_lock_sectors(struct ata_device *adev)
+{
+	adev->max_sectors = 255;
+	ali_warn_atapi_dma(adev);
+}
+
+/**
+ *	ali_check_atapi_dma	-	DMA check for most ALi controllers
+ *	@qc: command to check
+ *
+ *	Called to decide whether commands should be sent by DMA or PIO
+ */
+
+static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	if (!ali_atapi_dma) {
+		/* FIXME: pata_ali can't do ATAPI DMA reliably but the
+		 * IDE alim15x3 driver can.  I tried lots of things
+		 * but couldn't find what the actual difference was.
+		 * If you got an idea, please write it to
+		 * linux-ide@vger.kernel.org and cc htejun@gmail.com.
+		 *
+		 * Disable ATAPI DMA for now.
+		 */
+		return -EOPNOTSUPP;
+	}
+
+	/* If it's not a media command, it's not worth it */
+	if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
+{
+	u8 r;
+	int port_bit = 4 << link->ap->port_no;
+
+	/* If our bridge is an ALI 1533 then do the extra work */
+	if (ali_isa_bridge) {
+		/* Tristate and re-enable the bus signals */
+		pci_read_config_byte(ali_isa_bridge, 0x58, &r);
+		r &= ~port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+		r |= port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+	}
+	ata_sff_postreset(link, classes);
+}
+
+static struct scsi_host_template ali_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ *	Port operations for PIO only ALi
+ */
+
+static struct ata_port_operations ali_early_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= ali_set_piomode,
+	.sff_data_xfer  = ata_sff_data_xfer32,
+};
+
+static const struct ata_port_operations ali_dma_base_ops = {
+	.inherits	= &ata_bmdma32_port_ops,
+	.set_piomode	= ali_set_piomode,
+	.set_dmamode	= ali_set_dmamode,
+};
+
+/*
+ *	Port operations for DMA capable ALi without cable
+ *	detect
+ */
+static struct ata_port_operations ali_20_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.cable_detect	= ata_cable_40wire,
+	.mode_filter	= ali_20_filter,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.dev_config	= ali_lock_sectors,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c2_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.cable_detect	= ali_c2_cable_detect,
+	.dev_config	= ali_lock_sectors,
+	.postreset	= ali_c2_c3_postreset,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c4_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.cable_detect	= ali_c2_cable_detect,
+	.dev_config	= ali_lock_sectors,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect and LBA48
+ */
+static struct ata_port_operations ali_c5_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.dev_config	= ali_warn_atapi_dma,
+	.cable_detect	= ali_c2_cable_detect,
+};
+
+/**
+ *	ali_init_chipset	-	chip setup function
+ *	@pdev: PCI device of ATA controller
+ *
+ *	Perform the setup on the device that must be done both at boot
+ *	and at resume time.
+ */
+
+static void ali_init_chipset(struct pci_dev *pdev)
+{
+	u8 tmp;
+	struct pci_dev *north;
+
+	/*
+	 * The chipset revision selects the driver operations and
+	 * mode data.
+	 */
+
+	if (pdev->revision <= 0x20) {
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		tmp |= 0x03;
+		pci_write_config_byte(pdev, 0x53, tmp);
+	} else {
+		pci_read_config_byte(pdev, 0x4a, &tmp);
+		pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
+		pci_read_config_byte(pdev, 0x4B, &tmp);
+		if (pdev->revision < 0xC2)
+			/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+			/* Clear CD-ROM DMA write bit */
+			tmp &= 0x7F;
+		/* Cable and UDMA */
+		if (pdev->revision >= 0xc2)
+			tmp |= 0x01;
+		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
+		/*
+		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
+		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
+		 * via 0x54/55.
+		 */
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		if (pdev->revision >= 0xc7)
+			tmp |= 0x03;
+		else
+			tmp |= 0x01;	/* CD_ROM enable for DMA */
+		pci_write_config_byte(pdev, 0x53, tmp);
+	}
+	north = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
+					    PCI_DEVFN(0, 0));
+	if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
+		/* Configure the ALi bridge logic. For non-ALi bridges rely
+		   on the BIOS. Set the south bridge enable bit */
+		pci_read_config_byte(ali_isa_bridge, 0x79, &tmp);
+		if (pdev->revision == 0xC2)
+			pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04);
+		else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
+			pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02);
+	}
+	pci_dev_put(north);
+	ata_pci_bmdma_clear_simplex(pdev);
+}
+
+/**
+ *	ali_init_one		-	discovery callback
+ *	@pdev: PCI device ID
+ *	@id: PCI table info
+ *
+ *	An ALi IDE interface has been discovered. Figure out what revision
+ *	it is and perform configuration work before handing it to the ATA layer.
+ */
+
+static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_early = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &ali_early_port_ops
+	};
+	/* Revision 0x20 added DMA */
+	static const struct ata_port_info info_20 = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+							ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &ali_20_port_ops
+	};
+	/* Revision 0x20 with support logic added UDMA */
+	static const struct ata_port_info info_20_udma = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+							ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &ali_20_port_ops
+	};
+	/* Revision 0xC2 adds UDMA66 */
+	static const struct ata_port_info info_c2 = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+							ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &ali_c2_port_ops
+	};
+	/* Revision 0xC3 is UDMA66 for now */
+	static const struct ata_port_info info_c3 = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+							ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &ali_c2_port_ops
+	};
+	/* Revision 0xC4 is UDMA100 */
+	static const struct ata_port_info info_c4 = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
+							ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &ali_c4_port_ops
+	};
+	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
+	static const struct ata_port_info info_c5 = {
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &ali_c5_port_ops
+	};
+
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	u8 tmp;
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/*
+	 * The chipset revision selects the driver operations and
+	 * mode data.
+	 */
+
+	if (pdev->revision < 0x20) {
+		ppi[0] = &info_early;
+	} else if (pdev->revision < 0xC2) {
+		ppi[0] = &info_20;
+	} else if (pdev->revision == 0xC2) {
+		ppi[0] = &info_c2;
+	} else if (pdev->revision == 0xC3) {
+		ppi[0] = &info_c3;
+	} else if (pdev->revision == 0xC4) {
+		ppi[0] = &info_c4;
+	} else
+		ppi[0] = &info_c5;
+
+	ali_init_chipset(pdev);
+
+	if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
+		/* Are we paired with a UDMA capable chip? */
+		pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp);
+		if ((tmp & 0x1E) == 0x12)
+			ppi[0] = &info_20_udma;
+	}
+
+	if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
+		return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+	else
+		return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ali_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+	ali_init_chipset(pdev);
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id ali[] = {
+	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
+	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
+
+	{ },
+};
+
+static struct pci_driver ali_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ali,
+	.probe 		= ali_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ali_reinit_one,
+#endif
+};
+
+static int __init ali_init(void)
+{
+	int ret;
+	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+	ret = pci_register_driver(&ali_pci_driver);
+	if (ret < 0)
+		pci_dev_put(ali_isa_bridge);
+	return ret;
+}
+
+static void __exit ali_exit(void)
+{
+	pci_unregister_driver(&ali_pci_driver);
+	pci_dev_put(ali_isa_bridge);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ALi PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ali);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ali_init);
+module_exit(ali_exit);
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
new file mode 100644
index 0000000..8706533
--- /dev/null
+++ b/drivers/ata/pata_amd.c
@@ -0,0 +1,641 @@
+/*
+ * pata_amd.c 	- AMD PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *
+ *  Based on pata-sil680. Errata information is taken from data sheets
+ *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
+ *  claimed by sata-nv.c.
+ *
+ *  TODO:
+ *	Variable system clock when/if it makes sense
+ *	Power management on ports
+ *
+ *
+ *  Documentation publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_amd"
+#define DRV_VERSION "0.4.1"
+
+/**
+ *	timing_setup		-	shared timing computation and load
+ *	@ap: ATA port being set up
+ *	@adev: drive being configured
+ *	@offset: port offset
+ *	@speed: target speed
+ *	@clock: clock multiplier (number of times 33MHz for this part)
+ *
+ *	Perform the actual timing set up for Nvidia or AMD PATA devices.
+ *	The actual devices vary so they all call into this helper function
+ *	providing the clock multiplier and offset (because AMD and Nvidia put
+ *	the ports at different locations).
+ */
+
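+/*
+ * Register layout implied by the code below (sketch): with
+ * dn = port_no * 2 + devno, the drive timing byte sits at
+ * offset + 0x08 + (3 - dn) and the UDMA byte at offset + 0x10 + (3 - dn),
+ * so the primary master (dn = 0) uses offset + 0x0B and offset + 0x13.
+ */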
+static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
+{
+	static const unsigned char amd_cyc2udma[] = {
+		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *peer = ata_dev_pair(adev);
+	int dn = ap->port_no * 2 + adev->devno;
+	struct ata_timing at, apeer;
+	int T, UT;
+	const int amd_clock = 33333;	/* KHz. */
+	u8 t;
+
+	T = 1000000000 / amd_clock;
+	UT = T;
+	if (clock >= 2)
+		UT = T / 2;
+
+	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
+		dev_err(&pdev->dev, "unknown mode %d\n", speed);
+		return;
+	}
+
+	if (peer) {
+		/* This may be over conservative */
+		if (peer->dma_mode) {
+			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
+			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+		}
+		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
+		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+	}
+
+	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
+	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
+
+	/*
+	 *	Now do the setup work
+	 */
+
+	/* Configure the address set up timing */
+	pci_read_config_byte(pdev, offset + 0x0C, &t);
+	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
+	pci_write_config_byte(pdev, offset + 0x0C , t);
+
+	/* Configure the 8bit I/O timing */
+	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
+		((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));
+
+	/* Drive timing */
+	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
+		((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));
+
+	switch (clock) {
+		case 1:
+		t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
+		break;
+
+		case 2:
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
+		break;
+
+		case 3:
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
+		break;
+
+		case 4:
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
+		break;
+
+		default:
+			return;
+	}
+
+	/* UDMA timing */
+	if (at.udma)
+		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+}
+
+/**
+ *	amd_pre_reset		-	perform reset handling
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset sequence checking enable bits to see which ports are
+ *	active.
+ */
+
+static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits amd_enable_bits[] = {
+		{ 0x40, 1, 0x02, 0x02 },
+		{ 0x40, 1, 0x01, 0x01 }
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	amd_cable_detect	-	report cable type
+ *	@ap: port
+ *
+ *	AMD controller/BIOS setups record the cable type in word 0x42
+ */
+
+static int amd_cable_detect(struct ata_port *ap)
+{
+	static const u32 bitmask[2] = {0x03, 0x0C};
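+	/*
+	 * Config byte 0x42: bits 0-1 flag an 80-wire cable on the primary
+	 * port, bits 2-3 on the secondary.
+	 */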
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+
+	pci_read_config_byte(pdev, 0x42, &ata66);
+	if (ata66 & bitmask[ap->port_no])
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	amd_fifo_setup		-	set the PIO FIFO for ATA/ATAPI
+ *	@ap: ATA interface
+ *
+ *	Set the PCI fifo for this device according to the devices present
+ *	on the bus at this point in time. We need to turn the post write buffer
+ *	off for ATAPI devices as we may need to issue a word sized write to the
+ *	device as the final I/O.
+ */
+
+static void amd_fifo_setup(struct ata_port *ap)
+{
+	struct ata_device *adev;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const u8 fifobit[2] = { 0xC0, 0x30};
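+	/*
+	 * Register 0x41: bits 7-6 gate the primary port FIFO and bits 5-4
+	 * the secondary, matching amd_clear_fifo() which keeps only the
+	 * low nibble.
+	 */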
+	u8 fifo = fifobit[ap->port_no];
+	u8 r;
+
+	ata_for_each_dev(adev, &ap->link, ENABLED) {
+		if (adev->class == ATA_DEV_ATAPI)
+			fifo = 0;
+	}
+	if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */
+		fifo = 0;
+
+	/* On the later chips the read prefetch bits become no-op bits */
+	pci_read_config_byte(pdev, 0x41, &r);
+	r &= ~fifobit[ap->port_no];
+	r |= fifo;
+	pci_write_config_byte(pdev, 0x41, r);
+}
+
+/**
+ *	amd33_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the AMD registers for PIO mode.
+ */
+
+static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	amd_fifo_setup(ap);
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
+}
+
+static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	amd_fifo_setup(ap);
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
+}
+
+static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	amd_fifo_setup(ap);
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
+}
+
+static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	amd_fifo_setup(ap);
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
+}
+
+/**
+ *	amd33_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the AMD and Nvidia
+ *	chipset.
+ */
+
+static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
+}
+
+static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
+}
+
+static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
+}
+
+static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
+}
+
+/* Both host-side and drive-side detection results are worthless on NV
+ * PATAs.  Ignore them and just follow what BIOS configured.  Both the
+ * current configuration in PCI config reg and ACPI GTM result are
+ * cached during driver attach and are consulted to select transfer
+ * mode.
+ */
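+/*
+ * Byte layout of the cached config dword, as implied by the shifts
+ * below (sketch): bits 31-24 port0/drive0, 23-16 port0/drive1, 15-8
+ * port1/drive0, 7-0 port1/drive1; a drive byte with its top two bits
+ * set is UDMA enabled and its low three bits index udma_mask_map[].
+ */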
+static unsigned long nv_mode_filter(struct ata_device *dev,
+				    unsigned long xfer_mask)
+{
+	static const unsigned int udma_mask_map[] =
+		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
+		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
+	struct ata_port *ap = dev->link->ap;
+	char acpi_str[32] = "";
+	u32 saved_udma, udma;
+	const struct ata_acpi_gtm *gtm;
+	unsigned long bios_limit = 0, acpi_limit = 0, limit;
+
+	/* find out what BIOS configured */
+	udma = saved_udma = (unsigned long)ap->host->private_data;
+
+	if (ap->port_no == 0)
+		udma >>= 16;
+	if (dev->devno == 0)
+		udma >>= 8;
+
+	if ((udma & 0xc0) == 0xc0)
+		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
+
+	/* consult ACPI GTM too */
+	gtm = ata_acpi_init_gtm(ap);
+	if (gtm) {
+		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
+
+		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
+			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
+	}
+
+	/* be optimistic, EH can take care of things if something goes wrong */
+	limit = bios_limit | acpi_limit;
+
+	/* If PIO or DMA isn't configured at all, don't limit.  Let EH
+	 * handle it.
+	 */
+	if (!(limit & ATA_MASK_PIO))
+		limit |= ATA_MASK_PIO;
+	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
+		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
+	/* PIO4, MWDMA2, UDMA2 should always be supported regardless of
+	   cable detection result */
+	limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
+
+	ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
+			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
+			xfer_mask, limit, xfer_mask & limit, bios_limit,
+			saved_udma, acpi_limit, acpi_str);
+
+	return xfer_mask & limit;
+}
+
+/**
+ *	nv_pre_reset	-	reset handling
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Check the BIOS enable bits to see which ports are active before
+ *	running the standard SFF reset sequence.
+ */
+
+static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits nv_enable_bits[] = {
+		{ 0x50, 1, 0x02, 0x02 },
+		{ 0x50, 1, 0x01, 0x01 }
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	nv100_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the Nvidia registers for PIO mode.
+ */
+
+static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
+}
+
+static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
+}
+
+/**
+ *	nv100_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the AMD and Nvidia
+ *	chipset.
+ */
+
+static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
+}
+
+static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
+}
+
+static void nv_host_stop(struct ata_host *host)
+{
+	u32 udma = (unsigned long)host->private_data;
+
+	/* restore PCI config register 0x60 */
+	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
+}
+
+static struct scsi_host_template amd_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations amd_base_port_ops = {
+	.inherits	= &ata_bmdma32_port_ops,
+	.prereset	= amd_pre_reset,
+};
+
+static struct ata_port_operations amd33_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= amd33_set_piomode,
+	.set_dmamode	= amd33_set_dmamode,
+};
+
+static struct ata_port_operations amd66_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= amd66_set_piomode,
+	.set_dmamode	= amd66_set_dmamode,
+};
+
+static struct ata_port_operations amd100_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= amd100_set_piomode,
+	.set_dmamode	= amd100_set_dmamode,
+};
+
+static struct ata_port_operations amd133_port_ops = {
+	.inherits	= &amd_base_port_ops,
+	.cable_detect	= amd_cable_detect,
+	.set_piomode	= amd133_set_piomode,
+	.set_dmamode	= amd133_set_dmamode,
+};
+
+static const struct ata_port_operations nv_base_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_ignore,
+	.mode_filter	= nv_mode_filter,
+	.prereset	= nv_pre_reset,
+	.host_stop	= nv_host_stop,
+};
+
+static struct ata_port_operations nv100_port_ops = {
+	.inherits	= &nv_base_port_ops,
+	.set_piomode	= nv100_set_piomode,
+	.set_dmamode	= nv100_set_dmamode,
+};
+
+static struct ata_port_operations nv133_port_ops = {
+	.inherits	= &nv_base_port_ops,
+	.set_piomode	= nv133_set_piomode,
+	.set_dmamode	= nv133_set_dmamode,
+};
+
+static void amd_clear_fifo(struct pci_dev *pdev)
+{
+	u8 fifo;
+	/* Disable the FIFO, the FIFO logic will re-enable it as
+	   appropriate */
+	pci_read_config_byte(pdev, 0x41, &fifo);
+	fifo &= 0x0F;
+	pci_write_config_byte(pdev, 0x41, fifo);
+}
+
+static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[10] = {
+		{	/* 0: AMD 7401 - no swdma */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &amd33_port_ops
+		},
+		{	/* 1: Early AMD7409 - no swdma */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &amd66_port_ops
+		},
+		{	/* 2: AMD 7409 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &amd66_port_ops
+		},
+		{	/* 3: AMD 7411 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &amd100_port_ops
+		},
+		{	/* 4: AMD 7441 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &amd100_port_ops
+		},
+		{	/* 5: AMD 8111 - no swdma */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA6,
+			.port_ops = &amd133_port_ops
+		},
+		{	/* 6: AMD 8111 UDMA 100 (Serenade) - no swdma */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &amd133_port_ops
+		},
+		{	/* 7: Nvidia Nforce */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &nv100_port_ops
+		},
+		{	/* 8: Nvidia Nforce2 and later - no swdma */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA6,
+			.port_ops = &nv133_port_ops
+		},
+		{	/* 9: AMD CS5536 (Geode companion) */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &amd100_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	int type = id->driver_data;
+	void *hpriv = NULL;
+	u8 fifo;
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	pci_read_config_byte(pdev, 0x41, &fifo);
+
+	/* Check for AMD7409 without swdma errata and if found adjust type */
+	if (type == 1 && pdev->revision > 0x7)
+		type = 2;
+
+	/* Serenade ? */
+	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
+			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
+		type = 6;	/* UDMA 100 only */
+
+	/*
+	 * Okay, type is determined now.  Apply type-specific workarounds.
+	 */
+	ppi[0] = &info[type];
+
+	if (type < 3)
+		ata_pci_bmdma_clear_simplex(pdev);	/* early chips set the simplex flag; clear it so both channels can DMA */
+	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+		amd_clear_fifo(pdev);
+	/* Cable detection on Nvidia chips doesn't work too well,
+	 * cache BIOS programmed UDMA mode.
+	 */
+	if (type == 7 || type == 8) {
+		u32 udma;
+
+		pci_read_config_dword(pdev, 0x60, &udma);
+		hpriv = (void *)(unsigned long)udma;
+	}
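+	/*
+	 * The cached dword becomes host->private_data via the hpriv
+	 * argument below; nv_mode_filter() decodes it per drive and
+	 * nv_host_stop() writes it back to register 0x60 on teardown.
+	 */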
+
+	/* And fire it up */
+	return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int amd_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
+		amd_clear_fifo(pdev);
+		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
+		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
+			ata_pci_bmdma_clear_simplex(pdev);
+	}
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id amd[] = {
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE),	9 },
+
+	{ },
+};
+
+static struct pci_driver amd_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= amd,
+	.probe 		= amd_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= amd_reinit_one,
+#endif
+};
+
+module_pci_driver(amd_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, amd);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
new file mode 100644
index 0000000..ebecab8
--- /dev/null
+++ b/drivers/ata/pata_arasan_cf.c
@@ -0,0 +1,969 @@
+/*
+ * drivers/ata/pata_arasan_cf.c
+ *
+ * Arasan Compact Flash host controller source file
+ *
+ * Copyright (C) 2011 ST Microelectronics
+ * Viresh Kumar <vireshk@kernel.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * The Arasan CompactFlash Device Controller IP core has three basic modes of
+ * operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card
+ * ATA using true IDE modes. This driver supports only True IDE mode currently.
+ *
+ * Arasan CF Controller shares global irq register with Arasan XD Controller.
+ *
+ * Tested on arch/arm/mach-spear13xx
+ */
+
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pata_arasan_cf_data.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define DRIVER_NAME	"arasan_cf"
+#define TIMEOUT		msecs_to_jiffies(3000)
+
+/* Registers */
+/* CompactFlash Interface Status */
+#define CFI_STS			0x000
+	#define STS_CHG				(1)
+	#define BIN_AUDIO_OUT			(1 << 1)
+	#define CARD_DETECT1			(1 << 2)
+	#define CARD_DETECT2			(1 << 3)
+	#define INP_ACK				(1 << 4)
+	#define CARD_READY			(1 << 5)
+	#define IO_READY			(1 << 6)
+	#define B16_IO_PORT_SEL			(1 << 7)
+/* IRQ */
+#define IRQ_STS			0x004
+/* Interrupt Enable */
+#define IRQ_EN			0x008
+	#define CARD_DETECT_IRQ			(1)
+	#define STATUS_CHNG_IRQ			(1 << 1)
+	#define MEM_MODE_IRQ			(1 << 2)
+	#define IO_MODE_IRQ			(1 << 3)
+	#define TRUE_IDE_MODE_IRQ		(1 << 8)
+	#define PIO_XFER_ERR_IRQ		(1 << 9)
+	#define BUF_AVAIL_IRQ			(1 << 10)
+	#define XFER_DONE_IRQ			(1 << 11)
+	#define IGNORED_IRQS	(STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
+					TRUE_IDE_MODE_IRQ)
+	#define TRUE_IDE_IRQS	(CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
+					BUF_AVAIL_IRQ | XFER_DONE_IRQ)
+/* Operation Mode */
+#define OP_MODE			0x00C
+	#define CARD_MODE_MASK			(0x3)
+	#define MEM_MODE			(0x0)
+	#define IO_MODE				(0x1)
+	#define TRUE_IDE_MODE			(0x2)
+
+	#define CARD_TYPE_MASK			(1 << 2)
+	#define CF_CARD				(0)
+	#define CF_PLUS_CARD			(1 << 2)
+
+	#define CARD_RESET			(1 << 3)
+	#define CFHOST_ENB			(1 << 4)
+	#define OUTPUTS_TRISTATE		(1 << 5)
+	#define ULTRA_DMA_ENB			(1 << 8)
+	#define MULTI_WORD_DMA_ENB		(1 << 9)
+	#define DRQ_BLOCK_SIZE_MASK		(0x3 << 11)
+	#define DRQ_BLOCK_SIZE_512		(0)
+	#define DRQ_BLOCK_SIZE_1024		(1 << 11)
+	#define DRQ_BLOCK_SIZE_2048		(2 << 11)
+	#define DRQ_BLOCK_SIZE_4096		(3 << 11)
+/* CF Interface Clock Configuration */
+#define CLK_CFG			0x010
+	#define CF_IF_CLK_MASK			(0XF)
+/* CF Timing Mode Configuration */
+#define TM_CFG			0x014
+	#define MEM_MODE_TIMING_MASK		(0x3)
+	#define MEM_MODE_TIMING_250NS		(0x0)
+	#define MEM_MODE_TIMING_120NS		(0x1)
+	#define MEM_MODE_TIMING_100NS		(0x2)
+	#define MEM_MODE_TIMING_80NS		(0x3)
+
+	#define IO_MODE_TIMING_MASK		(0x3 << 2)
+	#define IO_MODE_TIMING_250NS		(0x0 << 2)
+	#define IO_MODE_TIMING_120NS		(0x1 << 2)
+	#define IO_MODE_TIMING_100NS		(0x2 << 2)
+	#define IO_MODE_TIMING_80NS		(0x3 << 2)
+
+	#define TRUEIDE_PIO_TIMING_MASK		(0x7 << 4)
+	#define TRUEIDE_PIO_TIMING_SHIFT	4
+
+	#define TRUEIDE_MWORD_DMA_TIMING_MASK	(0x7 << 7)
+	#define TRUEIDE_MWORD_DMA_TIMING_SHIFT	7
+
+	#define ULTRA_DMA_TIMING_MASK		(0x7 << 10)
+	#define ULTRA_DMA_TIMING_SHIFT		10
+/* CF Transfer Address */
+#define XFER_ADDR		0x018
+	#define XFER_ADDR_MASK			(0x7FF)
+	#define MAX_XFER_COUNT			0x20000u
+/* Transfer Control */
+#define XFER_CTR		0x01C
+	#define XFER_COUNT_MASK			(0x3FFFF)
+	#define ADDR_INC_DISABLE		(1 << 24)
+	#define XFER_WIDTH_MASK			(1 << 25)
+	#define XFER_WIDTH_8B			(0)
+	#define XFER_WIDTH_16B			(1 << 25)
+
+	#define MEM_TYPE_MASK			(1 << 26)
+	#define MEM_TYPE_COMMON			(0)
+	#define MEM_TYPE_ATTRIBUTE		(1 << 26)
+
+	#define MEM_IO_XFER_MASK		(1 << 27)
+	#define MEM_XFER			(0)
+	#define IO_XFER				(1 << 27)
+
+	#define DMA_XFER_MODE			(1 << 28)
+
+	#define AHB_BUS_NORMAL_PIO_OPRTN	(~(1 << 29))
+	#define XFER_DIR_MASK			(1 << 30)
+	#define XFER_READ			(0)
+	#define XFER_WRITE			(1 << 30)
+
+	#define XFER_START			(1 << 31)
+/* Write Data Port */
+#define WRITE_PORT		0x024
+/* Read Data Port */
+#define READ_PORT		0x028
+/* ATA Data Port */
+#define ATA_DATA_PORT		0x030
+	#define ATA_DATA_PORT_MASK		(0xFFFF)
+/* ATA Error/Features */
+#define ATA_ERR_FTR		0x034
+/* ATA Sector Count */
+#define ATA_SC			0x038
+/* ATA Sector Number */
+#define ATA_SN			0x03C
+/* ATA Cylinder Low */
+#define ATA_CL			0x040
+/* ATA Cylinder High */
+#define ATA_CH			0x044
+/* ATA Select Card/Head */
+#define ATA_SH			0x048
+/* ATA Status-Command */
+#define ATA_STS_CMD		0x04C
+/* ATA Alternate Status/Device Control */
+#define ATA_ASTS_DCTR		0x050
+/* Extended Write Data Port 0x200-0x3FC */
+#define EXT_WRITE_PORT		0x200
+/* Extended Read Data Port 0x400-0x5FC */
+#define EXT_READ_PORT		0x400
+	#define FIFO_SIZE	0x200u
+/* Global Interrupt Status */
+#define GIRQ_STS		0x800
+/* Global Interrupt Status enable */
+#define GIRQ_STS_EN		0x804
+/* Global Interrupt Signal enable */
+#define GIRQ_SGN_EN		0x808
+	#define GIRQ_CF		(1)
+	#define GIRQ_XD		(1 << 1)
+
+/* Compact Flash Controller Dev Structure */
+struct arasan_cf_dev {
+	/* pointer to ata_host structure */
+	struct ata_host *host;
+	/* clk structure */
+	struct clk *clk;
+
+	/* physical base address of controller */
+	dma_addr_t pbase;
+	/* virtual base address of controller */
+	void __iomem *vbase;
+	/* irq number */
+	int irq;
+
+	/* status to be updated to framework regarding DMA transfer */
+	u8 dma_status;
+	/* whether a card is currently present */
+	u8 card_present;
+
+	/* dma specific */
+	/* Completion for transfer complete interrupt from controller */
+	struct completion cf_completion;
+	/* Completion for DMA transfer complete. */
+	struct completion dma_completion;
+	/* Dma channel allocated */
+	struct dma_chan *dma_chan;
+	/* Mask for DMA transfers */
+	dma_cap_mask_t mask;
+	/* DMA transfer work */
+	struct work_struct work;
+	/* DMA delayed finish work */
+	struct delayed_work dwork;
+	/* qc to be transferred using DMA */
+	struct ata_queued_cmd *qc;
+};
+
+static struct scsi_host_template arasan_cf_sht = {
+	ATA_BASE_SHT(DRIVER_NAME),
+	.sg_tablesize = SG_NONE,
+	.dma_boundary = 0xFFFFFFFFUL,
+};
+
+static void cf_dumpregs(struct arasan_cf_dev *acdev)
+{
+	struct device *dev = acdev->host->dev;
+
+	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
+	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
+	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
+	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
+	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
+	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
+	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
+	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
+	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
+	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
+	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
+	dev_dbg(dev, ": =====================================");
+}
+
+/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
+static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
+{
+	/* enable is 0 or 1, so only bit 0 (GIRQ_CF) is ever set; the XD
+	 * controller's global bit (GIRQ_XD) stays disabled here */
+	writel(enable, acdev->vbase + GIRQ_STS_EN);
+	writel(enable, acdev->vbase + GIRQ_SGN_EN);
+}
+
+/* Enable/Disable CF interrupts */
+static inline void
+cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
+{
+	u32 val = readl(acdev->vbase + IRQ_EN);
+	/* clear & enable/disable irqs */
+	if (enable) {
+		writel(mask, acdev->vbase + IRQ_STS);
+		writel(val | mask, acdev->vbase + IRQ_EN);
+	} else
+		writel(val & ~mask, acdev->vbase + IRQ_EN);
+}
+
+static inline void cf_card_reset(struct arasan_cf_dev *acdev)
+{
+	u32 val = readl(acdev->vbase + OP_MODE);
+
+	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
+	udelay(200);
+	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
+}
+
+static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
+{
+	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
+			acdev->vbase + OP_MODE);
+	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
+			acdev->vbase + OP_MODE);
+}
+
+static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
+{
+	struct ata_port *ap = acdev->host->ports[0];
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u32 val = readl(acdev->vbase + CFI_STS);
+
+	/* Both CD1 & CD2 should be low if card inserted completely */
+	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
+		if (acdev->card_present)
+			return;
+		acdev->card_present = 1;
+		cf_card_reset(acdev);
+	} else {
+		if (!acdev->card_present)
+			return;
+		acdev->card_present = 0;
+	}
+
+	if (hotplugged) {
+		ata_ehi_hotplugged(ehi);
+		ata_port_freeze(ap);
+	}
+}
+
+static int cf_init(struct arasan_cf_dev *acdev)
+{
+	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
+	unsigned int if_clk;
+	unsigned long flags;
+	int ret = 0;
+
+	ret = clk_prepare_enable(acdev->clk);
+	if (ret) {
+		dev_dbg(acdev->host->dev, "clock enable failed");
+		return ret;
+	}
+
+	ret = clk_set_rate(acdev->clk, 166000000);
+	if (ret) {
+		dev_warn(acdev->host->dev, "clock set rate failed");
+		clk_disable_unprepare(acdev->clk);
+		return ret;
+	}
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	/* configure CF interface clock */
+	/* TODO: read from device tree */
+	if_clk = CF_IF_CLK_166M;
+	if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
+		if_clk = pdata->cf_if_clk;
+
+	writel(if_clk, acdev->vbase + CLK_CFG);
+
+	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
+	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
+	cf_ginterrupt_enable(acdev, 1);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	return ret;
+}
+
+static void cf_exit(struct arasan_cf_dev *acdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	cf_ginterrupt_enable(acdev, 0);
+	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
+	cf_card_reset(acdev);
+	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
+			acdev->vbase + OP_MODE);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+	clk_disable_unprepare(acdev->clk);
+}
+
+static void dma_callback(void *dev)
+{
+	struct arasan_cf_dev *acdev = dev;
+
+	complete(&acdev->dma_completion);
+}
+
+static inline void dma_complete(struct arasan_cf_dev *acdev)
+{
+	struct ata_queued_cmd *qc = acdev->qc;
+	unsigned long flags;
+
+	acdev->qc = NULL;
+	ata_sff_interrupt(acdev->irq, acdev->host);
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static inline int wait4buf(struct arasan_cf_dev *acdev)
+{
+	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
+		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+
+		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
+		return -ETIMEDOUT;
+	}
+
+	/* Check if PIO Error interrupt has occurred */
+	if (acdev->dma_status & ATA_DMA_ERR)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int
+dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
+{
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *chan = acdev->dma_chan;
+	dma_cookie_t cookie;
+	unsigned long flags = DMA_PREP_INTERRUPT;
+	int ret = 0;
+
+	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	if (!tx) {
+		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
+		return -EAGAIN;
+	}
+
+	tx->callback = dma_callback;
+	tx->callback_param = acdev;
+	cookie = tx->tx_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(acdev->host->dev, "dma_submit_error\n");
+		return ret;
+	}
+
+	chan->device->device_issue_pending(chan);
+
+	/* Wait for DMA to complete */
+	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
+		dmaengine_terminate_all(chan);
+		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return ret;
+}
+
+static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
+{
+	dma_addr_t dest = 0, src = 0;
+	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
+	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+	unsigned long flags;
+	int ret = 0;
+
+	sglen = sg_dma_len(sg);
+	if (write) {
+		src = sg_dma_address(sg);
+		dest = acdev->pbase + EXT_WRITE_PORT;
+	} else {
+		dest = sg_dma_address(sg);
+		src = acdev->pbase + EXT_READ_PORT;
+	}
+
+	/*
+	 * For each sg:
+	 * MAX_XFER_COUNT bytes are transferred before we get a transfer
+	 * complete interrupt. In between, a buffer available interrupt
+	 * fires after every FIFO_SIZE bytes; at that point we refill the
+	 * FIFO with at most FIFO_SIZE bytes.
+	 */
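+	/*
+	 * Worked example (illustrative): a 4096 byte sg entry programs
+	 * xfer_cnt = 4096 into XFER_CTR once; the inner loop then moves
+	 * eight 512 byte (FIFO_SIZE) chunks through the FIFO, gated by
+	 * the buf_avail/xfer_done interrupts described above.
+	 */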
+	while (sglen) {
+		xfer_cnt = min(sglen, MAX_XFER_COUNT);
+		spin_lock_irqsave(&acdev->host->lock, flags);
+		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
+			~XFER_COUNT_MASK;
+		writel(xfer_ctr | xfer_cnt | XFER_START,
+				acdev->vbase + XFER_CTR);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+		/* continue dma xfers until current sg is completed */
+		while (xfer_cnt) {
+			/* wait for read to complete */
+			if (!write) {
+				ret = wait4buf(acdev);
+				if (ret)
+					goto fail;
+			}
+
+			/* read/write FIFO in chunk of FIFO_SIZE */
+			dma_len = min(xfer_cnt, FIFO_SIZE);
+			ret = dma_xfer(acdev, src, dest, dma_len);
+			if (ret) {
+				dev_err(acdev->host->dev, "dma failed");
+				goto fail;
+			}
+
+			if (write)
+				src += dma_len;
+			else
+				dest += dma_len;
+
+			sglen -= dma_len;
+			xfer_cnt -= dma_len;
+
+			/* wait for write to complete */
+			if (write) {
+				ret = wait4buf(acdev);
+				if (ret)
+					goto fail;
+			}
+		}
+	}
+
+fail:
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+			acdev->vbase + XFER_CTR);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	return ret;
+}
+
+/*
+ * This routine uses the external DMA controller to read/write data to/from
+ * the FIFO of the CF controller. The CF controller supports two
+ * transfer-related interrupts:
+ * - buf_avail: generated as soon as a 512 byte buffer is available for
+ *	reading, or an empty buffer is available for writing.
+ * - xfer_done: generated once "xfer_size" bytes have been transferred
+ *	to/from the FIFO. xfer_size is programmed in the XFER_CTR register.
+ *
+ * Max buffer size = FIFO_SIZE = 512 bytes.
+ * Max xfer_size = MAX_XFER_COUNT = 0x20000 bytes (128 KiB).
+ */
+static void data_xfer(struct work_struct *work)
+{
+	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+			work);
+	struct ata_queued_cmd *qc = acdev->qc;
+	struct scatterlist *sg;
+	unsigned long flags;
+	u32 temp;
+	int ret = 0;
+
+	/* request dma channels */
+	/* dma_request_channel may sleep, so calling from process context */
+	acdev->dma_chan = dma_request_slave_channel(acdev->host->dev, "data");
+	if (!acdev->dma_chan) {
+		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+		goto chan_request_fail;
+	}
+
+	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
+		ret = sg_xfer(acdev, sg);
+		if (ret)
+			break;
+	}
+
+	dma_release_channel(acdev->dma_chan);
+
+	/* data xferred successfully */
+	if (!ret) {
+		u32 status;
+
+		spin_lock_irqsave(&acdev->host->lock, flags);
+		status = ioread8(qc->ap->ioaddr.altstatus_addr);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+		if (status & (ATA_BUSY | ATA_DRQ)) {
+			ata_sff_queue_delayed_work(&acdev->dwork, 1);
+			return;
+		}
+
+		goto sff_intr;
+	}
+
+	cf_dumpregs(acdev);
+
+chan_request_fail:
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	/* error when transferring data to/from memory */
+	qc->err_mask |= AC_ERR_HOST_BUS;
+	qc->ap->hsm_task_state = HSM_ST_ERR;
+
+	cf_ctrl_reset(acdev);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+sff_intr:
+	dma_complete(acdev);
+}
+
+static void delayed_finish(struct work_struct *work)
+{
+	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+			dwork.work);
+	struct ata_queued_cmd *qc = acdev->qc;
+	unsigned long flags;
+	u8 status;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	status = ioread8(qc->ap->ioaddr.altstatus_addr);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	if (status & (ATA_BUSY | ATA_DRQ))
+		ata_sff_queue_delayed_work(&acdev->dwork, 1);
+	else
+		dma_complete(acdev);
+}
+
+static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
+{
+	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
+	unsigned long flags;
+	u32 irqsts;
+
+	irqsts = readl(acdev->vbase + GIRQ_STS);
+	if (!(irqsts & GIRQ_CF))
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	irqsts = readl(acdev->vbase + IRQ_STS);
+	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
+	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */
+
+	/* handle only relevant interrupts */
+	irqsts &= ~IGNORED_IRQS;
+
+	if (irqsts & CARD_DETECT_IRQ) {
+		cf_card_detect(acdev, 1);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+		return IRQ_HANDLED;
+	}
+
+	if (irqsts & PIO_XFER_ERR_IRQ) {
+		acdev->dma_status = ATA_DMA_ERR;
+		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+				acdev->vbase + XFER_CTR);
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+		complete(&acdev->cf_completion);
+		dev_err(acdev->host->dev, "pio xfer err irq\n");
+		return IRQ_HANDLED;
+	}
+
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+	if (irqsts & BUF_AVAIL_IRQ) {
+		complete(&acdev->cf_completion);
+		return IRQ_HANDLED;
+	}
+
+	if (irqsts & XFER_DONE_IRQ) {
+		struct ata_queued_cmd *qc = acdev->qc;
+
+		/* Send Complete only for write */
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			complete(&acdev->cf_completion);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void arasan_cf_freeze(struct ata_port *ap)
+{
+	struct arasan_cf_dev *acdev = ap->host->private_data;
+
+	/* stop transfer and reset controller */
+	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+			acdev->vbase + XFER_CTR);
+	cf_ctrl_reset(acdev);
+	acdev->dma_status = ATA_DMA_ERR;
+
+	ata_sff_dma_pause(ap);
+	ata_sff_freeze(ap);
+}
+
+static void arasan_cf_error_handler(struct ata_port *ap)
+{
+	struct arasan_cf_dev *acdev = ap->host->private_data;
+
+	/*
+	 * DMA transfers using an external DMA controller may be scheduled.
+	 * Abort them before handling error. Refer data_xfer() for further
+	 * details.
+	 */
+	cancel_work_sync(&acdev->work);
+	cancel_delayed_work_sync(&acdev->dwork);
+	return ata_sff_error_handler(ap);
+}
+
+static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
+{
+	struct ata_queued_cmd *qc = acdev->qc;
+	struct ata_port *ap = qc->ap;
+	struct ata_taskfile *tf = &qc->tf;
+	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
+	u32 write = tf->flags & ATA_TFLAG_WRITE;
+
+	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
+	writel(xfer_ctr, acdev->vbase + XFER_CTR);
+
+	ap->ops->sff_exec_command(ap, tf);
+	ata_sff_queue_work(&acdev->work);
+}
+
+static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct arasan_cf_dev *acdev = ap->host->private_data;
+
+	/* defer PIO handling to sff_qc_issue */
+	if (!ata_is_dma(qc->tf.protocol))
+		return ata_sff_qc_issue(qc);
+
+	/* select the device */
+	ata_wait_idle(ap);
+	ata_sff_dev_select(ap, qc->dev->devno);
+	ata_wait_idle(ap);
+
+	/* start the command */
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);
+		acdev->dma_status = 0;
+		acdev->qc = qc;
+		arasan_cf_dma_start(acdev);
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+
+	default:
+		WARN_ON(1);
+		return AC_ERR_SYSTEM;
+	}
+
+	return 0;
+}
+
+static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct arasan_cf_dev *acdev = ap->host->private_data;
+	u8 pio = adev->pio_mode - XFER_PIO_0;
+	unsigned long flags;
+	u32 val;
+
+	/* Arasan ctrl supports Mode0 -> Mode6 */
+	if (pio > 6) {
+		dev_err(ap->dev, "Unknown PIO mode\n");
+		return;
+	}
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	val = readl(acdev->vbase + OP_MODE) &
+		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
+	writel(val, acdev->vbase + OP_MODE);
+	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
+	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
+	writel(val, acdev->vbase + TM_CFG);
+
+	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
+	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct arasan_cf_dev *acdev = ap->host->private_data;
+	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&acdev->host->lock, flags);
+	opmode = readl(acdev->vbase + OP_MODE) &
+		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
+	tmcfg = readl(acdev->vbase + TM_CFG);
+
+	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
+		opmode |= ULTRA_DMA_ENB;
+		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
+		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
+	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
+		opmode |= MULTI_WORD_DMA_ENB;
+		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
+		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
+			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
+	} else {
+		dev_err(ap->dev, "Unknown DMA mode\n");
+		spin_unlock_irqrestore(&acdev->host->lock, flags);
+		return;
+	}
+
+	writel(opmode, acdev->vbase + OP_MODE);
+	writel(tmcfg, acdev->vbase + TM_CFG);
+	/* this write leaves only the DMA mode bit set in XFER_CTR; the
+	 * direction, count and start bits are programmed per command by
+	 * arasan_cf_dma_start() and sg_xfer() */
+	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
+
+	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
+	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
+	spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static struct ata_port_operations arasan_cf_ops = {
+	.inherits = &ata_sff_port_ops,
+	.freeze = arasan_cf_freeze,
+	.error_handler = arasan_cf_error_handler,
+	.qc_issue = arasan_cf_qc_issue,
+	.set_piomode = arasan_cf_set_piomode,
+	.set_dmamode = arasan_cf_set_dmamode,
+};
+
+static int arasan_cf_probe(struct platform_device *pdev)
+{
+	struct arasan_cf_dev *acdev;
+	struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct resource *res;
+	u32 quirk;
+	irq_handler_t irq_handler = NULL;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+				DRIVER_NAME)) {
+		dev_warn(&pdev->dev, "Failed to get memory region resource\n");
+		return -ENOENT;
+	}
+
+	acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
+	if (!acdev)
+		return -ENOMEM;
+
+	if (pdata)
+		quirk = pdata->quirk;
+	else
+		quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
+
+	/* if irq is 0, support only PIO */
+	acdev->irq = platform_get_irq(pdev, 0);
+	if (acdev->irq)
+		irq_handler = arasan_cf_interrupt;
+	else
+		quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
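+	/* without an interrupt the completions signalled from
+	 * arasan_cf_interrupt() can never fire, so the DMA paths that
+	 * wait on them must be disabled */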
+
+	acdev->pbase = res->start;
+	acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+	if (!acdev->vbase) {
+		dev_warn(&pdev->dev, "ioremap fail\n");
+		return -ENOMEM;
+	}
+
+	acdev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(acdev->clk)) {
+		dev_warn(&pdev->dev, "Clock not found\n");
+		return PTR_ERR(acdev->clk);
+	}
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host) {
+		dev_warn(&pdev->dev, "alloc host fail\n");
+		return -ENOMEM;
+	}
+
+	ap = host->ports[0];
+	host->private_data = acdev;
+	acdev->host = host;
+	ap->ops = &arasan_cf_ops;
+	ap->pio_mask = ATA_PIO6;
+	ap->mwdma_mask = ATA_MWDMA4;
+	ap->udma_mask = ATA_UDMA6;
+
+	init_completion(&acdev->cf_completion);
+	init_completion(&acdev->dma_completion);
+	INIT_WORK(&acdev->work, data_xfer);
+	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
+	dma_cap_set(DMA_MEMCPY, acdev->mask);
+
+	/* Handle platform specific quirks */
+	if (quirk) {
+		if (quirk & CF_BROKEN_PIO) {
+			ap->ops->set_piomode = NULL;
+			ap->pio_mask = 0;
+		}
+		if (quirk & CF_BROKEN_MWDMA)
+			ap->mwdma_mask = 0;
+		if (quirk & CF_BROKEN_UDMA)
+			ap->udma_mask = 0;
+	}
+	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
+
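+	/* Wire the SFF taskfile accessors to the controller's True IDE
+	 * register block so the generic ata_sff_* helpers work unmodified */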
+	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
+	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
+	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
+	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
+	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
+	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
+	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
+	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
+	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
+	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
+	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
+	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
+	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
+
+	ata_port_desc(ap, "phy_addr %llx virt_addr %p",
+		      (unsigned long long) res->start, acdev->vbase);
+
+	ret = cf_init(acdev);
+	if (ret)
+		return ret;
+
+	cf_card_detect(acdev, 0);
+
+	ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
+				&arasan_cf_sht);
+	if (!ret)
+		return 0;
+
+	cf_exit(acdev);
+
+	return ret;
+}
+
+static int arasan_cf_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+	ata_host_detach(host);
+	cf_exit(acdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int arasan_cf_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+	if (acdev->dma_chan)
+		dmaengine_terminate_all(acdev->dma_chan);
+
+	cf_exit(acdev);
+	return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int arasan_cf_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+	cf_init(acdev);
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id arasan_cf_id_table[] = {
+	{ .compatible = "arasan,cf-spear1340" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
+#endif
+
+static struct platform_driver arasan_cf_driver = {
+	.probe		= arasan_cf_probe,
+	.remove		= arasan_cf_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.pm	= &arasan_cf_pm_ops,
+		.of_match_table = of_match_ptr(arasan_cf_id_table),
+	},
+};
+
+module_platform_driver(arasan_cf_driver);
+
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
+MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
new file mode 100644
index 0000000..6b33553
--- /dev/null
+++ b/drivers/ata/pata_artop.c
@@ -0,0 +1,459 @@
+/*
+ *    pata_artop.c - ARTOP ATA controller driver
+ *
+ *	(C) 2006 Red Hat
+ *	(C) 2007,2011 Bartlomiej Zolnierkiewicz
+ *
+ *    Based in part on drivers/ide/pci/aec62xx.c
+ *	Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
+ *	865/865R fixes for Macintosh card version from a patch to the old
+ *		driver by Thibaut VARENE <varenet@parisc-linux.org>
+ *	When setting the PCI latency we must set 0x80 or higher for burst
+ *		performance Alessandro Zummo <alessandro.zummo@towertech.it>
+ *
+ *	TODO
+ *	Investigate no_dsc on 850R
+ *	Clock detect
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_artop"
+#define DRV_VERSION	"0.4.6"
+
+/*
+ *	The ARTOP has 33 MHz and "over-clocked" timing tables. Until we
+ *	get PCI bus speed functionality we leave this as 0. It's a variable
+ *	for when we get the functionality and also for folks wanting to
+ *	test stuff.
+ */
+
+static int clock = 0;
+
+/**
+ *	artop62x0_pre_reset	-	probe begin
+ *	@link: link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Nothing complicated needed here.
+ */
+
+static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits artop_enable_bits[] = {
+		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
+		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	/* Odd numbered device ids are the units with enable bits. */
+	if ((pdev->device & 1) &&
+	    !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	artop6260_cable_detect	-	identify cable type
+ *	@ap: Port
+ *
+ *	Identify the cable type for the ARTOP interface in question
+ */
+
+static int artop6260_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+	pci_read_config_byte(pdev, 0x49, &tmp);
+	if (tmp & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	artop6210_load_piomode - Load a set of PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device
+ *	@pio: PIO mode
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	is used both to set PIO timings in PIO mode and also to set the
+ *	matching PIO clocking for UDMA, as well as the MWDMA timings.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	const u16 timing[2][5] = {
+		{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
+		{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
+	};
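+	/* rows are selected by the module-level "clock" variable (33 MHz
+	 * vs the over-clocked table), columns by PIO mode 0-4; each entry
+	 * is the active/recovery timing word for one drive */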
+	/* Load the PIO timing active/recovery bits */
+	pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
+}
+
+/**
+ *	artop6210_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space. For
+ *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ *	the event UDMA is used the later call to set_dmamode will set the
+ *	bits as required.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	u8 ultra;
+
+	artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+	pci_read_config_byte(pdev, 0x54, &ultra);
+	ultra &= ~(3 << (2 * dn));
+	pci_write_config_byte(pdev, 0x54, ultra);
+}
+
+/**
+ *	artop6260_load_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *	@pio: PIO mode
+ *
+ *	Set PIO mode for device, in host controller PCI config space. The
+ *	ARTOP6260 and relatives store the timing data differently.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	const u8 timing[2][5] = {
+		{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
+		{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
+	};
+	/* Load the PIO timing active/recovery bits */
+	pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
+}
+
+/**
+ *	artop6260_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space. For
+ *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ *	the event UDMA is used the later call to set_dmamode will set the
+ *	bits as required.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	u8 ultra;
+
+	artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+	ultra &= ~(7 << (4  * adev->devno));	/* One nibble per drive */
+	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+/**
+ *	artop6210_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set DMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio;
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	u8 ultra;
+
+	if (adev->dma_mode == XFER_MW_DMA_0)
+		pio = 1;
+	else
+		pio = 4;
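+	/* MWDMA0 needs the more relaxed PIO1 command timings; the faster
+	 * modes can reuse the PIO4 numbers (assumed rationale for the
+	 * selection above) */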
+
+	/* Load the PIO timing active/recovery bits */
+	artop6210_load_piomode(ap, adev, pio);
+
+	pci_read_config_byte(pdev, 0x54, &ultra);
+	ultra &= ~(3 << (2 * dn));
+
+	/* Add ultra DMA bits if in UDMA mode */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
+		if (mode == 0)
+			mode = 1;
+		ultra |= (mode << (2 * dn));
+	}
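+	/* e.g. UDMA2 with clock == 0 gives a field value of 3 in this
+	 * drive's two bits of register 0x54 (illustrative arithmetic) */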
+	pci_write_config_byte(pdev, 0x54, ultra);
+}
+
+/**
+ *	artop6260_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set DMA mode for device, in host controller PCI config space. The
+ *	ARTOP6260 and relatives store the timing data differently.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio;
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	u8 ultra;
+
+	if (adev->dma_mode == XFER_MW_DMA_0)
+		pio = 1;
+	else
+		pio = 4;
+
+	/* Load the PIO timing active/recovery bits */
+	artop6260_load_piomode(ap, adev, pio);
+
+	/* Add ultra DMA bits if in UDMA mode */
+	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+	ultra &= ~(7 << (4  * adev->devno));	/* One nibble per drive */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
+		if (mode == 0)
+			mode = 1;
+		ultra |= (mode << (4 * adev->devno));
+	}
+	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+/**
+ *	artop6210_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	Commands must be serialized across the whole host on this chip,
+ *	so a command is only issued when the other port is idle.
+ */
+
+static int artop6210_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return	ATA_DEFER_PORT;
+	return 0;
+}
+
+static struct scsi_host_template artop_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations artop6210_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= artop6210_set_piomode,
+	.set_dmamode		= artop6210_set_dmamode,
+	.prereset		= artop62x0_pre_reset,
+	.qc_defer		= artop6210_qc_defer,
+};
+
+static struct ata_port_operations artop6260_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= artop6260_cable_detect,
+	.set_piomode		= artop6260_set_piomode,
+	.set_dmamode		= artop6260_set_dmamode,
+	.prereset		= artop62x0_pre_reset,
+};
+
+static void atp8xx_fixup(struct pci_dev *pdev)
+{
+	if (pdev->device == 0x0005)
+		/* BIOS may have left us in UDMA, clear it before libata probe */
+		pci_write_config_byte(pdev, 0x54, 0);
+	else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
+		u8 reg;
+
+		/* Mac systems come up with some registers not set as we
+		   will need them */
+
+		/* Clear reset & test bits */
+		pci_read_config_byte(pdev, 0x49, &reg);
+		pci_write_config_byte(pdev, 0x49, reg & ~0x30);
+
+		/* PCI latency must be > 0x80 for burst mode, tweak it
+		 * if required.
+		 */
+		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
+		if (reg <= 0x80)
+			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
+
+		/* Enable IRQ output and burst mode */
+		pci_read_config_byte(pdev, 0x4a, &reg);
+		pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+	}
+}
+
+/**
+ *	artop_init_one - Register ARTOP ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@id: Entry in artop_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_6210 = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA2,
+		.port_ops	= &artop6210_ops,
+	};
+	static const struct ata_port_info info_626x = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA4,
+		.port_ops	= &artop6260_ops,
+	};
+	static const struct ata_port_info info_628x = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA5,
+		.port_ops	= &artop6260_ops,
+	};
+	static const struct ata_port_info info_628x_fast = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA6,
+		.port_ops	= &artop6260_ops,
+	};
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (id->driver_data == 0)	/* 6210 variant */
+		ppi[0] = &info_6210;
+	else if (id->driver_data == 1)	/* 6260 */
+		ppi[0] = &info_626x;
+	else if (id->driver_data == 2)	{ /* 6280 or 6280 + fast */
+		unsigned long io = pci_resource_start(pdev, 4);
+
+		ppi[0] = &info_628x;
+		if (inb(io) & 0x10)
+			ppi[0] = &info_628x_fast;
+	}
+
+	BUG_ON(ppi[0] == NULL);
+
+	atp8xx_fixup(pdev);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
+}
+
+static const struct pci_device_id artop_pci_tbl[] = {
+	{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
+	{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
+	{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
+	{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
+	{ PCI_VDEVICE(ARTOP, 0x0009), 2 },
+
+	{ }	/* terminate list */
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int atp8xx_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	atp8xx_fixup(pdev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static struct pci_driver artop_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= artop_pci_tbl,
+	.probe			= artop_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= atp8xx_reinit_one,
+#endif
+};
+
+module_pci_driver(artop_pci_driver);
+
+MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
new file mode 100644
index 0000000..4d49fd3
--- /dev/null
+++ b/drivers/ata/pata_atiixp.c
@@ -0,0 +1,317 @@
+/*
+ * pata_atiixp.c 	- ATI PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
+ *
+ * Based on
+ *
+ *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
+ *
+ *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
+ *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_atiixp"
+#define DRV_VERSION "0.4.6"
+
+enum {
+	ATIIXP_IDE_PIO_TIMING	= 0x40,
+	ATIIXP_IDE_MWDMA_TIMING	= 0x44,
+	ATIIXP_IDE_PIO_CONTROL	= 0x48,
+	ATIIXP_IDE_PIO_MODE	= 0x4a,
+	ATIIXP_IDE_UDMA_CONTROL	= 0x54,
+	ATIIXP_IDE_UDMA_MODE 	= 0x56
+};
+
+static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
+	{
+		/* Board has onboard PATA<->SATA converters */
+		.ident = "MSI E350DM-E33",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
+		},
+	},
+	{ }
+};
+
+static int atiixp_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 udma;
+
+	if (dmi_check_system(attixp_cable_override_dmi_table))
+		return ATA_CBL_PATA40_SHORT;
+
+	/* Hack from drivers/ide/pci. Really we want to know how to do
+	   raw detection rather than follow the BIOS's mode guess. */
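+	/* Each drive has a 4-bit field in the UDMA mode register; a value
+	 * of 0x4 or above means the BIOS programmed a fast UDMA mode,
+	 * which implies it found an 80-wire cable. */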
+	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
+	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
+		return  ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+static DEFINE_SPINLOCK(atiixp_lock);
+
+/**
+ *	atiixp_prereset	-	perform reset handling
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset sequence checking enable bits to see which ports are
+ *	active.
+ */
+
+static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits atiixp_enable_bits[] = {
+		{ 0x48, 1, 0x01, 0x00 },
+		{ 0x48, 1, 0x08, 0x00 }
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	atiixp_set_pio_timing	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called by both the pio and dma setup functions to set the controller
+ *	timings for PIO transfers. We must load both the mode number and
+ *	timing values into the controller.
+ */
+
+static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = 2 * ap->port_no + adev->devno;
+	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+	u32 pio_timing_data;
+	u16 pio_mode_data;
+
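+	/* Note the devno ^ 1 in timing_shift: device 0's timing byte sits
+	 * in the upper byte of each port's 16-bit half of the timing
+	 * register (per the shift arithmetic above) */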
+	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
+	pio_mode_data &= ~(0x7 << (4 * dn));
+	pio_mode_data |= pio << (4 * dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
+
+	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
+	pio_timing_data &= ~(0xFF << timing_shift);
+	pio_timing_data |= (pio_timings[pio] << timing_shift);
+	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
+}
+
+/**
+ *	atiixp_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. We use a shared helper for this
+ *	as the DMA setup must also adjust the PIO timing information.
+ */
+
+static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&atiixp_lock, flags);
+	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
+	spin_unlock_irqrestore(&atiixp_lock, flags);
+}
+
+/**
+ *	atiixp_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup. We use timing tables for most
+ *	modes but must tune an appropriate PIO mode to match.
+ */
+
+static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dma = adev->dma_mode;
+	int dn = 2 * ap->port_no + adev->devno;
+	int wanted_pio;
+	unsigned long flags;
+
+	spin_lock_irqsave(&atiixp_lock, flags);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u16 udma_mode_data;
+
+		dma -= XFER_UDMA_0;
+
+		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
+		udma_mode_data &= ~(0x7 << (4 * dn));
+		udma_mode_data |= dma << (4 * dn);
+		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
+	} else {
+		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+		u32 mwdma_timing_data;
+
+		dma -= XFER_MW_DMA_0;
+
+		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
+				      &mwdma_timing_data);
+		mwdma_timing_data &= ~(0xFF << timing_shift);
+		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
+		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
+				       mwdma_timing_data);
+	}
+	/*
+	 *	We must now look at the PIO mode situation. We may need to
+	 *	adjust the PIO mode to keep the timings acceptable
+	 */
+	if (adev->dma_mode >= XFER_MW_DMA_2)
+		wanted_pio = 4;
+	else if (adev->dma_mode == XFER_MW_DMA_1)
+		wanted_pio = 3;
+	else if (adev->dma_mode == XFER_MW_DMA_0)
+		wanted_pio = 0;
+	else BUG();
+
+	if (adev->pio_mode != wanted_pio)
+		atiixp_set_pio_timing(ap, adev, wanted_pio);
+	spin_unlock_irqrestore(&atiixp_lock, flags);
+}
+
+/**
+ *	atiixp_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	When DMA begins we need to ensure that the UDMA control
+ *	register for the channel is correctly set.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
+ */
+
+static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = (2 * ap->port_no) + adev->devno;
+	u16 tmp16;
+
+	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+	if (ata_using_udma(adev))
+		tmp16 |= (1 << dn);
+	else
+		tmp16 &= ~(1 << dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	atiixp_bmdma_stop	-	DMA stop callback
+ *	@qc: Command in progress
+ *
+ *	DMA has completed. Clear the UDMA flag as the next operations will
+ *	be PIO ones not UDMA data transfer.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
+ */
+
+static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = (2 * ap->port_no) + qc->dev->devno;
+	u16 tmp16;
+
+	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+	tmp16 &= ~(1 << dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+	ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template atiixp_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations atiixp_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.qc_prep 	= ata_bmdma_dumb_qc_prep,
+	.bmdma_start 	= atiixp_bmdma_start,
+	.bmdma_stop	= atiixp_bmdma_stop,
+
+	.prereset	= atiixp_prereset,
+	.cable_detect	= atiixp_cable_detect,
+	.set_piomode	= atiixp_set_piomode,
+	.set_dmamode	= atiixp_set_dmamode,
+};
+
+static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA12_ONLY,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &atiixp_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &info };
+
+	/* SB600 doesn't have secondary port wired */
+	if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
+		ppi[1] = &ata_dummy_port_info;
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
+				      ATA_HOST_PARALLEL_SCAN);
+}
+
+static const struct pci_device_id atiixp[] = {
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
+
+	{ },
+};
+
+static struct pci_driver atiixp_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= atiixp,
+	.probe 		= atiixp_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.resume		= ata_pci_device_resume,
+	.suspend	= ata_pci_device_suspend,
+#endif
+};
+
+module_pci_driver(atiixp_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, atiixp);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
new file mode 100644
index 0000000..3729e24
--- /dev/null
+++ b/drivers/ata/pata_atp867x.c
@@ -0,0 +1,575 @@
+/*
+ * pata_atp867x.c - ARTOP 867X 64bit 4-channel UDMA133 ATA controller driver
+ *
+ *	(C) 2009 Google Inc. John(Jung-Ik) Lee <jilee@google.com>
+ *
+ * Per Atp867 data sheet rev 1.2, Acard.
+ * Based in part on early ide code from
+ *	2003-2004 by Eric Uhrhane, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * TODO:
+ *   1. RAID features [comparison, XOR, striping, mirroring, etc.]
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define	DRV_NAME	"pata_atp867x"
+#define	DRV_VERSION	"0.7.5"
+
+/*
+ * IO Registers
+ * Note that all runtime hot priv ports are cached in ap private_data
+ */
+
+enum {
+	ATP867X_IO_CHANNEL_OFFSET	= 0x10,
+
+	/*
+	 * IO Register Bitfields
+	 */
+
+	ATP867X_IO_PIOSPD_ACTIVE_SHIFT	= 4,
+	ATP867X_IO_PIOSPD_RECOVER_SHIFT	= 0,
+
+	ATP867X_IO_DMAMODE_MSTR_SHIFT	= 0,
+	ATP867X_IO_DMAMODE_MSTR_MASK	= 0x07,
+	ATP867X_IO_DMAMODE_SLAVE_SHIFT	= 4,
+	ATP867X_IO_DMAMODE_SLAVE_MASK	= 0x70,
+
+	ATP867X_IO_DMAMODE_UDMA_6	= 0x07,
+	ATP867X_IO_DMAMODE_UDMA_5	= 0x06,
+	ATP867X_IO_DMAMODE_UDMA_4	= 0x05,
+	ATP867X_IO_DMAMODE_UDMA_3	= 0x04,
+	ATP867X_IO_DMAMODE_UDMA_2	= 0x03,
+	ATP867X_IO_DMAMODE_UDMA_1	= 0x02,
+	ATP867X_IO_DMAMODE_UDMA_0	= 0x01,
+	ATP867X_IO_DMAMODE_DISABLE	= 0x00,
+
+	ATP867X_IO_SYS_INFO_66MHZ	= 0x04,
+	ATP867X_IO_SYS_INFO_SLOW_UDMA5	= 0x02,
+	ATP867X_IO_SYS_MASK_RESERVED	= (~0xf1),
+
+	ATP867X_IO_PORTSPD_VAL		= 0x1143,
+	ATP867X_PREREAD_VAL		= 0x0200,
+
+	ATP867X_NUM_PORTS		= 4,
+	ATP867X_BAR_IOBASE		= 0,
+	ATP867X_BAR_ROMBASE		= 6,
+};
+
+#define ATP867X_IOBASE(ap)		((ap)->host->iomap[0])
+#define ATP867X_SYS_INFO(ap)		(0x3F + ATP867X_IOBASE(ap))
+
+#define ATP867X_IO_PORTBASE(ap, port)	(0x00 + ATP867X_IOBASE(ap) + \
+					(port) * ATP867X_IO_CHANNEL_OFFSET)
+#define ATP867X_IO_DMABASE(ap, port)	(0x40 + \
+					ATP867X_IO_PORTBASE((ap), (port)))
+
+#define ATP867X_IO_STATUS(ap, port)	(0x07 + \
+					ATP867X_IO_PORTBASE((ap), (port)))
+#define ATP867X_IO_ALTSTATUS(ap, port)	(0x0E + \
+					ATP867X_IO_PORTBASE((ap), (port)))
+
+/*
+ * hot priv ports
+ */
+#define ATP867X_IO_MSTRPIOSPD(ap, port)	(0x08 + \
+					ATP867X_IO_DMABASE((ap), (port)))
+#define ATP867X_IO_SLAVPIOSPD(ap, port)	(0x09 + \
+					ATP867X_IO_DMABASE((ap), (port)))
+#define ATP867X_IO_8BPIOSPD(ap, port)	(0x0A + \
+					ATP867X_IO_DMABASE((ap), (port)))
+#define ATP867X_IO_DMAMODE(ap, port)	(0x0B + \
+					ATP867X_IO_DMABASE((ap), (port)))
+
+#define ATP867X_IO_PORTSPD(ap, port)	(0x4A + \
+					ATP867X_IO_PORTBASE((ap), (port)))
+#define ATP867X_IO_PREREAD(ap, port)	(0x4C + \
+					ATP867X_IO_PORTBASE((ap), (port)))
+
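+/*
+ * Example address computed from the macros above, for port 2:
+ *	ATP867X_IO_PORTBASE = iobase + 2 * 0x10          = iobase + 0x20
+ *	ATP867X_IO_DMAMODE  = iobase + 0x20 + 0x40 + 0xB = iobase + 0x6B
+ */
+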
+struct atp867x_priv {
+	void __iomem *dma_mode;
+	void __iomem *mstr_piospd;
+	void __iomem *slave_piospd;
+	void __iomem *eightb_piospd;
+	int		pci66mhz;
+};
+
+static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	struct atp867x_priv *dp = ap->private_data;
+	u8 speed = adev->dma_mode;
+	u8 b;
+	u8 mode = speed - XFER_UDMA_0 + 1;
+
+	/*
+	 * Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed
+	 * on 66MHz bus
+	 *   rev-A: UDMA_1~4 (5, 6 no change)
+	 *   rev-B: all UDMA modes
+	 *   UDMA_0 is left as-is, since decrementing it would disable UDMA
+	 */
+	if (dp->pci66mhz && mode > ATP867X_IO_DMAMODE_UDMA_0  &&
+	   (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B ||
+	    mode < ATP867X_IO_DMAMODE_UDMA_5))
+		mode--;
+
+	b = ioread8(dp->dma_mode);
+	if (adev->devno & 1) {
+		b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK) |
+			(mode << ATP867X_IO_DMAMODE_SLAVE_SHIFT);
+	} else {
+		b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK) |
+			(mode << ATP867X_IO_DMAMODE_MSTR_SHIFT);
+	}
+	iowrite8(b, dp->dma_mode);
+}
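+
+/*
+ * Worked example of the mapping above (a sketch, assuming a 66MHz bus
+ * and a rev-A chip): a drive negotiated to UDMA4 gives
+ * mode = XFER_UDMA_4 - XFER_UDMA_0 + 1 = ATP867X_IO_DMAMODE_UDMA_4;
+ * since that is below UDMA_5 it is decremented once and the register
+ * field is programmed as ATP867X_IO_DMAMODE_UDMA_3.
+ */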
+
+static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
+	unsigned int clk)
+{
+	struct atp867x_priv *dp = ap->private_data;
+	unsigned char clocks = clk;
+
+	/*
+	 * Doc 6.6.9: increase the clock value by 1 for safer PIO speed
+	 * on 66MHz bus
+	 */
+	if (dp->pci66mhz)
+		clocks++;
+
+	switch (clocks) {
+	case 0:
+		clocks = 1;
+		break;
+	case 1 ... 6:
+		break;
+	default:
+		printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
+			"Using 12clk.\n", clk);
+		/* fall through */
+	case 9 ... 12:
+		clocks = 7;	/* 12 clk */
+		break;
+	case 7:
+	case 8:	/* default 8 clk */
+		clocks = 0;
+		goto active_clock_shift_done;
+	}
+
+active_clock_shift_done:
+	return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
+}
+
+static int atp867x_get_recover_clocks_shifted(unsigned int clk)
+{
+	unsigned char clocks = clk;
+
+	switch (clocks) {
+	case 0:
+		clocks = 1;
+		break;
+	case 1 ... 11:
+		break;
+	case 13:
+	case 14:
+		--clocks;	/* by the spec */
+		break;
+	case 15:
+		break;
+	default:
+		printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
+			"Using default 12clk.\n", clk);
+		/* fall through */
+	case 12:	/* default 12 clk */
+		clocks = 0;
+		break;
+	}
+
+	return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT;
+}
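+
+/*
+ * A few illustrative mappings implied by the two clock helpers above
+ * (clock counts as requested by ata_timing_compute(), on a 33MHz bus
+ * where no +1 adjustment applies):
+ *	active   8 clk -> field 0  (the hardware default of 8 clocks)
+ *	active  12 clk -> field 7  (the 12 clock encoding)
+ *	recover 12 clk -> field 0  (the hardware default of 12 clocks)
+ *	recover 14 clk -> field 13 (decremented per the data sheet)
+ * The active field is then shifted into the high nibble.
+ */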
+
+static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *peer = ata_dev_pair(adev);
+	struct atp867x_priv *dp = ap->private_data;
+	u8 speed = adev->pio_mode;
+	struct ata_timing t, p;
+	int T, UT;
+	u8 b;
+
+	T = 1000000000 / 33333;
+	UT = T / 4;
+
+	ata_timing_compute(adev, speed, &t, T, UT);
+	if (peer && peer->pio_mode) {
+		ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
+		ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
+	}
+
+	b = ioread8(dp->dma_mode);
+	if (adev->devno & 1)
+		b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK);
+	else
+		b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK);
+	iowrite8(b, dp->dma_mode);
+
+	b = atp867x_get_active_clocks_shifted(ap, t.active) |
+	    atp867x_get_recover_clocks_shifted(t.recover);
+
+	if (adev->devno & 1)
+		iowrite8(b, dp->slave_piospd);
+	else
+		iowrite8(b, dp->mstr_piospd);
+
+	b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
+	    atp867x_get_recover_clocks_shifted(t.rec8b);
+
+	iowrite8(b, dp->eightb_piospd);
+}
+
+static int atp867x_cable_override(struct pci_dev *pdev)
+{
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_ARTOP &&
+		(pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867A ||
+		 pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867B)) {
+		return 1;
+	}
+	return 0;
+}
+
+static int atp867x_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (atp867x_cable_override(pdev))
+		return ATA_CBL_PATA40_SHORT;
+
+	return ATA_CBL_PATA_UNK;
+}
+
+static struct scsi_host_template atp867x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations atp867x_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= atp867x_cable_detect,
+	.set_piomode		= atp867x_set_piomode,
+	.set_dmamode		= atp867x_set_dmamode,
+};
+
+
+#ifdef	ATP867X_DEBUG
+static void atp867x_check_res(struct pci_dev *pdev)
+{
+	int i;
+	unsigned long start, len;
+
+	/* Check the PCI resources for this channel are enabled */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		start = pci_resource_start(pdev, i);
+		len   = pci_resource_len(pdev, i);
+		printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n",
+			start, len);
+	}
+}
+
+static void atp867x_check_ports(struct ata_port *ap, int port)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	struct atp867x_priv *dp = ap->private_data;
+
+	printk(KERN_DEBUG "ATP867X: port[%d] addresses\n"
+		"  cmd_addr	=0x%llx, 0x%llx\n"
+		"  ctl_addr	=0x%llx, 0x%llx\n"
+		"  bmdma_addr	=0x%llx, 0x%llx\n"
+		"  data_addr	=0x%llx\n"
+		"  error_addr	=0x%llx\n"
+		"  feature_addr	=0x%llx\n"
+		"  nsect_addr	=0x%llx\n"
+		"  lbal_addr	=0x%llx\n"
+		"  lbam_addr	=0x%llx\n"
+		"  lbah_addr	=0x%llx\n"
+		"  device_addr	=0x%llx\n"
+		"  status_addr	=0x%llx\n"
+		"  command_addr	=0x%llx\n"
+		"  dp->dma_mode	=0x%llx\n"
+		"  dp->mstr_piospd	=0x%llx\n"
+		"  dp->slave_piospd	=0x%llx\n"
+		"  dp->eightb_piospd	=0x%llx\n"
+		"  dp->pci66mhz		=0x%lx\n",
+		port,
+		(unsigned long long)ioaddr->cmd_addr,
+		(unsigned long long)ATP867X_IO_PORTBASE(ap, port),
+		(unsigned long long)ioaddr->ctl_addr,
+		(unsigned long long)ATP867X_IO_ALTSTATUS(ap, port),
+		(unsigned long long)ioaddr->bmdma_addr,
+		(unsigned long long)ATP867X_IO_DMABASE(ap, port),
+		(unsigned long long)ioaddr->data_addr,
+		(unsigned long long)ioaddr->error_addr,
+		(unsigned long long)ioaddr->feature_addr,
+		(unsigned long long)ioaddr->nsect_addr,
+		(unsigned long long)ioaddr->lbal_addr,
+		(unsigned long long)ioaddr->lbam_addr,
+		(unsigned long long)ioaddr->lbah_addr,
+		(unsigned long long)ioaddr->device_addr,
+		(unsigned long long)ioaddr->status_addr,
+		(unsigned long long)ioaddr->command_addr,
+		(unsigned long long)dp->dma_mode,
+		(unsigned long long)dp->mstr_piospd,
+		(unsigned long long)dp->slave_piospd,
+		(unsigned long long)dp->eightb_piospd,
+		(unsigned long)dp->pci66mhz);
+}
+#endif
+
+static int atp867x_set_priv(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct atp867x_priv *dp;
+	int port = ap->port_no;
+
+	dp = ap->private_data =
+		devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+	if (dp == NULL)
+		return -ENOMEM;
+
+	dp->dma_mode	 = ATP867X_IO_DMAMODE(ap, port);
+	dp->mstr_piospd	 = ATP867X_IO_MSTRPIOSPD(ap, port);
+	dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port);
+	dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port);
+
+	dp->pci66mhz =
+		ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ;
+
+	return 0;
+}
+
+static void atp867x_fixup(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct ata_port *ap = host->ports[0];
+	int i;
+	u8 v;
+
+	/*
+	 * Broken BIOS might not set latency high enough
+	 */
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &v);
+	if (v < 0x80) {
+		v = 0x80;
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v);
+		printk(KERN_DEBUG "ATP867X: set latency timer of device %s"
+			" to %d\n", pci_name(pdev), v);
+	}
+
+	/*
+	 * init 8bit IO port speed (0aaarrrr) to 43h and
+	 * init UDMA modes of master/slave to 0/0 (11h)
+	 */
+	for (i = 0; i < ATP867X_NUM_PORTS; i++)
+		iowrite16(ATP867X_IO_PORTSPD_VAL, ATP867X_IO_PORTSPD(ap, i));
+
+	/*
+	 * init PreREAD counts
+	 */
+	for (i = 0; i < ATP867X_NUM_PORTS; i++)
+		iowrite16(ATP867X_PREREAD_VAL, ATP867X_IO_PREREAD(ap, i));
+
+	v = ioread8(ATP867X_IOBASE(ap) + 0x28);
+	v &= 0xcf;	/* Enable INTA#: bit4=0 means enable */
+	v |= 0xc0;	/* Enable PCI burst, MRM & not immediate interrupts */
+	iowrite8(v, ATP867X_IOBASE(ap) + 0x28);
+
+	/*
+	 * Turn off the over clocked udma5 mode, only for Rev-B
+	 */
+	v = ioread8(ATP867X_SYS_INFO(ap));
+	v &= ATP867X_IO_SYS_MASK_RESERVED;
+	if (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B)
+		v |= ATP867X_IO_SYS_INFO_SLOW_UDMA5;
+	iowrite8(v, ATP867X_SYS_INFO(ap));
+}
+
+static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	unsigned int mask = 0;
+	int i, rc;
+
+	/*
+	 * do not map rombase
+	 */
+	rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+#ifdef	ATP867X_DEBUG
+	atp867x_check_res(pdev);
+
+	for (i = 0; i < PCI_ROM_RESOURCE; i++)
+		printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
+			(unsigned long long)(host->iomap[i]));
+#endif
+
+	/*
+	 * request, iomap BARs and init port addresses accordingly
+	 */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+
+		ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
+		ioaddr->ctl_addr = ioaddr->altstatus_addr
+				 = ATP867X_IO_ALTSTATUS(ap, i);
+		ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);
+
+		ata_sff_std_ports(ioaddr);
+		rc = atp867x_set_priv(ap);
+		if (rc)
+			return rc;
+
+#ifdef	ATP867X_DEBUG
+		atp867x_check_ports(ap, i);
+#endif
+		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+			(unsigned long)ioaddr->cmd_addr,
+			(unsigned long)ioaddr->ctl_addr);
+		ata_port_desc(ap, "bmdma 0x%lx",
+			(unsigned long)ioaddr->bmdma_addr);
+
+		mask |= 1 << i;
+	}
+
+	if (!mask) {
+		dev_err(gdev, "no available native port\n");
+		return -ENODEV;
+	}
+
+	atp867x_fixup(host);
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	return rc;
+}
+
+static int atp867x_init_one(struct pci_dev *pdev,
+	const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_867x = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask 	= ATA_UDMA6,
+		.port_ops	= &atp867x_ops,
+	};
+
+	struct ata_host *host;
+	const struct ata_port_info *ppi[] = { &info_867x, NULL };
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)\n",
+		pdev->revision);
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate ATA host\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	rc = atp867x_ata_pci_sff_init_host(host);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to init host\n");
+		goto err_out;
+	}
+
+	pci_set_master(pdev);
+
+	rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				IRQF_SHARED, &atp867x_sht);
+	if (rc)
+		dev_err(&pdev->dev, "failed to activate host\n");
+
+err_out:
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int atp867x_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	atp867x_fixup(host);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static struct pci_device_id atp867x_pci_tbl[] = {
+	{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A),	0 },
+	{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B),	0 },
+	{ },
+};
+
+static struct pci_driver atp867x_driver = {
+	.name 		= DRV_NAME,
+	.id_table 	= atp867x_pci_tbl,
+	.probe 		= atp867x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= atp867x_reinit_one,
+#endif
+};
+
+module_pci_driver(atp867x_driver);
+
+MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc.");
+MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_bk3710.c b/drivers/ata/pata_bk3710.c
new file mode 100644
index 0000000..fad95cf
--- /dev/null
+++ b/drivers/ata/pata_bk3710.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Palmchip BK3710 PATA controller driver
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Based on palm_bk3710.c:
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
+ */
+
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DRV_NAME "pata_bk3710"
+
+#define BK3710_TF_OFFSET	0x1F0
+#define BK3710_CTL_OFFSET	0x3F6
+
+#define BK3710_BMISP		0x02
+#define BK3710_IDETIMP		0x40
+#define BK3710_UDMACTL		0x48
+#define BK3710_MISCCTL		0x50
+#define BK3710_REGSTB		0x54
+#define BK3710_REGRCVR		0x58
+#define BK3710_DATSTB		0x5C
+#define BK3710_DATRCVR		0x60
+#define BK3710_DMASTB		0x64
+#define BK3710_DMARCVR		0x68
+#define BK3710_UDMASTB		0x6C
+#define BK3710_UDMATRP		0x70
+#define BK3710_UDMAENV		0x74
+#define BK3710_IORDYTMP		0x78
+
+static struct scsi_host_template pata_bk3710_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static unsigned int ideclk_period; /* in nanoseconds */
+
+struct pata_bk3710_udmatiming {
+	unsigned int rptime;	/* tRP -- Ready to pause time (nsec) */
+	unsigned int cycletime;	/* tCYCTYP2/2 -- avg Cycle Time (nsec) */
+				/* tENV is always a minimum of 20 nsec */
+};
+
+static const struct pata_bk3710_udmatiming pata_bk3710_udmatimings[6] = {
+	{ 160, 240 / 2 },	/* UDMA Mode 0 */
+	{ 125, 160 / 2 },	/* UDMA Mode 1 */
+	{ 100, 120 / 2 },	/* UDMA Mode 2 */
+	{ 100,  90 / 2 },	/* UDMA Mode 3 */
+	{ 100,  60 / 2 },	/* UDMA Mode 4 */
+	{  85,  40 / 2 },	/* UDMA Mode 5 */
+};
+
+static void pata_bk3710_setudmamode(void __iomem *base, unsigned int dev,
+				    unsigned int mode)
+{
+	u32 val32;
+	u16 val16;
+	u8 tenv, trp, t0;
+
+	/* DMA Data Setup */
+	t0 = DIV_ROUND_UP(pata_bk3710_udmatimings[mode].cycletime,
+			  ideclk_period) - 1;
+	tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
+	trp = DIV_ROUND_UP(pata_bk3710_udmatimings[mode].rptime,
+			   ideclk_period) - 1;
+
+	/* udmastb Ultra DMA Access Strobe Width */
+	val32 = ioread32(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
+	val32 |= t0 << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_UDMASTB);
+
+	/* udmatrp Ultra DMA Ready to Pause Time */
+	val32 = ioread32(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
+	val32 |= trp << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_UDMATRP);
+
+	/* udmaenv Ultra DMA envelop Time */
+	val32 = ioread32(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
+	val32 |= tenv << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_UDMAENV);
+
+	/* Enable UDMA for Device */
+	val16 = ioread16(base + BK3710_UDMACTL) | (1 << dev);
+	iowrite16(val16, base + BK3710_UDMACTL);
+}
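+
+/*
+ * A sketch of the arithmetic above, assuming a 99MHz IDE clock so that
+ * ideclk_period = 10ns (rounded down), for UDMA mode 4:
+ *	t0   = DIV_ROUND_UP(60 / 2, 10) - 1 = 2		strobe width
+ *	tenv = DIV_ROUND_UP(20, 10) - 1     = 1		envelope time
+ *	trp  = DIV_ROUND_UP(100, 10) - 1    = 9		ready-to-pause
+ */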
+
+static void pata_bk3710_setmwdmamode(void __iomem *base, unsigned int dev,
+				     unsigned short min_cycle,
+				     unsigned int mode)
+{
+	const struct ata_timing *t;
+	int cycletime;
+	u32 val32;
+	u16 val16;
+	u8 td, tkw, t0;
+
+	t = ata_timing_find_mode(mode);
+	cycletime = max_t(int, t->cycle, min_cycle);
+
+	/* DMA Data Setup */
+	t0 = DIV_ROUND_UP(cycletime, ideclk_period);
+	td = DIV_ROUND_UP(t->active, ideclk_period);
+	tkw = t0 - td - 1;
+	td--;
+
+	val32 = ioread32(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
+	val32 |= td << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_DMASTB);
+
+	val32 = ioread32(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
+	val32 |= tkw << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_DMARCVR);
+
+	/* Disable UDMA for Device */
+	val16 = ioread16(base + BK3710_UDMACTL) & ~(1 << dev);
+	iowrite16(val16, base + BK3710_UDMACTL);
+}
+
+static void pata_bk3710_set_dmamode(struct ata_port *ap,
+				    struct ata_device *adev)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.bmdma_addr;
+	int is_slave = adev->devno;
+	const u8 xferspeed = adev->dma_mode;
+
+	if (xferspeed >= XFER_UDMA_0)
+		pata_bk3710_setudmamode(base, is_slave,
+					xferspeed - XFER_UDMA_0);
+	else
+		pata_bk3710_setmwdmamode(base, is_slave,
+					 adev->id[ATA_ID_EIDE_DMA_MIN],
+					 xferspeed);
+}
+
+static void pata_bk3710_setpiomode(void __iomem *base, struct ata_device *pair,
+				   unsigned int dev, unsigned int cycletime,
+				   unsigned int mode)
+{
+	const struct ata_timing *t;
+	u32 val32;
+	u8 t2, t2i, t0;
+
+	t = ata_timing_find_mode(XFER_PIO_0 + mode);
+
+	/* PIO Data Setup */
+	t0 = DIV_ROUND_UP(cycletime, ideclk_period);
+	t2 = DIV_ROUND_UP(t->active, ideclk_period);
+
+	t2i = t0 - t2 - 1;
+	t2--;
+
+	val32 = ioread32(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
+	val32 |= t2 << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_DATSTB);
+
+	val32 = ioread32(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
+	val32 |= t2i << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_DATRCVR);
+
+	/* FIXME: this is broken also in the old driver */
+	if (pair) {
+		u8 mode2 = pair->pio_mode - XFER_PIO_0;
+
+		if (mode2 < mode)
+			mode = mode2;
+	}
+
+	/* TASKFILE Setup */
+	t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
+	t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
+
+	t2i = t0 - t2 - 1;
+	t2--;
+
+	val32 = ioread32(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
+	val32 |= t2 << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_REGSTB);
+
+	val32 = ioread32(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
+	val32 |= t2i << (dev ? 8 : 0);
+	iowrite32(val32, base + BK3710_REGRCVR);
+}
+
+static void pata_bk3710_set_piomode(struct ata_port *ap,
+				    struct ata_device *adev)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.bmdma_addr;
+	struct ata_device *pair = ata_dev_pair(adev);
+	const struct ata_timing *t = ata_timing_find_mode(adev->pio_mode);
+	const u16 *id = adev->id;
+	unsigned int cycle_time = 0;
+	int is_slave = adev->devno;
+	const u8 pio = adev->pio_mode - XFER_PIO_0;
+
+	if (id[ATA_ID_FIELD_VALID] & 2) {
+		if (ata_id_has_iordy(id))
+			cycle_time = id[ATA_ID_EIDE_PIO_IORDY];
+		else
+			cycle_time = id[ATA_ID_EIDE_PIO];
+
+		/* conservative "downgrade" for all pre-ATA2 drives */
+		if (pio < 3 && cycle_time < t->cycle)
+			cycle_time = 0; /* use standard timing */
+	}
+
+	if (!cycle_time)
+		cycle_time = t->cycle;
+
+	pata_bk3710_setpiomode(base, pair, is_slave, cycle_time, pio);
+}
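+
+/*
+ * Illustration of the cycle time selection above: a PIO4 drive that
+ * reports a valid 100ns IORDY cycle time is programmed for 100ns,
+ * while a PIO2 drive reporting 150ns (shorter than the standard 240ns
+ * for that mode) is treated as suspect and gets the standard timing.
+ */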
+
+static void pata_bk3710_chipinit(void __iomem *base)
+{
+	/*
+	 * REVISIT:  the ATA reset signal needs to be managed through a
+	 * GPIO, which means it should come from platform_data.  Until
+	 * we get and use such information, we have to trust that things
+	 * have been reset before we get here.
+	 */
+
+	/*
+	 * Program the IDETIMP Register Value based on the following assumptions
+	 *
+	 * (ATA_IDETIMP_IDEEN		, ENABLE ) |
+	 * (ATA_IDETIMP_PREPOST1	, DISABLE) |
+	 * (ATA_IDETIMP_PREPOST0	, DISABLE) |
+	 *
+	 * DM6446 silicon rev 2.1 and earlier have no observed net benefit
+	 * from enabling prefetch/postwrite.
+	 */
+	iowrite16(BIT(15), base + BK3710_IDETIMP);
+
+	/*
+	 * UDMACTL Ultra-ATA DMA Control
+	 * (ATA_UDMACTL_UDMAP1	, 0 ) |
+	 * (ATA_UDMACTL_UDMAP0	, 0 )
+	 *
+	 */
+	iowrite16(0, base + BK3710_UDMACTL);
+
+	/*
+	 * MISCCTL Miscellaneous Control Register
+	 * (ATA_MISCCTL_HWNHLD1P	, 1 cycle)
+	 * (ATA_MISCCTL_HWNHLD0P	, 1 cycle)
+	 * (ATA_MISCCTL_TIMORIDE	, 1)
+	 */
+	iowrite32(0x001, base + BK3710_MISCCTL);
+
+	/*
+	 * IORDYTMP IORDY Timer for Primary Register
+	 * (ATA_IORDYTMP_IORDYTMP	, DISABLE)
+	 */
+	iowrite32(0, base + BK3710_IORDYTMP);
+
+	/*
+	 * Configure BMISP Register
+	 * (ATA_BMISP_DMAEN1	, DISABLE )	|
+	 * (ATA_BMISP_DMAEN0	, DISABLE )	|
+	 * (ATA_BMISP_IORDYINT	, CLEAR)	|
+	 * (ATA_BMISP_INTRSTAT	, CLEAR)	|
+	 * (ATA_BMISP_DMAERROR	, CLEAR)
+	 */
+	iowrite16(0xE, base + BK3710_BMISP);
+
+	pata_bk3710_setpiomode(base, NULL, 0, 600, 0);
+	pata_bk3710_setpiomode(base, NULL, 1, 600, 0);
+}
+
+static struct ata_port_operations pata_bk3710_ports_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_80wire,
+
+	.set_piomode		= pata_bk3710_set_piomode,
+	.set_dmamode		= pata_bk3710_set_dmamode,
+};
+
+static int __init pata_bk3710_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	struct resource *mem;
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *base;
+	unsigned long rate;
+	int irq;
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return -ENODEV;
+
+	clk_enable(clk);
+	rate = clk_get_rate(clk);
+	if (!rate)
+		return -EINVAL;
+
+	/* NOTE:  round *down* to meet minimum timings; we count in clocks */
+	ideclk_period = 1000000000UL / rate;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		pr_err(DRV_NAME ": failed to get IRQ resource\n");
+		return irq;
+	}
+
+	base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	/* configure the Palmchip controller */
+	pata_bk3710_chipinit(base);
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	ap->ops = &pata_bk3710_ports_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->mwdma_mask = ATA_MWDMA2;
+	ap->udma_mask = rate < 100000000 ? ATA_UDMA4 : ATA_UDMA5;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.data_addr		= base + BK3710_TF_OFFSET;
+	ap->ioaddr.error_addr		= base + BK3710_TF_OFFSET + 1;
+	ap->ioaddr.feature_addr		= base + BK3710_TF_OFFSET + 1;
+	ap->ioaddr.nsect_addr		= base + BK3710_TF_OFFSET + 2;
+	ap->ioaddr.lbal_addr		= base + BK3710_TF_OFFSET + 3;
+	ap->ioaddr.lbam_addr		= base + BK3710_TF_OFFSET + 4;
+	ap->ioaddr.lbah_addr		= base + BK3710_TF_OFFSET + 5;
+	ap->ioaddr.device_addr		= base + BK3710_TF_OFFSET + 6;
+	ap->ioaddr.status_addr		= base + BK3710_TF_OFFSET + 7;
+	ap->ioaddr.command_addr		= base + BK3710_TF_OFFSET + 7;
+
+	ap->ioaddr.altstatus_addr	= base + BK3710_CTL_OFFSET;
+	ap->ioaddr.ctl_addr		= base + BK3710_CTL_OFFSET;
+
+	ap->ioaddr.bmdma_addr		= base;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+		      (unsigned long)base + BK3710_TF_OFFSET,
+		      (unsigned long)base + BK3710_CTL_OFFSET);
+
+	/* activate */
+	return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+				 &pata_bk3710_sht);
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:palm_bk3710");
+
+static struct platform_driver pata_bk3710_driver = {
+	.driver = {
+		.name = "palm_bk3710",
+	},
+};
+
+static int __init pata_bk3710_init(void)
+{
+	return platform_driver_probe(&pata_bk3710_driver, pata_bk3710_probe);
+}
+
+module_init(pata_bk3710_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
new file mode 100644
index 0000000..e3532ed
--- /dev/null
+++ b/drivers/ata/pata_cmd640.c
@@ -0,0 +1,271 @@
+/*
+ * pata_cmd640.c 	- CMD640 PCI PATA for new ATA layer
+ *			  (C) 2007 Red Hat Inc
+ *
+ * Based upon
+ *  linux/drivers/ide/pci/cmd640.c		Version 1.02  Sep 01, 1996
+ *
+ *  Copyright (C) 1995-1996  Linus Torvalds & authors (see driver)
+ *
+ *	This drives only the PCI version of the controller. If you have a
+ *	VLB one then we have enough docs to support it but you can write
+ *	your own code.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd640"
+#define DRV_VERSION "0.0.5"
+
+struct cmd640_reg {
+	int last;
+	u8 reg58[ATA_MAX_DEVICES];
+};
+
+enum {
+	CFR = 0x50,
+	CNTRL = 0x51,
+	CMDTIM = 0x52,
+	ARTIM0 = 0x53,
+	DRWTIM0 = 0x54,
+	ARTIM23 = 0x57,
+	DRWTIM23 = 0x58,
+	BRST = 0x59
+};
+
+/**
+ *	cmd640_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup.
+ */
+
+static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct cmd640_reg *timing = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
+	const unsigned long T = 1000000 / 33;
+	const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+	u8 reg;
+	int arttim = ARTIM0 + 2 * adev->devno;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
+		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+		return;
+	}
+
+	/* The second channel has shared timings and switching the setup
+	   timing is messy, so merge with the pair for the worst case */
+	if (ap->port_no && pair) {
+		struct ata_timing p;
+		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
+	}
+
+	/* Make the timings fit */
+	if (t.recover > 16) {
+		t.active += t.recover - 16;
+		t.recover = 16;
+	}
+	if (t.active > 16)
+		t.active = 16;
+
+	/* Now convert the clocks into values we can actually stuff into
+	   the chip */
+
+	if (t.recover > 1)
+		t.recover--;	/* 640B only */
+	else
+		t.recover = 15;
+
+	if (t.setup > 4)
+		t.setup = 0xC0;
+	else
+		t.setup = setup_data[t.setup];
+
+	if (ap->port_no == 0) {
+		t.active &= 0x0F;	/* 0 = 16 */
+
+		/* Load setup timing */
+		pci_read_config_byte(pdev, arttim, &reg);
+		reg &= 0x3F;
+		reg |= t.setup;
+		pci_write_config_byte(pdev, arttim, reg);
+
+		/* Load active/recovery */
+		pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
+	} else {
+		/* Save the shared timings for channel, they will be loaded
+		   by qc_issue. Reloading the setup time is expensive so we
+		   keep a merged one loaded */
+		pci_read_config_byte(pdev, ARTIM23, &reg);
+		reg &= 0x3F;
+		reg |= t.setup;
+		pci_write_config_byte(pdev, ARTIM23, reg);
+		timing->reg58[adev->devno] = (t.active << 4) | t.recover;
+	}
+}
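+
+/*
+ * A worked example of the conversion above (hypothetical clock counts)
+ * for t.setup = 2, t.active = 6, t.recover = 9 on channel 0, drive 0:
+ *	setup_data[2] = 0x40 is merged into the top bits of ARTIM0
+ *	recover becomes 9 - 1 = 8, so DRWTIM0 = (6 << 4) | 8 = 0x68
+ */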
+
+
+/**
+ *	cmd640_qc_issue	-	command preparation hook
+ *	@qc: Command to be issued
+ *
+ *	Channel 1 has shared timings. We must reprogram the
+ *	clock each drive 2/3 switch we do.
+ */
+
+static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct cmd640_reg *timing = ap->private_data;
+
+	if (ap->port_no != 0 && adev->devno != timing->last) {
+		pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
+		timing->last = adev->devno;
+	}
+	return ata_sff_qc_issue(qc);
+}
+
+/**
+ *	cmd640_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	The CMD640 needs to maintain private data structures so we
+ *	allocate space here.
+ */
+
+static int cmd640_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct cmd640_reg *timing;
+
+	timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
+	if (timing == NULL)
+		return -ENOMEM;
+	timing->last = -1;	/* Force a load */
+	ap->private_data = timing;
+	return 0;
+}
+
+static bool cmd640_sff_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int irq_reg		= ap->port_no ? ARTIM23 : CFR;
+	u8  irq_stat, irq_mask	= ap->port_no ? 0x10 : 0x04;
+
+	pci_read_config_byte(pdev, irq_reg, &irq_stat);
+
+	return irq_stat & irq_mask;
+}
+
+static struct scsi_host_template cmd640_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cmd640_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	/* In theory xfer_noirq is not needed once we kill the prefetcher */
+	.sff_data_xfer	= ata_sff_data_xfer32,
+	.sff_irq_check	= cmd640_sff_irq_check,
+	.qc_issue	= cmd640_qc_issue,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= cmd640_set_piomode,
+	.port_start	= cmd640_port_start,
+};
+
+static void cmd640_hardware_init(struct pci_dev *pdev)
+{
+	u8 ctrl;
+
+	/* CMD640 detected, commiserations */
+	pci_write_config_byte(pdev, 0x5B, 0x00);
+	/* PIO0 command cycles */
+	pci_write_config_byte(pdev, CMDTIM, 0);
+	/* 512 byte bursts (sector) */
+	pci_write_config_byte(pdev, BRST, 0x40);
+	/*
+	 * A reporter a long time ago
+	 * Had problems with the data fifo
+	 * So don't run the risk
+	 * Of putting crap on the disk
+	 * For its better just to go slow
+	 */
+	/* Do channel 0 */
+	pci_read_config_byte(pdev, CNTRL, &ctrl);
+	pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0);
+	/* Ditto for channel 1 */
+	pci_read_config_byte(pdev, ARTIM23, &ctrl);
+	ctrl |= 0x0C;
+	pci_write_config_byte(pdev, ARTIM23, ctrl);
+}
+
+static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &cmd640_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	cmd640_hardware_init(pdev);
+
+	return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cmd640_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+	cmd640_hardware_init(pdev);
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id cmd640[] = {
+	{ PCI_VDEVICE(CMD, 0x640), 0 },
+	{ },
+};
+
+static struct pci_driver cmd640_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cmd640,
+	.probe 		= cmd640_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cmd640_reinit_one,
+#endif
+};
+
+module_pci_driver(cmd640_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd640);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
new file mode 100644
index 0000000..13ca588
--- /dev/null
+++ b/drivers/ata/pata_cmd64x.c
@@ -0,0 +1,533 @@
+/*
+ * pata_cmd64x.c 	- CMD64x PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
+ *			  (C) 2012 MontaVista Software, LLC <source@mvista.com>
+ *
+ * Based upon
+ * linux/drivers/ide/pci/cmd64x.c		Version 1.30	Sept 10, 2002
+ *
+ * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
+ *           Note, this driver is not used at all on other systems because
+ *           there the "BIOS" has done all of the following already.
+ *           Due to massive hardware bugs, UltraDMA is only supported
+ *           on the 646U2 and not on the 646U.
+ *
+ * Copyright (C) 1998		Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 1998		David S. Miller (davem@redhat.com)
+ *
+ * Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
+ *
+ * TODO
+ *	Testing work
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd64x"
+#define DRV_VERSION "0.2.18"
+
+/*
+ * CMD64x specific registers definition.
+ */
+
+enum {
+	CFR 		= 0x50,
+		CFR_INTR_CH0  = 0x04,
+	CNTRL		= 0x51,
+		CNTRL_CH0     = 0x04,
+		CNTRL_CH1     = 0x08,
+	CMDTIM 		= 0x52,
+	ARTTIM0 	= 0x53,
+	DRWTIM0 	= 0x54,
+	ARTTIM1 	= 0x55,
+	DRWTIM1 	= 0x56,
+	ARTTIM23 	= 0x57,
+		ARTTIM23_DIS_RA2  = 0x04,
+		ARTTIM23_DIS_RA3  = 0x08,
+		ARTTIM23_INTR_CH1 = 0x10,
+	DRWTIM2 	= 0x58,
+	BRST 		= 0x59,
+	DRWTIM3 	= 0x5b,
+	BMIDECR0	= 0x70,
+	MRDMODE		= 0x71,
+		MRDMODE_INTR_CH0 = 0x04,
+		MRDMODE_INTR_CH1 = 0x08,
+	BMIDESR0	= 0x72,
+	UDIDETCR0	= 0x73,
+	DTPR0		= 0x74,
+	BMIDECR1	= 0x78,
+	BMIDECSR	= 0x79,
+	UDIDETCR1	= 0x7B,
+	DTPR1		= 0x7C
+};
+
+static int cmd648_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 r;
+
+	/* Check cable detect bits */
+	pci_read_config_byte(pdev, BMIDECSR, &r);
+	if (r & (1 << ap->port_no))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	cmd64x_set_timing	-	set PIO and MWDMA timing
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@mode: mode
+ *
+ *	Called to do the PIO and MWDMA mode setup.
+ */
+
+static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
+	const unsigned long T = 1000000 / 33;
+	const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+
+	u8 reg;
+
+	/* Port layout is not logical so use a table */
+	const u8 arttim_port[2][2] = {
+		{ ARTTIM0, ARTTIM1 },
+		{ ARTTIM23, ARTTIM23 }
+	};
+	const u8 drwtim_port[2][2] = {
+		{ DRWTIM0, DRWTIM1 },
+		{ DRWTIM2, DRWTIM3 }
+	};
+
+	int arttim = arttim_port[ap->port_no][adev->devno];
+	int drwtim = drwtim_port[ap->port_no][adev->devno];
+
+	/* ata_timing_compute is smart and will produce timings for MWDMA
+	   that don't violate the drive's PIO capabilities. */
+	if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
+		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+		return;
+	}
+	if (ap->port_no) {
+		/* Slave has shared address setup */
+		struct ata_device *pair = ata_dev_pair(adev);
+
+		if (pair) {
+			struct ata_timing tp;
+			ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
+			ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+		}
+	}
+
+	printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
+		t.active, t.recover, t.setup);
+	if (t.recover > 16) {
+		t.active += t.recover - 16;
+		t.recover = 16;
+	}
+	if (t.active > 16)
+		t.active = 16;
+
+	/* Now convert the clocks into values we can actually stuff into
+	   the chip */
+
+	if (t.recover == 16)
+		t.recover = 0;
+	else if (t.recover > 1)
+		t.recover--;
+	else
+		t.recover = 15;
+
+	if (t.setup > 4)
+		t.setup = 0xC0;
+	else
+		t.setup = setup_data[t.setup];
+
+	t.active &= 0x0F;	/* 0 = 16 */
+
+	/* Load setup timing */
+	pci_read_config_byte(pdev, arttim, &reg);
+	reg &= 0x3F;
+	reg |= t.setup;
+	pci_write_config_byte(pdev, arttim, reg);
+
+	/* Load active/recovery */
+	pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
+}
+
+/**
+ *	cmd64x_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Used when configuring the devices to set the PIO timings. All the
+ *	actual work is done by the PIO/MWDMA setting helper
+ */
+
+static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	cmd64x_set_timing(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	cmd64x_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup.
+ */
+
+static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 udma_data[] = {
+		0x30, 0x20, 0x10, 0x20, 0x10, 0x00
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 regU, regD;
+
+	int pciU = UDIDETCR0 + 8 * ap->port_no;
+	int pciD = BMIDESR0 + 8 * ap->port_no;
+	int shift = 2 * adev->devno;
+
+	pci_read_config_byte(pdev, pciD, &regD);
+	pci_read_config_byte(pdev, pciU, &regU);
+
+	/* DMA bits off */
+	regD &= ~(0x20 << adev->devno);
+	/* DMA control bits */
+	regU &= ~(0x30 << shift);
+	/* DMA timing bits */
+	regU &= ~(0x05 << adev->devno);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		/* Merge the timing value */
+		regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
+		/* Merge the control bits */
+		regU |= 1 << adev->devno; /* UDMA on */
+		if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
+			regU |= 4 << adev->devno;
+	} else {
+		regU &= ~ (1 << adev->devno);	/* UDMA off */
+		cmd64x_set_timing(ap, adev, adev->dma_mode);
+	}
+
+	regD |= 0x20 << adev->devno;
+
+	pci_write_config_byte(pdev, pciU, regU);
+	pci_write_config_byte(pdev, pciD, regD);
+}
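+
+/*
+ * Sketch of the register merge above, for UDMA4 on drive 1 of a port
+ * (shift = 2 * devno = 2):
+ *	regU |= udma_data[4] << 2;	timing field, 0x10 << 2
+ *	regU |= 1 << 1;			UDMA enable for drive 1
+ *	regU |= 4 << 1;			15nS clocking as mode > UDMA2
+ */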
+
+/**
+ *	cmd64x_sff_irq_check	-	check IDE interrupt
+ *	@ap: ATA interface
+ *
+ *	Check IDE interrupt in CFR/ARTTIM23 registers.
+ */
+
+static bool cmd64x_sff_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int irq_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
+	int irq_reg  = ap->port_no ? ARTTIM23 : CFR;
+	u8 irq_stat;
+
+	/* NOTE: reading the register should clear the interrupt */
+	pci_read_config_byte(pdev, irq_reg, &irq_stat);
+
+	return irq_stat & irq_mask;
+}
+
+/**
+ *	cmd64x_sff_irq_clear	-	clear IDE interrupt
+ *	@ap: ATA interface
+ *
+ *	Clear IDE interrupt in CFR/ARTTIM23 and DMA status registers.
+ */
+
+static void cmd64x_sff_irq_clear(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int irq_reg = ap->port_no ? ARTTIM23 : CFR;
+	u8 irq_stat;
+
+	ata_bmdma_irq_clear(ap);
+
+	/* Reading the register should be enough to clear the interrupt */
+	pci_read_config_byte(pdev, irq_reg, &irq_stat);
+}
+
+/**
+ *	cmd648_sff_irq_check	-	check IDE interrupt
+ *	@ap: ATA interface
+ *
+ *	Check IDE interrupt in MRDMODE register.
+ */
+
+static bool cmd648_sff_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long base = pci_resource_start(pdev, 4);
+	int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0;
+	u8 mrdmode = inb(base + 1);
+
+	return mrdmode & irq_mask;
+}
+
+/**
+ *	cmd648_sff_irq_clear	-	clear IDE interrupt
+ *	@ap: ATA interface
+ *
+ *	Clear IDE interrupt in MRDMODE and DMA status registers.
+ */
+
+static void cmd648_sff_irq_clear(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long base = pci_resource_start(pdev, 4);
+	int irq_mask = ap->port_no ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0;
+	u8 mrdmode;
+
+	ata_bmdma_irq_clear(ap);
+
+	/* Clear this port's interrupt bit (leaving the other port alone) */
+	mrdmode  = inb(base + 1);
+	mrdmode &= ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1);
+	outb(mrdmode | irq_mask, base + 1);
+}
+
+/**
+ *	cmd646r1_bmdma_stop	-	DMA stop callback
+ *	@qc: Command in progress
+ *
+ *	Stub for now while investigating the r1 quirk in the old driver.
+ */
+
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template cmd64x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations cmd64x_base_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.set_piomode	= cmd64x_set_piomode,
+	.set_dmamode	= cmd64x_set_dmamode,
+};
+
+static struct ata_port_operations cmd64x_port_ops = {
+	.inherits	= &cmd64x_base_ops,
+	.sff_irq_check	= cmd64x_sff_irq_check,
+	.sff_irq_clear	= cmd64x_sff_irq_clear,
+	.cable_detect	= ata_cable_40wire,
+};
+
+static struct ata_port_operations cmd646r1_port_ops = {
+	.inherits	= &cmd64x_base_ops,
+	.sff_irq_check	= cmd64x_sff_irq_check,
+	.sff_irq_clear	= cmd64x_sff_irq_clear,
+	.bmdma_stop	= cmd646r1_bmdma_stop,
+	.cable_detect	= ata_cable_40wire,
+};
+
+static struct ata_port_operations cmd646r3_port_ops = {
+	.inherits	= &cmd64x_base_ops,
+	.sff_irq_check	= cmd648_sff_irq_check,
+	.sff_irq_clear	= cmd648_sff_irq_clear,
+	.cable_detect	= ata_cable_40wire,
+};
+
+static struct ata_port_operations cmd648_port_ops = {
+	.inherits	= &cmd64x_base_ops,
+	.sff_irq_check	= cmd648_sff_irq_check,
+	.sff_irq_clear	= cmd648_sff_irq_clear,
+	.cable_detect	= cmd648_cable_detect,
+};
+
+static void cmd64x_fixup(struct pci_dev *pdev)
+{
+	u8 mrdmode;
+
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+	mrdmode &= ~0x30;	/* IRQ set up */
+	mrdmode |= 0x02;	/* Memory read line enable */
+	pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
+	/* PPC specific fixup copied from old driver */
+#ifdef CONFIG_PPC
+	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+}
+
+static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info cmd_info[7] = {
+		{	/* CMD 643 - no UDMA */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.port_ops = &cmd64x_port_ops
+		},
+		{	/* CMD 646 with broken UDMA */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.port_ops = &cmd64x_port_ops
+		},
+		{	/* CMD 646U with broken UDMA */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.port_ops = &cmd646r3_port_ops
+		},
+		{	/* CMD 646U2 with working UDMA */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &cmd646r3_port_ops
+		},
+		{	/* CMD 646 rev 1  */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.port_ops = &cmd646r1_port_ops
+		},
+		{	/* CMD 648 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &cmd648_port_ops
+		},
+		{	/* CMD 649 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &cmd648_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = {
+		&cmd_info[id->driver_data],
+		&cmd_info[id->driver_data],
+		NULL
+	};
+	u8 reg;
+	int rc;
+	struct pci_dev *bridge = pdev->bus->self;
+	/* mobility split bridges don't report enabled ports correctly */
+	int port_ok = !(bridge && bridge->vendor ==
+			PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
+	/* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
+	int cntrl_ch0_ok = (id->driver_data != 0);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (id->driver_data == 0)	/* 643 */
+		ata_pci_bmdma_clear_simplex(pdev);
+
+	if (pdev->device == PCI_DEVICE_ID_CMD_646)
+		switch (pdev->revision) {
+		/* UDMA works since rev 5 */
+		default:
+			ppi[0] = &cmd_info[3];
+			ppi[1] = &cmd_info[3];
+			break;
+		/* Interrupts in MRDMODE since rev 3 */
+		case 3:
+		case 4:
+			ppi[0] = &cmd_info[2];
+			ppi[1] = &cmd_info[2];
+			break;
+		/* Rev 1 with other problems? */
+		case 1:
+			ppi[0] = &cmd_info[4];
+			ppi[1] = &cmd_info[4];
+			/* FALL THRU */
+		/* Early revs have no CNTRL_CH0 */
+		case 2:
+		case 0:
+			cntrl_ch0_ok = 0;
+			break;
+		}
+
+	cmd64x_fixup(pdev);
+
+	/* check for enabled ports */
+	pci_read_config_byte(pdev, CNTRL, &reg);
+	if (!port_ok)
+		dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
+	if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
+		dev_notice(&pdev->dev, "Primary port is disabled\n");
+		ppi[0] = &ata_dummy_port_info;
+
+	}
+	if (port_ok && !(reg & CNTRL_CH1)) {
+		dev_notice(&pdev->dev, "Secondary port is disabled\n");
+		ppi[1] = &ata_dummy_port_info;
+	}
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cmd64x_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	cmd64x_fixup(pdev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id cmd64x[] = {
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 5 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 6 },
+
+	{ },
+};
+
+static struct pci_driver cmd64x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cmd64x,
+	.probe 		= cmd64x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cmd64x_reinit_one,
+#endif
+};
+
+module_pci_driver(cmd64x_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd64x);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
new file mode 100644
index 0000000..4cb2407
--- /dev/null
+++ b/drivers/ata/pata_cs5520.c
@@ -0,0 +1,310 @@
+/*
+ *	IDE tuning and bus mastering support for the CS5510/CS5520
+ *	chipsets
+ *
+ *	The CS5510/CS5520 are slightly unusual devices. Unlike the
+ *	typical IDE controllers they do bus mastering with the drive in
+ *	PIO mode and smarter silicon.
+ *
+ *	The practical upshot of this is that we must always tune the
+ *	drive for the right PIO mode. We must also ignore all the blacklists
+ *	and the drive bus mastering DMA information. Also, to confuse matters
+ *	further, we can do DMA on PIO-only drives.
+ *
+ *	DMA on the 5510 also requires we disable_hlt() during DMA on early
+ *	revisions.
+ *
+ *	*** This driver is strictly experimental ***
+ *
+ *	(c) Copyright Red Hat Inc 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Documentation:
+ *	Not publicly available.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_cs5520"
+#define DRV_VERSION	"0.6.6"
+
+struct pio_clocks
+{
+	int address;
+	int assert;
+	int recovery;
+};
+
+static const struct pio_clocks cs5520_pio_clocks[]={
+	{3, 6, 11},
+	{2, 5, 6},
+	{1, 4, 3},
+	{1, 3, 2},
+	{1, 2, 1}
+};
+
+/**
+ *	cs5520_set_timings	-	program PIO timings
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *	@pio: PIO mode (XFER_PIO_0 based transfer mode number)
+ *
+ *	Program the PIO mode timings for the controller according to the pio
+ *	clocking table.
+ */
+
+static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int slave = adev->devno;
+
+	pio -= XFER_PIO_0;
+
+	/* Channel command timing */
+	pci_write_config_byte(pdev, 0x62 + ap->port_no,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+	/* FIXME: should these use address ? */
+	/* Read command timing */
+	pci_write_config_byte(pdev, 0x64 +  4*ap->port_no + slave,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+	/* Write command timing */
+	pci_write_config_byte(pdev, 0x66 +  4*ap->port_no + slave,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+}
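+
+/*
+ * The config space layout implied by the offsets above:
+ *	0x62 + port		channel command timing
+ *	0x64 + 4*port + drive	read command timing
+ *	0x66 + 4*port + drive	write command timing
+ * e.g. the secondary slave's write timing lives at 0x66 + 4 + 1 = 0x6B.
+ */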
+
+/**
+ *	cs5520_set_piomode	-	program PIO timings
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Program the PIO mode timings for the controller according to the pio
+ *	clocking table.
+ */
+
+static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	cs5520_set_timings(ap, adev, adev->pio_mode);
+}
+
+static struct scsi_host_template cs5520_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations cs5520_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_prep		= ata_bmdma_dumb_qc_prep,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= cs5520_set_piomode,
+};
+
+static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
+	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
+	struct ata_port_info pi = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.port_ops	= &cs5520_port_ops,
+	};
+	const struct ata_port_info *ppi[2];
+	u8 pcicfg;
+	void __iomem *iomap[5];
+	struct ata_host *host;
+	struct ata_ioports *ioaddr;
+	int i, rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* IDE port enable bits */
+	pci_read_config_byte(pdev, 0x60, &pcicfg);
+
+	/* Check if the ATA ports are enabled */
+	if ((pcicfg & 3) == 0)
+		return -ENODEV;
+
+	ppi[0] = ppi[1] = &ata_dummy_port_info;
+	if (pcicfg & 1)
+		ppi[0] = &pi;
+	if (pcicfg & 2)
+		ppi[1] = &pi;
+
+	if ((pcicfg & 0x40) == 0) {
+		dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
+		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+	}
+
+	pi.mwdma_mask = id->driver_data;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+
+	/* Perform set up for DMA */
+	if (pci_enable_device_io(pdev)) {
+		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
+		return -ENODEV;
+	}
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
+		return -ENODEV;
+	}
+	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
+		return -ENODEV;
+	}
+
+	/* Map IO ports and initialize host accordingly */
+	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
+	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
+	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
+	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
+	iomap[4] = pcim_iomap(pdev, 2, 0);
+
+	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
+		return -ENOMEM;
+
+	ioaddr = &host->ports[0]->ioaddr;
+	ioaddr->cmd_addr = iomap[0];
+	ioaddr->ctl_addr = iomap[1];
+	ioaddr->altstatus_addr = iomap[1];
+	ioaddr->bmdma_addr = iomap[4];
+	ata_sff_std_ports(ioaddr);
+
+	ata_port_desc(host->ports[0],
+		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
+	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
+
+	ioaddr = &host->ports[1]->ioaddr;
+	ioaddr->cmd_addr = iomap[2];
+	ioaddr->ctl_addr = iomap[3];
+	ioaddr->altstatus_addr = iomap[3];
+	ioaddr->bmdma_addr = iomap[4] + 8;
+	ata_sff_std_ports(ioaddr);
+
+	ata_port_desc(host->ports[1],
+		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
+	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
+
+	/* activate the host */
+	pci_set_master(pdev);
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < 2; i++) {
+		static const int irq[] = { 14, 15 };
+		struct ata_port *ap = host->ports[i];
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
+				      ata_bmdma_interrupt, 0, DRV_NAME, host);
+		if (rc)
+			return rc;
+
+		ata_port_desc(ap, "irq %d", irq[i]);
+	}
+
+	return ata_host_register(host, &cs5520_sht);
+}
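+
+/*
+ * Note on the probe above: the CS5510/CS5520 decode the legacy 0x1F0
+ * and 0x170 taskfile ranges and route interrupts on ISA IRQ 14/15, so
+ * the driver maps those ports by hand and requests each channel's IRQ
+ * itself rather than relying on pdev->irq.
+ */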
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ *	cs5520_reinit_one	-	device resume
+ *	@pdev: PCI device
+ *
+ *	Do any reconfiguration work needed by a resume from RAM. We need
+ *	to restore DMA mode support on BIOSen which disabled it
+ */
+
+static int cs5520_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	u8 pcicfg;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	pci_read_config_byte(pdev, 0x60, &pcicfg);
+	if ((pcicfg & 0x40) == 0)
+		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+
+	ata_host_resume(host);
+	return 0;
+}
+
+/**
+ *	cs5520_pci_device_suspend	-	device suspend
+ *	@pdev: PCI device
+ *
+ *	We have to cut and paste bits from the standard method because
+ *	the 5520 is a bit odd and not just a pure ATA device. As a result
+ *	we must not disable it. The needed code is short and this avoids
+ *	chip specific mess in the core code.
+ */
+
+static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	pci_save_state(pdev);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/* For now keep DMA off. We can set it for all but A rev CS5510 once the
+   core ATA code can handle it */
+
+static const struct pci_device_id pata_cs5520[] = {
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+
+	{ },
+};
+
+static struct pci_driver cs5520_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pata_cs5520,
+	.probe 		= cs5520_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= cs5520_pci_device_suspend,
+	.resume		= cs5520_reinit_one,
+#endif
+};
+
+module_pci_driver(cs5520_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_cs5520);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
new file mode 100644
index 0000000..f9ca72e
--- /dev/null
+++ b/drivers/ata/pata_cs5530.c
@@ -0,0 +1,369 @@
+/*
+ * pata-cs5530.c 	- CS5530 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *
+ * based upon cs5530.c by Mark Lord.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_cs5530"
+#define DRV_VERSION	"0.7.4"
+
+static void __iomem *cs5530_port_base(struct ata_port *ap)
+{
+	unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;
+
+	return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
+}
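+
+/*
+ * Editorial note, not in the original driver: a worked example of the
+ * mapping above, assuming (hypothetically) that the BMDMA block lives at
+ * I/O address 0xfc00. Masking off the low nibble and adding
+ * 0x20 + 0x10 * port_no yields 0xfc20 for port 0 and 0xfc30 for port 1,
+ * i.e. each port owns a 0x10-byte window of timing registers above the
+ * shared BMDMA registers.
+ */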
+
+/**
+ *	cs5530_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the CS5530
+ *	chips.
+ */
+
+static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const unsigned int cs5530_pio_timings[2][5] = {
+		{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
+		{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
+	};
+	void __iomem *base = cs5530_port_base(ap);
+	u32 tuning;
+	int format;
+
+	/* Find out which table to use */
+	tuning = ioread32(base + 0x04);
+	format = (tuning & 0x80000000UL) ? 1 : 0;
+
+	/* Now load the right timing register */
+	if (adev->devno)
+		base += 0x08;
+
+	iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
+}
+
+/**
+ *	cs5530_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	We cannot mix MWDMA and UDMA without reloading the timings on each
+ *	switch between master and slave. We track the last DMA setup in
+ *	order to minimise reloads.
+ */
+
+static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	void __iomem *base = cs5530_port_base(ap);
+	u32 tuning, timing = 0;
+	u8 reg;
+
+	/* Find out which table to use */
+	tuning = ioread32(base + 0x04);
+
+	switch (adev->dma_mode) {
+	case XFER_UDMA_0:
+		timing = 0x00921250;
+		break;
+	case XFER_UDMA_1:
+		timing = 0x00911140;
+		break;
+	case XFER_UDMA_2:
+		timing = 0x00911030;
+		break;
+	case XFER_MW_DMA_0:
+		timing = 0x00077771;
+		break;
+	case XFER_MW_DMA_1:
+		timing = 0x00012121;
+		break;
+	case XFER_MW_DMA_2:
+		timing = 0x00002020;
+		break;
+	default:
+		BUG();
+	}
+	/* Merge in the PIO format bit */
+	timing |= (tuning & 0x80000000UL);
+	if (adev->devno == 0) /* Master */
+		iowrite32(timing, base + 0x04);
+	else {
+		if (timing & 0x00100000)
+			tuning |= 0x00100000;	/* UDMA for both */
+		else
+			tuning &= ~0x00100000;	/* MWDMA for both */
+		iowrite32(tuning, base + 0x04);
+		iowrite32(timing, base + 0x0C);
+	}
+
+	/* Set the DMA capable bit in the BMDMA area */
+	reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	reg |= (1 << (5 + adev->devno));
+	iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+	/* Remember the last DMA setup we did */
+
+	ap->private_data = adev;
+}
+
+/**
+ *	cs5530_qc_issue		-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.  Specifically we have a problem that there is only
+ *	one MWDMA/UDMA bit.
+ */
+
+static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_device *prev = ap->private_data;
+
+	/* See if the DMA settings could be wrong */
+	if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
+		/* Maybe, but do the channels match MWDMA/UDMA ? */
+		if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+		    (ata_using_udma(prev) && !ata_using_udma(adev)))
+			/* Switch the mode bits */
+			cs5530_set_dmamode(ap, adev);
+	}
+
+	return ata_bmdma_qc_issue(qc);
+}
+
+static struct scsi_host_template cs5530_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations cs5530_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.qc_prep 	= ata_bmdma_dumb_qc_prep,
+	.qc_issue	= cs5530_qc_issue,
+
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= cs5530_set_piomode,
+	.set_dmamode	= cs5530_set_dmamode,
+};
+
+static const struct dmi_system_id palmax_dmi_table[] = {
+	{
+		.ident = "Palmax PD1100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
+		},
+	},
+	{ }
+};
+
+static int cs5530_is_palmax(void)
+{
+	if (dmi_check_system(palmax_dmi_table)) {
+		printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
+		return 1;
+	}
+	return 0;
+}
+
+
+/**
+ *	cs5530_init_chip	-	Chipset init
+ *
+ *	Perform the chip initialisation work that is shared between both
+ *	setup and resume paths
+ */
+
+static int cs5530_init_chip(void)
+{
+	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
+
+	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
+		switch (dev->device) {
+		case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
+			master_0 = pci_dev_get(dev);
+			break;
+		case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+			cs5530_0 = pci_dev_get(dev);
+			break;
+		}
+	}
+	if (!master_0) {
+		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
+		goto fail_put;
+	}
+	if (!cs5530_0) {
+		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
+		goto fail_put;
+	}
+
+	pci_set_master(cs5530_0);
+	pci_try_set_mwi(cs5530_0);
+
+	/*
+	 * Set PCI CacheLineSize to 16-bytes:
+	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
+	 *
+	 * Note: This value is constant because the 5530 is only a Geode companion
+	 */
+
+	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
+
+	/*
+	 * Disable trapping of UDMA register accesses (Win98 hack):
+	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
+	 */
+
+	pci_write_config_word(cs5530_0, 0xd0, 0x5006);
+
+	/*
+	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
+	 * The other settings are what is necessary to get the register
+	 * into a sane state for IDE DMA operation.
+	 */
+
+	pci_write_config_byte(master_0, 0x40, 0x1e);
+
+	/*
+	 * Set max PCI burst size (16-bytes seems to work best):
+	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
+	 *	all others: clear bit-1 at 0x41, and do:
+	 *	  128bytes: OR 0x00 at 0x41
+	 *	  256bytes: OR 0x04 at 0x41
+	 *	  512bytes: OR 0x08 at 0x41
+	 *	 1024bytes: OR 0x0c at 0x41
+	 */
+
+	pci_write_config_byte(master_0, 0x41, 0x14);
+
+	/*
+	 * These settings are necessary to get the chip
+	 * into a sane state for IDE DMA operation.
+	 */
+
+	pci_write_config_byte(master_0, 0x42, 0x00);
+	pci_write_config_byte(master_0, 0x43, 0xc1);
+
+	pci_dev_put(master_0);
+	pci_dev_put(cs5530_0);
+	return 0;
+fail_put:
+	pci_dev_put(master_0);
+	pci_dev_put(cs5530_0);
+	return -ENODEV;
+}
+
+/**
+ *	cs5530_init_one		-	Initialise a CS5530
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Install a driver for the newly found CS5530 companion chip. Most of
+ *	this is just housekeeping. We have to set the chip up correctly and
+ *	turn off various bits of emulation magic.
+ */
+
+static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &cs5530_port_ops
+	};
+	/* The docking connector doesn't do UDMA, and apparently not MWDMA either */
+	static const struct ata_port_info info_palmax_secondary = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &cs5530_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* Chip initialisation */
+	if (cs5530_init_chip())
+		return -ENODEV;
+
+	if (cs5530_is_palmax())
+		ppi[1] = &info_palmax_secondary;
+
+	/* Now kick off ATA set up */
+	return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cs5530_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	/* If we fail on resume we are doomed */
+	if (cs5530_init_chip())
+		return -EIO;
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct pci_device_id cs5530[] = {
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
+
+	{ },
+};
+
+static struct pci_driver cs5530_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cs5530,
+	.probe 		= cs5530_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cs5530_reinit_one,
+#endif
+};
+
+module_pci_driver(cs5530_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5530);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
new file mode 100644
index 0000000..97584e8
--- /dev/null
+++ b/drivers/ata/pata_cs5535.c
@@ -0,0 +1,215 @@
+/*
+ * pata_cs5535.c	- CS5535 PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
+ * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
+ * and Alexander Kiausch <alex.kiausch@t-online.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ * TODO
+ *	Review errata to see if serializing is necessary
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/msr.h>
+
+#define DRV_NAME	"pata_cs5535"
+#define DRV_VERSION	"0.2.12"
+
+/*
+ *	The Geode (aka Athlon GX now) uses an internal MSR-based
+ *	bus system for control. Demented, but there you go.
+ */
+
+#define MSR_ATAC_BASE    	0x51300000
+#define ATAC_GLD_MSR_CAP 	(MSR_ATAC_BASE+0)
+#define ATAC_GLD_MSR_CONFIG    (MSR_ATAC_BASE+0x01)
+#define ATAC_GLD_MSR_SMI       (MSR_ATAC_BASE+0x02)
+#define ATAC_GLD_MSR_ERROR     (MSR_ATAC_BASE+0x03)
+#define ATAC_GLD_MSR_PM        (MSR_ATAC_BASE+0x04)
+#define ATAC_GLD_MSR_DIAG      (MSR_ATAC_BASE+0x05)
+#define ATAC_IO_BAR            (MSR_ATAC_BASE+0x08)
+#define ATAC_RESET             (MSR_ATAC_BASE+0x10)
+#define ATAC_CH0D0_PIO         (MSR_ATAC_BASE+0x20)
+#define ATAC_CH0D0_DMA         (MSR_ATAC_BASE+0x21)
+#define ATAC_CH0D1_PIO         (MSR_ATAC_BASE+0x22)
+#define ATAC_CH0D1_DMA         (MSR_ATAC_BASE+0x23)
+#define ATAC_PCI_ABRTERR       (MSR_ATAC_BASE+0x24)
+
+#define ATAC_BM0_CMD_PRIM      0x00
+#define ATAC_BM0_STS_PRIM      0x02
+#define ATAC_BM0_PRD           0x04
+
+#define CS5535_CABLE_DETECT    0x48
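+
+/*
+ * Editorial note, not in the original driver: the per-device timing MSRs
+ * are laid out as master/slave pairs, so the register for a given device
+ * is addressed as
+ *
+ *	ATAC_CH0D0_PIO + 2 * devno	(0x51300020 / 0x51300022)
+ *	ATAC_CH0D0_DMA + 2 * devno	(0x51300021 / 0x51300023)
+ *
+ * which is the addressing scheme the set_piomode/set_dmamode methods
+ * below rely on.
+ */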
+
+/**
+ *	cs5535_cable_detect	-	detect cable type
+ *	@ap: Port to detect on
+ *
+ *	Perform cable detection for ATA66 capable cable. Return a libata
+ *	cable type.
+ */
+
+static int cs5535_cable_detect(struct ata_port *ap)
+{
+	u8 cable;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
+	if (cable & 1)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	cs5535_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. The CS5535 is pretty clean about all this
+ */
+
+static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 pio_timings[5] = {
+		0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131
+	};
+	static const u16 pio_cmd_timings[5] = {
+		0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
+	};
+	u32 reg, dummy;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	int mode = adev->pio_mode - XFER_PIO_0;
+	int cmdmode = mode;
+
+	/* Command timing has to be for the lowest of the pair of devices */
+	if (pair) {
+		int pairmode = pair->pio_mode - XFER_PIO_0;
+		cmdmode = min(mode, pairmode);
+		/* Write the other drive timing register if it changed */
+		if (cmdmode < pairmode)
+			wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
+				pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
+	}
+	/* Write the drive timing register */
+	wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
+		pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
+
+	/* Set the PIO "format 1" bit in the DMA timing register */
+	rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+	wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
+}
+
+/**
+ *	cs5535_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ */
+
+static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 udma_timings[5] = {
+		0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
+	};
+	static const u32 mwdma_timings[3] = {
+		0x7F0FFFF3, 0x7F035352, 0x7F024241
+	};
+	u32 reg, dummy;
+	int mode = adev->dma_mode;
+
+	rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+	reg &= 0x80000000UL;
+	if (mode >= XFER_UDMA_0)
+		reg |= udma_timings[mode - XFER_UDMA_0];
+	else
+		reg |= mwdma_timings[mode - XFER_MW_DMA_0];
+	wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
+}
+
+static struct scsi_host_template cs5535_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cs5535_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= cs5535_cable_detect,
+	.set_piomode	= cs5535_set_piomode,
+	.set_dmamode	= cs5535_set_dmamode,
+};
+
+/**
+ *	cs5535_init_one		-	Initialise a CS5535
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Install a driver for the newly found CS5535 companion chip. Most of
+ *	this is just housekeeping. We have to set the chip up correctly and
+ *	turn off various bits of emulation magic.
+ */
+
+static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &cs5535_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
+}
+
+static const struct pci_device_id cs5535[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
+
+	{ },
+};
+
+static struct pci_driver cs5535_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= cs5535,
+	.probe 		= cs5535_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(cs5535_pci_driver);
+
+MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5535);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
new file mode 100644
index 0000000..dc12552
--- /dev/null
+++ b/drivers/ata/pata_cs5536.c
@@ -0,0 +1,313 @@
+/*
+ * pata_cs5536.c	- CS5536 PATA for new ATA layer
+ *			  (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ *			  (C) 2011 Bartlomiej Zolnierkiewicz
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307	 USA
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ *
+ * The IDE timing registers for the CS5536 live in the Geode Machine
+ * Specific Register file and not PCI config space.  Most BIOSes
+ * virtualize the PCI registers so the chip looks like a standard IDE
+ * controller.	Unfortunately not all implementations get this right.
+ * In particular some have problems with unaligned accesses to the
+ * virtualized PCI registers.  This driver always does full dword
+ * writes to work around the issue.  Also, in case of a bad BIOS this
+ * driver can be loaded with the "msr=1" parameter which forces using
+ * the Machine Specific Registers to configure the device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+#include <linux/dmi.h>
+
+#ifdef CONFIG_X86_32
+#include <asm/msr.h>
+static int use_msr;
+module_param_named(msr, use_msr, int, 0644);
+MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
+#else
+#undef rdmsr	/* avoid accidental MSR usage on e.g. x86-64 */
+#undef wrmsr
+#define rdmsr(x, y, z) do { } while (0)
+#define wrmsr(x, y, z) do { } while (0)
+#define use_msr 0
+#endif
+
+#define DRV_NAME	"pata_cs5536"
+#define DRV_VERSION	"0.0.8"
+
+enum {
+	MSR_IDE_CFG		= 0x51300010,
+	PCI_IDE_CFG		= 0x40,
+
+	CFG			= 0,
+	DTC			= 2,
+	CAST			= 3,
+	ETC			= 4,
+
+	IDE_CFG_CHANEN		= (1 << 1),
+	IDE_CFG_CABLE		= (1 << 17) | (1 << 16),
+
+	IDE_D0_SHIFT		= 24,
+	IDE_D1_SHIFT		= 16,
+	IDE_DRV_MASK		= 0xff,
+
+	IDE_CAST_D0_SHIFT	= 6,
+	IDE_CAST_D1_SHIFT	= 4,
+	IDE_CAST_DRV_MASK	= 0x3,
+	IDE_CAST_CMD_MASK	= 0xff,
+	IDE_CAST_CMD_SHIFT	= 24,
+
+	IDE_ETC_UDMA_MASK	= 0xc0,
+};
+
+/* Some Bachmann OT200 devices have non-working UDMA support due to a
+ * missing resistor.
+ */
+static const struct dmi_system_id udma_quirk_dmi_table[] = {
+	{
+		.ident = "Bachmann electronic OT200",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OT200"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1")
+		},
+	},
+	{ }
+};
+
+static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+{
+	if (unlikely(use_msr)) {
+		u32 dummy __maybe_unused;
+
+		rdmsr(MSR_IDE_CFG + reg, *val, dummy);
+		return 0;
+	}
+
+	return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
+
+static int cs5536_write(struct pci_dev *pdev, int reg, int val)
+{
+	if (unlikely(use_msr)) {
+		wrmsr(MSR_IDE_CFG + reg, val, 0);
+		return 0;
+	}
+
+	return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
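+
+/*
+ * Editorial note, not in the original driver: the register index passed
+ * to cs5536_read()/cs5536_write() selects a 32-bit word, so the same
+ * index works for both access paths:
+ *
+ *	MSR space: MSR_IDE_CFG + reg	 (e.g. DTC -> MSR 0x51300012)
+ *	PCI space: PCI_IDE_CFG + reg * 4 (e.g. DTC -> config offset 0x48)
+ *
+ * Using only full dword accesses here is the workaround for the broken
+ * BIOS virtualisation described in the header comment.
+ */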
+
+static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
+{
+	struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
+	int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+	u32 dtc;
+
+	cs5536_read(pdev, DTC, &dtc);
+	dtc &= ~(IDE_DRV_MASK << dshift);
+	dtc |= tim << dshift;
+	cs5536_write(pdev, DTC, dtc);
+}
+
+/**
+ *	cs5536_cable_detect	-	detect cable type
+ *	@ap: Port to detect on
+ *
+ *	Perform cable detection for ATA66 capable cable.
+ *
+ *	Returns a cable type.
+ */
+
+static int cs5536_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 cfg;
+
+	cs5536_read(pdev, CFG, &cfg);
+
+	if (cfg & IDE_CFG_CABLE)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	cs5536_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ */
+
+static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 drv_timings[5] = {
+		0x98, 0x55, 0x32, 0x21, 0x20,
+	};
+
+	static const u8 addr_timings[5] = {
+		0x2, 0x1, 0x0, 0x0, 0x0,
+	};
+
+	static const u8 cmd_timings[5] = {
+		0x99, 0x92, 0x90, 0x22, 0x20,
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *pair = ata_dev_pair(adev);
+	int mode = adev->pio_mode - XFER_PIO_0;
+	int cmdmode = mode;
+	int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
+	u32 cast;
+
+	if (pair)
+		cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
+
+	cs5536_program_dtc(adev, drv_timings[mode]);
+
+	cs5536_read(pdev, CAST, &cast);
+
+	cast &= ~(IDE_CAST_DRV_MASK << cshift);
+	cast |= addr_timings[mode] << cshift;
+
+	cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
+	cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
+
+	cs5536_write(pdev, CAST, cast);
+}
+
+/**
+ *	cs5536_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ */
+
+static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 udma_timings[6] = {
+		0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
+	};
+
+	static const u8 mwdma_timings[3] = {
+		0x67, 0x21, 0x20,
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 etc;
+	int mode = adev->dma_mode;
+	int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+
+	cs5536_read(pdev, ETC, &etc);
+
+	if (mode >= XFER_UDMA_0) {
+		etc &= ~(IDE_DRV_MASK << dshift);
+		etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
+	} else { /* MWDMA */
+		etc &= ~(IDE_ETC_UDMA_MASK << dshift);
+		cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
+	}
+
+	cs5536_write(pdev, ETC, etc);
+}
+
+static struct scsi_host_template cs5536_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cs5536_port_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.cable_detect		= cs5536_cable_detect,
+	.set_piomode		= cs5536_set_piomode,
+	.set_dmamode		= cs5536_set_dmamode,
+};
+
+/**
+ *	cs5536_init_one
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ */
+
+static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &cs5536_port_ops,
+	};
+
+	static const struct ata_port_info no_udma_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &cs5536_port_ops,
+	};
+
+
+	const struct ata_port_info *ppi[2];
+	u32 cfg;
+
+	if (dmi_check_system(udma_quirk_dmi_table))
+		ppi[0] = &no_udma_info;
+	else
+		ppi[0] = &info;
+
+	ppi[1] = &ata_dummy_port_info;
+
+	if (use_msr)
+		printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
+
+	cs5536_read(dev, CFG, &cfg);
+
+	if ((cfg & IDE_CFG_CHANEN) == 0) {
+		printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
+		return -ENODEV;
+	}
+
+	return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
+}
+
+static const struct pci_device_id cs5536[] = {
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE), },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
+	{ },
+};
+
+static struct pci_driver cs5536_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= cs5536,
+	.probe		= cs5536_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(cs5536_pci_driver);
+
+MODULE_AUTHOR("Martin K. Petersen");
+MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5536);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
new file mode 100644
index 0000000..7930184
--- /dev/null
+++ b/drivers/ata/pata_cypress.c
@@ -0,0 +1,166 @@
+/*
+ * pata_cypress.c 	- Cypress PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *			  Alan Cox
+ *
+ * Based heavily on
+ * linux/drivers/ide/pci/cy82c693.c		Version 0.40	Sep. 10, 2002
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cypress"
+#define DRV_VERSION "0.1.5"
+
+/* here are the offset definitions for the registers */
+
+enum {
+	CY82_IDE_CMDREG		= 0x04,
+	CY82_IDE_ADDRSETUP	= 0x48,
+	CY82_IDE_MASTER_IOR	= 0x4C,
+	CY82_IDE_MASTER_IOW	= 0x4D,
+	CY82_IDE_SLAVE_IOR	= 0x4E,
+	CY82_IDE_SLAVE_IOW	= 0x4F,
+	CY82_IDE_MASTER_8BIT	= 0x50,
+	CY82_IDE_SLAVE_8BIT	= 0x51,
+
+	CY82_INDEX_PORT		= 0x22,
+	CY82_DATA_PORT		= 0x23,
+
+	CY82_INDEX_CTRLREG1	= 0x01,
+	CY82_INDEX_CHANNEL0	= 0x30,
+	CY82_INDEX_CHANNEL1	= 0x31,
+	CY82_INDEX_TIMEOUT	= 0x32
+};
+
+/**
+ *	cy82c693_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup.
+ */
+
+static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
+	const unsigned long T = 1000000 / 33;
+	short time_16, time_8;
+	u32 addr;
+
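+	/*
+	 * Editorial note, not in the original driver: T is the PCI clock
+	 * period expressed in picoseconds (one 33MHz clock is ~30303ps,
+	 * i.e. ~30ns); ata_timing_compute() quantises the nanosecond
+	 * timing targets into whole clocks of this period.
+	 */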
+	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
+		printk(KERN_ERR DRV_NAME ": mome computation failed.\n");
+		return;
+	}
+
+	time_16 = clamp_val(t.recover - 1, 0, 15) |
+		  (clamp_val(t.active - 1, 0, 15) << 4);
+	time_8 = clamp_val(t.act8b - 1, 0, 15) |
+		 (clamp_val(t.rec8b - 1, 0, 15) << 4);
+
+	if (adev->devno == 0) {
+		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+		addr &= ~0x0F;	/* Mask bits */
+		addr |= clamp_val(t.setup - 1, 0, 15);
+
+		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
+	} else {
+		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+		addr &= ~0xF0;	/* Mask bits */
+		addr |= (clamp_val(t.setup - 1, 0, 15) << 4);
+
+		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
+	}
+}
+
+/**
+ *	cy82c693_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup.
+ */
+
+static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
+
+	/* Be afraid, be very afraid. Magic registers in low I/O space */
+	outb(reg, CY82_INDEX_PORT);
+	outb(adev->dma_mode - XFER_MW_DMA_0, CY82_DATA_PORT);
+
+	/* 0x50 gives the best behaviour on the Alphas using this chip */
+	outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
+	outb(0x50, CY82_DATA_PORT);
+}
+
+static struct scsi_host_template cy82c693_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cy82c693_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= cy82c693_set_piomode,
+	.set_dmamode	= cy82c693_set_dmamode,
+};
+
+static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &cy82c693_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	/* Devfn 1 is the ATA primary. The secondary is magic and on devfn2.
+	   For the moment we don't handle the secondary. FIXME */
+
+	if (PCI_FUNC(pdev->devfn) != 1)
+		return -ENODEV;
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
+}
+
+static const struct pci_device_id cy82c693[] = {
+	{ PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), },
+
+	{ },
+};
+
+static struct pci_driver cy82c693_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cy82c693,
+	.probe 		= cy82c693_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(cy82c693_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cy82c693);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
new file mode 100644
index 0000000..4a57a6f
--- /dev/null
+++ b/drivers/ata/pata_efar.c
@@ -0,0 +1,303 @@
+/*
+ *    pata_efar.c - EFAR PIIX clone controller driver
+ *
+ *	(C) 2005 Red Hat
+ *	(C) 2009-2010 Bartlomiej Zolnierkiewicz
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
+ *    Intel ICH controllers, the EFAR widened the UDMA mode register bits
+ *    and doesn't require the funky clock selection.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_efar"
+#define DRV_VERSION	"0.4.5"
+
+/**
+ *	efar_pre_reset	-	Enable bits
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Check the port-enable bits in PCI config space before resetting,
+ *	since a disabled port must not be probed
+ */
+
+static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits efar_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	efar_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection for the EFAR ATA interface. This is
+ *	different to the PIIX arrangement
+ */
+
+static int efar_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	pci_read_config_byte(pdev, 0x47, &tmp);
+	if (tmp & (2 >> ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static DEFINE_SPINLOCK(efar_lock);
+
+/**
+ *	efar_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+	unsigned long flags;
+	u16 master_data;
+	u8 udma_enable;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. The EFAR is a clone so very similar
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
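+	/*
+	 * Editorial note, not in the original driver: the two columns are
+	 * the IORDY sample point (ISP) and recovery time (RTC) in PCI
+	 * clocks; they are packed into the master timing word below as
+	 * ISP << 12 and RTC << 8 for device 0, so e.g. PIO4 uses { 2, 3 }.
+	 */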
+	if (pio > 1)
+		control |= 1;	/* TIME */
+	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
+		control |= 2;	/* IE */
+	/* Intel specifies that the prefetch/posting is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE */
+
+	spin_lock_irqsave(&efar_lock, flags);
+
+	pci_read_config_word(dev, master_port, &master_data);
+
+	/* Set PPE, IE, and TIME as appropriate */
+	if (adev->devno == 0) {
+		master_data &= 0xCCF0;
+		master_data |= control;
+		master_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	} else {
+		int shift = 4 * ap->port_no;
+		u8 slave_data;
+
+		master_data &= 0xFF0F;
+		master_data |= (control << 4);
+
+		/* Slave timing in separate register */
+		pci_read_config_byte(dev, 0x44, &slave_data);
+		slave_data &= ap->port_no ? 0x0F : 0xF0;
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
+		pci_write_config_byte(dev, 0x44, slave_data);
+	}
+
+	master_data |= 0x4000;	/* Ensure SITRE is set */
+	pci_write_config_word(dev, master_port, master_data);
+
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+	pci_write_config_byte(dev, 0x48, udma_enable);
+	spin_unlock_irqrestore(&efar_lock, flags);
+}
+
+/**
+ *	efar_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u8 master_port		= ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	unsigned long flags;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	spin_lock_irqsave(&efar_lock, flags);
+
+	pci_read_config_word(dev, master_port, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma	= adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+
+		udma_enable |= (1 << devid);
+
+		/* Load the UDMA mode number */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(7 << (4 * devid));
+		udma_timing |= udma << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
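+
+		/*
+		 * Editorial note, not in the original driver: the 3-bit
+		 * field masked above (7 << (4 * devid)) is the "widened"
+		 * UDMA mode register mentioned in the header comment; the
+		 * mode number is written directly, with no separate clock
+		 * selection as on the Intel ICH.
+		 */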
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (adev->devno) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= ap->port_no ? 0x0F : 0xF0;
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, master_port, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+	spin_unlock_irqrestore(&efar_lock, flags);
+}
+
+static struct scsi_host_template efar_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations efar_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= efar_cable_detect,
+	.set_piomode		= efar_set_piomode,
+	.set_dmamode		= efar_set_dmamode,
+	.prereset		= efar_pre_reset,
+};
+
+
+/**
+ *	efar_init_one - Register EFAR ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in efar_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.udma_mask 	= ATA_UDMA4,
+		.port_ops	= &efar_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, &info };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
+				      ATA_HOST_PARALLEL_SCAN);
+}
+
+static const struct pci_device_id efar_pci_tbl[] = {
+	{ PCI_VDEVICE(EFAR, 0x9130), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver efar_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= efar_pci_tbl,
+	.probe			= efar_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(efar_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
new file mode 100644
index 0000000..0a55019
--- /dev/null
+++ b/drivers/ata/pata_ep93xx.c
@@ -0,0 +1,1035 @@
+/*
+ * EP93XX PATA controller driver.
+ *
+ * Copyright (c) 2012, Metasoft s.c.
+ *	Rafal Prylowski <prylowski@metasoft.pl>
+ *
+ * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX
+ * PATA driver by Lennert Buytenhek and Alessandro Zummo.
+ * Read/Write timings, resource management and other improvements
+ * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz.
+ * DMA engine support based on spi-ep93xx.c by Mika Westerberg.
+ *
+ * Original copyrights:
+ *
+ * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs
+ * PATA host controller driver.
+ *
+ * Copyright (c) 2009, Bartlomiej Zolnierkiewicz
+ *
+ * Heavily based on the ep93xx-ide.c driver:
+ *
+ * Copyright (c) 2009, Joao Ramos <joao.ramos@inov.pt>
+ *		      INESC Inovacao (INOV)
+ *
+ * EP93XX PATA controller driver.
+ * Copyright (C) 2007 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * An ATA driver for the Cirrus Logic EP93xx PATA controller.
+ *
+ * Based on an earlier version by Alessandro Zummo, which is:
+ *   Copyright (C) 2006 Tower Technologies
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/ktime.h>
+
+#include <linux/platform_data/dma-ep93xx.h>
+#include <mach/platform.h>
+
+#define DRV_NAME	"ep93xx-ide"
+#define DRV_VERSION	"1.0"
+
+enum {
+	/* IDE Control Register */
+	IDECTRL				= 0x00,
+	IDECTRL_CS0N			= (1 << 0),
+	IDECTRL_CS1N			= (1 << 1),
+	IDECTRL_DIORN			= (1 << 5),
+	IDECTRL_DIOWN			= (1 << 6),
+	IDECTRL_INTRQ			= (1 << 9),
+	IDECTRL_IORDY			= (1 << 10),
+	/*
+	 * the device IDE register to be accessed is selected through
+	 * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
+	 *   b4   b3   b2    b1     b0
+	 *   A2   A1   A0   CS1N   CS0N
+	 * the values filled in below allow each entry to be ORed directly
+	 * into the IDECTRL register, hence giving the A[2:0] and
+	 * CS1N/CS0N values for each IDE register.
+	 * The values correspond to the transformation:
+	 *   ((real IDE address) << 2) | CS1N value << 1 | CS0N value
+	 */
+	IDECTRL_ADDR_CMD		= 0 + 2, /* CS1 */
+	IDECTRL_ADDR_DATA		= (ATA_REG_DATA << 2) + 2,
+	IDECTRL_ADDR_ERROR		= (ATA_REG_ERR << 2) + 2,
+	IDECTRL_ADDR_FEATURE		= (ATA_REG_FEATURE << 2) + 2,
+	IDECTRL_ADDR_NSECT		= (ATA_REG_NSECT << 2) + 2,
+	IDECTRL_ADDR_LBAL		= (ATA_REG_LBAL << 2) + 2,
+	IDECTRL_ADDR_LBAM		= (ATA_REG_LBAM << 2) + 2,
+	IDECTRL_ADDR_LBAH		= (ATA_REG_LBAH << 2) + 2,
+	IDECTRL_ADDR_DEVICE		= (ATA_REG_DEVICE << 2) + 2,
+	IDECTRL_ADDR_STATUS		= (ATA_REG_STATUS << 2) + 2,
+	IDECTRL_ADDR_COMMAND		= (ATA_REG_CMD << 2) + 2,
+	IDECTRL_ADDR_ALTSTATUS		= (0x06 << 2) + 1, /* CS0 */
+	IDECTRL_ADDR_CTL		= (0x06 << 2) + 1, /* CS0 */
+
+	/* IDE Configuration Register */
+	IDECFG				= 0x04,
+	IDECFG_IDEEN			= (1 << 0),
+	IDECFG_PIO			= (1 << 1),
+	IDECFG_MDMA			= (1 << 2),
+	IDECFG_UDMA			= (1 << 3),
+	IDECFG_MODE_SHIFT		= 4,
+	IDECFG_MODE_MASK		= (0xf << 4),
+	IDECFG_WST_SHIFT		= 8,
+	IDECFG_WST_MASK			= (0x3 << 8),
+
+	/* MDMA Operation Register */
+	IDEMDMAOP			= 0x08,
+
+	/* UDMA Operation Register */
+	IDEUDMAOP			= 0x0c,
+	IDEUDMAOP_UEN			= (1 << 0),
+	IDEUDMAOP_RWOP			= (1 << 1),
+
+	/* PIO/MDMA/UDMA Data Registers */
+	IDEDATAOUT			= 0x10,
+	IDEDATAIN			= 0x14,
+	IDEMDMADATAOUT			= 0x18,
+	IDEMDMADATAIN			= 0x1c,
+	IDEUDMADATAOUT			= 0x20,
+	IDEUDMADATAIN			= 0x24,
+
+	/* UDMA Status Register */
+	IDEUDMASTS			= 0x28,
+	IDEUDMASTS_DMAIDE		= (1 << 16),
+	IDEUDMASTS_INTIDE		= (1 << 17),
+	IDEUDMASTS_SBUSY		= (1 << 18),
+	IDEUDMASTS_NDO			= (1 << 24),
+	IDEUDMASTS_NDI			= (1 << 25),
+	IDEUDMASTS_N4X			= (1 << 26),
+
+	/* UDMA Debug Status Register */
+	IDEUDMADEBUG			= 0x2c,
+};
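+
+/*
+ * Editorial note, not in the original driver: decoding one entry of the
+ * enum above, IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2 =
+ * (0x02 << 2) + 2 = 0x0a, i.e. bits b4..b2 give A[2:0] = 010 and bits
+ * b1..b0 give the CS1N/CS0N values, matching the layout described in
+ * the comment inside the enum.
+ */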
+
+struct ep93xx_pata_data {
+	const struct platform_device *pdev;
+	void __iomem *ide_base;
+	struct ata_timing t;
+	bool iordy;
+
+	unsigned long udma_in_phys;
+	unsigned long udma_out_phys;
+
+	struct dma_chan *dma_rx_channel;
+	struct ep93xx_dma_data dma_rx_data;
+	struct dma_chan *dma_tx_channel;
+	struct ep93xx_dma_data dma_tx_data;
+};
+
+static void ep93xx_pata_clear_regs(void __iomem *base)
+{
+	writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN |
+		IDECTRL_DIOWN, base + IDECTRL);
+
+	writel(0, base + IDECFG);
+	writel(0, base + IDEMDMAOP);
+	writel(0, base + IDEUDMAOP);
+	writel(0, base + IDEDATAOUT);
+	writel(0, base + IDEDATAIN);
+	writel(0, base + IDEMDMADATAOUT);
+	writel(0, base + IDEMDMADATAIN);
+	writel(0, base + IDEUDMADATAOUT);
+	writel(0, base + IDEUDMADATAIN);
+	writel(0, base + IDEUDMADEBUG);
+}
+
+static bool ep93xx_pata_check_iordy(void __iomem *base)
+{
+	return !!(readl(base + IDECTRL) & IDECTRL_IORDY);
+}
+
+/*
+ * According to EP93xx User's Guide, WST field of IDECFG specifies number
+ * of HCLK cycles to hold the data bus after a PIO write operation.
+ * It should be programmed to guarantee following delays:
+ *
+ * PIO Mode   [ns]
+ * 0          30
+ * 1          20
+ * 2          15
+ * 3          10
+ * 4          5
+ *
+ * Maximum possible value for HCLK is 100MHz.
+ */
+static int ep93xx_pata_get_wst(int pio_mode)
+{
+	int val;
+
+	if (pio_mode == 0)
+		val = 3;
+	else if (pio_mode < 3)
+		val = 2;
+	else
+		val = 1;
+
+	return val << IDECFG_WST_SHIFT;
+}
+
+static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode)
+{
+	writel(IDECFG_IDEEN | IDECFG_PIO |
+		ep93xx_pata_get_wst(pio_mode) |
+		(pio_mode << IDECFG_MODE_SHIFT), base + IDECFG);
+}
+
+/*
+ * Based on delay loop found in mach-pxa/mp900.c.
+ *
+ * Single iteration should take 5 cpu cycles. This is 25ns assuming the
+ * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings
+ * than e.g. 20ns.
+ */
+static void ep93xx_pata_delay(unsigned long count)
+{
+	__asm__ volatile (
+		"0:\n"
+		"mov r0, r0\n"
+		"subs %0, %1, #1\n"
+		"bge 0b\n"
+		: "=r" (count)
+		: "0" (count)
+	);
+}
+
+static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
+						unsigned long t2)
+{
+	/*
+	 * According to ATA specification, IORDY pin can be first sampled
+	 * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
+	 * width is tB = 1250ns.
+	 *
+	 * We are already t2 delay loop iterations after activation of
+	 * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
+	 * delay loop iterations.
+	 */
+	unsigned long start = (1250 + 35) / 25 - t2;
+	unsigned long counter = start;
+
+	while (!ep93xx_pata_check_iordy(base) && counter--)
+		ep93xx_pata_delay(1);
+	return start - counter;
+}
+
+/* common part at start of ep93xx_pata_read/write() */
+static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr,
+				 unsigned long t1)
+{
+	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
+	ep93xx_pata_delay(t1);
+}
+
+/* common part at end of ep93xx_pata_read/write() */
+static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
+			       bool iordy, unsigned long t0, unsigned long t2,
+			       unsigned long t2i)
+{
+	ep93xx_pata_delay(t2);
+	/* lengthen t2 if needed */
+	if (iordy)
+		t2 += ep93xx_pata_wait_for_iordy(base, t2);
+	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
+	if (t0 > t2 && t0 - t2 > t2i)
+		ep93xx_pata_delay(t0 - t2);
+	else
+		ep93xx_pata_delay(t2i);
+}
+
+static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
+			    unsigned long addr,
+			    bool reg)
+{
+	void __iomem *base = drv_data->ide_base;
+	const struct ata_timing *t = &drv_data->t;
+	unsigned long t0 = reg ? t->cyc8b : t->cycle;
+	unsigned long t2 = reg ? t->act8b : t->active;
+	unsigned long t2i = reg ? t->rec8b : t->recover;
+
+	ep93xx_pata_rw_begin(base, addr, t->setup);
+	writel(IDECTRL_DIOWN | addr, base + IDECTRL);
+	/*
+	 * The IDEDATAIN register is loaded from the DD pins at the positive
+	 * edge of the DIORN signal. (EP93xx UG p27-14)
+	 */
+	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
+	return readl(base + IDEDATAIN);
+}
+
+/* IDE register read */
+static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data,
+				unsigned long addr)
+{
+	return ep93xx_pata_read(drv_data, addr, true);
+}
+
+/* PIO data read */
+static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data,
+				 unsigned long addr)
+{
+	return ep93xx_pata_read(drv_data, addr, false);
+}
+
+static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
+			      u16 value, unsigned long addr,
+			      bool reg)
+{
+	void __iomem *base = drv_data->ide_base;
+	const struct ata_timing *t = &drv_data->t;
+	unsigned long t0 = reg ? t->cyc8b : t->cycle;
+	unsigned long t2 = reg ? t->act8b : t->active;
+	unsigned long t2i = reg ? t->rec8b : t->recover;
+
+	ep93xx_pata_rw_begin(base, addr, t->setup);
+	/*
+	 * Value from IDEDATAOUT register is driven onto the DD pins when
+	 * DIOWN is low. (EP93xx UG p27-13)
+	 */
+	writel(value, base + IDEDATAOUT);
+	writel(IDECTRL_DIORN | addr, base + IDECTRL);
+	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
+}
+
+/* IDE register write */
+static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data,
+				  u16 value, unsigned long addr)
+{
+	ep93xx_pata_write(drv_data, value, addr, true);
+}
+
+/* PIO data write */
+static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data,
+				   u16 value, unsigned long addr)
+{
+	ep93xx_pata_write(drv_data, value, addr, false);
+}
+
+static void ep93xx_pata_set_piomode(struct ata_port *ap,
+				    struct ata_device *adev)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	struct ata_device *pair = ata_dev_pair(adev);
+	/*
+	 * Calculate timings for the delay loop, assuming ep93xx cpu speed
+	 * is 200MHz (maximum possible for ep93xx). If actual cpu speed is
+	 * slower, we will wait a bit longer in each delay.
+	 * We additionally divide the cpu speed by 5 because a single
+	 * iteration of our delay loop takes 5 cpu cycles (25ns).
+	 */
+	unsigned long T = 1000000 / (200 / 5);
+
+	ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
+	if (pair && pair->pio_mode) {
+		struct ata_timing t;
+		ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
+		ata_timing_merge(&t, &drv_data->t, &drv_data->t,
+			ATA_TIMING_SETUP | ATA_TIMING_8BIT);
+	}
+	drv_data->iordy = ata_pio_need_iordy(adev);
+
+	ep93xx_pata_enable_pio(drv_data->ide_base,
+			       adev->pio_mode - XFER_PIO_0);
+}
+
+/* Note: original code is ata_sff_check_status */
+static u8 ep93xx_pata_check_status(struct ata_port *ap)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS);
+}
+
+static u8 ep93xx_pata_check_altstatus(struct ata_port *ap)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS);
+}
+
+/* Note: original code is ata_sff_tf_load */
+static void ep93xx_pata_tf_load(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		ep93xx_pata_write_reg(drv_data, tf->hob_feature,
+			IDECTRL_ADDR_FEATURE);
+		ep93xx_pata_write_reg(drv_data, tf->hob_nsect,
+			IDECTRL_ADDR_NSECT);
+		ep93xx_pata_write_reg(drv_data, tf->hob_lbal,
+			IDECTRL_ADDR_LBAL);
+		ep93xx_pata_write_reg(drv_data, tf->hob_lbam,
+			IDECTRL_ADDR_LBAM);
+		ep93xx_pata_write_reg(drv_data, tf->hob_lbah,
+			IDECTRL_ADDR_LBAH);
+	}
+
+	if (is_addr) {
+		ep93xx_pata_write_reg(drv_data, tf->feature,
+			IDECTRL_ADDR_FEATURE);
+		ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT);
+		ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL);
+		ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM);
+		ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		ep93xx_pata_write_reg(drv_data, tf->device,
+			IDECTRL_ADDR_DEVICE);
+
+	ata_wait_idle(ap);
+}
+
+/* Note: original code is ata_sff_tf_read */
+static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	tf->command = ep93xx_pata_check_status(ap);
+	tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
+	tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
+	tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
+	tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
+	tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH);
+	tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB,
+			IDECTRL_ADDR_CTL);
+		tf->hob_feature = ep93xx_pata_read_reg(drv_data,
+			IDECTRL_ADDR_FEATURE);
+		tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
+			IDECTRL_ADDR_NSECT);
+		tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
+			IDECTRL_ADDR_LBAL);
+		tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
+			IDECTRL_ADDR_LBAM);
+		tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
+			IDECTRL_ADDR_LBAH);
+		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/* Note: original code is ata_sff_exec_command */
+static void ep93xx_pata_exec_command(struct ata_port *ap,
+				     const struct ata_taskfile *tf)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	ep93xx_pata_write_reg(drv_data, tf->command,
+			  IDECTRL_ADDR_COMMAND);
+	ata_sff_pause(ap);
+}
+
+/* Note: original code is ata_sff_dev_select */
+static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	u8 tmp = ATA_DEVICE_OBS;
+
+	if (device != 0)
+		tmp |= ATA_DEV1;
+
+	ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE);
+	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
+}
+
+/* Note: original code is ata_sff_set_devctl */
+static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL);
+}
+
+/* Note: original code is ata_sff_data_xfer */
+static unsigned int ep93xx_pata_data_xfer(struct ata_queued_cmd *qc,
+					  unsigned char *buf,
+					  unsigned int buflen, int rw)
+{
+	struct ata_port *ap = qc->dev->link->ap;
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	u16 *data = (u16 *)buf;
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	while (words--)
+		if (rw == READ)
+			*data++ = cpu_to_le16(
+				ep93xx_pata_read_data(
+					drv_data, IDECTRL_ADDR_DATA));
+		else
+			ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++),
+				IDECTRL_ADDR_DATA);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned char pad[2] = { };
+
+		buf += buflen - 1;
+
+		if (rw == READ) {
+			*pad = cpu_to_le16(
+				ep93xx_pata_read_data(
+					drv_data, IDECTRL_ADDR_DATA));
+			*buf = pad[0];
+		} else {
+			pad[0] = *buf;
+			ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad),
+					  IDECTRL_ADDR_DATA);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
+
+/* Note: original code is ata_devchk */
+static bool ep93xx_pata_device_is_present(struct ata_port *ap,
+					  unsigned int device)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	u8 nsect, lbal;
+
+	ap->ops->sff_dev_select(ap, device);
+
+	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
+	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
+
+	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT);
+	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL);
+
+	ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
+	ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
+
+	nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
+	lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return true;
+
+	return false;
+}
+
+/* Note: original code is ata_sff_wait_after_reset */
+static int ep93xx_pata_wait_after_reset(struct ata_link *link,
+					unsigned int devmask,
+					unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	int rc, ret = 0;
+
+	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+	/* always check readiness of the master device */
+	rc = ata_sff_wait_ready(link, deadline);
+	/*
+	 * -ENODEV means the odd clown forgot the D7 pulldown resistor
+	 * and TF status is 0xff, bail out on it too.
+	 */
+	if (rc)
+		return rc;
+
+	/*
+	 * if device 1 was found in ata_devchk, wait for register
+	 * access briefly, then wait for BSY to clear.
+	 */
+	if (dev1) {
+		int i;
+
+		ap->ops->sff_dev_select(ap, 1);
+
+		/*
+		 * Wait for register access.  Some ATAPI devices fail
+		 * to set nsect/lbal after reset, so don't waste too
+		 * much time on it.  We're gonna wait for !BSY anyway.
+		 */
+		for (i = 0; i < 2; i++) {
+			u8 nsect, lbal;
+
+			nsect = ep93xx_pata_read_reg(drv_data,
+				IDECTRL_ADDR_NSECT);
+			lbal = ep93xx_pata_read_reg(drv_data,
+				IDECTRL_ADDR_LBAL);
+			if (nsect == 1 && lbal == 1)
+				break;
+			msleep(50);	/* give drive a breather */
+		}
+
+		rc = ata_sff_wait_ready(link, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
+	}
+	/* is all this really necessary? */
+	ap->ops->sff_dev_select(ap, 0);
+	if (dev1)
+		ap->ops->sff_dev_select(ap, 1);
+	if (dev0)
+		ap->ops->sff_dev_select(ap, 0);
+
+	return ret;
+}
+
+/* Note: original code is ata_bus_softreset */
+static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+				     unsigned long deadline)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
+	udelay(20);		/* FIXME: flush */
+	ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL);
+	udelay(20);		/* FIXME: flush */
+	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
+	ap->last_ctl = ap->ctl;
+
+	return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
+{
+	if (drv_data->dma_rx_channel) {
+		dma_release_channel(drv_data->dma_rx_channel);
+		drv_data->dma_rx_channel = NULL;
+	}
+	if (drv_data->dma_tx_channel) {
+		dma_release_channel(drv_data->dma_tx_channel);
+		drv_data->dma_tx_channel = NULL;
+	}
+}
+
+static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+	if (ep93xx_dma_chan_is_m2p(chan))
+		return false;
+
+	chan->private = filter_param;
+	return true;
+}
+
+static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+{
+	const struct platform_device *pdev = drv_data->pdev;
+	dma_cap_mask_t mask;
+	struct dma_slave_config conf;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/*
+	 * Request two channels for IDE. Another possibility would be
+	 * to request only one channel, and reprogram its direction at
+	 * the start of each new transfer.
+	 */
+	drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
+	drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
+	drv_data->dma_rx_data.name = "ep93xx-pata-rx";
+	drv_data->dma_rx_channel = dma_request_channel(mask,
+		ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
+	if (!drv_data->dma_rx_channel)
+		return;
+
+	drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
+	drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
+	drv_data->dma_tx_data.name = "ep93xx-pata-tx";
+	drv_data->dma_tx_channel = dma_request_channel(mask,
+		ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
+	if (!drv_data->dma_tx_channel) {
+		dma_release_channel(drv_data->dma_rx_channel);
+		return;
+	}
+
+	/* Configure receive channel direction and source address */
+	memset(&conf, 0, sizeof(conf));
+	conf.direction = DMA_FROM_DEVICE;
+	conf.src_addr = drv_data->udma_in_phys;
+	conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
+		dev_err(&pdev->dev, "failed to configure rx dma channel\n");
+		ep93xx_pata_release_dma(drv_data);
+		return;
+	}
+
+	/* Configure transmit channel direction and destination address */
+	memset(&conf, 0, sizeof(conf));
+	conf.direction = DMA_TO_DEVICE;
+	conf.dst_addr = drv_data->udma_out_phys;
+	conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
+		dev_err(&pdev->dev, "failed to configure tx dma channel\n");
+		ep93xx_pata_release_dma(drv_data);
+	}
+}
+
+static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
+{
+	struct dma_async_tx_descriptor *txd;
+	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
+	void __iomem *base = drv_data->ide_base;
+	struct ata_device *adev = qc->dev;
+	u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0;
+	struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
+		? drv_data->dma_tx_channel : drv_data->dma_rx_channel;
+
+	txd = dmaengine_prep_slave_sg(channel, qc->sg, qc->n_elem, qc->dma_dir,
+		DMA_CTRL_ACK);
+	if (!txd) {
+		dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
+		return;
+	}
+	txd->callback = NULL;
+	txd->callback_param = NULL;
+
+	if (dmaengine_submit(txd) < 0) {
+		dev_err(qc->ap->dev, "failed to submit dma transfer\n");
+		return;
+	}
+	dma_async_issue_pending(channel);
+
+	/*
+	 * When enabling UDMA operation, IDEUDMAOP register needs to be
+	 * programmed in three step sequence:
+	 * 1) set or clear the RWOP bit,
+	 * 2) perform dummy read of the register,
+	 * 3) set the UEN bit.
+	 */
+	writel(v, base + IDEUDMAOP);
+	readl(base + IDEUDMAOP);
+	writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP);
+
+	writel(IDECFG_IDEEN | IDECFG_UDMA |
+		((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT),
+		base + IDECFG);
+}
+
+static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
+{
+	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
+	void __iomem *base = drv_data->ide_base;
+
+	/* terminate all dma transfers, if not yet finished */
+	dmaengine_terminate_all(drv_data->dma_rx_channel);
+	dmaengine_terminate_all(drv_data->dma_tx_channel);
+
+	/*
+	 * To properly stop IDE-DMA, the IDEUDMAOP register must be cleared
+	 * and the IDECTRL register set to its default value.
+	 */
+	writel(0, base + IDEUDMAOP);
+	writel(readl(base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN |
+		IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL);
+
+	ep93xx_pata_enable_pio(drv_data->ide_base,
+		qc->dev->pio_mode - XFER_PIO_0);
+
+	ata_sff_dma_pause(qc->ap);
+}
+
+static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc)
+{
+	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
+}
+
+static u8 ep93xx_pata_dma_status(struct ata_port *ap)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+	u32 val = readl(drv_data->ide_base + IDEUDMASTS);
+
+	/*
+	 * UDMA Status Register bits:
+	 *
+	 * DMAIDE - DMA request signal from UDMA state machine,
+	 * INTIDE - INT line generated by UDMA because of errors in the
+	 *          state machine,
+	 * SBUSY - UDMA state machine busy, not in idle state,
+	 * NDO   - error for data-out not completed,
+	 * NDI   - error for data-in not completed,
+	 * N4X   - error for data transferred that is not a multiple of
+	 *         four 32-bit words.
+	 * (EP93xx UG p27-17)
+	 */
+	if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI ||
+	    val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE)
+		return ATA_DMA_ERR;
+
+	/* read INTRQ (INT[3]) pin input state */
+	if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ)
+		return ATA_DMA_INTR;
+
+	if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE)
+		return ATA_DMA_ACTIVE;
+
+	return 0;
+}
+
+/* Note: original code is ata_sff_softreset */
+static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
+				 unsigned long deadline)
+{
+	struct ata_port *ap = al->ap;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	unsigned int devmask = 0;
+	int rc;
+	u8 err;
+
+	/* determine if device 0/1 are present */
+	if (ep93xx_pata_device_is_present(ap, 0))
+		devmask |= (1 << 0);
+	if (slave_possible && ep93xx_pata_device_is_present(ap, 1))
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->sff_dev_select(al->ap, 0);
+
+	/* issue bus reset */
+	rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
+	/* if link is occupied, -ENODEV too is an error */
+	if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
+		ata_link_err(al, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0),
+					  &err);
+	if (slave_possible && err != 0x81)
+		classes[1] = ata_sff_dev_classify(&al->device[1],
+						  devmask & (1 << 1), &err);
+
+	return 0;
+}
+
+/* Note: original code is ata_sff_drain_fifo */
+static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
+{
+	int count;
+	struct ata_port *ap;
+	struct ep93xx_pata_data *drv_data;
+
+	/* We only need to flush incoming data when a command was running */
+	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+		return;
+
+	ap = qc->ap;
+	drv_data = ap->host->private_data;
+	/* Drain up to 64K of data before we give up this recovery method */
+	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
+		     && count < 65536; count += 2)
+		ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);
+
+	/* Can become DEBUG later */
+	if (count)
+		ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
+}
+
+static int ep93xx_pata_port_start(struct ata_port *ap)
+{
+	struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+	/*
+	 * Set timings to safe values at startup (= number of ns from ATA
+	 * specification), we'll switch to properly calculated values later.
+	 */
+	drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
+	return 0;
+}
+
+static struct scsi_host_template ep93xx_pata_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	/* ep93xx dma implementation limit */
+	.sg_tablesize		= 32,
+	/* ep93xx dma segments can't cross a 32 KiB boundary, hence 0x7fff */
+	.dma_boundary		= 0x7fff,
+};
+
+static struct ata_port_operations ep93xx_pata_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.qc_prep		= ata_noop_qc_prep,
+
+	.softreset		= ep93xx_pata_softreset,
+	.hardreset		= ATA_OP_NULL,
+
+	.sff_dev_select		= ep93xx_pata_dev_select,
+	.sff_set_devctl		= ep93xx_pata_set_devctl,
+	.sff_check_status	= ep93xx_pata_check_status,
+	.sff_check_altstatus	= ep93xx_pata_check_altstatus,
+	.sff_tf_load		= ep93xx_pata_tf_load,
+	.sff_tf_read		= ep93xx_pata_tf_read,
+	.sff_exec_command	= ep93xx_pata_exec_command,
+	.sff_data_xfer		= ep93xx_pata_data_xfer,
+	.sff_drain_fifo		= ep93xx_pata_drain_fifo,
+	.sff_irq_clear		= ATA_OP_NULL,
+
+	.set_piomode		= ep93xx_pata_set_piomode,
+
+	.bmdma_setup		= ep93xx_pata_dma_setup,
+	.bmdma_start		= ep93xx_pata_dma_start,
+	.bmdma_stop		= ep93xx_pata_dma_stop,
+	.bmdma_status		= ep93xx_pata_dma_status,
+
+	.cable_detect		= ata_cable_unknown,
+	.port_start		= ep93xx_pata_port_start,
+};
+
+static int ep93xx_pata_probe(struct platform_device *pdev)
+{
+	struct ep93xx_pata_data *drv_data;
+	struct ata_host *host;
+	struct ata_port *ap;
+	int irq;
+	struct resource *mem_res;
+	void __iomem *ide_base;
+	int err;
+
+	err = ep93xx_ide_acquire_gpio(pdev);
+	if (err)
+		return err;
+
+	/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		err = -ENXIO;
+		goto err_rel_gpio;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
+	if (IS_ERR(ide_base)) {
+		err = PTR_ERR(ide_base);
+		goto err_rel_gpio;
+	}
+
+	drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data) {
+		err = -ENXIO;
+		goto err_rel_gpio;
+	}
+
+	drv_data->pdev = pdev;
+	drv_data->ide_base = ide_base;
+	drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
+	drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
+	ep93xx_pata_dma_init(drv_data);
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host) {
+		err = -ENXIO;
+		goto err_rel_dma;
+	}
+
+	ep93xx_pata_clear_regs(ide_base);
+
+	host->private_data = drv_data;
+
+	ap = host->ports[0];
+	ap->dev = &pdev->dev;
+	ap->ops = &ep93xx_pata_port_ops;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+	ap->pio_mask = ATA_PIO4;
+
+	/*
+	 * Maximum UDMA modes:
+	 * EP931x rev.E0 - UDMA2
+	 * EP931x rev.E1 - UDMA3
+	 * EP931x rev.E2 - UDMA4
+	 *
+	 * MWDMA support was removed from EP931x rev.E2,
+	 * so this driver supports only UDMA modes.
+	 */
+	if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
+		int chip_rev = ep93xx_chip_revision();
+
+		if (chip_rev == EP93XX_CHIP_REV_E1)
+			ap->udma_mask = ATA_UDMA3;
+		else if (chip_rev == EP93XX_CHIP_REV_E2)
+			ap->udma_mask = ATA_UDMA4;
+		else
+			ap->udma_mask = ATA_UDMA2;
+	}
+
+	/* defaults, pio 0 */
+	ep93xx_pata_enable_pio(ide_base, 0);
+
+	dev_info(&pdev->dev, "version " DRV_VERSION "\n");
+
+	/* activate host */
+	err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0,
+		&ep93xx_pata_sht);
+	if (err == 0)
+		return 0;
+
+err_rel_dma:
+	ep93xx_pata_release_dma(drv_data);
+err_rel_gpio:
+	ep93xx_ide_release_gpio(pdev);
+	return err;
+}
+
+static int ep93xx_pata_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct ep93xx_pata_data *drv_data = host->private_data;
+
+	ata_host_detach(host);
+	ep93xx_pata_release_dma(drv_data);
+	ep93xx_pata_clear_regs(drv_data->ide_base);
+	ep93xx_ide_release_gpio(pdev);
+	return 0;
+}
+
+static struct platform_driver ep93xx_pata_platform_driver = {
+	.driver = {
+		.name = DRV_NAME,
+	},
+	.probe = ep93xx_pata_probe,
+	.remove = ep93xx_pata_remove,
+};
+
+module_platform_driver(ep93xx_pata_platform_driver);
+
+MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
+		"Bartlomiej Zolnierkiewicz, Rafal Prylowski");
+MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:pata_ep93xx");
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
new file mode 100644
index 0000000..41e0d6a
--- /dev/null
+++ b/drivers/ata/pata_falcon.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Atari Falcon PATA controller driver
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Based on falconide.c:
+ *
+ *     Created 12 Jul 1997 by Geert Uytterhoeven
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/ide.h>
+
+#define DRV_NAME "pata_falcon"
+#define DRV_VERSION "0.1.0"
+
+#define ATA_HD_BASE	0xfff00000
+#define ATA_HD_CONTROL	0x39
+
+static struct scsi_host_template pata_falcon_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
+					  unsigned char *buf,
+					  unsigned int buflen, int rw)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1;
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	bool swap = true;
+
+	if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
+	    !blk_rq_is_passthrough(cmd->request))
+		swap = false;
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ) {
+		if (swap)
+			raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+		else
+			raw_insw((u16 *)data_addr, (u16 *)buf, words);
+	} else {
+		if (swap)
+			raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+		else
+			raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+	}
+
+	/* Transfer trailing byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned char pad[2] = { };
+
+		/* Point buf to the tail of buffer */
+		buf += buflen - 1;
+
+		if (rw == READ) {
+			if (swap)
+				raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+			else
+				raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+			*buf = pad[0];
+		} else {
+			pad[0] = *buf;
+			if (swap)
+				raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+			else
+				raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured.
+ */
+static int pata_falcon_set_mode(struct ata_link *link,
+				struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+		ata_dev_info(dev, "configured for PIO\n");
+	}
+	return 0;
+}
+
+static struct ata_port_operations pata_falcon_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= pata_falcon_data_xfer,
+	.cable_detect	= ata_cable_unknown,
+	.set_mode	= pata_falcon_set_mode,
+};
+
+static int pata_falcon_init_one(void)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct platform_device *pdev;
+	void __iomem *base;
+
+	if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
+		return -ENODEV;
+
+	pr_info(DRV_NAME ": Atari Falcon PATA controller\n");
+
+	pdev = platform_device_register_simple(DRV_NAME, 0, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	if (!devm_request_mem_region(&pdev->dev, ATA_HD_BASE, 0x40, DRV_NAME)) {
+		pr_err(DRV_NAME ": resources busy\n");
+		return -EBUSY;
+	}
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	ap->ops = &pata_falcon_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+	ap->flags |= ATA_FLAG_PIO_POLLING;
+
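+	/*
+	 * Falcon task-file registers live at byte offset 1 from the data
+	 * register and are spaced 4 bytes apart, hence the base + 1 + n * 4
+	 * arithmetic below.
+	 */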
+	base = (void __iomem *)ATA_HD_BASE;
+	ap->ioaddr.data_addr		= base;
+	ap->ioaddr.error_addr		= base + 1 + 1 * 4;
+	ap->ioaddr.feature_addr		= base + 1 + 1 * 4;
+	ap->ioaddr.nsect_addr		= base + 1 + 2 * 4;
+	ap->ioaddr.lbal_addr		= base + 1 + 3 * 4;
+	ap->ioaddr.lbam_addr		= base + 1 + 4 * 4;
+	ap->ioaddr.lbah_addr		= base + 1 + 5 * 4;
+	ap->ioaddr.device_addr		= base + 1 + 6 * 4;
+	ap->ioaddr.status_addr		= base + 1 + 7 * 4;
+	ap->ioaddr.command_addr		= base + 1 + 7 * 4;
+
+	ap->ioaddr.altstatus_addr	= base + ATA_HD_CONTROL;
+	ap->ioaddr.ctl_addr		= base + ATA_HD_CONTROL;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", (unsigned long)base,
+		      (unsigned long)base + ATA_HD_CONTROL);
+
+	/* activate */
+	return ata_host_activate(host, 0, NULL, 0, &pata_falcon_sht);
+}
+
+module_init(pata_falcon_init_one);
+
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
new file mode 100644
index 0000000..569a4a6
--- /dev/null
+++ b/drivers/ata/pata_ftide010.c
@@ -0,0 +1,574 @@
+/*
+ * Faraday Technology FTIDE010 driver
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Includes portions of the SL2312/SL3516/Gemini PATA driver
+ * Copyright (C) 2003 StorLine, Inc <jason@storlink.com.tw>
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2010 Frederic Pecourt <opengemini@free.fr>
+ * Copyright (C) 2011 Tobias Waldvogel <tobias.waldvogel@gmail.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/bitops.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include "sata_gemini.h"
+
+#define DRV_NAME "pata_ftide010"
+
+/**
+ * struct ftide010 - state container for the Faraday FTIDE010
+ * @dev: pointer back to the device representing this controller
+ * @base: remapped I/O space address
+ * @pclk: peripheral clock for the IDE block
+ * @host: pointer to the ATA host for this device
+ * @master_cbl: master cable type
+ * @slave_cbl: slave cable type
+ * @sg: Gemini SATA bridge pointer, if running on the Gemini
+ * @master_to_sata0: Gemini SATA bridge: the ATA master is connected
+ * to the SATA0 bridge
+ * @slave_to_sata0: Gemini SATA bridge: the ATA slave is connected
+ * to the SATA0 bridge
+ * @master_to_sata1: Gemini SATA bridge: the ATA master is connected
+ * to the SATA1 bridge
+ * @slave_to_sata1: Gemini SATA bridge: the ATA slave is connected
+ * to the SATA1 bridge
+ */
+struct ftide010 {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *pclk;
+	struct ata_host *host;
+	unsigned int master_cbl;
+	unsigned int slave_cbl;
+	/* Gemini-specific properties */
+	struct sata_gemini *sg;
+	bool master_to_sata0;
+	bool slave_to_sata0;
+	bool master_to_sata1;
+	bool slave_to_sata1;
+};
+
+#define FTIDE010_DMA_REG	0x00
+#define FTIDE010_DMA_STATUS	0x02
+#define FTIDE010_IDE_BMDTPR	0x04
+#define FTIDE010_IDE_DEVICE_ID	0x08
+#define FTIDE010_PIO_TIMING	0x10
+#define FTIDE010_MWDMA_TIMING	0x11
+#define FTIDE010_UDMA_TIMING0	0x12 /* Master */
+#define FTIDE010_UDMA_TIMING1	0x13 /* Slave */
+#define FTIDE010_CLK_MOD	0x14
+/* These registers are mapped directly to the IDE registers */
+#define FTIDE010_CMD_DATA	0x20
+#define FTIDE010_ERROR_FEATURES	0x21
+#define FTIDE010_NSECT		0x22
+#define FTIDE010_LBAL		0x23
+#define FTIDE010_LBAM		0x24
+#define FTIDE010_LBAH		0x25
+#define FTIDE010_DEVICE		0x26
+#define FTIDE010_STATUS_COMMAND	0x27
+#define FTIDE010_ALTSTAT_CTRL	0x36
+
+/* Set this bit for UDMA mode 5 and 6 */
+#define FTIDE010_UDMA_TIMING_MODE_56	BIT(7)
+
+/* 0 = 50 MHz, 1 = 66 MHz */
+#define FTIDE010_CLK_MOD_DEV0_CLK_SEL	BIT(0)
+#define FTIDE010_CLK_MOD_DEV1_CLK_SEL	BIT(1)
+/* Enable UDMA on a device */
+#define FTIDE010_CLK_MOD_DEV0_UDMA_EN	BIT(4)
+#define FTIDE010_CLK_MOD_DEV1_UDMA_EN	BIT(5)
+
+static struct scsi_host_template pata_ftide010_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Bus timings
+ *
+ * The unit of the below required timings is two clock periods of the ATA
+ * reference clock, which is 30 nanoseconds per unit at 66 MHz and 40
+ * nanoseconds per unit at 50 MHz. The PIO timings assume 33 MHz speed for
+ * PIO.
+ *
+ * pio_active_time: array of 5 elements for T2 timing for Mode 0,
+ * 1, 2, 3 and 4. Range 0..15.
+ * pio_recovery_time: array of 5 elements for T2l timing for Mode 0,
+ * 1, 2, 3 and 4. Range 0..15.
+ * mwdma_50_active_time: array of 3 elements for Td timing for multi
+ * word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
+ * mwdma_50_recovery_time: array of 3 elements for Tk timing for
+ * multi word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
+ * mwdma_66_active_time: array of 3 elements for Td timing for multi
+ * word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
+ * mwdma_66_recovery_time: array of 3 elements for Tk timing for
+ * multi word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
+ * udma_50_setup_time: array of 6 elements for Tvds timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
+ * udma_50_hold_time: array of 6 elements for Tdvh timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
+ * udma_66_setup_time: array of 7 elements for Tvds timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
+ * udma_66_hold_time: array of 7 elements for Tdvh timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
+ */
+static const u8 pio_active_time[5] = {10, 10, 10, 3, 3};
+static const u8 pio_recovery_time[5] = {10, 3, 1, 3, 1};
+static const u8 mwdma_50_active_time[3] = {6, 2, 2};
+static const u8 mwdma_50_recovery_time[3] = {6, 2, 1};
+static const u8 mwdma_66_active_time[3] = {8, 3, 3};
+static const u8 mwdma_66_recovery_time[3] = {8, 2, 1};
+static const u8 udma_50_setup_time[6] = {3, 3, 2, 2, 1, 1};
+static const u8 udma_50_hold_time[6] = {3, 1, 1, 1, 1, 1};
+static const u8 udma_66_setup_time[7] = {4, 4, 3, 2, 2, 2, 1};
+static const u8 udma_66_hold_time[7] = {2, 1, 1, 1, 1, 1, 1};
+
+/*
+ * We set 66 MHz for all MWDMA modes
+ */
+static const bool set_mdma_66_mhz[] = { true, true, true, true };
+
+/*
+ * We set 66 MHz for UDMA modes 3, 4 and 6 and no others
+ */
+static const bool set_udma_66_mhz[] = { false, false, false, true, true, false, true };
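+
+/*
+ * Worked example: UDMA mode 4 selects the 66 MHz table (see
+ * set_udma_66_mhz above), so the timing register value becomes
+ * udma_66_setup_time[4] << 4 | udma_66_hold_time[4].
+ */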
+
+static void ftide010_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ftide010 *ftide = ap->host->private_data;
+	u8 speed = adev->dma_mode;
+	u8 devno = adev->devno & 1;
+	u8 udma_en_mask;
+	u8 f66m_en_mask;
+	u8 clkreg;
+	u8 timreg;
+	u8 i;
+
+	/* Target device 0 (master) or 1 (slave) */
+	if (!devno) {
+		udma_en_mask = FTIDE010_CLK_MOD_DEV0_UDMA_EN;
+		f66m_en_mask = FTIDE010_CLK_MOD_DEV0_CLK_SEL;
+	} else {
+		udma_en_mask = FTIDE010_CLK_MOD_DEV1_UDMA_EN;
+		f66m_en_mask = FTIDE010_CLK_MOD_DEV1_CLK_SEL;
+	}
+
+	clkreg = readb(ftide->base + FTIDE010_CLK_MOD);
+	clkreg &= ~udma_en_mask;
+	clkreg &= ~f66m_en_mask;
+
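+	/* XFER_UDMA_0 (0x40) and above are UDMA; MWDMA modes lack this bit */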
+	if (speed & XFER_UDMA_0) {
+		i = speed & ~XFER_UDMA_0;
+		dev_dbg(ftide->dev, "set UDMA mode %02x, index %d\n",
+			speed, i);
+
+		clkreg |= udma_en_mask;
+		if (set_udma_66_mhz[i]) {
+			clkreg |= f66m_en_mask;
+			timreg = udma_66_setup_time[i] << 4 |
+				udma_66_hold_time[i];
+		} else {
+			timreg = udma_50_setup_time[i] << 4 |
+				udma_50_hold_time[i];
+		}
+
+		/* A special bit needs to be set for modes 5 and 6 */
+		if (i >= 5)
+			timreg |= FTIDE010_UDMA_TIMING_MODE_56;
+
+		dev_dbg(ftide->dev, "UDMA write clkreg = %02x, timreg = %02x\n",
+			clkreg, timreg);
+
+		writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
+		writeb(timreg, ftide->base + FTIDE010_UDMA_TIMING0 + devno);
+	} else {
+		i = speed & ~XFER_MW_DMA_0;
+		dev_dbg(ftide->dev, "set MWDMA mode %02x, index %d\n",
+			speed, i);
+
+		if (set_mdma_66_mhz[i]) {
+			clkreg |= f66m_en_mask;
+			timreg = mwdma_66_active_time[i] << 4 |
+				mwdma_66_recovery_time[i];
+		} else {
+			timreg = mwdma_50_active_time[i] << 4 |
+				mwdma_50_recovery_time[i];
+		}
+		dev_dbg(ftide->dev,
+			"MWDMA write clkreg = %02x, timreg = %02x\n",
+			clkreg, timreg);
+		/* This will affect all devices */
+		writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
+		writeb(timreg, ftide->base + FTIDE010_MWDMA_TIMING);
+	}
+
+	/*
+	 * Store the current device (master or slave) in ap->private_data
+	 * so that .qc_issue() can detect if this changes and reprogram
+	 * the DMA settings.
+	 */
+	ap->private_data = adev;
+}
+
+static void ftide010_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ftide010 *ftide = ap->host->private_data;
+	u8 pio = adev->pio_mode - XFER_PIO_0;
+
+	dev_dbg(ftide->dev, "set PIO mode %02x, index %d\n",
+		adev->pio_mode, pio);
+	writeb(pio_active_time[pio] << 4 | pio_recovery_time[pio],
+	       ftide->base + FTIDE010_PIO_TIMING);
+}
+
+/*
+ * We implement our own qc_issue() callback since we may need to set up
+ * the timings differently for master and slave transfers: the CLK_MOD_REG
+ * and MWDMA_TIMING_REG are shared between master and slave, so reprogramming
+ * this may be necessary.
+ */
+static unsigned int ftide010_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/*
+	 * If the device changed, i.e. slave->master, master->slave,
+	 * then set up the DMA mode again so we are sure the timings
+	 * are correct.
+	 */
+	if (adev != ap->private_data && ata_dma_enabled(adev))
+		ftide010_set_dmamode(ap, adev);
+
+	return ata_bmdma_qc_issue(qc);
+}
+
+static struct ata_port_operations pata_ftide010_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.set_dmamode	= ftide010_set_dmamode,
+	.set_piomode	= ftide010_set_piomode,
+	.qc_issue	= ftide010_qc_issue,
+};
+
+static struct ata_port_info ftide010_port_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.pio_mask	= ATA_PIO4,
+	.port_ops	= &pata_ftide010_port_ops,
+};
+
+#if IS_ENABLED(CONFIG_SATA_GEMINI)
+
+static int pata_ftide010_gemini_port_start(struct ata_port *ap)
+{
+	struct ftide010 *ftide = ap->host->private_data;
+	struct device *dev = ftide->dev;
+	struct sata_gemini *sg = ftide->sg;
+	int bridges = 0;
+	int ret;
+
+	ret = ata_bmdma_port_start(ap);
+	if (ret)
+		return ret;
+
+	if (ftide->master_to_sata0) {
+		dev_info(dev, "SATA0 (master) start\n");
+		ret = gemini_sata_start_bridge(sg, 0);
+		if (!ret)
+			bridges++;
+	}
+	if (ftide->master_to_sata1) {
+		dev_info(dev, "SATA1 (master) start\n");
+		ret = gemini_sata_start_bridge(sg, 1);
+		if (!ret)
+			bridges++;
+	}
+	/* Avoid double-starting */
+	if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
+		dev_info(dev, "SATA0 (slave) start\n");
+		ret = gemini_sata_start_bridge(sg, 0);
+		if (!ret)
+			bridges++;
+	}
+	/* Avoid double-starting */
+	if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
+		dev_info(dev, "SATA1 (slave) start\n");
+		ret = gemini_sata_start_bridge(sg, 1);
+		if (!ret)
+			bridges++;
+	}
+
+	dev_info(dev, "brought %d bridges online\n", bridges);
+	return (bridges > 0) ? 0 : -EINVAL; // -ENODEV;
+}
+
+static void pata_ftide010_gemini_port_stop(struct ata_port *ap)
+{
+	struct ftide010 *ftide = ap->host->private_data;
+	struct device *dev = ftide->dev;
+	struct sata_gemini *sg = ftide->sg;
+
+	if (ftide->master_to_sata0) {
+		dev_info(dev, "SATA0 (master) stop\n");
+		gemini_sata_stop_bridge(sg, 0);
+	}
+	if (ftide->master_to_sata1) {
+		dev_info(dev, "SATA1 (master) stop\n");
+		gemini_sata_stop_bridge(sg, 1);
+	}
+	/* Avoid double-stopping */
+	if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
+		dev_info(dev, "SATA0 (slave) stop\n");
+		gemini_sata_stop_bridge(sg, 0);
+	}
+	/* Avoid double-stopping */
+	if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
+		dev_info(dev, "SATA1 (slave) stop\n");
+		gemini_sata_stop_bridge(sg, 1);
+	}
+}
+
+static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
+{
+	struct ftide010 *ftide = ap->host->private_data;
+
+	/*
+	 * Return the master cable type; it is unclear how to report a
+	 * different cable for the slave than for the master.
+	 */
+	return ftide->master_cbl;
+}
+
+static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
+				     bool is_ata1)
+{
+	struct device *dev = ftide->dev;
+	struct sata_gemini *sg;
+	enum gemini_muxmode muxmode;
+
+	/* Look up SATA bridge */
+	sg = gemini_sata_bridge_get();
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+	ftide->sg = sg;
+
+	muxmode = gemini_sata_get_muxmode(sg);
+
+	/* Special ops */
+	pata_ftide010_port_ops.port_start =
+		pata_ftide010_gemini_port_start;
+	pata_ftide010_port_ops.port_stop =
+		pata_ftide010_gemini_port_stop;
+	pata_ftide010_port_ops.cable_detect =
+		pata_ftide010_gemini_cable_detect;
+
+	/* Flag port as SATA-capable */
+	if (gemini_sata_bridge_enabled(sg, is_ata1))
+		pi->flags |= ATA_FLAG_SATA;
+
+	/* This device has broken DMA, only PIO works */
+	if (of_machine_is_compatible("itian,sq201")) {
+		pi->mwdma_mask = 0;
+		pi->udma_mask = 0;
+	}
+
+	/*
+	 * We assume that a simple 40-wire cable is used in the PATA mode.
+	 * If you're adding a system using the PATA interface, make sure
+	 * the right cable is set up here; it might be necessary to use
+	 * special hardware detection or to encode the cable type in the device
+	 * tree with special properties.
+	 */
+	if (!is_ata1) {
+		switch (muxmode) {
+		case GEMINI_MUXMODE_0:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_PATA40;
+			ftide->master_to_sata0 = true;
+			break;
+		case GEMINI_MUXMODE_1:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_NONE;
+			ftide->master_to_sata0 = true;
+			break;
+		case GEMINI_MUXMODE_2:
+			ftide->master_cbl = ATA_CBL_PATA40;
+			ftide->slave_cbl = ATA_CBL_PATA40;
+			break;
+		case GEMINI_MUXMODE_3:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_SATA;
+			ftide->master_to_sata0 = true;
+			ftide->slave_to_sata1 = true;
+			break;
+		}
+	} else {
+		switch (muxmode) {
+		case GEMINI_MUXMODE_0:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_NONE;
+			ftide->master_to_sata1 = true;
+			break;
+		case GEMINI_MUXMODE_1:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_PATA40;
+			ftide->master_to_sata1 = true;
+			break;
+		case GEMINI_MUXMODE_2:
+			ftide->master_cbl = ATA_CBL_SATA;
+			ftide->slave_cbl = ATA_CBL_SATA;
+			ftide->slave_to_sata0 = true;
+			ftide->master_to_sata1 = true;
+			break;
+		case GEMINI_MUXMODE_3:
+			ftide->master_cbl = ATA_CBL_PATA40;
+			ftide->slave_cbl = ATA_CBL_PATA40;
+			break;
+		}
+	}
+	dev_info(dev, "set up Gemini PATA%d\n", is_ata1);
+
+	return 0;
+}
+#else
+static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
+				     bool is_ata1)
+{
+	return -ENOTSUPP;
+}
+#endif
+
+static int pata_ftide010_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct ata_port_info pi = ftide010_port_info;
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ftide010 *ftide;
+	struct resource *res;
+	int irq;
+	int ret;
+	int i;
+
+	ftide = devm_kzalloc(dev, sizeof(*ftide), GFP_KERNEL);
+	if (!ftide)
+		return -ENOMEM;
+	ftide->dev = dev;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	ftide->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ftide->base))
+		return PTR_ERR(ftide->base);
+
+	ftide->pclk = devm_clk_get(dev, "PCLK");
+	if (!IS_ERR(ftide->pclk)) {
+		ret = clk_prepare_enable(ftide->pclk);
+		if (ret) {
+			dev_err(dev, "failed to enable PCLK\n");
+			return ret;
+		}
+	}
+
+	/* Some special Cortina Gemini init, if needed */
+	if (of_device_is_compatible(np, "cortina,gemini-pata")) {
+		/*
+		 * We need to know which instance is probing (the
+		 * Gemini has two instances of FTIDE010) and we do
+		 * this simply by looking at the physical base
+		 * address, which is 0x63400000 for ATA1, else we
+		 * are ATA0. This will also set up the cable types.
+		 */
+		ret = pata_ftide010_gemini_init(ftide,
+				&pi,
+				(res->start == 0x63400000));
+		if (ret)
+			goto err_dis_clk;
+	} else {
+		/* Else assume we are connected using PATA40 */
+		ftide->master_cbl = ATA_CBL_PATA40;
+		ftide->slave_cbl = ATA_CBL_PATA40;
+	}
+
+	ftide->host = ata_host_alloc_pinfo(dev, ppi, 1);
+	if (!ftide->host) {
+		ret = -ENOMEM;
+		goto err_dis_clk;
+	}
+	ftide->host->private_data = ftide;
+
+	for (i = 0; i < ftide->host->n_ports; i++) {
+		struct ata_port *ap = ftide->host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+
+		ioaddr->bmdma_addr = ftide->base + FTIDE010_DMA_REG;
+		ioaddr->cmd_addr = ftide->base + FTIDE010_CMD_DATA;
+		ioaddr->ctl_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
+		ioaddr->altstatus_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
+		ata_sff_std_ports(ioaddr);
+	}
+
+	dev_info(dev, "device ID %08x, irq %d, reg %pR\n",
+		 readl(ftide->base + FTIDE010_IDE_DEVICE_ID), irq, res);
+
+	ret = ata_host_activate(ftide->host, irq, ata_bmdma_interrupt,
+				0, &pata_ftide010_sht);
+	if (ret)
+		goto err_dis_clk;
+
+	return 0;
+
+err_dis_clk:
+	if (!IS_ERR(ftide->pclk))
+		clk_disable_unprepare(ftide->pclk);
+	return ret;
+}
+
+static int pata_ftide010_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct ftide010 *ftide = host->private_data;
+
+	ata_host_detach(ftide->host);
+	if (!IS_ERR(ftide->pclk))
+		clk_disable_unprepare(ftide->pclk);
+
+	return 0;
+}
+
+static const struct of_device_id pata_ftide010_of_match[] = {
+	{
+		.compatible = "faraday,ftide010",
+	},
+	{},
+};
+
+static struct platform_driver pata_ftide010_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(pata_ftide010_of_match),
+	},
+	.probe = pata_ftide010_probe,
+	.remove = pata_ftide010_remove,
+};
+module_platform_driver(pata_ftide010_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
new file mode 100644
index 0000000..65bc9f3
--- /dev/null
+++ b/drivers/ata/pata_gayle.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Amiga Gayle PATA controller driver
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Based on gayle.c:
+ *
+ *     Created 12 Jul 1997 by Geert Uytterhoeven
+ */
+
+#include <linux/ata.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/zorro.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <asm/amigayle.h>
+#include <asm/ide.h>
+#include <asm/setup.h>
+
+#define DRV_NAME "pata_gayle"
+#define DRV_VERSION "0.1.0"
+
+#define GAYLE_CONTROL	0x101a
+
+static struct scsi_host_template pata_gayle_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+/* FIXME: is this needed? */
+static unsigned int pata_gayle_data_xfer(struct ata_queued_cmd *qc,
+					 unsigned char *buf,
+					 unsigned int buflen, int rw)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ)
+		raw_insw((u16 *)data_addr, (u16 *)buf, words);
+	else
+		raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+
+	/* Transfer trailing byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned char pad[2] = { };
+
+		/* Point buf to the tail of buffer */
+		buf += buflen - 1;
+
+		if (rw == READ) {
+			raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+			*buf = pad[0];
+		} else {
+			pad[0] = *buf;
+			raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured..
+ */
+static int pata_gayle_set_mode(struct ata_link *link,
+			       struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+		ata_dev_info(dev, "configured for PIO\n");
+	}
+	return 0;
+}
+
+static bool pata_gayle_irq_check(struct ata_port *ap)
+{
+	u8 ch;
+
+	ch = z_readb((unsigned long)ap->private_data);
+
+	return !!(ch & GAYLE_IRQ_IDE);
+}
+
+static void pata_gayle_irq_clear(struct ata_port *ap)
+{
+	(void)z_readb((unsigned long)ap->ioaddr.status_addr);
+	z_writeb(0x7c, (unsigned long)ap->private_data);
+}
+
+static struct ata_port_operations pata_gayle_a1200_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= pata_gayle_data_xfer,
+	.sff_irq_check	= pata_gayle_irq_check,
+	.sff_irq_clear	= pata_gayle_irq_clear,
+	.cable_detect	= ata_cable_unknown,
+	.set_mode	= pata_gayle_set_mode,
+};
+
+static struct ata_port_operations pata_gayle_a4000_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= pata_gayle_data_xfer,
+	.cable_detect	= ata_cable_unknown,
+	.set_mode	= pata_gayle_set_mode,
+};
+
+static int __init pata_gayle_init_one(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct gayle_ide_platform_data *pdata;
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *base;
+	int ret;
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	dev_info(&pdev->dev, "Amiga Gayle IDE controller (A%u style)\n",
+		pdata->explicit_ack ? 1200 : 4000);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				     resource_size(res), DRV_NAME)) {
+		pr_err(DRV_NAME ": resources busy\n");
+		return -EBUSY;
+	}
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+
+	if (pdata->explicit_ack)
+		ap->ops = &pata_gayle_a1200_ops;
+	else
+		ap->ops = &pata_gayle_a4000_ops;
+
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+
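+	/*
+	 * Gayle task-file registers live at byte offset 2 from the data
+	 * register and are spaced 4 bytes apart, hence the base + 2 + n * 4
+	 * arithmetic below.
+	 */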
+	base = ZTWO_VADDR(pdata->base);
+	ap->ioaddr.data_addr		= base;
+	ap->ioaddr.error_addr		= base + 2 + 1 * 4;
+	ap->ioaddr.feature_addr		= base + 2 + 1 * 4;
+	ap->ioaddr.nsect_addr		= base + 2 + 2 * 4;
+	ap->ioaddr.lbal_addr		= base + 2 + 3 * 4;
+	ap->ioaddr.lbam_addr		= base + 2 + 4 * 4;
+	ap->ioaddr.lbah_addr		= base + 2 + 5 * 4;
+	ap->ioaddr.device_addr		= base + 2 + 6 * 4;
+	ap->ioaddr.status_addr		= base + 2 + 7 * 4;
+	ap->ioaddr.command_addr		= base + 2 + 7 * 4;
+
+	ap->ioaddr.altstatus_addr	= base + GAYLE_CONTROL;
+	ap->ioaddr.ctl_addr		= base + GAYLE_CONTROL;
+
+	ap->private_data = (void *)ZTWO_VADDR(pdata->irqport);
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", pdata->base,
+		      pdata->base + GAYLE_CONTROL);
+
+	ret = ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt,
+				IRQF_SHARED, &pata_gayle_sht);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, host);
+
+	return 0;
+}
+
+static int __exit pata_gayle_remove_one(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+
+	ata_host_detach(host);
+
+	return 0;
+}
+
+static struct platform_driver pata_gayle_driver = {
+	.remove = __exit_p(pata_gayle_remove_one),
+	.driver   = {
+		.name	= "amiga-gayle-ide",
+	},
+};
+
+module_platform_driver_probe(pata_gayle_driver, pata_gayle_init_one);
+
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
+MODULE_DESCRIPTION("low-level driver for Amiga Gayle PATA");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:amiga-gayle-ide");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
new file mode 100644
index 0000000..a219a50
--- /dev/null
+++ b/drivers/ata/pata_hpt366.c
@@ -0,0 +1,423 @@
+/*
+ * Libata driver for the highpoint 366 and 368 UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ *
+ *
+ * TODO
+ *	Look into engine reset on timeout errors. Should not be required.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt366"
+#define DRV_VERSION	"0.6.11"
+
+struct hpt_clock {
+	u8	xfer_mode;
+	u32	timing;
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 4:7    data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 8:11   cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 12:15  cmd_low_time. Active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 16:18  udma_cycle_time. Clock cycles for UDMA xfer?
+ * 19:21  pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
+ * 22:24  cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
+ *        register access.
+ * 28     UDMA enable.
+ * 29     DMA  enable.
+ * 30     PIO_MST enable. If set, the chip is in bus master mode during
+ *        PIO xfer.
+ * 31     FIFO enable.
+ */
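+
+/*
+ * Illustrative helper, not taken from the original driver: decodes the
+ * field layout documented above so the raw timing words in the tables
+ * below are easier to eyeball.
+ */
+static inline void hpt366_show_timing(u32 t)
+{
+	pr_debug("data hi/lo %u/%u cmd hi/lo %u/%u udma_cyc %u pre %u cmd_pre %u udma %u dma %u pio_mst %u fifo %u\n",
+		 t & 0xf, (t >> 4) & 0xf, (t >> 8) & 0xf, (t >> 12) & 0xf,
+		 (t >> 16) & 0x7, (t >> 19) & 0x7, (t >> 22) & 0x7,
+		 (t >> 28) & 1, (t >> 29) & 1, (t >> 30) & 1, t >> 31);
+}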
+
+static const struct hpt_clock hpt366_40[] = {
+	{	XFER_UDMA_4,	0x900fd943	},
+	{	XFER_UDMA_3,	0x900ad943	},
+	{	XFER_UDMA_2,	0x900bd943	},
+	{	XFER_UDMA_1,	0x9008d943	},
+	{	XFER_UDMA_0,	0x9008d943	},
+
+	{	XFER_MW_DMA_2,	0xa008d943	},
+	{	XFER_MW_DMA_1,	0xa010d955	},
+	{	XFER_MW_DMA_0,	0xa010d9fc	},
+
+	{	XFER_PIO_4,	0xc008d963	},
+	{	XFER_PIO_3,	0xc010d974	},
+	{	XFER_PIO_2,	0xc010d997	},
+	{	XFER_PIO_1,	0xc010d9c7	},
+	{	XFER_PIO_0,	0xc018d9d9	},
+	{	0,		0x0120d9d9	}
+};
+
+static const struct hpt_clock hpt366_33[] = {
+	{	XFER_UDMA_4,	0x90c9a731	},
+	{	XFER_UDMA_3,	0x90cfa731	},
+	{	XFER_UDMA_2,	0x90caa731	},
+	{	XFER_UDMA_1,	0x90cba731	},
+	{	XFER_UDMA_0,	0x90c8a731	},
+
+	{	XFER_MW_DMA_2,	0xa0c8a731	},
+	{	XFER_MW_DMA_1,	0xa0c8a732	},	/* 0xa0c8a733 */
+	{	XFER_MW_DMA_0,	0xa0c8a797	},
+
+	{	XFER_PIO_4,	0xc0c8a731	},
+	{	XFER_PIO_3,	0xc0c8a742	},
+	{	XFER_PIO_2,	0xc0d0a753	},
+	{	XFER_PIO_1,	0xc0d0a7a3	},	/* 0xc0d0a793 */
+	{	XFER_PIO_0,	0xc0d0a7aa	},	/* 0xc0d0a7a7 */
+	{	0,		0x0120a7a7	}
+};
+
+static const struct hpt_clock hpt366_25[] = {
+	{	XFER_UDMA_4,	0x90c98521	},
+	{	XFER_UDMA_3,	0x90cf8521	},
+	{	XFER_UDMA_2,	0x90cf8521	},
+	{	XFER_UDMA_1,	0x90cb8521	},
+	{	XFER_UDMA_0,	0x90cb8521	},
+
+	{	XFER_MW_DMA_2,	0xa0ca8521	},
+	{	XFER_MW_DMA_1,	0xa0ca8532	},
+	{	XFER_MW_DMA_0,	0xa0ca8575	},
+
+	{	XFER_PIO_4,	0xc0ca8521	},
+	{	XFER_PIO_3,	0xc0ca8532	},
+	{	XFER_PIO_2,	0xc0ca8542	},
+	{	XFER_PIO_1,	0xc0d08572	},
+	{	XFER_PIO_0,	0xc0d08585	},
+	{	0,		0x01208585	}
+};
+
+/**
+ *	hpt36x_find_mode	-	find the hpt36x timing
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided.
+ */
+
+static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = ap->host->private_data;
+
+	while (clocks->xfer_mode) {
+		if (clocks->xfer_mode == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+static const char * const bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+	"Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+	"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+	"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+	"Maxtor 90510D4",
+	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+	"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+	"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	NULL
+};
+
+static const char * const bad_ata66_4[] = {
+	"IBM-DTLA-307075",
+	"IBM-DTLA-307060",
+	"IBM-DTLA-307045",
+	"IBM-DTLA-307030",
+	"IBM-DTLA-307020",
+	"IBM-DTLA-307015",
+	"IBM-DTLA-305040",
+	"IBM-DTLA-305030",
+	"IBM-DTLA-305020",
+	"IC35L010AVER07-0",
+	"IC35L020AVER07-0",
+	"IC35L030AVER07-0",
+	"IC35L040AVER07-0",
+	"IC35L060AVER07-0",
+	"WDC AC310200R",
+	NULL
+};
+
+static const char * const bad_ata66_3[] = {
+	"WDC AC310200R",
+	NULL
+};
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+			       const char * const list[])
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	int i;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	i = match_string(list, -1, model_num);
+	if (i >= 0) {
+		pr_warn("%s is not supported for %s\n", modestr, list[i]);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *	hpt366_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA",  bad_ata33))
+			mask &= ~ATA_MASK_UDMA;
+		if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+			mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+		if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+			mask &= ~(0xF0 << ATA_SHIFT_UDMA);
+	} else if (adev->class == ATA_DEV_ATAPI)
+		mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+
+	return mask;
+}
+
+static int hpt36x_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+
+	/*
+	 * Each channel of pata_hpt366 occupies a separate PCI function
+	 * as the primary channel, and bit 1 indicates the cable type.
+	 */
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	if (ata66 & 2)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
+			    u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr = 0x40 + 4 * adev->devno;
+	u32 mask, reg, t;
+
+	/* determine timing mask and find matching clock entry */
+	if (mode < XFER_MW_DMA_0)
+		mask = 0xc1f8ffff;
+	else if (mode < XFER_UDMA_0)
+		mask = 0x303800ff;
+	else
+		mask = 0x30070000;
+
+	t = hpt36x_find_mode(ap, mode);
+
+	/*
+	 * Combine new mode bits with old config bits and disable
+	 * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid
+	 * problems handling I/O errors later.
+	 */
+	pci_read_config_dword(pdev, addr, &reg);
+	reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000;
+	pci_write_config_dword(pdev, addr, reg);
+}
+
+/**
+ *	hpt366_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt366_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	hpt366_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt366_set_mode(ap, adev, adev->dma_mode);
+}
+
+static struct scsi_host_template hpt36x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ *	Configuration for HPT366/68
+ */
+
+static struct ata_port_operations hpt366_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= hpt36x_cable_detect,
+	.mode_filter	= hpt366_filter,
+	.set_piomode	= hpt366_set_piomode,
+	.set_dmamode	= hpt366_set_dmamode,
+};
+
+/**
+ *	hpt36x_init_chipset	-	common chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the chip setup work that must be done at both init and
+ *	resume time
+ */
+
+static void hpt36x_init_chipset(struct pci_dev *dev)
+{
+	u8 drive_fast;
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x51, &drive_fast);
+	if (drive_fast & 0x80)
+		pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+}
+
+/**
+ *	hpt36x_init_one		-	Initialise an HPT366/368
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT36x device. There are some interesting complications
+ *	here. Firstly the chip may report 366 and be one of several variants.
+ *	Secondly, all the timings depend on the chip's clock, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. It may be missing a couple of later
+ *	releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT366			4 (HPT366)	0	UDMA66
+ *	HPT366			4 (HPT366)	1	UDMA66
+ *	HPT368			4 (HPT366)	2	UDMA66
+ *	HPT37x/30x		4 (HPT366)	3+	Other driver
+ *
+ */
+
+static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_hpt366 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &hpt366_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
+
+	const void *hpriv = NULL;
+	u32 reg1;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
+	/* May be a later chip in disguise. Check */
+	/* Newer chips are not in the HPT36x driver. Ignore them */
+	if (dev->revision > 2)
+		return -ENODEV;
+
+	hpt36x_init_chipset(dev);
+
+	pci_read_config_dword(dev, 0x40,  &reg1);
+
+	/* PCI clocking determines the ATA timing values to use */
+	/* info_hpt366 is safe against re-entry so we can scribble on it */
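+	/*
+	 * Bits 8..11 of config register 0x40 reflect the bus clocking;
+	 * the driver maps 9 to the 40 MHz table, 5 to the 25 MHz table,
+	 * and anything else to the 33 MHz table.
+	 */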
+	switch ((reg1 & 0xf00) >> 8) {
+	case 9:
+		hpriv = &hpt366_40;
+		break;
+	case 5:
+		hpriv = &hpt366_25;
+		break;
+	default:
+		hpriv = &hpt366_33;
+		break;
+	}
+	/* Now kick off ATA set up */
+	return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, (void *)hpriv, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hpt36x_reinit_one(struct pci_dev *dev)
+{
+	struct ata_host *host = pci_get_drvdata(dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(dev);
+	if (rc)
+		return rc;
+	hpt36x_init_chipset(dev);
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id hpt36x[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ },
+};
+
+static struct pci_driver hpt36x_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= hpt36x,
+	.probe		= hpt36x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt36x_reinit_one,
+#endif
+};
+
+module_pci_driver(hpt36x_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt36x);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
new file mode 100644
index 0000000..ef8aaeb
--- /dev/null
+++ b/drivers/ata/pata_hpt37x.c
@@ -0,0 +1,1063 @@
+/*
+ * Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ * Portions Copyright (C) 2005-2010	MontaVista Software, Inc.
+ *
+ * TODO
+ *	Look into engine reset on timeout errors. Should not be required.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt37x"
+#define DRV_VERSION	"0.6.23"
+
+struct hpt_clock {
+	u8	xfer_speed;
+	u32	timing;
+};
+
+struct hpt_chip {
+	const char *name;
+	unsigned int base;
+	struct hpt_clock const *clocks[4];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 4:8    data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 9:12   cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 13:17  cmd_low_time. Active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 18:20  udma_cycle_time. Clock cycles for UDMA xfer.
+ * 21     CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
+ * 22:24  pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
+ * 25:27  cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
+ *        register access.
+ * 28     UDMA enable.
+ * 29     DMA  enable.
+ * 30     PIO_MST enable. If set, the chip is in bus master mode during
+ *        PIO xfer.
+ * 31     FIFO enable. Only for PIO.
+ */
+
+static struct hpt_clock hpt37x_timings_33[] = {
+	{ XFER_UDMA_6,		0x12446231 },	/* 0x12646231 ?? */
+	{ XFER_UDMA_5,		0x12446231 },
+	{ XFER_UDMA_4,		0x12446231 },
+	{ XFER_UDMA_3,		0x126c6231 },
+	{ XFER_UDMA_2,		0x12486231 },
+	{ XFER_UDMA_1,		0x124c6233 },
+	{ XFER_UDMA_0,		0x12506297 },
+
+	{ XFER_MW_DMA_2,	0x22406c31 },
+	{ XFER_MW_DMA_1,	0x22406c33 },
+	{ XFER_MW_DMA_0,	0x22406c97 },
+
+	{ XFER_PIO_4,		0x06414e31 },
+	{ XFER_PIO_3,		0x06414e42 },
+	{ XFER_PIO_2,		0x06414e53 },
+	{ XFER_PIO_1,		0x06814e93 },
+	{ XFER_PIO_0,		0x06814ea7 }
+};
+
+static struct hpt_clock hpt37x_timings_50[] = {
+	{ XFER_UDMA_6,		0x12848242 },
+	{ XFER_UDMA_5,		0x12848242 },
+	{ XFER_UDMA_4,		0x12ac8242 },
+	{ XFER_UDMA_3,		0x128c8242 },
+	{ XFER_UDMA_2,		0x120c8242 },
+	{ XFER_UDMA_1,		0x12148254 },
+	{ XFER_UDMA_0,		0x121882ea },
+
+	{ XFER_MW_DMA_2,	0x22808242 },
+	{ XFER_MW_DMA_1,	0x22808254 },
+	{ XFER_MW_DMA_0,	0x228082ea },
+
+	{ XFER_PIO_4,		0x0a81f442 },
+	{ XFER_PIO_3,		0x0a81f443 },
+	{ XFER_PIO_2,		0x0a81f454 },
+	{ XFER_PIO_1,		0x0ac1f465 },
+	{ XFER_PIO_0,		0x0ac1f48a }
+};
+
+static struct hpt_clock hpt37x_timings_66[] = {
+	{ XFER_UDMA_6,		0x1c869c62 },
+	{ XFER_UDMA_5,		0x1cae9c62 },	/* 0x1c8a9c62 */
+	{ XFER_UDMA_4,		0x1c8a9c62 },
+	{ XFER_UDMA_3,		0x1c8e9c62 },
+	{ XFER_UDMA_2,		0x1c929c62 },
+	{ XFER_UDMA_1,		0x1c9a9c62 },
+	{ XFER_UDMA_0,		0x1c829c62 },
+
+	{ XFER_MW_DMA_2,	0x2c829c62 },
+	{ XFER_MW_DMA_1,	0x2c829c66 },
+	{ XFER_MW_DMA_0,	0x2c829d2e },
+
+	{ XFER_PIO_4,		0x0c829c62 },
+	{ XFER_PIO_3,		0x0c829c84 },
+	{ XFER_PIO_2,		0x0c829ca6 },
+	{ XFER_PIO_1,		0x0d029d26 },
+	{ XFER_PIO_0,		0x0d029d5e }
+};
+
+static const struct hpt_chip hpt370 = {
+	"HPT370",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		NULL,
+		NULL
+	}
+};
+
+static const struct hpt_chip hpt370a = {
+	"HPT370A",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		NULL
+	}
+};
+
+static const struct hpt_chip hpt372 = {
+	"HPT372",
+	55,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt302 = {
+	"HPT302",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt371 = {
+	"HPT371",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt372a = {
+	"HPT372A",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt374 = {
+	"HPT374",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		NULL,
+		NULL
+	}
+};
+
+/**
+ *	hpt37x_find_mode	-	find the hpt37x timing
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided.
+ */
+
+static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = ap->host->private_data;
+
+	while (clocks->xfer_speed) {
+		if (clocks->xfer_speed == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+			       const char * const list[])
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	int i;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	i = match_string(list, -1, model_num);
+	if (i >= 0) {
+		pr_warn("%s is not supported for %s\n", modestr, list[i]);
+		return 1;
+	}
+	return 0;
+}
+
+static const char * const bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+	"Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+	"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+	"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+	"Maxtor 90510D4",
+	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+	"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+	"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	NULL
+};
+
+static const char * const bad_ata100_5[] = {
+	"IBM-DTLA-307075",
+	"IBM-DTLA-307060",
+	"IBM-DTLA-307045",
+	"IBM-DTLA-307030",
+	"IBM-DTLA-307020",
+	"IBM-DTLA-307015",
+	"IBM-DTLA-305040",
+	"IBM-DTLA-305030",
+	"IBM-DTLA-305020",
+	"IC35L010AVER07-0",
+	"IC35L020AVER07-0",
+	"IC35L030AVER07-0",
+	"IC35L040AVER07-0",
+	"IC35L060AVER07-0",
+	"WDC AC310200R",
+	NULL
+};
+
+/**
+ *	hpt370_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+			mask &= ~ATA_MASK_UDMA;
+		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+	}
+	return mask;
+}
+
+/**
+ *	hpt370a_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+	}
+	return mask;
+}
+
+/**
+ *	hpt372_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ *	to support UltraDMA modes 1, 2 and 3, nor any of the MWDMA modes...
+ */
+static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (ata_id_is_sata(adev->id))
+		mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+	return mask;
+}
+
+/**
+ *	hpt37x_cable_detect	-	Detect the cable type
+ *	@ap: ATA port to detect on
+ *
+ *	Return the cable type attached to this port
+ */
+
+static int hpt37x_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 scr2, ata66;
+
+	pci_read_config_byte(pdev, 0x5B, &scr2);
+	pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+
+	udelay(10); /* debounce */
+
+	/* Cable register now active */
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Restore state */
+	pci_write_config_byte(pdev, 0x5B, scr2);
+
+	if (ata66 & (2 >> ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	hpt374_fn1_cable_detect	-	Detect the cable type
+ *	@ap: ATA port to detect on
+ *
+ *	Return the cable type attached to this port
+ */
+
+static int hpt374_fn1_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int mcrbase = 0x50 + 4 * ap->port_no;
+	u16 mcr3;
+	u8 ata66;
+
+	/* Do the extra channel work */
+	pci_read_config_word(pdev, mcrbase + 2, &mcr3);
+	/* Set bit 15 of 0x52 to enable TCBLID as input */
+	pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000);
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Reset TCBLID/FCBLID to output */
+	pci_write_config_word(pdev, mcrbase + 2, mcr3);
+
+	if (ata66 & (2 >> ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	hpt37x_pre_reset	-	reset the hpt37x bus
+ *	@link: ATA link to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the initial reset handling for the HPT37x.
+ */
+
+static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits hpt37x_enable_bits[] = {
+		{ 0x50, 1, 0x04, 0x04 },
+		{ 0x54, 1, 0x04, 0x04 }
+	};
+
+	if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	/* Reset the state machine */
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(100);
+
+	return ata_sff_prereset(link, deadline);
+}
+
+static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
+			    u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg, timing, mask;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x02;
+	fast |= 0x01;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	/* Determine timing mask and find matching mode entry */
+	if (mode < XFER_MW_DMA_0)
+		mask = 0xcfc3ffff;
+	else if (mode < XFER_UDMA_0)
+		mask = 0x31c001ff;
+	else
+		mask = 0x303c0000;
+
+	timing = hpt37x_find_mode(ap, mode);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	reg = (reg & ~mask) | (timing & mask);
+	pci_write_config_dword(pdev, addr1, reg);
+}
+
+/**
+ *	hpt370_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt370_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	hpt370_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes.
+ */
+
+static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt370_set_mode(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	hpt370_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Work around the HPT370 DMA engine: if it is still active when the
+ *	command completes, force it idle and clear any error state.
+ */
+
+static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+	u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
+	u8 dma_cmd;
+
+	if (dma_stat & ATA_DMA_ACTIVE) {
+		udelay(20);
+		dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
+	}
+	if (dma_stat & ATA_DMA_ACTIVE) {
+		/* Clear the engine */
+		pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+		udelay(10);
+		/* Stop DMA */
+		dma_cmd = ioread8(bmdma + ATA_DMA_CMD);
+		iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD);
+		/* Clear Error */
+		dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
+		iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+			 bmdma + ATA_DMA_STATUS);
+		/* Clear the engine */
+		pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+		udelay(10);
+	}
+	ata_bmdma_stop(qc);
+}
+
+static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev,
+			    u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg, timing, mask;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	/* Determine timing mask and find matching mode entry */
+	if (mode < XFER_MW_DMA_0)
+		mask = 0xcfc3ffff;
+	else if (mode < XFER_UDMA_0)
+		mask = 0x31c001ff;
+	else
+		mask = 0x303c0000;
+
+	timing = hpt37x_find_mode(ap, mode);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	reg = (reg & ~mask) | (timing & mask);
+	pci_write_config_dword(pdev, addr1, reg);
+}
+
+/**
+ *	hpt372_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt372_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	hpt372_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes.
+ */
+
+static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt372_set_mode(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	hpt37x_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Clean up after the HPT372 and later DMA engine
+ */
+
+static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int mscreg = 0x50 + 4 * ap->port_no;
+	u8 bwsr_stat, msc_stat;
+
+	pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+	pci_read_config_byte(pdev, mscreg, &msc_stat);
+	if (bwsr_stat & (1 << ap->port_no))
+		pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+	ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template hpt37x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ *	Configuration for HPT370
+ */
+
+static struct ata_port_operations hpt370_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.bmdma_stop	= hpt370_bmdma_stop,
+
+	.mode_filter	= hpt370_filter,
+	.cable_detect	= hpt37x_cable_detect,
+	.set_piomode	= hpt370_set_piomode,
+	.set_dmamode	= hpt370_set_dmamode,
+	.prereset	= hpt37x_pre_reset,
+};
+
+/*
+ *	Configuration for HPT370A. Close to 370 but less filters
+ */
+
+static struct ata_port_operations hpt370a_port_ops = {
+	.inherits	= &hpt370_port_ops,
+	.mode_filter	= hpt370a_filter,
+};
+
+/*
+ *	Configuration for HPT371 and HPT302. Slightly different PIO and DMA
+ *	mode setting functionality.
+ */
+
+static struct ata_port_operations hpt302_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.bmdma_stop	= hpt37x_bmdma_stop,
+
+	.cable_detect	= hpt37x_cable_detect,
+	.set_piomode	= hpt372_set_piomode,
+	.set_dmamode	= hpt372_set_dmamode,
+	.prereset	= hpt37x_pre_reset,
+};
+
+/*
+ *	Configuration for HPT372. Mode setting works like 371 and 302
+ *	but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372_port_ops = {
+	.inherits	= &hpt302_port_ops,
+	.mode_filter	= hpt372_filter,
+};
+
+/*
+ *	Configuration for HPT374. Mode setting and filtering works like 372
+ *	but we have a different cable detection procedure for function 1.
+ */
+
+static struct ata_port_operations hpt374_fn1_port_ops = {
+	.inherits	= &hpt372_port_ops,
+	.cable_detect	= hpt374_fn1_cable_detect,
+};
+
+/**
+ *	hpt37x_clock_slot	-	Turn timing to PCI clock entry
+ *	@freq: Reported frequency timing
+ *	@base: Base timing
+ *
+ *	Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50
+ *	and 3 for 66MHz).
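+ *	For example, a chip with base 48 reporting a count of 133 gives
+ *	(48 * 133) / 192 = 33, which falls in the 33MHz slot.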
+ */
+
+static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
+{
+	unsigned int f = (base * freq) / 192;	/* MHz */
+
+	if (f < 40)
+		return 0;	/* 33MHz slot */
+	if (f < 45)
+		return 1;	/* 40MHz slot */
+	if (f < 55)
+		return 2;	/* 50MHz slot */
+	return 3;		/* 66MHz slot */
+}
+
+/**
+ *	hpt37x_calibrate_dpll		-	Calibrate the DPLL loop
+ *	@dev: PCI device
+ *
+ *	Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
+ *	succeeds
+ */
+
+static int hpt37x_calibrate_dpll(struct pci_dev *dev)
+{
+	u8 reg5b;
+	u32 reg5c;
+	int tries;
+
+	for (tries = 0; tries < 0x5000; tries++) {
+		udelay(50);
+		pci_read_config_byte(dev, 0x5b, &reg5b);
+		if (reg5b & 0x80) {
+			/* See if it stays set */
+			for (tries = 0; tries < 0x1000; tries++) {
+				pci_read_config_byte(dev, 0x5b, &reg5b);
+				/* Failed ? */
+				if ((reg5b & 0x80) == 0)
+					return 0;
+			}
+			/* Turn off tuning, we have the DPLL set */
+			pci_read_config_dword(dev, 0x5c, &reg5c);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
+			return 1;
+		}
+	}
+	/* Never went stable */
+	return 0;
+}
+
+static u32 hpt374_read_freq(struct pci_dev *pdev)
+{
+	u32 freq;
+	unsigned long io_base = pci_resource_start(pdev, 4);
+
+	if (PCI_FUNC(pdev->devfn) & 1) {
+		struct pci_dev *pdev_0;
+
+		pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
+		/* Someone hot plugged the controller on us ? */
+		if (pdev_0 == NULL)
+			return 0;
+		io_base = pci_resource_start(pdev_0, 4);
+		freq = inl(io_base + 0x90);
+		pci_dev_put(pdev_0);
+	} else
+		freq = inl(io_base + 0x90);
+	return freq;
+}
+
+/**
+ *	hpt37x_init_one		-	Initialise an HPT37X/302
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT37x device. There are some interesting complications
+ *	here. Firstly the chip may report 366 and be one of several variants.
+ *	Secondly all the timings depend on the clock for the chip, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. The list may be missing a couple of
+ *	later releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT366			4 (HPT366)	0	Other driver
+ *	HPT366			4 (HPT366)	1	Other driver
+ *	HPT368			4 (HPT366)	2	Other driver
+ *	HPT370			4 (HPT366)	3	UDMA100
+ *	HPT370A			4 (HPT366)	4	UDMA100
+ *	HPT372			4 (HPT366)	5	UDMA133 (1)
+ *	HPT372N			4 (HPT366)	6	Other driver
+ *	HPT372A			5 (HPT372)	1	UDMA133 (1)
+ *	HPT372N			5 (HPT372)	2	Other driver
+ *	HPT302			6 (HPT302)	1	UDMA133
+ *	HPT302N			6 (HPT302)	2	Other driver
+ *	HPT371			7 (HPT371)	*	UDMA133
+ *	HPT374			8 (HPT374)	*	UDMA133 4 channel
+ *	HPT372N			9 (HPT372N)	*	Other driver
+ *
+ *	(1) UDMA133 support depends on the bus clock
+ */
+
+static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* HPT370 - UDMA100 */
+	static const struct ata_port_info info_hpt370 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &hpt370_port_ops
+	};
+	/* HPT370A - UDMA100 */
+	static const struct ata_port_info info_hpt370a = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &hpt370a_port_ops
+	};
+	/* HPT370 - UDMA66 */
+	static const struct ata_port_info info_hpt370_33 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &hpt370_port_ops
+	};
+	/* HPT370A - UDMA66 */
+	static const struct ata_port_info info_hpt370a_33 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &hpt370a_port_ops
+	};
+	/* HPT372 - UDMA133 */
+	static const struct ata_port_info info_hpt372 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt372_port_ops
+	};
+	/* HPT371, 302 - UDMA133 */
+	static const struct ata_port_info info_hpt302 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt302_port_ops
+	};
+	/* HPT374 - UDMA100, function 1 uses different cable_detect method */
+	static const struct ata_port_info info_hpt374_fn0 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &hpt372_port_ops
+	};
+	static const struct ata_port_info info_hpt374_fn1 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &hpt374_fn1_port_ops
+	};
+
+	static const int MHz[4] = { 33, 40, 50, 66 };
+	void *private_data = NULL;
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	u8 rev = dev->revision;
+	u8 irqmask;
+	u8 mcr1;
+	u32 freq;
+	int prefer_dpll = 1;
+
+	unsigned long iobase = pci_resource_start(dev, 4);
+
+	const struct hpt_chip *chip_table;
+	int clock_slot;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
+	switch (dev->device) {
+	case PCI_DEVICE_ID_TTI_HPT366:
+		/* May be a later chip in disguise. Check */
+		/* Older chips are in the HPT366 driver. Ignore them */
+		if (rev < 3)
+			return -ENODEV;
+		/* N series chips have their own driver. Ignore */
+		if (rev == 6)
+			return -ENODEV;
+
+		switch (rev) {
+		case 3:
+			ppi[0] = &info_hpt370;
+			chip_table = &hpt370;
+			prefer_dpll = 0;
+			break;
+		case 4:
+			ppi[0] = &info_hpt370a;
+			chip_table = &hpt370a;
+			prefer_dpll = 0;
+			break;
+		case 5:
+			ppi[0] = &info_hpt372;
+			chip_table = &hpt372;
+			break;
+		default:
+			pr_err("Unknown HPT366 subtype, please report (%d)\n",
+			       rev);
+			return -ENODEV;
+		}
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372:
+		/* 372N if rev >= 2 */
+		if (rev >= 2)
+			return -ENODEV;
+		ppi[0] = &info_hpt372;
+		chip_table = &hpt372a;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT302:
+		/* 302N if rev > 1 */
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		/* Check this */
+		chip_table = &hpt302;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT371:
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		chip_table = &hpt371;
+		/*
+		 * Single channel device, master is not present but the BIOS
+		 * (or we, for non-x86) must mark it absent.
+		 */
+		pci_read_config_byte(dev, 0x50, &mcr1);
+		mcr1 &= ~0x04;
+		pci_write_config_byte(dev, 0x50, mcr1);
+		break;
+	case PCI_DEVICE_ID_TTI_HPT374:
+		chip_table = &hpt374;
+		if (!(PCI_FUNC(dev->devfn) & 1))
+			*ppi = &info_hpt374_fn0;
+		else
+			*ppi = &info_hpt374_fn1;
+		break;
+	default:
+		pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+		return -ENODEV;
+	}
+	/* Ok so this is a chip we support */
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x5A, &irqmask);
+	irqmask &= ~0x10;
+	pci_write_config_byte(dev, 0x5a, irqmask);
+
+	/*
+	 * Default to the PCI clock. Make sure MA15/16 are set to output
+	 * to prevent drives having problems with 40-pin cables. Needed
+	 * for some drives such as IBM-DTLA which will not enter ready
+	 * state on reset when PDIAG is an input.
+	 */
+
+	pci_write_config_byte(dev, 0x5b, 0x23);
+
+	/*
+	 * HighPoint does this for HPT372A.
+	 * NOTE: This register is only writeable via I/O space.
+	 */
+	if (chip_table == &hpt372a)
+		outb(0x0e, iobase + 0x9c);
+
+	/*
+	 * Some devices do not let this value be accessed via PCI space
+	 * according to the old driver. In addition we must use the value
+	 * from FN 0 on the HPT374.
+	 */
+
+	if (chip_table == &hpt374) {
+		freq = hpt374_read_freq(dev);
+		if (freq == 0)
+			return -ENODEV;
+	} else
+		freq = inl(iobase + 0x90);
+
+	if ((freq >> 12) != 0xABCDE) {
+		int i;
+		u16 sr;
+		u32 total = 0;
+
+		pr_warn("BIOS has not set timing clocks\n");
+
+		/* This is the process the HPT371 BIOS is reported to use */
+		for (i = 0; i < 128; i++) {
+			/* the f_CNT register (0x78) is 16 bits wide */
+			pci_read_config_word(dev, 0x78, &sr);
+			total += sr & 0x1FF;
+			udelay(15);
+		}
+		freq = total / 128;
+	}
+	freq &= 0x1FF;
+
+	/*
+	 *	Turn the frequency check into a band and then find a timing
+	 *	table to match it.
+	 */
+
+	clock_slot = hpt37x_clock_slot(freq, chip_table->base);
+	if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) {
+		/*
+		 *	We need to try PLL mode instead
+		 *
+		 *	For non-UDMA133-capable devices we should
+		 *	use a 50MHz DPLL by choice.
+		 */
+		unsigned int f_low, f_high;
+		int dpll, adjust;
+
+		/* Compute DPLL */
+		dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2;
+
+		f_low = (MHz[clock_slot] * 48) / MHz[dpll];
+		f_high = f_low + 2;
+		if (clock_slot > 1)
+			f_high += 2;
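+
+		/*
+		 * Worked example: a 33MHz bus clock with the 66MHz DPLL
+		 * gives f_low = (33 * 48) / 66 = 24 and an initial
+		 * calibration window of 24..26.
+		 */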
+
+		/* Select the DPLL clock. */
+		pci_write_config_byte(dev, 0x5b, 0x21);
+		pci_write_config_dword(dev, 0x5C,
+				       (f_high << 16) | f_low | 0x100);
+
+		for (adjust = 0; adjust < 8; adjust++) {
+			if (hpt37x_calibrate_dpll(dev))
+				break;
+			/*
+			 * See if it'll settle at a fractionally
+			 * different clock
+			 */
+			if (adjust & 1)
+				f_low -= adjust >> 1;
+			else
+				f_high += adjust >> 1;
+			pci_write_config_dword(dev, 0x5C,
+					       (f_high << 16) | f_low | 0x100);
+		}
+		if (adjust == 8) {
+			pr_err("DPLL did not stabilize!\n");
+			return -ENODEV;
+		}
+		if (dpll == 3)
+			private_data = (void *)hpt37x_timings_66;
+		else
+			private_data = (void *)hpt37x_timings_50;
+
+		pr_info("bus clock %dMHz, using %dMHz DPLL\n",
+			MHz[clock_slot], MHz[dpll]);
+	} else {
+		private_data = (void *)chip_table->clocks[clock_slot];
+		/*
+		 *	Perform a final fixup. Note that we will have used the
+		 *	DPLL on the HPT372 which means we don't have to worry
+		 *	about lack of UDMA133 support on lower clocks
+		 */
+
+		if (clock_slot < 2 && ppi[0] == &info_hpt370)
+			ppi[0] = &info_hpt370_33;
+		if (clock_slot < 2 && ppi[0] == &info_hpt370a)
+			ppi[0] = &info_hpt370a_33;
+
+		pr_info("%s using %dMHz bus clock\n",
+			chip_table->name, MHz[clock_slot]);
+	}
+
+	/* Now kick off ATA set up */
+	return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
+}
+
+static const struct pci_device_id hpt37x[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+
+	{ },
+};
+
+static struct pci_driver hpt37x_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= hpt37x,
+	.probe		= hpt37x_init_one,
+	.remove		= ata_pci_remove_one
+};
+
+module_pci_driver(hpt37x_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt37x);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
new file mode 100644
index 0000000..b93c0f0
--- /dev/null
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -0,0 +1,629 @@
+/*
+ * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ * Portions Copyright (C) 2005-2010	MontaVista Software, Inc.
+ *
+ *
+ * TODO
+ *	Work out best PLL policy
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt3x2n"
+#define DRV_VERSION	"0.3.15"
+
+enum {
+	HPT_PCI_FAST	=	(1 << 31),
+	PCI66		=	(1 << 1),
+	USE_DPLL	=	(1 << 0)
+};
+
+struct hpt_clock {
+	u8	xfer_speed;
+	u32	timing;
+};
+
+struct hpt_chip {
+	const char *name;
+	struct hpt_clock *clocks[3];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 4:8    data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
+ *        cycles = value + 1
+ * 9:12   cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 13:17  cmd_low_time. Active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 18:20  udma_cycle_time. Clock cycles for UDMA xfer.
+ * 21     CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
+ * 22:24  pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
+ * 25:27  cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
+ *        register access.
+ * 28     UDMA enable.
+ * 29     DMA  enable.
+ * 30     PIO_MST enable. If set, the chip is in bus master mode during
+ *        PIO xfer.
+ * 31     FIFO enable. Only for PIO.
+ */
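+
+/*
+ * Illustrative sketch only: unpack a timing word into the fields listed
+ * in the key above. This helper is not used by the driver and its name
+ * is invented here purely for explanation.
+ */
+static void __maybe_unused hpt_show_timing(u32 t)
+{
+	pr_debug("data_high %u, data_low %u, cmd_high %u, cmd_low %u\n",
+		 t & 0xf, (t >> 4) & 0x1f, (t >> 9) & 0xf, (t >> 13) & 0x1f);
+	pr_debug("udma_cycle %u, dual_clk %u, pre_high %u, cmd_pre_high %u\n",
+		 (t >> 18) & 0x7, (t >> 21) & 0x1, (t >> 22) & 0x7,
+		 (t >> 25) & 0x7);
+	pr_debug("udma %u, dma %u, pio_mst %u, fifo %u\n",
+		 (t >> 28) & 0x1, (t >> 29) & 0x1, (t >> 30) & 0x1,
+		 (t >> 31) & 0x1);
+}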
+
+/* 66MHz DPLL clocks */
+
+static struct hpt_clock hpt3x2n_clocks[] = {
+	{	XFER_UDMA_7,	0x1c869c62	},
+	{	XFER_UDMA_6,	0x1c869c62	},
+	{	XFER_UDMA_5,	0x1c8a9c62	},
+	{	XFER_UDMA_4,	0x1c8a9c62	},
+	{	XFER_UDMA_3,	0x1c8e9c62	},
+	{	XFER_UDMA_2,	0x1c929c62	},
+	{	XFER_UDMA_1,	0x1c9a9c62	},
+	{	XFER_UDMA_0,	0x1c829c62	},
+
+	{	XFER_MW_DMA_2,	0x2c829c62	},
+	{	XFER_MW_DMA_1,	0x2c829c66	},
+	{	XFER_MW_DMA_0,	0x2c829d2e	},
+
+	{	XFER_PIO_4,	0x0c829c62	},
+	{	XFER_PIO_3,	0x0c829c84	},
+	{	XFER_PIO_2,	0x0c829ca6	},
+	{	XFER_PIO_1,	0x0d029d26	},
+	{	XFER_PIO_0,	0x0d029d5e	},
+};
+
+/**
+ *	hpt3x2n_find_mode	-	find timing data for a transfer mode
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided. For the moment the clocks table
+ *	is hard coded but easy to change. This will be needed if we use
+ *	different DPLLs.
+ */
+
+static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = hpt3x2n_clocks;
+
+	while (clocks->xfer_speed) {
+		if (clocks->xfer_speed == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+/**
+ *	hpt372n_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ *	to support UltraDMA modes 1, 2 and 3, nor any of the MWDMA modes...
+ */
+static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (ata_id_is_sata(adev->id))
+		mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+	return mask;
+}
+
+/**
+ *	hpt3x2n_cable_detect	-	Detect the cable type
+ *	@ap: ATA port to detect on
+ *
+ *	Return the cable type attached to this port
+ */
+
+static int hpt3x2n_cable_detect(struct ata_port *ap)
+{
+	u8 scr2, ata66;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, 0x5B, &scr2);
+	pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+
+	udelay(10); /* debounce */
+
+	/* Cable register now active */
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Restore state */
+	pci_write_config_byte(pdev, 0x5B, scr2);
+
+	if (ata66 & (2 >> ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	hpt3x2n_pre_reset	-	reset the hpt3x2n bus
+ *	@link: ATA link to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the initial reset handling for the 3x2n series controllers.
+ *	Reset the hardware and state machine.
+ */
+
+static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	/* Reset the state machine */
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(100);
+
+	return ata_sff_prereset(link, deadline);
+}
+
+static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
+			     u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg, timing, mask;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	/* Determine timing mask and find matching mode entry */
+	if (mode < XFER_MW_DMA_0)
+		mask = 0xcfc3ffff;
+	else if (mode < XFER_UDMA_0)
+		mask = 0x31c001ff;
+	else
+		mask = 0x303c0000;
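+
+	/*
+	 * Per the timing-word key above: the PIO mask keeps the data/cmd
+	 * timing fields (bits 0:17), the pre-high fields (bits 22:27) and
+	 * PIO_MST/FIFO (bits 30:31); the MWDMA mask keeps bits 0:8, the
+	 * pre-high field (bits 22:24) and the UDMA/DMA enables (bits 28:29);
+	 * the UDMA mask keeps the UDMA cycle/clock fields (bits 18:21) and
+	 * the enables (bits 28:29).
+	 */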
+
+	timing = hpt3x2n_find_mode(ap, mode);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	reg = (reg & ~mask) | (timing & mask);
+	pci_write_config_dword(pdev, addr1, reg);
+}
+
+/**
+ *	hpt3x2n_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt3x2n_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	hpt3x2n_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes.
+ */
+
+static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	hpt3x2n_set_mode(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	hpt3x2n_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Clean up after the HPT3x2n and later DMA engine
+ */
+
+static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int mscreg = 0x50 + 2 * ap->port_no;
+	u8 bwsr_stat, msc_stat;
+
+	pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+	pci_read_config_byte(pdev, mscreg, &msc_stat);
+	if (bwsr_stat & (1 << ap->port_no))
+		pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+	ata_bmdma_stop(qc);
+}
+
+/**
+ *	hpt3x2n_set_clock	-	clock control
+ *	@ap: ATA port
+ *	@source: 0x21 or 0x23 for PLL or PCI sourced clock
+ *
+ *	Switch the ATA bus clock between the PLL and PCI clock sources
+ *	while correctly isolating the bus and resetting internal logic
+ *
+ *	We must use the DPLL for
+ *	-	writing
+ *	-	second channel UDMA7 (SATA ports) or higher
+ *	-	66MHz PCI
+ *
+ *	or we will underclock the device and get reduced performance.
+ */
+
+static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+{
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
+
+	/* Tristate the bus */
+	iowrite8(0x80, bmdma+0x73);
+	iowrite8(0x80, bmdma+0x77);
+
+	/* Switch clock and reset channels */
+	iowrite8(source, bmdma+0x7B);
+	iowrite8(0xC0, bmdma+0x79);
+
+	/* Reset state machines, avoid enabling the disabled channels */
+	iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
+	iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
+
+	/* Complete reset */
+	iowrite8(0x00, bmdma+0x79);
+
+	/* Reconnect channels to bus */
+	iowrite8(0x00, bmdma+0x73);
+	iowrite8(0x00, bmdma+0x77);
+}
+
+static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+{
+	long flags = (long)ap->host->private_data;
+
+	/* See if we should use the DPLL */
+	if (writing)
+		return USE_DPLL;	/* Needed for write */
+	if (flags & PCI66)
+		return USE_DPLL;	/* Needed at 66MHz */
+	return 0;
+}
+
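+/*
+ * Both channels share one clock source, so we may only switch between
+ * the DPLL and the PCI clock while the other port is idle: qc_defer
+ * below enforces that, and qc_issue performs the actual switch.
+ */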
+static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
+	int rc, flags = (long)ap->host->private_data;
+	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	if ((flags & USE_DPLL) != dpll && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
+static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int flags = (long)ap->host->private_data;
+	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+	if ((flags & USE_DPLL) != dpll) {
+		flags &= ~USE_DPLL;
+		flags |= dpll;
+		ap->host->private_data = (void *)(long)flags;
+
+		hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+static struct scsi_host_template hpt3x2n_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ *	Configuration for HPT302N/371N.
+ */
+
+static struct ata_port_operations hpt3xxn_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.bmdma_stop	= hpt3x2n_bmdma_stop,
+
+	.qc_defer	= hpt3x2n_qc_defer,
+	.qc_issue	= hpt3x2n_qc_issue,
+
+	.cable_detect	= hpt3x2n_cable_detect,
+	.set_piomode	= hpt3x2n_set_piomode,
+	.set_dmamode	= hpt3x2n_set_dmamode,
+	.prereset	= hpt3x2n_pre_reset,
+};
+
+/*
+ *	Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372n_port_ops = {
+	.inherits	= &hpt3xxn_port_ops,
+	.mode_filter	= &hpt372n_filter,
+};
+
+/**
+ *	hpt3xn_calibrate_dpll		-	Calibrate the DPLL loop
+ *	@dev: PCI device
+ *
+ *	Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
+ *	succeeds
+ */
+
+static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
+{
+	u8 reg5b;
+	u32 reg5c;
+	int tries;
+
+	for (tries = 0; tries < 0x5000; tries++) {
+		udelay(50);
+		pci_read_config_byte(dev, 0x5b, &reg5b);
+		if (reg5b & 0x80) {
+			/* See if it stays set */
+			for (tries = 0; tries < 0x1000; tries++) {
+				pci_read_config_byte(dev, 0x5b, &reg5b);
+				/* Failed ? */
+				if ((reg5b & 0x80) == 0)
+					return 0;
+			}
+			/* Turn off tuning, we have the DPLL set */
+			pci_read_config_dword(dev, 0x5c, &reg5c);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
+			return 1;
+		}
+	}
+	/* Never went stable */
+	return 0;
+}
+
+static int hpt3x2n_pci_clock(struct pci_dev *pdev)
+{
+	unsigned long freq;
+	u32 fcnt;
+	unsigned long iobase = pci_resource_start(pdev, 4);
+
+	fcnt = inl(iobase + 0x90);	/* Not PCI readable for some chips */
+	if ((fcnt >> 12) != 0xABCDE) {
+		int i;
+		u16 sr;
+		u32 total = 0;
+
+		pr_warn("BIOS clock data not set\n");
+
+		/* This is the process the HPT371 BIOS is reported to use */
+		for (i = 0; i < 128; i++) {
+			pci_read_config_word(pdev, 0x78, &sr);
+			total += sr & 0x1FF;
+			udelay(15);
+		}
+		fcnt = total / 128;
+	}
+	fcnt &= 0x1FF;
+
+	freq = (fcnt * 77) / 192;
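+
+	/*
+	 * Worked example: a counter value of 82 gives (82 * 77) / 192 = 32,
+	 * which the bands below report as 33MHz.
+	 */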
+
+	/* Clamp to bands */
+	if (freq < 40)
+		return 33;
+	if (freq < 45)
+		return 40;
+	if (freq < 55)
+		return 50;
+	return 66;
+}
+
+/**
+ *	hpt3x2n_init_one		-	Initialise an HPT3x2N device
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT3x2n device. There are some interesting complications
+ *	here. Firstly the chip may report 366 and be one of several variants.
+ *	Secondly all the timings depend on the clock for the chip, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. The list may be missing a couple of
+ *	later releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT372			4 (HPT366)	5	Other driver
+ *	HPT372N			4 (HPT366)	6	UDMA133
+ *	HPT372			5 (HPT372)	1	Other driver
+ *	HPT372N			5 (HPT372)	2	UDMA133
+ *	HPT302			6 (HPT302)	*	Other driver
+ *	HPT302N			6 (HPT302)	> 1	UDMA133
+ *	HPT371			7 (HPT371)	*	Other driver
+ *	HPT371N			7 (HPT371)	> 1	UDMA133
+ *	HPT374			8 (HPT374)	*	Other driver
+ *	HPT372N			9 (HPT372N)	*	UDMA133
+ */
+
+static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* HPT372N - UDMA133 */
+	static const struct ata_port_info info_hpt372n = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt372n_port_ops
+	};
+	/* HPT302N and HPT371N - UDMA133 */
+	static const struct ata_port_info info_hpt3xxn = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt3xxn_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL };
+	u8 rev = dev->revision;
+	u8 irqmask;
+	unsigned int pci_mhz;
+	unsigned int f_low, f_high;
+	int adjust;
+	unsigned long iobase = pci_resource_start(dev, 4);
+	void *hpriv = (void *)USE_DPLL;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
+	switch (dev->device) {
+	case PCI_DEVICE_ID_TTI_HPT366:
+		/* 372N if rev >= 6 */
+		if (rev < 6)
+			return -ENODEV;
+		goto hpt372n;
+	case PCI_DEVICE_ID_TTI_HPT371:
+		/* 371N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372:
+		/* 372N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		goto hpt372n;
+	case PCI_DEVICE_ID_TTI_HPT302:
+		/* 302N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372N:
+hpt372n:
+		ppi[0] = &info_hpt372n;
+		break;
+	default:
+		pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+		return -ENODEV;
+	}
+
+	/* Ok so this is a chip we support */
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x5A, &irqmask);
+	irqmask &= ~0x10;
+	pci_write_config_byte(dev, 0x5a, irqmask);
+
+	/*
+	 * HPT371 chips physically have only one channel, the secondary one,
+	 * but the primary channel registers do exist!  Go figure...
+	 * So,  we manually disable the non-existing channel here
+	 * (if the BIOS hasn't done this already).
+	 */
+	if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+		u8 mcr1;
+		pci_read_config_byte(dev, 0x50, &mcr1);
+		mcr1 &= ~0x04;
+		pci_write_config_byte(dev, 0x50, mcr1);
+	}
+
+	/*
+	 * Tune the PLL. HPT recommend using 75MHz for SATA, 66MHz for
+	 * UDMA133 and 50MHz for UDMA100. Right now we always use 66MHz.
+	 */
+
+	pci_mhz = hpt3x2n_pci_clock(dev);
+
+	f_low = (pci_mhz * 48) / 66;	/* PCI MHz for 66MHz DPLL */
+	f_high = f_low + 2;		/* Tolerance */
+
+	pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+	/* PLL clock */
+	pci_write_config_byte(dev, 0x5B, 0x21);
+
+	/* Unlike the 37x we don't try jiggling the frequency */
+	for (adjust = 0; adjust < 8; adjust++) {
+		if (hpt3xn_calibrate_dpll(dev))
+			break;
+		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+	}
+	if (adjust == 8) {
+		pr_err("DPLL did not stabilize!\n");
+		return -ENODEV;
+	}
+
+	pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
+
+	/*
+	 * Set our private data up. We only need a few flags
+	 * so we use it directly.
+	 */
+	if (pci_mhz > 60)
+		hpriv = (void *)(PCI66 | USE_DPLL);
+
+	/*
+	 * On HPT371N, if the ATA clock is 66MHz we must set bit 2 in
+	 * the MISC. register to stretch the UltraDMA Tss timing.
+	 * NOTE: This register is only writeable via I/O space.
+	 */
+	if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+		outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
+
+	/* Now kick off ATA set up */
+	return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
+}
+
+static const struct pci_device_id hpt3x2n[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), },
+
+	{ },
+};
+
+static struct pci_driver hpt3x2n_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= hpt3x2n,
+	.probe		= hpt3x2n_init_one,
+	.remove		= ata_pci_remove_one
+};
+
+module_pci_driver(hpt3x2n_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x2n);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
new file mode 100644
index 0000000..b2fc023
--- /dev/null
+++ b/drivers/ata/pata_hpt3x3.c
@@ -0,0 +1,292 @@
+/*
+ *	pata_hpt3x3		-	HPT3x3 driver
+ *	(c) Copyright 2005-2006 Red Hat
+ *
+ *	Was pata_hpt34x but the naming was confusing as it supported the
+ *	343 and 363 so it has been renamed.
+ *
+ *	Based on:
+ *	linux/drivers/ide/pci/hpt34x.c		Version 0.40	Sept 10, 2002
+ *	Copyright (C) 1998-2000	Andre Hedrick <andre@linux-ide.org>
+ *
+ *	May be copied or modified under the terms of the GNU General Public
+ *	License
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt3x3"
+#define DRV_VERSION	"0.6.1"
+
+/**
+ *	hpt3x3_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the HPT3x3 as
+ *	all we have to do is clear the MWDMA and UDMA bits then load the
+ *	mode number.
+ */
+
+static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 r1, r2;
+	int dn = 2 * ap->port_no + adev->devno;
+
+	pci_read_config_dword(pdev, 0x44, &r1);
+	pci_read_config_dword(pdev, 0x48, &r2);
+	/* Load the PIO timing number */
+	r1 &= ~(7 << (3 * dn));
+	r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
+	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
+
+	pci_write_config_dword(pdev, 0x44, r1);
+	pci_write_config_dword(pdev, 0x48, r2);
+}
+
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+/**
+ *	hpt3x3_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ *
+ *	0x44 : bits 0-2 master mode, bits 3-5 slave mode, and so on for
+ *	       the second channel
+ *	0x48 : bit 0 UDMA / bit 4 MWDMA enable for the master, bits 1/5
+ *	       likewise for the slave
+ */
+
+static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 r1, r2;
+	int dn = 2 * ap->port_no + adev->devno;
+	int mode_num = adev->dma_mode & 0x0F;
+
+	pci_read_config_dword(pdev, 0x44, &r1);
+	pci_read_config_dword(pdev, 0x48, &r2);
+	/* Load the timing number */
+	r1 &= ~(7 << (3 * dn));
+	r1 |= (mode_num << (3 * dn));
+	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
+
+	if (adev->dma_mode >= XFER_UDMA_0)
+		r2 |= (0x01 << dn);	/* Ultra mode */
+	else
+		r2 |= (0x10 << dn);	/* MWDMA */
+
+	pci_write_config_dword(pdev, 0x44, r1);
+	pci_write_config_dword(pdev, 0x48, r2);
+}
+
+/**
+ *	hpt3x3_freeze		-	DMA workaround
+ *	@ap: port to freeze
+ *
+ *	When freezing an HPT3x3 we must stop any pending DMA before
+ *	writing to the control register or the chip will hang
+ */
+
+static void hpt3x3_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+			mmio + ATA_DMA_CMD);
+	ata_sff_dma_pause(ap);
+	ata_sff_freeze(ap);
+}
+
+/**
+ *	hpt3x3_bmdma_setup	-	DMA workaround
+ *	@qc: Queued command
+ *
+ *	When issuing BMDMA we must clean up the error/active bits in
+ *	software on this device
+ */
+
+static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	r |= ATA_DMA_INTR | ATA_DMA_ERR;
+	iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return ata_bmdma_setup(qc);
+}
+
+/**
+ *	hpt3x3_atapi_dma	-	ATAPI DMA check
+ *	@qc: Queued command
+ *
+ *	Just say no - we don't do ATAPI DMA
+ */
+
+static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;
+}
+
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+static struct scsi_host_template hpt3x3_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations hpt3x3_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= hpt3x3_set_piomode,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+	.set_dmamode	= hpt3x3_set_dmamode,
+	.bmdma_setup	= hpt3x3_bmdma_setup,
+	.check_atapi_dma = hpt3x3_atapi_dma,
+	.freeze		= hpt3x3_freeze,
+#endif
+
+};
+
+/**
+ *	hpt3x3_init_chipset	-	chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the setup required at boot and on resume.
+ */
+
+static void hpt3x3_init_chipset(struct pci_dev *dev)
+{
+	u16 cmd;
+	/* Initialize the board */
+	pci_write_config_word(dev, 0x80, 0x00);
+	/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	if (cmd & PCI_COMMAND_MEMORY)
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
+	else
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+}
+
+/**
+ *	hpt3x3_init_one		-	Initialise an HPT343/363
+ *	@pdev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Perform basic initialisation. We set the device up so we access all
+ *	ports via BAR4. This is necessary to work around errata.
+ */
+
+static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+		/* Further debug needed */
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+#endif
+		.port_ops = &hpt3x3_port_ops
+	};
+	/* Register offsets of taskfiles in BAR4 area */
+	static const u8 offset_cmd[2] = { 0x20, 0x28 };
+	static const u8 offset_ctl[2] = { 0x36, 0x3E };
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	struct ata_host *host;
+	int i, rc;
+	void __iomem *base;
+
+	hpt3x3_init_chipset(pdev);
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* Everything is relative to BAR4 if we set up this way */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	base = host->iomap[4];	/* Bus mastering base */
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+
+		ioaddr->cmd_addr = base + offset_cmd[i];
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = base + offset_ctl[i];
+		ioaddr->scr_addr = NULL;
+		ata_sff_std_ports(ioaddr);
+		ioaddr->bmdma_addr = base + 8 * i;
+
+		ata_port_pbar_desc(ap, 4, -1, "ioport");
+		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
+	}
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &hpt3x3_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hpt3x3_reinit_one(struct pci_dev *dev)
+{
+	struct ata_host *host = pci_get_drvdata(dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(dev);
+	if (rc)
+		return rc;
+
+	hpt3x3_init_chipset(dev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id hpt3x3[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
+
+	{ },
+};
+
+static struct pci_driver hpt3x3_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= hpt3x3,
+	.probe 		= hpt3x3_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt3x3_reinit_one,
+#endif
+};
+
+module_pci_driver(hpt3x3_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x3);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
new file mode 100644
index 0000000..c272f2c
--- /dev/null
+++ b/drivers/ata/pata_icside.c
@@ -0,0 +1,627 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define DRV_NAME	"pata_icside"
+
+#define ICS_IDENT_OFFSET		0x2280
+
+#define ICS_ARCIN_V5_INTRSTAT		0x0000
+#define ICS_ARCIN_V5_INTROFFSET		0x0004
+
+#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
+#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
+#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
+#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
+
+struct portinfo {
+	unsigned int dataoffset;
+	unsigned int ctrloffset;
+	unsigned int stepping;
+};
+
+static const struct portinfo pata_icside_portinfo_v5 = {
+	.dataoffset	= 0x2800,
+	.ctrloffset	= 0x2b80,
+	.stepping	= 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_1 = {
+	.dataoffset	= 0x2000,
+	.ctrloffset	= 0x2380,
+	.stepping	= 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_2 = {
+	.dataoffset	= 0x3000,
+	.ctrloffset	= 0x3380,
+	.stepping	= 6,
+};
+
+struct pata_icside_state {
+	void __iomem *irq_port;
+	void __iomem *ioc_base;
+	unsigned int type;
+	unsigned int dma;
+	struct {
+		u8 port_sel;
+		u8 disabled;
+		unsigned int speed[ATA_MAX_DEVICES];
+	} port[2];
+};
+
+struct pata_icside_info {
+	struct pata_icside_state *state;
+	struct expansion_card	*ec;
+	void __iomem		*base;
+	void __iomem		*irqaddr;
+	unsigned int		irqmask;
+	const expansioncard_ops_t *irqops;
+	unsigned int		mwdma_mask;
+	unsigned int		nr_ports;
+	const struct portinfo	*port[2];
+	unsigned long		raw_base;
+	unsigned long		raw_ioc_base;
+};
+
+#define ICS_TYPE_A3IN	0
+#define ICS_TYPE_A3USER	1
+#define ICS_TYPE_V6	3
+#define ICS_TYPE_V5	15
+#define ICS_TYPE_NOTYPE	((unsigned int)-1)
+
+/* ---------------- Version 5 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose  : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose  : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
+	.irqenable	= pata_icside_irqenable_arcin_v5,
+	.irqdisable	= pata_icside_irqdisable_arcin_v5,
+};
+
+
+/* ---------------- Version 6 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose  : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+	void __iomem *base = state->irq_port;
+
+	if (!state->port[0].disabled)
+		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+	if (!state->port[1].disabled)
+		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose  : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+ * Purpose  : detect an active interrupt from card
+ */
+static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
+	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
+	.irqenable	= pata_icside_irqenable_arcin_v6,
+	.irqdisable	= pata_icside_irqdisable_arcin_v6,
+	.irqpending	= pata_icside_irqpending_arcin_v6,
+};
+
+
+/*
+ * SG-DMA support.
+ *
+ * Similar to BM-DMA, but we use the RiscPC's IOMD DMA controllers.
+ * There is only one DMA controller per card, which means that only
+ * one drive can be accessed at one time.  NOTE! We do not enforce that
+ * here, but we rely on the main IDE driver spotting that both
+ * interfaces use the same IRQ, which should guarantee this.
+ */
+
+/*
+ * Configure the IOMD to give the appropriate timings for the transfer
+ * mode being requested.  We take the advice of the ATA standards, and
+ * calculate the cycle time based on the transfer mode, and the EIDE
+ * MW DMA specs that the drive provides in the IDENTIFY command.
+ *
+ * We have the following IOMD DMA modes to choose from:
+ *
+ *	Type	Active		Recovery	Cycle
+ *	A	250 (250)	312 (550)	562 (800)
+ *	B	187 (200)	250 (550)	437 (750)
+ *	C	125 (125)	125 (375)	250 (500)
+ *	D	62  (50)	125 (375)	187 (425)
+ *
+ * (figures in brackets are actual measured timings on DIOR/DIOW)
+ *
+ * However, we also need to take care of the read/write active and
+ * recovery timings:
+ *
+ *			Read	Write
+ *  	Mode	Active	-- Recovery --	Cycle	IOMD type
+ *	MW0	215	50	215	480	A
+ *	MW1	80	50	50	150	C
+ *	MW2	70	25	25	120	C
+ */
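+/*
+ * Worked example: per the table above, MW2 (70ns active, 25ns recovery,
+ * 120ns cycle) fits the type C limits tested below (125/375/500ns), so
+ * a 250ns IOMD cycle is programmed for MWDMA2-capable drives.
+ */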
+static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pata_icside_state *state = ap->host->private_data;
+	struct ata_timing t;
+	unsigned int cycle;
+	char iomd_type;
+
+	/*
+	 * DMA is based on a 16MHz clock
+	 */
+	if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
+		return;
+
+	/*
+	 * Choose the IOMD cycle timing which ensures that the interface
+	 * satisfies the measured active, recovery and cycle times.
+	 */
+	if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
+		iomd_type = 'D', cycle = 187;
+	else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
+		iomd_type = 'C', cycle = 250;
+	else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
+		iomd_type = 'B', cycle = 437;
+	else
+		iomd_type = 'A', cycle = 562;
+
+	ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
+		     t.active, t.recover, t.cycle, iomd_type);
+
+	state->port[ap->port_no].speed[adev->devno] = cycle;
+}
+
+static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
+
+	/*
+	 * We are simplex; BUG if we try to fiddle with DMA
+	 * while it's active.
+	 */
+	BUG_ON(dma_channel_active(state->dma));
+
+	/*
+	 * Route the DMA signals to the correct interface
+	 */
+	writeb(state->port[ap->port_no].port_sel, state->ioc_base);
+
+	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
+	set_dma_sg(state->dma, qc->sg, qc->n_elem);
+	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+
+	BUG_ON(dma_channel_active(state->dma));
+	enable_dma(state->dma);
+}
+
+static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+
+	disable_dma(state->dma);
+
+	/* see ata_bmdma_stop */
+	ata_sff_dma_pause(ap);
+}
+
+static u8 pata_icside_bmdma_status(struct ata_port *ap)
+{
+	struct pata_icside_state *state = ap->host->private_data;
+	void __iomem *irq_port;
+
+	irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
+						    ICS_ARCIN_V6_INTRSTAT_1);
+
+	return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
+}
+
+static int icside_dma_init(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	struct expansion_card *ec = info->ec;
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		state->port[0].speed[i] = 480;
+		state->port[1].speed[i] = 480;
+	}
+
+	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
+		state->dma = ec->dma;
+		info->mwdma_mask = ATA_MWDMA2;
+	}
+
+	return 0;
+}
+
+
+static struct scsi_host_template pata_icside_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= SG_MAX_SEGMENTS,
+	.dma_boundary		= IOMD_DMA_BOUNDARY,
+};
+
+static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
+{
+	struct ata_port *ap = link->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+
+	if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
+		return ata_sff_postreset(link, classes);
+
+	state->port[ap->port_no].disabled = 1;
+
+	if (state->type == ICS_TYPE_V6) {
+		/*
+		 * Disable interrupts from this port, otherwise we
+		 * receive spurious interrupts from the floating
+		 * interrupt line.
+		 */
+		void __iomem *irq_port = state->irq_port +
+				(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
+		readb(irq_port);
+	}
+}
+
+static struct ata_port_operations pata_icside_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	/* no need to build any PRD tables for DMA */
+	.qc_prep		= ata_noop_qc_prep,
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.bmdma_setup		= pata_icside_bmdma_setup,
+	.bmdma_start		= pata_icside_bmdma_start,
+	.bmdma_stop		= pata_icside_bmdma_stop,
+	.bmdma_status		= pata_icside_bmdma_status,
+
+	.cable_detect		= ata_cable_40wire,
+	.set_dmamode		= pata_icside_set_dmamode,
+	.postreset		= pata_icside_postreset,
+
+	.port_start		= ATA_OP_NULL,	/* don't need PRD table */
+};
+
+static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
+				     struct pata_icside_info *info,
+				     const struct portinfo *port)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	void __iomem *cmd = base + port->dataoffset;
+
+	ioaddr->cmd_addr	= cmd;
+	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << port->stepping);
+	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << port->stepping);
+	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << port->stepping);
+	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << port->stepping);
+	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << port->stepping);
+	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << port->stepping);
+	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << port->stepping);
+	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << port->stepping);
+	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << port->stepping);
+	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << port->stepping);
+
+	ioaddr->ctl_addr	= base + port->ctrloffset;
+	ioaddr->altstatus_addr	= ioaddr->ctl_addr;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+		      info->raw_base + port->dataoffset,
+		      info->raw_base + port->ctrloffset);
+
+	if (info->raw_ioc_base)
+		ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
+}
+
+static int pata_icside_register_v5(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	void __iomem *base;
+
+	base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
+	if (!base)
+		return -ENOMEM;
+
+	state->irq_port = base;
+
+	info->base = base;
+	info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
+	info->irqmask = 1;
+	info->irqops = &pata_icside_ops_arcin_v5;
+	info->nr_ports = 1;
+	info->port[0] = &pata_icside_portinfo_v5;
+
+	info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);
+
+	return 0;
+}
+
+static int pata_icside_register_v6(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	struct expansion_card *ec = info->ec;
+	void __iomem *ioc_base, *easi_base;
+	unsigned int sel = 0;
+
+	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+	if (!ioc_base)
+		return -ENOMEM;
+
+	easi_base = ioc_base;
+
+	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
+		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
+		if (!easi_base)
+			return -ENOMEM;
+
+		/*
+		 * Enable access to the EASI region.
+		 */
+		sel = 1 << 5;
+	}
+
+	writeb(sel, ioc_base);
+
+	state->irq_port = easi_base;
+	state->ioc_base = ioc_base;
+	state->port[0].port_sel = sel;
+	state->port[1].port_sel = sel | 1;
+
+	info->base = easi_base;
+	info->irqops = &pata_icside_ops_arcin_v6;
+	info->nr_ports = 2;
+	info->port[0] = &pata_icside_portinfo_v6_1;
+	info->port[1] = &pata_icside_portinfo_v6_2;
+
+	info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
+	info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);
+
+	return icside_dma_init(info);
+}
+
+static int pata_icside_add_ports(struct pata_icside_info *info)
+{
+	struct expansion_card *ec = info->ec;
+	struct ata_host *host;
+	int i;
+
+	if (info->irqaddr) {
+		ec->irqaddr = info->irqaddr;
+		ec->irqmask = info->irqmask;
+	}
+	if (info->irqops)
+		ecard_setirq(ec, info->irqops, info->state);
+
+	/*
+	 * Be on the safe side - disable interrupts
+	 */
+	ec->ops->irqdisable(ec, ec->irq);
+
+	host = ata_host_alloc(&ec->dev, info->nr_ports);
+	if (!host)
+		return -ENOMEM;
+
+	host->private_data = info->state;
+	host->flags = ATA_HOST_SIMPLEX;
+
+	for (i = 0; i < info->nr_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ap->pio_mask = ATA_PIO4;
+		ap->mwdma_mask = info->mwdma_mask;
+		ap->flags |= ATA_FLAG_SLAVE_POSS;
+		ap->ops = &pata_icside_port_ops;
+
+		pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
+	}
+
+	return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
+				 &pata_icside_sht);
+}
+
+static int pata_icside_probe(struct expansion_card *ec,
+			     const struct ecard_id *id)
+{
+	struct pata_icside_state *state;
+	struct pata_icside_info info;
+	void __iomem *idmem;
+	int ret;
+
+	ret = ecard_request_resources(ec);
+	if (ret)
+		goto out;
+
+	state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
+	if (!state) {
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	state->type = ICS_TYPE_NOTYPE;
+	state->dma = NO_DMA;
+
+	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+	if (idmem) {
+		unsigned int type;
+
+		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
+		ecardm_iounmap(ec, idmem);
+
+		state->type = type;
+	}
+
+	memset(&info, 0, sizeof(info));
+	info.state = state;
+	info.ec = ec;
+
+	switch (state->type) {
+	case ICS_TYPE_A3IN:
+		dev_warn(&ec->dev, "A3IN unsupported\n");
+		ret = -ENODEV;
+		break;
+
+	case ICS_TYPE_A3USER:
+		dev_warn(&ec->dev, "A3USER unsupported\n");
+		ret = -ENODEV;
+		break;
+
+	case ICS_TYPE_V5:
+		ret = pata_icside_register_v5(&info);
+		break;
+
+	case ICS_TYPE_V6:
+		ret = pata_icside_register_v6(&info);
+		break;
+
+	default:
+		dev_warn(&ec->dev, "unknown interface type\n");
+		ret = -ENODEV;
+		break;
+	}
+
+	if (ret == 0)
+		ret = pata_icside_add_ports(&info);
+
+	if (ret == 0)
+		goto out;
+
+ release:
+	ecard_release_resources(ec);
+ out:
+	return ret;
+}
+
+static void pata_icside_shutdown(struct expansion_card *ec)
+{
+	struct ata_host *host = ecard_get_drvdata(ec);
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts from this card.  We need to do
+	 * this before disabling EASI since we may be accessing
+	 * this register via that region.
+	 */
+	local_irq_save(flags);
+	ec->ops->irqdisable(ec, ec->irq);
+	local_irq_restore(flags);
+
+	/*
+	 * Reset the ROM pointer so that we can read the ROM
+	 * after a soft reboot.  This also disables access to
+	 * the IDE taskfile via the EASI region.
+	 */
+	if (host) {
+		struct pata_icside_state *state = host->private_data;
+		if (state->ioc_base)
+			writeb(0, state->ioc_base);
+	}
+}
+
+static void pata_icside_remove(struct expansion_card *ec)
+{
+	struct ata_host *host = ecard_get_drvdata(ec);
+	struct pata_icside_state *state = host->private_data;
+
+	ata_host_detach(host);
+
+	pata_icside_shutdown(ec);
+
+	/*
+	 * don't NULL out the drvdata - devres/libata wants it
+	 * to free the ata_host structure.
+	 */
+	if (state->dma != NO_DMA)
+		free_dma(state->dma);
+
+	ecard_release_resources(ec);
+}
+
+static const struct ecard_id pata_icside_ids[] = {
+	{ MANU_ICS,  PROD_ICS_IDE  },
+	{ MANU_ICS2, PROD_ICS2_IDE },
+	{ 0xffff, 0xffff }
+};
+
+static struct ecard_driver pata_icside_driver = {
+	.probe		= pata_icside_probe,
+	.remove		= pata_icside_remove,
+	.shutdown	= pata_icside_shutdown,
+	.id_table	= pata_icside_ids,
+	.drv = {
+		.name	= DRV_NAME,
+	},
+};
+
+static int __init pata_icside_init(void)
+{
+	return ecard_register_driver(&pata_icside_driver);
+}
+
+static void __exit pata_icside_exit(void)
+{
+	ecard_remove_driver(&pata_icside_driver);
+}
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ICS PATA driver");
+
+module_init(pata_icside_init);
+module_exit(pata_icside_exit);
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 0000000..2e53872
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,285 @@
+/*
+ * Freescale iMX PATA driver
+ *
+ * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Based on pata_platform - Copyright (C) 2006 - 2007  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * TODO:
+ * - dmaengine support
+ */
+
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_imx"
+
+#define PATA_IMX_ATA_TIME_OFF		0x00
+#define PATA_IMX_ATA_TIME_ON		0x01
+#define PATA_IMX_ATA_TIME_1		0x02
+#define PATA_IMX_ATA_TIME_2W		0x03
+#define PATA_IMX_ATA_TIME_2R		0x04
+#define PATA_IMX_ATA_TIME_AX		0x05
+#define PATA_IMX_ATA_TIME_PIO_RDX	0x06
+#define PATA_IMX_ATA_TIME_4		0x07
+#define PATA_IMX_ATA_TIME_9		0x08
+
+#define PATA_IMX_ATA_CONTROL		0x24
+#define PATA_IMX_ATA_CTRL_FIFO_RST_B	(1<<7)
+#define PATA_IMX_ATA_CTRL_ATA_RST_B	(1<<6)
+#define PATA_IMX_ATA_CTRL_IORDY_EN	(1<<0)
+#define PATA_IMX_ATA_INT_EN		0x2C
+#define PATA_IMX_ATA_INTR_ATA_INTRQ2	(1<<3)
+#define PATA_IMX_DRIVE_DATA		0xA0
+#define PATA_IMX_DRIVE_CONTROL		0xD8
+
+static u32 pio_t4[] = { 30,  20,  15,  10,  10 };
+static u32 pio_t9[] = { 20,  15,  10,  10,  10 };
+static u32 pio_tA[] = { 35,  35,  35,  35,  35 };
+
+struct pata_imx_priv {
+	struct clk *clk;
+	/* timings/interrupt/control regs */
+	void __iomem *host_regs;
+	u32 ata_ctl;
+};
+
+static void pata_imx_set_timing(struct ata_device *adev,
+				struct pata_imx_priv *priv)
+{
+	struct ata_timing timing;
+	unsigned long clkrate;
+	u32 T, mode;
+
+	clkrate = clk_get_rate(priv->clk);
+
+	if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
+	    !clkrate)
+		return;
+
+	T = 1000000000 / clkrate;
+	ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
+
+	mode = adev->pio_mode - XFER_PIO_0;
+
+	writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
+	writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
+	writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
+	writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
+	writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
+	writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
+
+	writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
+	writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
+	writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
+}
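+
+/*
+ * Worked example of the arithmetic above, assuming a 66MHz ATA bus
+ * clock: clk_get_rate() returns 66000000, so T = 1000000000 / 66000000
+ * = 15ns per clock.  For PIO0, pio_t4[0] = 30ns, which programs
+ * 30 / 15 + 1 = 3 cycles into PATA_IMX_ATA_TIME_4; the "+ 1" rounds up
+ * so the programmed value never undershoots the required time.
+ */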
+
+static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pata_imx_priv *priv = ap->host->private_data;
+	u32 val;
+
+	pata_imx_set_timing(adev, priv);
+
+	val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+	if (ata_pio_need_iordy(adev))
+		val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+	else
+		val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+	__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+}
+
+static struct scsi_host_template pata_imx_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_imx_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.cable_detect		= ata_cable_unknown,
+	.set_piomode		= pata_imx_set_piomode,
+};
+
+static void pata_imx_setup_port(struct ata_ioports *ioaddr)
+{
+	/* Fixup the port shift for platforms that need it */
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << 2);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << 2);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT   << 2);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL    << 2);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM    << 2);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH    << 2);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE  << 2);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS  << 2);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD     << 2);
+}
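+
+/*
+ * For example, with cmd_addr at PATA_IMX_DRIVE_DATA (0xA0) the shift
+ * of 2 spaces the shadowed taskfile registers 4 bytes apart:
+ * ATA_REG_ERR (1) lands at 0xA4, ATA_REG_NSECT (2) at 0xA8, and so on
+ * up to ATA_REG_CMD (7) at 0xBC.  PATA_IMX_DRIVE_CONTROL at 0xD8 fits
+ * the same 4-byte stride.
+ */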
+
+static int pata_imx_probe(struct platform_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct pata_imx_priv *priv;
+	int irq = 0;
+	struct resource *io_res;
+	int ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	priv = devm_kzalloc(&pdev->dev,
+				sizeof(struct pata_imx_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "Failed to get clock\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	host->private_data = priv;
+	ap = host->ports[0];
+
+	ap->ops = &pata_imx_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
+	if (IS_ERR(priv->host_regs)) {
+		ret = PTR_ERR(priv->host_regs);
+		goto err;
+	}
+
+	ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
+	ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
+
+	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+	pata_imx_setup_port(&ap->ioaddr);
+
+	ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+		(unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
+		(unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
+
+	/* deassert resets */
+	__raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
+			PATA_IMX_ATA_CTRL_ATA_RST_B,
+			priv->host_regs + PATA_IMX_ATA_CONTROL);
+	/* enable interrupts */
+	__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+			priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	/* activate */
+	ret = ata_host_activate(host, irq, ata_sff_interrupt, 0,
+				&pata_imx_sht);
+
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static int pata_imx_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct pata_imx_priv *priv = host->private_data;
+
+	ata_host_detach(host);
+
+	__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_imx_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct pata_imx_priv *priv = host->private_data;
+	int ret;
+
+	ret = ata_host_suspend(host, PMSG_SUSPEND);
+	if (!ret) {
+		__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+		priv->ata_ctl =
+			__raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+		clk_disable_unprepare(priv->clk);
+	}
+
+	return ret;
+}
+
+static int pata_imx_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct pata_imx_priv *priv = host->private_data;
+
+	int ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+	__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+			priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pata_imx_pm_ops, pata_imx_suspend, pata_imx_resume);
+
+static const struct of_device_id imx_pata_dt_ids[] = {
+	{
+		.compatible = "fsl,imx27-pata",
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
+
+static struct platform_driver pata_imx_driver = {
+	.probe		= pata_imx_probe,
+	.remove		= pata_imx_remove,
+	.driver = {
+		.name		= DRV_NAME,
+		.of_match_table	= imx_pata_dt_ids,
+		.pm		= &pata_imx_pm_ops,
+	},
+};
+
+module_platform_driver(pata_imx_driver);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("low-level driver for iMX PATA");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
new file mode 100644
index 0000000..994f168
--- /dev/null
+++ b/drivers/ata/pata_isapnp.c
@@ -0,0 +1,135 @@
+
+/*
+ *   pata-isapnp.c - ISA PnP PATA controller driver.
+ *   Copyright 2005/2006 Red Hat Inc, all rights reserved.
+ *
+ *   Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_isapnp"
+#define DRV_VERSION "0.2.5"
+
+static struct scsi_host_template isapnp_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations isapnp_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+};
+
+static struct ata_port_operations isapnp_noalt_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	/* No altstatus so we don't want to use the lost interrupt poll */
+	.lost_interrupt = ATA_OP_NULL,
+};
+
+/**
+ *	isapnp_init_one		-	attach an isapnp interface
+ *	@idev: PnP device
+ *	@dev_id: matching detect line
+ *
+ *	Register an ISA bus IDE interface. Such interfaces are PIO 0 and
+ *	use a non-shared IRQ.
+ */
+
+static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *cmd_addr, *ctl_addr;
+	int irq = 0;
+	irq_handler_t handler = NULL;
+
+	if (pnp_port_valid(idev, 0) == 0)
+		return -ENODEV;
+
+	if (pnp_irq_valid(idev, 0)) {
+		irq = pnp_irq(idev, 0);
+		handler = ata_sff_interrupt;
+	}
+
+	/* allocate host */
+	host = ata_host_alloc(&idev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
+	if (!cmd_addr)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+
+	ap->ops = &isapnp_noalt_port_ops;
+	ap->pio_mask = ATA_PIO0;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.cmd_addr = cmd_addr;
+
+	if (pnp_port_valid(idev, 1)) {
+		ctl_addr = devm_ioport_map(&idev->dev,
+					   pnp_port_start(idev, 1), 1);
+		ap->ioaddr.altstatus_addr = ctl_addr;
+		ap->ioaddr.ctl_addr = ctl_addr;
+		ap->ops = &isapnp_port_ops;
+	}
+
+	ata_sff_std_ports(&ap->ioaddr);
+
+	ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+		      (unsigned long long)pnp_port_start(idev, 0),
+		      (unsigned long long)pnp_port_start(idev, 1));
+
+	/* activate */
+	return ata_host_activate(host, irq, handler, 0,
+				 &isapnp_sht);
+}
+
+/**
+ *	isapnp_remove_one	-	unplug an isapnp interface
+ *	@idev: PnP device
+ *
+ *	Remove a previously configured PnP ATA port. Called only on module
+ *	unload events as the core does not currently deal with ISAPnP docking.
+ */
+
+static void isapnp_remove_one(struct pnp_dev *idev)
+{
+	struct device *dev = &idev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_detach(host);
+}
+
+static struct pnp_device_id isapnp_devices[] = {
+	/* Generic ESDI/IDE/ATA compatible hard disk controller */
+	{.id = "PNP0600", .driver_data = 0},
+	{.id = ""}
+};
+
+MODULE_DEVICE_TABLE(pnp, isapnp_devices);
+
+static struct pnp_driver isapnp_driver = {
+	.name		= DRV_NAME,
+	.id_table	= isapnp_devices,
+	.probe		= isapnp_init_one,
+	.remove		= isapnp_remove_one,
+};
+
+module_pnp_driver(isapnp_driver);
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
new file mode 100644
index 0000000..4f97d1e
--- /dev/null
+++ b/drivers/ata/pata_it8213.c
@@ -0,0 +1,298 @@
+/*
+ *    pata_it8213.c - iTE Tech. Inc.  IT8213 PATA driver
+ *
+ *    The IT8213 is a very Intel ICH-like device for timing purposes, having
+ *    a similar register layout and the same split clock arrangement. Cable
+ *    detection is different, and it does not have slave channels or all the
+ *    clutter of later ICH/SATA setups.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_it8213"
+#define DRV_VERSION	"0.0.3"
+
+/**
+ *	it8213_pre_reset	-	probe begin
+ *	@link: link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Filter out ports by the enable bits before doing the normal reset
+ *	and probe.
+ */
+
+static int it8213_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits it8213_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+	};
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	it8213_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection for the 8213 ATA interface. This is
+ *	different to the PIIX arrangement.
+ */
+
+static int it8213_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+	pci_read_config_byte(pdev, 0x42, &tmp);
+	if (tmp & 2)	/* The initial docs are incorrect */
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	it8213_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void it8213_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. The 8213 is a clone so very similar
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio > 1)
+		control |= 1;	/* TIME */
+	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
+		control |= 2;	/* IE */
+	/* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */
+	if (adev->class != ATA_DEV_ATA)
+		control |= 4;	/* PPE */
+
+	pci_read_config_word(dev, master_port, &master_data);
+
+	/* Set PPE, IE, and TIME as appropriate */
+	if (adev->devno == 0) {
+		master_data &= 0xCCF0;
+		master_data |= control;
+		master_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	} else {
+		u8 slave_data;
+
+		master_data &= 0xFF0F;
+		master_data |= (control << 4);
+
+		/* Slave timing in separate register */
+		pci_read_config_byte(dev, 0x44, &slave_data);
+		slave_data &= 0xF0;
+		slave_data |= (timings[pio][0] << 2) | timings[pio][1];
+		pci_write_config_byte(dev, 0x44, slave_data);
+	}
+
+	master_data |= 0x4000;	/* Ensure SITRE is set */
+	pci_write_config_word(dev, master_port, master_data);
+}
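+
+/*
+ * Worked example of the encoding above (a sketch of the arithmetic,
+ * not extra driver code): for PIO4 on an ATA disk as master,
+ * timings[4] = {2, 3} and control = IE | TIME = 3 (no PPE for ATA),
+ * so master_data gets (2 << 12) | (3 << 8) | 3 ORed in, with SITRE
+ * (0x4000) forced on - ISP in bits 15:12, RTC in bits 11:8 and the
+ * enable bits in the low nibble, as per the PIIX/ICH layout.
+ */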
+
+/**
+ *	it8213_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	This device is basically an ICH alike.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void it8213_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(dev, 0x40, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		/* Clocks follow the PIIX style */
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma > 4)
+			u_clock = 0x1000;	/* 100MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66MHz */
+		else
+			u_clock = 0;		/* 33MHz */
+
+		udma_enable |= (1 << devid);
+
+		/* Load the UDMA cycle time */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= u_speed << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		/* Load the clock selection */
+		pci_read_config_word(dev, 0x54, &ideconf);
+		ideconf &= ~(0x1001 << devid);
+		ideconf |= u_clock << devid;
+		pci_write_config_word(dev, 0x54, ideconf);
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		static const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (devid) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= 0xF0;
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, 0x40, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+}
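+
+/*
+ * For reference, the PIIX style UDMA arithmetic above works out as:
+ *
+ *	UDMA mode:  0   1   2   3   4   5    6
+ *	u_speed:    0   1   2   1   2   1    2     min(2 - (udma & 1), udma)
+ *	u_clock:    33  33  33  66  66  100  100   (MHz)
+ */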
+
+static struct scsi_host_template it8213_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+static struct ata_port_operations it8213_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= it8213_cable_detect,
+	.set_piomode		= it8213_set_piomode,
+	.set_dmamode		= it8213_set_dmamode,
+	.prereset		= it8213_pre_reset,
+};
+
+
+/**
+ *	it8213_init_one - Register 8213 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in it8213_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int it8213_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &it8213_ops,
+	};
+	/* Current IT8213 stuff is single port */
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
+}
+
+static const struct pci_device_id it8213_pci_tbl[] = {
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver it8213_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= it8213_pci_tbl,
+	.probe			= it8213_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(it8213_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
new file mode 100644
index 0000000..9bac79e
--- /dev/null
+++ b/drivers/ata/pata_it821x.c
@@ -0,0 +1,983 @@
+/*
+ * pata_it821x.c 	- IT821x PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *			  (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * based upon
+ *
+ * it821x.c
+ *
+ * linux/drivers/ide/pci/it821x.c		Version 0.09	December 2004
+ *
+ * Copyright (C) 2004		Red Hat
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *  Based in part on the ITE vendor provided SCSI driver.
+ *
+ *  Documentation available from IT8212F_V04.pdf
+ * 	http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91
+ *  Some other documents are NDA.
+ *
+ *  The ITE8212 isn't exactly a standard IDE controller. It has two
+ *  modes. In pass through mode it is an IDE controller. In its smart
+ *  mode it's actually quite a capable hardware raid controller disguised
+ *  as an IDE controller. Smart mode only understands DMA read/write and
+ *  identify, none of the fancier commands apply. The IT8211 is identical
+ *  in other respects but lacks the raid mode.
+ *
+ *  Errata:
+ *  o	Rev 0x10 also requires master/slave hold the same DMA timings and
+ *	cannot do ATAPI MWDMA.
+ *  o	The identify data for raid volumes lacks CHS info (technically ok)
+ *	but also fails to set the LBA28 and other bits. We fix these in
+ *	the IDE probe quirk code.
+ *  o	If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
+ *	raid then the controller firmware dies
+ *  o	Smart mode without RAID doesn't clear all the necessary identify
+ *	bits to reduce the command set to the one used
+ *
+ *  This has a few impacts on the driver
+ *  - In pass through mode we do all the work you would expect
+ *  - In smart mode the clocking set up is done by the controller generally
+ *    but we must watch the other limits and filter.
+ *  - There are a few extra vendor commands that actually talk to the
+ *    controller but only work PIO with no IRQ.
+ *
+ *  Vendor areas of the identify block in smart mode are used for the
+ *  timing and policy set up. Each HDD in raid mode also has a serial
+ *  block on the disk. The hardware extra commands are get/set chip status,
+ *  rebuild, get rebuild status.
+ *
+ *  In Linux the driver supports pass through mode as if the device was
+ *  just another IDE controller. If the smart mode is running then
+ *  volumes are managed by the controller firmware and each IDE "disk"
+ *  is a raid volume. Even more cute - the controller can do automated
+ *  hotplug and rebuild.
+ *
+ *  The pass through controller itself is a little demented. It has a
+ *  flaw that it has a single set of PIO/MWDMA timings per channel so
+ *  non UDMA devices restrict each other's performance. It also has a
+ *  single clock source per channel so mixed UDMA100/133 performance
+ *  isn't perfect and we have to pick a clock. Thankfully none of this
+ *  matters in smart mode. ATAPI DMA is not currently supported.
+ *
+ *  It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
+ *
+ *  TODO
+ *	-	ATAPI and other speed filtering
+ *	-	RAID configuration ioctls
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+
+#define DRV_NAME "pata_it821x"
+#define DRV_VERSION "0.4.2"
+
+struct it821x_dev
+{
+	unsigned int smart:1,		/* Are we in smart raid mode */
+		timing10:1;		/* Rev 0x10 */
+	u8	clock_mode;		/* 0, ATA_50 or ATA_66 */
+	u8	want[2][2];		/* Mode/Pri log for master slave */
+	/* We need these for switching the clock when DMA goes on/off
+	   The high byte is the 66MHz timing */
+	u16	pio[2];			/* Cached PIO values */
+	u16	mwdma[2];		/* Cached MWDMA values */
+	u16	udma[2];		/* Cached UDMA values (per drive) */
+	u16	last_device;		/* Master or slave loaded ? */
+};
+
+#define ATA_66		0
+#define ATA_50		1
+#define ATA_ANY		2
+
+#define UDMA_OFF	0
+#define MWDMA_OFF	0
+
+/*
+ *	We allow users to force the card into non raid mode without
+ *	flashing the alternative BIOS. This is also necessary right now
+ *	for embedded platforms that cannot run a PC BIOS but are using this
+ *	device.
+ */
+
+static int it8212_noraid;
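+
+/*
+ * Usage sketch: "modprobe pata_it821x noraid=1" (or pata_it821x.noraid=1
+ * on the kernel command line when built in) makes it821x_init_one()
+ * call it821x_disable_raid() and drive the card in pass through mode.
+ */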
+
+/**
+ *	it821x_program	-	program the PIO/MWDMA registers
+ *	@ap: ATA port
+ *	@adev: Device to program
+ *	@timing: Timing value (66MHz in the top 8 bits, 50MHz in the low 8)
+ *
+ *	Program the PIO/MWDMA timing for this channel according to the
+ *	current clock. These share the same register so are managed by
+ *	the DMA start/stop sequence as with the old driver.
+ */
+
+static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	int channel = ap->port_no;
+	u8 conf;
+
+	/* Program PIO/MWDMA timing bits */
+	if (itdev->clock_mode == ATA_66)
+		conf = timing >> 8;
+	else
+		conf = timing & 0xFF;
+	pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
+}
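+
+/*
+ * Illustration of the dual clock split: the PIO4 timing word used
+ * later in this file is 0x3121, so this helper writes 0x31 to the
+ * channel timing register when the 66MHz clock is selected and 0x21
+ * when the 50MHz clock is in use.
+ */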
+
+
+/**
+ *	it821x_program_udma	-	program the UDMA registers
+ *	@ap: ATA port
+ *	@adev: ATA device to update
+ *	@timing: Timing bits. The top 8 are for 66MHz, the bottom 8 for 50MHz
+ *
+ *	Program the UDMA timing for this drive according to the
+ *	current clock. Handles the dual clocks and also knows about
+ *	the errata on the 0x10 revision. The UDMA errata is partly handled
+ *	here and partly in start_dma.
+ */
+
+static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+	struct it821x_dev *itdev = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int channel = ap->port_no;
+	int unit = adev->devno;
+	u8 conf;
+
+	/* Program UDMA timing bits */
+	if (itdev->clock_mode == ATA_66)
+		conf = timing >> 8;
+	else
+		conf = timing & 0xFF;
+	if (itdev->timing10 == 0)
+		pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
+	else {
+		/* Early revision must be programmed for both together */
+		pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
+		pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
+	}
+}
+
+/**
+ *	it821x_clock_strategy
+ *	@ap: ATA interface
+ *	@adev: ATA device being updated
+ *
+ *	Select between the 50 and 66MHz base clocks to get the best
+ *	results for this interface.
+ */
+
+static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	u8 unit = adev->devno;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	int clock, altclock;
+	u8 v;
+	int sel = 0;
+
+	/* Look for the most wanted clocking */
+	if (itdev->want[0][0] > itdev->want[1][0]) {
+		clock = itdev->want[0][1];
+		altclock = itdev->want[1][1];
+	} else {
+		clock = itdev->want[1][1];
+		altclock = itdev->want[0][1];
+	}
+
+	/* Master doesn't care; does the slave? */
+	if (clock == ATA_ANY)
+		clock = altclock;
+
+	/* Nobody cares - keep the same clock */
+	if (clock == ATA_ANY)
+		return;
+	/* No change */
+	if (clock == itdev->clock_mode)
+		return;
+
+	/* Load this into the controller */
+	if (clock == ATA_66)
+		itdev->clock_mode = ATA_66;
+	else {
+		itdev->clock_mode = ATA_50;
+		sel = 1;
+	}
+	pci_read_config_byte(pdev, 0x50, &v);
+	v &= ~(1 << (1 + ap->port_no));
+	v |= sel << (1 + ap->port_no);
+	pci_write_config_byte(pdev, 0x50, v);
+
+	/*
+	 *	Reprogram the UDMA/PIO of the pair drive for the switch
+	 *	MWDMA will be dealt with by the dma switcher
+	 */
+	if (pair && itdev->udma[1-unit] != UDMA_OFF) {
+		it821x_program_udma(ap, pair, itdev->udma[1-unit]);
+		it821x_program(ap, pair, itdev->pio[1-unit]);
+	}
+	/*
+	 *	Reprogram the UDMA/PIO of our drive for the switch.
+	 *	MWDMA will be dealt with by the dma switcher
+	 */
+	if (itdev->udma[unit] != UDMA_OFF) {
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+		it821x_program(ap, adev, itdev->pio[unit]);
+	}
+}
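+
+/*
+ * Example of the selection above: with the master in UDMA4
+ * (want = {3, ATA_66}) and the slave in PIO0 (want = {1, ATA_66}),
+ * the higher priority master wins and the 66MHz clock is kept.  Had
+ * the winner asked for ATA_ANY, the other drive's preference would
+ * have been used instead, and if both say ATA_ANY the current clock
+ * is simply left alone.
+ */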
+
+/**
+ *	it821x_passthru_set_piomode	-	set PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Configure for PIO mode. This is complicated as the register is
+ *	shared by PIO and MWDMA and for both channels.
+ */
+
+static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	/* Spec says 89; the reference driver uses 88 */
+	static const u16 pio[]	= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
+	static const u8 pio_want[]    = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
+
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+	int mode_wanted = adev->pio_mode - XFER_PIO_0;
+
+	/* We prefer 66MHz clock for PIO 0-3, don't care for PIO4 */
+	itdev->want[unit][1] = pio_want[mode_wanted];
+	itdev->want[unit][0] = 1;	/* PIO is lowest priority */
+	itdev->pio[unit] = pio[mode_wanted];
+	it821x_clock_strategy(ap, adev);
+	it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+/**
+ *	it821x_passthru_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Set up the DMA modes. The actions taken depend heavily on the mode
+ *	to use. If UDMA is used, as is hopefully the usual case, then the
+ *	timing register is private and we need only consider the clock. If
+ *	we are using MWDMA then we have to manage the setting ourselves as
+ *	we switch devices and mode.
+ */
+
+static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 dma[]	= 	{ 0x8866, 0x3222, 0x3121 };
+	static const u8 mwdma_want[] =  { ATA_ANY, ATA_66, ATA_ANY };
+	static const u16 udma[]	= 	{ 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
+	static const u8 udma_want[] =   { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	int channel = ap->port_no;
+	int unit = adev->devno;
+	u8 conf;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int mode_wanted = adev->dma_mode - XFER_UDMA_0;
+
+		itdev->want[unit][1] = udma_want[mode_wanted];
+		itdev->want[unit][0] = 3;	/* UDMA is high priority */
+		itdev->mwdma[unit] = MWDMA_OFF;
+		itdev->udma[unit] = udma[mode_wanted];
+		if (mode_wanted >= 5)
+			itdev->udma[unit] |= 0x8080;	/* UDMA 5/6 select on */
+
+		/* UDMA on. Again revision 0x10 must do the pair */
+		pci_read_config_byte(pdev, 0x50, &conf);
+		if (itdev->timing10)
+			conf &= channel ? 0x9F : 0xE7;
+		else
+			conf &= ~ (1 << (3 + 2 * channel + unit));
+		pci_write_config_byte(pdev, 0x50, conf);
+		it821x_clock_strategy(ap, adev);
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+	} else {
+		int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
+
+		itdev->want[unit][1] = mwdma_want[mode_wanted];
+		itdev->want[unit][0] = 2;	/* MWDMA is low priority */
+		itdev->mwdma[unit] = dma[mode_wanted];
+		itdev->udma[unit] = UDMA_OFF;
+
+		/* UDMA bits off - Revision 0x10 do them in pairs */
+		pci_read_config_byte(pdev, 0x50, &conf);
+		if (itdev->timing10)
+			conf |= channel ? 0x60 : 0x18;
+		else
+			conf |= 1 << (3 + 2 * channel + unit);
+		pci_write_config_byte(pdev, 0x50, conf);
+		it821x_clock_strategy(ap, adev);
+	}
+}
+
+/**
+ *	it821x_passthru_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	Usually drivers set the DMA timing at the point the set_dmamode call
+ *	is made. IT821x however requires we load new timings on the
+ *	transitions in some cases.
+ */
+
+static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+
+	if (itdev->mwdma[unit] != MWDMA_OFF)
+		it821x_program(ap, adev, itdev->mwdma[unit]);
+	else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	it821x_passthru_bmdma_stop	-	DMA stop callback
+ *	@qc: ATA command
+ *
+ *	We loaded new timings in dma_start, as a result we need to restore
+ *	the PIO timings in dma_stop so that the next command issue gets the
+ *	right clock values.
+ */
+
+static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+
+	ata_bmdma_stop(qc);
+	if (itdev->mwdma[unit] != MWDMA_OFF)
+		it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+
+/**
+ *	it821x_passthru_dev_select	-	Select master/slave
+ *	@ap: ATA port
+ *	@device: Device number (not pointer)
+ *
+ *	Device selection hook. If necessary perform clock switching
+ */
+
+static void it821x_passthru_dev_select(struct ata_port *ap,
+				       unsigned int device)
+{
+	struct it821x_dev *itdev = ap->private_data;
+	if (itdev && device != itdev->last_device) {
+		struct ata_device *adev = &ap->link.device[device];
+		it821x_program(ap, adev, itdev->pio[adev->devno]);
+		itdev->last_device = device;
+	}
+	ata_sff_dev_select(ap, device);
+}
+
+/**
+ *	it821x_smart_qc_issue		-	wrap qc issue prot
+ *	@qc: command
+ *
+ *	Wrap the command issue sequence for the IT821x. We need to
+ *	perform our own device selection timing loads before the
+ *	usual command processing kicks off.
+ */
+
+static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
+{
+	switch (qc->tf.command) {
+		/* Commands the firmware supports */
+		case ATA_CMD_READ:
+		case ATA_CMD_READ_EXT:
+		case ATA_CMD_WRITE:
+		case ATA_CMD_WRITE_EXT:
+		case ATA_CMD_PIO_READ:
+		case ATA_CMD_PIO_READ_EXT:
+		case ATA_CMD_PIO_WRITE:
+		case ATA_CMD_PIO_WRITE_EXT:
+		case ATA_CMD_READ_MULTI:
+		case ATA_CMD_READ_MULTI_EXT:
+		case ATA_CMD_WRITE_MULTI:
+		case ATA_CMD_WRITE_MULTI_EXT:
+		case ATA_CMD_ID_ATA:
+		case ATA_CMD_INIT_DEV_PARAMS:
+		case 0xFC:	/* Internal 'report rebuild state' */
+		/* Arguably should just no-op this one */
+		case ATA_CMD_SET_FEATURES:
+			return ata_bmdma_qc_issue(qc);
+	}
+	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+	return AC_ERR_DEV;
+}
+
+/**
+ *	it821x_passthru_qc_issue	-	wrap qc issue prot
+ *	@qc: command
+ *
+ *	Wrap the command issue sequence for the IT821x. We need to
+ *	perform our own device selection timing loads before the
+ *	usual command processing kicks off.
+ */
+
+static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
+{
+	it821x_passthru_dev_select(qc->ap, qc->dev->devno);
+	return ata_bmdma_qc_issue(qc);
+}
+
+/**
+ *	it821x_smart_set_mode	-	mode setting
+ *	@link: interface to set up
+ *	@unused: device that failed (error only)
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned.
+ *	The BIOS configured everything. Our job is not to fiddle. We
+ *	read the dma enabled bits from the PCI configuration of the device
+ *	and respect them.
+ */
+
+static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = XFER_PIO_0;
+		dev->dma_mode = XFER_MW_DMA_0;
+		/* We do need the right mode information for DMA or PIO
+		   and this comes from the current configuration flags */
+		if (ata_id_has_dma(dev->id)) {
+			ata_dev_info(dev, "configured for DMA\n");
+			dev->xfer_mode = XFER_MW_DMA_0;
+			dev->xfer_shift = ATA_SHIFT_MWDMA;
+			dev->flags &= ~ATA_DFLAG_PIO;
+		} else {
+			ata_dev_info(dev, "configured for PIO\n");
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	it821x_dev_config	-	Called each device identify
+ *	@adev: Device that has just been identified
+ *
+ *	Perform the initial setup needed for each device that is chip
+ *	special. In our case we need to lock the sector count to avoid
+ *	blowing the brains out of the firmware with large LBA48 requests
+ *
+ */
+
+static void it821x_dev_config(struct ata_device *adev)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	if (adev->max_sectors > 255)
+		adev->max_sectors = 255;
+
+	if (strstr(model_num, "Integrated Technology Express")) {
+		/* RAID mode */
+		ata_dev_info(adev, "%sRAID%d volume",
+			     adev->id[147] ? "Bootable " : "",
+			     adev->id[129]);
+		if (adev->id[129] != 1)
+			pr_cont("(%dK stripe)", adev->id[146]);
+		pr_cont("\n");
+	}
+	/* This is a controller firmware triggered funny, don't
+	   report the drive faulty! */
+	adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+	/* No HPA in 'smart' mode */
+	adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+}
+
+/**
+ *	it821x_read_id	-	Hack identify data up
+ *	@adev: device to read
+ *	@tf: proposed taskfile
+ *	@id: buffer for returned ident data
+ *
+ *	Query the devices on this firmware driven port and slightly
+ *	mash the identify data to stop us and common tools trying to
+ *	use features the firmware does not support. The firmware itself does
+ *	some masking (eg SMART) but not enough.
+ */
+
+static unsigned int it821x_read_id(struct ata_device *adev,
+					struct ata_taskfile *tf, u16 *id)
+{
+	unsigned int err_mask;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	err_mask = ata_do_dev_read_id(adev, tf, id);
+	if (err_mask)
+		return err_mask;
+	ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	id[83] &= ~(1 << 12);	/* Cache flush is firmware handled */
+	id[83] &= ~(1 << 13);	/* Ditto for LBA48 flushes */
+	id[84] &= ~(1 << 6);	/* No FUA */
+	id[85] &= ~(1 << 10);	/* No HPA */
+	id[76] = 0;		/* No NCQ/AN etc */
+
+	if (strstr(model_num, "Integrated Technology Express")) {
+		/* Set feature bits the firmware neglects */
+		id[49] |= 0x0300;	/* LBA, DMA */
+		id[83] &= 0x7FFF;
+		id[83] |= 0x4400;	/* Word 83 is valid and LBA48 */
+		id[86] |= 0x0400;	/* LBA48 on */
+		id[ATA_ID_MAJOR_VER] |= 0x1F;
+		/* Clear the serial number because it's different each boot
+		   which breaks validation on resume */
+		memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
+	}
+	return err_mask;
+}
+
+/**
+ *	it821x_check_atapi_dma	-	ATAPI DMA handler
+ *	@qc: Command we are about to issue
+ *
+ *	Decide if this ATAPI command can be issued by DMA on this
+ *	controller. Return 0 if it can be.
+ */
+
+static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct it821x_dev *itdev = ap->private_data;
+
+	/* Only use dma for transfers to/from the media. */
+	if (ata_qc_raw_nbytes(qc) < 2048)
+		return -EOPNOTSUPP;
+
+	/* No ATAPI DMA in smart mode */
+	if (itdev->smart)
+		return -EOPNOTSUPP;
+	/* No ATAPI DMA on rev 10 */
+	if (itdev->timing10)
+		return -EOPNOTSUPP;
+	/* Cool */
+	return 0;
+}
+
+/**
+ *	it821x_display_disk	-	display disk setup
+ *	@n: Device number
+ *	@buf: Buffer block from firmware
+ *
+ *	Produce a nice informative display of the device setup as provided
+ *	by the firmware.
+ */
+
+static void it821x_display_disk(int n, u8 *buf)
+{
+	unsigned char id[41];
+	int mode = 0;
+	const char *mtype = "";
+	char mbuf[8];
+	const char *cbl = "(40 wire cable)";
+
+	static const char *types[5] = {
+		"RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK"
+	};
+
+	if (buf[52] > 4)	/* No Disk */
+		return;
+
+	ata_id_c_string((u16 *)buf, id, 0, 41);
+
+	if (buf[51]) {
+		mode = ffs(buf[51]);
+		mtype = "UDMA";
+	} else if (buf[49]) {
+		mode = ffs(buf[49]);
+		mtype = "MWDMA";
+	}
+
+	if (buf[76])
+		cbl = "";
+
+	if (mode)
+		snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+	else
+		strcpy(mbuf, "PIO");
+	if (buf[52] == 4)
+		printk(KERN_INFO "%d: %-6s %-8s          %s %s\n",
+				n, mbuf, types[buf[52]], id, cbl);
+	else
+		printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n",
+				n, mbuf, types[buf[52]], buf[53], id, cbl);
+	if (buf[125] < 100)
+		printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]);
+}
+
+/**
+ *	it821x_firmware_command		-	issue firmware command
+ *	@ap: IT821x port to interrogate
+ *	@cmd: command
+ *	@len: length
+ *
+ *	Issue firmware commands expecting data back from the controller. We
+ *	use this to issue commands that do not go via the normal paths. Other
+ *	commands such as 0xFC can be issued normally.
+ */
+
+static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
+{
+	u8 status;
+	int n = 0;
+	u16 *buf = kmalloc(len, GFP_KERNEL);
+
+	if (!buf)
+		return NULL;
+
+	/* This isn't quite a normal ATA command as we are talking to the
+	   firmware, not the drives */
+	ap->ctl |= ATA_NIEN;
+	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+	ata_wait_idle(ap);
+	iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+	iowrite8(cmd, ap->ioaddr.command_addr);
+	udelay(1);
+	/* This should be almost immediate but a little paranoia goes a long
+	   way. */
+	while (n++ < 10) {
+		status = ioread8(ap->ioaddr.status_addr);
+		if (status & ATA_ERR) {
+			kfree(buf);
+			printk(KERN_ERR "it821x_firmware_command: rejected\n");
+			return NULL;
+		}
+		if (status & ATA_DRQ) {
+			ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
+			return (u8 *)buf;
+		}
+		usleep_range(500, 1000);
+	}
+	kfree(buf);
+	printk(KERN_ERR "it821x_firmware_command: timeout\n");
+	return NULL;
+}
+
+/**
+ *	it821x_probe_firmware	-	firmware reporting/setup
+ *	@ap: IT821x port being probed
+ *
+ *	Probe the firmware of the controller by issuing firmware command
+ *	0xFA and analysing the returned data.
+ */
+
+static void it821x_probe_firmware(struct ata_port *ap)
+{
+	u8 *buf;
+	int i;
+
+	/* This is a bit ugly as we can't just issue a task file to a device
+	   as this is controller magic */
+
+	buf = it821x_firmware_command(ap, 0xFA, 512);
+
+	if (buf != NULL) {
+		printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
+				buf[505],
+				buf[506],
+				buf[507],
+				buf[508]);
+		for (i = 0; i < 4; i++)
+			it821x_display_disk(i, buf + 128 * i);
+		kfree(buf);
+	}
+}
+
+
+
+/**
+ *	it821x_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	The it821x needs to maintain private data structures and also to
+ *	use the standard PCI interface which lacks support for this
+ *	functionality. We instead set up the private data on the port
+ *	start hook, and tear it down on port stop
+ */
+
+static int it821x_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev;
+	u8 conf;
+
+	int ret = ata_bmdma_port_start(ap);
+	if (ret < 0)
+		return ret;
+
+	itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
+	if (itdev == NULL)
+		return -ENOMEM;
+	ap->private_data = itdev;
+
+	pci_read_config_byte(pdev, 0x50, &conf);
+
+	if (conf & 1) {
+		itdev->smart = 1;
+		/* Long I/Os, although allowed in LBA48 space, cause the
+		   onboard firmware to enter the twilight zone */
+		/* No ATAPI DMA in this mode either */
+		if (ap->port_no == 0)
+			it821x_probe_firmware(ap);
+	}
+	/* Pull the current clocks from 0x50 */
+	if (conf & (1 << (1 + ap->port_no)))
+		itdev->clock_mode = ATA_50;
+	else
+		itdev->clock_mode = ATA_66;
+
+	itdev->want[0][1] = ATA_ANY;
+	itdev->want[1][1] = ATA_ANY;
+	itdev->last_device = -1;
+
+	if (pdev->revision == 0x10) {
+		itdev->timing10 = 1;
+		/* Need to disable ATAPI DMA for this case */
+		if (!itdev->smart)
+			printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
+	}
+
+	return 0;
+}
+
+/**
+ *	it821x_rdc_cable	-	Cable detect for RDC1010
+ *	@ap: port we are checking
+ *
+ *	Return the RDC1010 cable type. Unlike the IT821x we know how to do
+ *	this and can do host side cable detect
+ */
+
+static int it821x_rdc_cable(struct ata_port *ap)
+{
+	u16 r40;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_word(pdev, 0x40, &r40);
+	if (r40 & (1 << (2 + ap->port_no)))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static struct scsi_host_template it821x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations it821x_smart_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.check_atapi_dma = it821x_check_atapi_dma,
+	.qc_issue	= it821x_smart_qc_issue,
+
+	.cable_detect	= ata_cable_80wire,
+	.set_mode	= it821x_smart_set_mode,
+	.dev_config	= it821x_dev_config,
+	.read_id	= it821x_read_id,
+
+	.port_start	= it821x_port_start,
+};
+
+static struct ata_port_operations it821x_passthru_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.check_atapi_dma = it821x_check_atapi_dma,
+	.sff_dev_select	= it821x_passthru_dev_select,
+	.bmdma_start 	= it821x_passthru_bmdma_start,
+	.bmdma_stop	= it821x_passthru_bmdma_stop,
+	.qc_issue	= it821x_passthru_qc_issue,
+
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= it821x_passthru_set_piomode,
+	.set_dmamode	= it821x_passthru_set_dmamode,
+
+	.port_start	= it821x_port_start,
+};
+
+static struct ata_port_operations it821x_rdc_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+
+	.check_atapi_dma = it821x_check_atapi_dma,
+	.sff_dev_select	= it821x_passthru_dev_select,
+	.bmdma_start 	= it821x_passthru_bmdma_start,
+	.bmdma_stop	= it821x_passthru_bmdma_stop,
+	.qc_issue	= it821x_passthru_qc_issue,
+
+	.cable_detect	= it821x_rdc_cable,
+	.set_piomode	= it821x_passthru_set_piomode,
+	.set_dmamode	= it821x_passthru_set_dmamode,
+
+	.port_start	= it821x_port_start,
+};
+
+static void it821x_disable_raid(struct pci_dev *pdev)
+{
+	/* Neither the RDC nor the IT8211 */
+	if (pdev->vendor != PCI_VENDOR_ID_ITE ||
+	    pdev->device != PCI_DEVICE_ID_ITE_8212)
+		return;
+
+	/* Reset local CPU, and set BIOS not ready */
+	pci_write_config_byte(pdev, 0x5E, 0x01);
+
+	/* Set to bypass mode, and reset PCI bus */
+	pci_write_config_byte(pdev, 0x50, 0x00);
+	pci_write_config_word(pdev, PCI_COMMAND,
+			      PCI_COMMAND_PARITY | PCI_COMMAND_IO |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	pci_write_config_word(pdev, 0x40, 0xA0F3);
+
+	pci_write_config_dword(pdev, 0x4C, 0x02040204);
+	pci_write_config_byte(pdev, 0x42, 0x36);
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
+}
+
+
+static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	u8 conf;
+
+	static const struct ata_port_info info_smart = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &it821x_smart_port_ops
+	};
+	static const struct ata_port_info info_passthru = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &it821x_passthru_port_ops
+	};
+	static const struct ata_port_info info_rdc = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &it821x_rdc_port_ops
+	};
+	static const struct ata_port_info info_rdc_11 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		/* No UDMA */
+		.port_ops = &it821x_rdc_port_ops
+	};
+
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	static const char *mode[2] = { "pass through", "smart" };
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->vendor == PCI_VENDOR_ID_RDC) {
+		/* Deal with Vortex86SX */
+		if (pdev->revision == 0x11)
+			ppi[0] = &info_rdc_11;
+		else
+			ppi[0] = &info_rdc;
+	} else {
+		/* Force the card into bypass mode if so requested */
+		if (it8212_noraid) {
+			printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
+			it821x_disable_raid(pdev);
+		}
+		pci_read_config_byte(pdev, 0x50, &conf);
+		conf &= 1;
+
+		printk(KERN_INFO DRV_NAME": controller in %s mode.\n",
+								mode[conf]);
+		if (conf == 0)
+			ppi[0] = &info_passthru;
+		else
+			ppi[0] = &info_smart;
+	}
+	return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int it821x_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+	/* Resume - turn raid back off if need be */
+	if (it8212_noraid)
+		it821x_disable_raid(pdev);
+	ata_host_resume(host);
+	return rc;
+}
+#endif
+
+static const struct pci_device_id it821x[] = {
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
+	{ PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), },
+
+	{ },
+};
+
+static struct pci_driver it821x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= it821x,
+	.probe 		= it821x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= it821x_reinit_one,
+#endif
+};
+
+module_pci_driver(it821x_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it821x);
+MODULE_VERSION(DRV_VERSION);
+
+module_param_named(noraid, it8212_noraid, int, S_IRUGO);
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
new file mode 100644
index 0000000..0b0d930
--- /dev/null
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -0,0 +1,207 @@
+/*
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * An ATA driver to handle a Compact Flash connected
+ * to the ixp4xx expansion bus in TrueIDE mode. The CF
+ * must have its chip selects connected to two CS lines
+ * on the ixp4xx. If the irq is not available, you might
+ * want to modify both this driver and libata to run in
+ * polling mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME	"pata_ixp4xx_cf"
+#define DRV_VERSION	"0.2"
+
+static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		ata_dev_info(dev, "configured for PIO0\n");
+		dev->pio_mode = XFER_PIO_0;
+		dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+	}
+	return 0;
+}
+
+static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
+				unsigned char *buf, unsigned int buflen, int rw)
+{
+	unsigned int i;
+	unsigned int words = buflen >> 1;
+	u16 *buf16 = (u16 *) buf;
+	struct ata_port *ap = qc->dev->link->ap;
+	void __iomem *mmio = ap->ioaddr.data_addr;
+	struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
+
+	/* set the expansion bus to 16 bit mode and restore
+	 * 8 bit mode after the transaction.
+	 */
+	*data->cs0_cfg &= ~(0x01);
+	udelay(100);
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ)
+		for (i = 0; i < words; i++)
+			buf16[i] = readw(mmio);
+	else
+		for (i = 0; i < words; i++)
+			writew(buf16[i], mmio);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (rw == READ) {
+			align_buf[0] = readw(mmio);
+			memcpy(trailing_buf, align_buf, 1);
+		} else {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(align_buf[0], mmio);
+		}
+		words++;
+	}
+
+	udelay(100);
+	*data->cs0_cfg |= 0x01;
+
+	return words << 1;
+}
+
+static struct scsi_host_template ixp4xx_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ixp4xx_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ixp4xx_mmio_data_xfer,
+	.cable_detect		= ata_cable_40wire,
+	.set_mode		= ixp4xx_set_mode,
+};
+
+static void ixp4xx_setup_port(struct ata_port *ap,
+			      struct ixp4xx_pata_data *data,
+			      unsigned long raw_cs0, unsigned long raw_cs1)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned long raw_cmd = raw_cs0;
+	unsigned long raw_ctl = raw_cs1 + 0x06;
+
+	ioaddr->cmd_addr	= data->cs0;
+	ioaddr->altstatus_addr	= data->cs1 + 0x06;
+	ioaddr->ctl_addr	= data->cs1 + 0x06;
+
+	ata_sff_std_ports(ioaddr);
+
+#ifndef __ARMEB__
+
+	/* adjust the addresses to handle the address swizzling of the
+	 * ixp4xx in little endian mode.
+	 */
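+	/* For example (derived from the XORs below): the 16-bit data
+	 * register at offset 0 moves to offset 2 (0 ^ 0x02), while the
+	 * 8-bit command/status register at offset 7 moves to offset 4
+	 * (7 ^ 0x03).
+	 */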
+
+	*(unsigned long *)&ioaddr->data_addr		^= 0x02;
+	*(unsigned long *)&ioaddr->cmd_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->altstatus_addr	^= 0x03;
+	*(unsigned long *)&ioaddr->ctl_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->error_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->feature_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->nsect_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbal_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbam_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbah_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->device_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->status_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->command_addr		^= 0x03;
+
+	raw_cmd ^= 0x03;
+	raw_ctl ^= 0x03;
+#endif
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
+}
+
+static int ixp4xx_pata_probe(struct platform_device *pdev)
+{
+	int irq;
+	struct resource *cs0, *cs1;
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+	int ret;
+
+	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+	if (!cs0 || !cs1)
+		return -EINVAL;
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
+	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
+
+	if (!data->cs0 || !data->cs1)
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	if (irq)
+		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+
+	/* Setup expansion bus chip selects */
+	*data->cs0_cfg = data->cs0_bits;
+	*data->cs1_cfg = data->cs1_bits;
+
+	ap = host->ports[0];
+
+	ap->ops	= &ixp4xx_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_NO_ATAPI;
+
+	ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* activate host */
+	return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
+}
+
+static struct platform_driver ixp4xx_pata_platform_driver = {
+	.driver	 = {
+		.name   = DRV_NAME,
+	},
+	.probe		= ixp4xx_pata_probe,
+	.remove		= ata_platform_remove_one,
+};
+
+module_platform_driver(ixp4xx_pata_platform_driver);
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
new file mode 100644
index 0000000..4d1a5d2
--- /dev/null
+++ b/drivers/ata/pata_jmicron.c
@@ -0,0 +1,173 @@
+/*
+ *    pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
+ *			PATA port of the controller. The SATA ports are
+ *			driven by AHCI in the usual configuration although
+ *			this driver can handle other setups if we need it.
+ *
+ *	(c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_jmicron"
+#define DRV_VERSION	"0.1.5"
+
+typedef enum {
+	PORT_PATA0 = 0,
+	PORT_PATA1 = 1,
+	PORT_SATA = 2,
+} port_type;
+
+/**
+ *	jmicron_pre_reset	-	check for 40/80 pin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ *
+ *	On the JMicron 361/363 there is a single PATA port that can be mapped
+ *	either as primary or secondary (or neither). We don't do any policy
+ *	and setup here. We assume that has been done by init_one and the
+ *	BIOS.
+ */
+static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 control;
+	u32 control5;
+	int port_mask = 1 << (4 * ap->port_no);
+	int port = ap->port_no;
+	port_type port_map[2];
+
+	/* Check if our port is enabled */
+	pci_read_config_dword(pdev, 0x40, &control);
+	if ((control & port_mask) == 0)
+		return -ENOENT;
+
+	/* There are two basic mappings. One has the two SATA ports merged
+	   as master/slave and the secondary as PATA, the other has only the
+	   SATA port mapped */
+	if (control & (1 << 23)) {
+		port_map[0] = PORT_SATA;
+		port_map[1] = PORT_PATA0;
+	} else {
+		port_map[0] = PORT_SATA;
+		port_map[1] = PORT_SATA;
+	}
+
+	/* The 365/366 may have this bit set to map the second PATA port
+	   as the internal primary channel */
+	pci_read_config_dword(pdev, 0x80, &control5);
+	if (control5 & (1<<24))
+		port_map[0] = PORT_PATA1;
+
+	/* The two ports may then be logically swapped by the firmware */
+	if (control & (1 << 22))
+		port = port ^ 1;
+
+	/*
+	 *	Now we know which physical port we are talking about we can
+	 *	actually do our cable checking etc. Thankfully we don't need
+	 *	to do the plumbing for other cases.
+	 */
+	switch (port_map[port]) {
+	case PORT_PATA0:
+		if ((control & (1 << 5)) == 0)
+			return -ENOENT;
+		if (control & (1 << 3))	/* 40/80 pin primary */
+			ap->cbl = ATA_CBL_PATA40;
+		else
+			ap->cbl = ATA_CBL_PATA80;
+		break;
+	case PORT_PATA1:
+		/* Bit 21 is set if the port is enabled */
+		if ((control5 & (1 << 21)) == 0)
+			return -ENOENT;
+		if (control5 & (1 << 19))	/* 40/80 pin secondary */
+			ap->cbl = ATA_CBL_PATA40;
+		else
+			ap->cbl = ATA_CBL_PATA80;
+		break;
+	case PORT_SATA:
+		ap->cbl = ATA_CBL_SATA;
+		break;
+	}
+	return ata_sff_prereset(link, deadline);
+}
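+
+/*
+ * Summary of the decode above, for reference (derived from the code,
+ * not from a datasheet):
+ *
+ *	control bit 23 set	-> map 0 = SATA, map 1 = PATA0
+ *	control bit 23 clear	-> both mappings SATA
+ *	control5 bit 24 set	-> map 0 becomes PATA1 (365/366)
+ *	control bit 22 set	-> the two logical ports are swapped
+ */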
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template jmicron_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations jmicron_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.prereset		= jmicron_pre_reset,
+};
+
+
+/**
+ *	jmicron_init_one - Register JMicron ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@id: Entry in jmicron_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int jmicron_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags	= ATA_FLAG_SLAVE_POSS,
+
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA5,
+
+		.port_ops	= &jmicron_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
+}
+
+static const struct pci_device_id jmicron_pci_tbl[] = {
+	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver jmicron_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= jmicron_pci_tbl,
+	.probe			= jmicron_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(jmicron_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for JMicron PATA ports");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
new file mode 100644
index 0000000..8ea4b84
--- /dev/null
+++ b/drivers/ata/pata_legacy.c
@@ -0,0 +1,1279 @@
+/*
+ *   pata_legacy.c - Legacy port PATA/SATA controller driver.
+ *   Copyright 2005/2006 Red Hat, all rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *   An ATA driver for the legacy ATA ports.
+ *
+ *   Data Sources:
+ *	Opti 82C465/82C611 support: Data sheets at opti-inc.com
+ *	HT6560 series:
+ *	Promise 20230/20630:
+ *		http://www.ryston.cz/petr/vlb/pdc20230b.html
+ *		http://www.ryston.cz/petr/vlb/pdc20230c.html
+ *		http://www.ryston.cz/petr/vlb/pdc20630.html
+ *	QDI65x0:
+ *		http://www.ryston.cz/petr/vlb/qd6500.html
+ *		http://www.ryston.cz/petr/vlb/qd6580.html
+ *
+ *	QDI65x0 probe code based on drivers/ide/legacy/qd65xx.c
+ *	Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
+ *	Samuel Thibault <samuel.thibault@ens-lyon.org>
+ *
+ *  Unsupported but docs exist:
+ *	Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
+ *
+ *  This driver handles legacy (that is "ISA/VLB side") IDE ports found
+ *  on PC class systems. There are three hybrid devices that are exceptions:
+ *  the Cyrix 5510/5520, where a pre-SFF ATA device is on the bridge, and
+ *  the MPIIX, where the tuning is PCI side but the IDE is "ISA side".
+ *
+ *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
+ *  opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
+ *
+ *  Support for the Winbond 83759A when operating in advanced mode.
+ *  Multichip mode is not currently supported.
+ *
+ *  Use the autospeed and pio_mask options with:
+ *	Appian ADI/2 aka CLPD7220 or AIC25VL01.
+ *  Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
+ *	Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
+ *	Winbond W83759A, Promise PDC20230-B
+ *
+ *  For now use autospeed and pio_mask as above with the W83759A. This may
+ *  change.
+ *
+ */
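+
+/*
+ * Illustrative usage (option values are examples only, not tuned
+ * recommendations):
+ *
+ *	modprobe pata_legacy probe_all=1 autospeed=1 pio_mask=0x1F
+ *
+ * probes the extra ISA/VLB port ranges, assumes a mode-snooping
+ * controller and permits PIO0-PIO4 (0x1F = bits 0-4 set).
+ */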
+
+#include <linux/async.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_legacy"
+#define DRV_VERSION "0.6.5"
+
+#define NR_HOST 6
+
+static int all;
+module_param(all, int, 0444);
+MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI (0=off, 1=on)");
+
+enum controller {
+	BIOS = 0,
+	SNOOP = 1,
+	PDC20230 = 2,
+	HT6560A = 3,
+	HT6560B = 4,
+	OPTI611A = 5,
+	OPTI46X = 6,
+	QDI6500 = 7,
+	QDI6580 = 8,
+	QDI6580DP = 9,		/* Dual channel mode is different */
+	W83759A = 10,
+
+	UNKNOWN = -1
+};
+
+struct legacy_data {
+	unsigned long timing;
+	u8 clock[2];
+	u8 last;
+	int fast;
+	enum controller type;
+	struct platform_device *platform_dev;
+};
+
+struct legacy_probe {
+	unsigned char *name;
+	unsigned long port;
+	unsigned int irq;
+	unsigned int slot;
+	enum controller type;
+	unsigned long private;
+};
+
+struct legacy_controller {
+	const char *name;
+	struct ata_port_operations *ops;
+	unsigned int pio_mask;
+	unsigned int flags;
+	unsigned int pflags;
+	int (*setup)(struct platform_device *, struct legacy_probe *probe,
+		struct legacy_data *data);
+};
+
+static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+
+static struct legacy_probe probe_list[NR_HOST];
+static struct legacy_data legacy_data[NR_HOST];
+static struct ata_host *legacy_host[NR_HOST];
+static int nr_legacy_host;
+
+
+static int probe_all;		/* Set to check all ISA port ranges */
+static int ht6560a;		/* HT 6560A on primary 1, second 2, both 3 */
+static int ht6560b;		/* HT 6560B on primary 1, second 2, both 3 */
+static int opti82c611a;		/* Opti82c611A on primary 1, sec 2, both 3 */
+static int opti82c46x;		/* Opti 82c465MV present(pri/sec autodetect) */
+static int autospeed;		/* Chip present which snoops speed changes */
+static int pio_mask = ATA_PIO4;	/* PIO range for autospeed devices */
+static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
+
+/* Set to probe QDI controllers */
+#ifdef CONFIG_PATA_QDI_MODULE
+static int qdi = 1;
+#else
+static int qdi;
+#endif
+
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
+static int winbond = 1;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#else
+static int winbond;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#endif
+
+/**
+ *	legacy_probe_add	-	Add interface to probe list
+ *	@port: Controller port
+ *	@irq: IRQ number
+ *	@type: Controller type
+ *	@private: Controller specific info
+ *
+ *	Add an entry into the probe list for ATA controllers. This is used
+ *	to add the default ISA slots and then to build up the table
+ *	further according to other ISA/VLB/Weird device scans
+ *
+ *	An I/O port list is used to keep ordering stable and sane, as we
+ *	don't have any good way to talk about ordering otherwise
+ */
+
+static int legacy_probe_add(unsigned long port, unsigned int irq,
+				enum controller type, unsigned long private)
+{
+	struct legacy_probe *lp = &probe_list[0];
+	int i;
+	struct legacy_probe *free = NULL;
+
+	for (i = 0; i < NR_HOST; i++) {
+		if (lp->port == 0 && free == NULL)
+			free = lp;
+		/* Matching port, or the correct slot for ordering */
+		if (lp->port == port || legacy_port[i] == port) {
+			free = lp;
+			break;
+		}
+		lp++;
+	}
+	if (free == NULL) {
+		printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
+		return -1;
+	}
+	/* Fill in the entry for later probing */
+	free->port = port;
+	free->irq = irq;
+	free->type = type;
+	free->private = private;
+	return 0;
+}
+
+
+/**
+ *	legacy_set_mode		-	mode setting
+ *	@link: IDE link
+ *	@unused: Device that failed when error is returned
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned.
+ *
+ *	The BIOS configured everything. Our job is not to fiddle. Just use
+ *	whatever PIO the hardware is using and leave it at that. When we
+ *	get some kind of nice user driven API for control then we can
+ *	expand on this as per hdparm in the base kernel.
+ */
+
+static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		ata_dev_info(dev, "configured for PIO\n");
+		dev->pio_mode = XFER_PIO_0;
+		dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+	}
+	return 0;
+}
+
+static struct scsi_host_template legacy_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations legacy_base_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+};
+
+/*
+ *	These ops are used if the user indicates the hardware
+ *	snoops the commands to decide on the mode and handles the
+ *	mode selection "magically" itself. Several legacy controllers
+ *	do this. The mode range can be set if it is not 0x1F by setting
+ *	pio_mask as well.
+ */
+
+static struct ata_port_operations simple_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer32,
+};
+
+static struct ata_port_operations legacy_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer32,
+	.set_mode	= legacy_set_mode,
+};
+
+/*
+ *	Promise 20230C and 20620 support
+ *
+ *	This controller supports PIO0 to PIO2. We set PIO timings
+ *	conservatively to allow for a 50MHz VESA Local Bus. The 20630 DMA
+ *	support is weird, being DMA to the controller and PIO'd to the host,
+ *	and is not supported.
+ */
+
+static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int tries = 5;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	u8 rt;
+	unsigned long flags;
+
+	/* Safe as UP only. Force I/Os to occur together */
+
+	local_irq_save(flags);
+
+	/* Unlock the control interface */
+	do {
+		inb(0x1F5);
+		outb(inb(0x1F2) | 0x80, 0x1F2);
+		inb(0x1F2);
+		inb(0x3F6);
+		inb(0x3F6);
+		inb(0x1F2);
+		inb(0x1F2);
+	}
+	while ((inb(0x1F2) & 0x80) && --tries);
+
+	local_irq_restore(flags);
+
+	outb(inb(0x1F4) & 0x07, 0x1F4);
+
+	rt = inb(0x1F3);
+	rt &= ~(0x07 << (3 * adev->devno));
+	if (pio)
+		rt |= (1 + 3 * pio) << (3 * adev->devno);
+	outb(rt, 0x1F3);
+
+	udelay(100);
+	outb(inb(0x1F2) | 0x01, 0x1F2);
+	udelay(100);
+	inb(0x1F5);
+
+}
+
+static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
+			unsigned char *buf, unsigned int buflen, int rw)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_port *ap = dev->link->ap;
+	int slop = buflen & 3;
+
+	/* 32bit I/O capable *and* we need to write a whole number of dwords */
+	if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
+					&& (ap->pflags & ATA_PFLAG_PIO32)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		/* Perform the 32bit I/O synchronization sequence */
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+
+		/* Now the data */
+		if (rw == READ)
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			__le32 pad;
+			if (rw == READ) {
+				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+				memcpy(buf + buflen - slop, &pad, slop);
+			} else {
+				memcpy(&pad, buf + buflen - slop, slop);
+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+			}
+			buflen += 4 - slop;
+		}
+		local_irq_restore(flags);
+	} else
+		buflen = ata_sff_data_xfer32(qc, buf, buflen, rw);
+
+	return buflen;
+}
+
+static struct ata_port_operations pdc20230_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= pdc20230_set_piomode,
+	.sff_data_xfer	= pdc_data_xfer_vlb,
+};
+
+/*
+ *	Holtek 6560A support
+ *
+ *	This controller supports PIO0 to PIO2 (no IORDY even though higher
+ *	timings can be loaded).
+ */
+
+static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover;
+	struct ata_timing t;
+
+	/* Get the timing data in cycles. For now play safe at 50MHz */
+	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 4, 15);
+
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+
+	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+	ioread8(ap->ioaddr.status_addr);
+}
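+
+/* For instance (illustrative numbers, not from a datasheet): active = 2
+ * and recover = 4 clocks would program (4 << 4) | 2 = 0x42 above.
+ */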
+
+static struct ata_port_operations ht6560a_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= ht6560a_set_piomode,
+};
+
+/*
+ *	Holtek 6560B support
+ *
+ *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
+ *	setting unless we see an ATAPI device in which case we force it off.
+ *
+ *	FIXME: need to implement 2nd channel support.
+ */
+
+static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover;
+	struct ata_timing t;
+
+	/* Get the timing data in cycles. For now play safe at 50MHz */
+	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 2, 16) & 0x0F;
+
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+
+	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+
+	if (adev->class != ATA_DEV_ATA) {
+		u8 rconf = inb(0x3E6);
+		if (rconf & 0x24) {
+			rconf &= ~0x24;
+			outb(rconf, 0x3E6);
+		}
+	}
+	ioread8(ap->ioaddr.status_addr);
+}
+
+static struct ata_port_operations ht6560b_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= ht6560b_set_piomode,
+};
+
+/*
+ *	Opti core chipset helpers
+ */
+
+/**
+ *	opti_syscfg	-	read OPTI chipset configuration
+ *	@reg: Configuration register to read
+ *
+ *	Returns the value of an OPTI system board configuration register.
+ */
+
+static u8 opti_syscfg(u8 reg)
+{
+	unsigned long flags;
+	u8 r;
+
+	/* Uniprocessor chipset, so we must force the cycles adjacent */
+	local_irq_save(flags);
+	outb(reg, 0x22);
+	r = inb(0x24);
+	local_irq_restore(flags);
+	return r;
+}
+
+/*
+ *	Opti 82C611A
+ *
+ *	This controller supports PIO0 to PIO3.
+ */
+
+static void opti82c611a_set_piomode(struct ata_port *ap,
+						struct ata_device *adev)
+{
+	u8 active, recover, setup;
+	struct ata_timing t;
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int khz[4] = { 50000, 40000, 33000, 25000 };
+	u8 rc;
+
+	/* Enter configuration mode */
+	ioread16(ap->ioaddr.error_addr);
+	ioread16(ap->ioaddr.error_addr);
+	iowrite8(3, ap->ioaddr.nsect_addr);
+
+	/* Read VLB clock strapping */
+	clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+	/* Setup timing is shared */
+	if (pair) {
+		struct ata_timing tp;
+		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+	}
+
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
+
+	/* Select the right timing bank for write timing */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x7F;
+	rc |= (adev->devno << 7);
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Write the timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+	/* Select the right bank for read timings, also
+	   load the shared timings for address */
+	rc = ioread8(ap->ioaddr.device_addr);
+	rc &= 0xC0;
+	rc |= adev->devno;	/* Index select */
+	rc |= (setup << 4) | 0x04;
+	iowrite8(rc, ap->ioaddr.device_addr);
+
+	/* Load the read timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+	/* Ensure the timing register mode is right */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x73;
+	rc |= 0x84;
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Exit command mode */
+	iowrite8(0x83,  ap->ioaddr.nsect_addr);
+}
+
+
+static struct ata_port_operations opti82c611a_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= opti82c611a_set_piomode,
+};
+
+/*
+ *	Opti 82C465MV
+ *
+ *	This controller supports PIO0 to PIO3. Unlike the 611A the MVB
+ *	version is dual channel but doesn't have a lot of unique registers.
+ */
+
+static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover, setup;
+	struct ata_timing t;
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int khz[4] = { 50000, 40000, 33000, 25000 };
+	u8 rc;
+	u8 sysclk;
+
+	/* Get the clock */
+	sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6;	/* BIOS set */
+
+	/* Enter configuration mode */
+	ioread16(ap->ioaddr.error_addr);
+	ioread16(ap->ioaddr.error_addr);
+	iowrite8(3, ap->ioaddr.nsect_addr);
+
+	/* Read VLB clock strapping */
+	clock = 1000000000 / khz[sysclk];
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+	/* Setup timing is shared */
+	if (pair) {
+		struct ata_timing tp;
+		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+	}
+
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
+
+	/* Select the right timing bank for write timing */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x7F;
+	rc |= (adev->devno << 7);
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Write the timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+	/* Select the right bank for read timings, also
+	   load the shared timings for address */
+	rc = ioread8(ap->ioaddr.device_addr);
+	rc &= 0xC0;
+	rc |= adev->devno;	/* Index select */
+	rc |= (setup << 4) | 0x04;
+	iowrite8(rc, ap->ioaddr.device_addr);
+
+	/* Load the read timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+	/* Ensure the timing register mode is right */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x73;
+	rc |= 0x84;
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Exit command mode */
+	iowrite8(0x83,  ap->ioaddr.nsect_addr);
+
+	/* We need to know this for quad device on the MVB */
+	ap->host->private_data = ap;
+}
+
+/**
+ *	opti82c46x_qc_issue		-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings. The
+ *	MVB has a single set of timing registers and these are shared
+ *	across channels. As there are two registers we really ought to
+ *	track the last two used values as a sort of register window. For
+ *	now we just reload on a channel switch. On the single channel
+ *	setup this condition never fires so we do nothing extra.
+ *
+ *	FIXME: dual channel needs ->serialize support
+ */
+
+static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If timings are set and for the wrong channel (2nd test is
+	   due to a libata shortcoming and will eventually go away, I hope) */
+	if (ap->host->private_data != ap->host
+	    && ap->host->private_data != NULL)
+		opti82c46x_set_piomode(ap, adev);
+
+	return ata_sff_qc_issue(qc);
+}
+
+static struct ata_port_operations opti82c46x_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= opti82c46x_set_piomode,
+	.qc_issue	= opti82c46x_qc_issue,
+};
+
+/**
+ *	qdi65x0_set_piomode		-	PIO setup for QDI65x0
+ *	@ap: Port
+ *	@adev: Device
+ *
+ *	In single channel mode the 6580 has one clock per device and we can
+ *	avoid the requirement to clock switch. We also have to load the timing
+ *	into the right clock according to whether we are master or slave.
+ *
+ *	In dual channel mode the 6580 has one clock per channel and we have
+ *	to software clockswitch in qc_issue.
+ */
+
+static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct legacy_data *ld_qdi = ap->host->private_data;
+	int active, recovery;
+	u8 timing;
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	if (ld_qdi->fast) {
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
+	} else {
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
+	}
+	timing = (recovery << 4) | active | 0x08;
+	ld_qdi->clock[adev->devno] = timing;
+
+	if (ld_qdi->type == QDI6580)
+		outb(timing, ld_qdi->timing + 2 * adev->devno);
+	else
+		outb(timing, ld_qdi->timing + 2 * ap->port_no);
+
+	/* Clear the FIFO */
+	if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA)
+		outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
+}
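+
+/* For example (illustrative values only): active = 3 and recovery = 2
+ * yield the timing byte (2 << 4) | 3 | 0x08 = 0x2b.
+ */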
+
+/**
+ *	qdi_qc_issue		-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings.
+ */
+
+static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct legacy_data *ld_qdi = ap->host->private_data;
+
+	if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
+		if (adev->pio_mode) {
+			ld_qdi->last = ld_qdi->clock[adev->devno];
+			outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
+							2 * ap->port_no);
+		}
+	}
+	return ata_sff_qc_issue(qc);
+}
+
+static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
+				    unsigned char *buf,
+				    unsigned int buflen, int rw)
+{
+	struct ata_device *adev = qc->dev;
+	struct ata_port *ap = adev->link->ap;
+	int slop = buflen & 3;
+
+	if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)
+					&& (ap->pflags & ATA_PFLAG_PIO32)) {
+		if (rw == WRITE)
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			__le32 pad;
+			if (rw == WRITE) {
+				memcpy(&pad, buf + buflen - slop, slop);
+				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+			} else {
+				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+				memcpy(buf + buflen - slop, &pad, slop);
+			}
+		}
+		return (buflen + 3) & ~3;
+	} else
+		return ata_sff_data_xfer(qc, buf, buflen, rw);
+}
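+
+/* The rounding above reports whole dwords: e.g. a 515-byte transfer
+ * (slop == 3) moves 128 dwords plus one padded dword, and the function
+ * returns (515 + 3) & ~3 = 516.
+ */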
+
+static int qdi_port(struct platform_device *dev,
+			struct legacy_probe *lp, struct legacy_data *ld)
+{
+	if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
+		return -EBUSY;
+	ld->timing = lp->private;
+	return 0;
+}
+
+static struct ata_port_operations qdi6500_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= qdi65x0_set_piomode,
+	.qc_issue	= qdi_qc_issue,
+	.sff_data_xfer	= vlb32_data_xfer,
+};
+
+static struct ata_port_operations qdi6580_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= qdi65x0_set_piomode,
+	.sff_data_xfer	= vlb32_data_xfer,
+};
+
+static struct ata_port_operations qdi6580dp_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= qdi65x0_set_piomode,
+	.qc_issue	= qdi_qc_issue,
+	.sff_data_xfer	= vlb32_data_xfer,
+};
+
+static DEFINE_SPINLOCK(winbond_lock);
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	outb(val, port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+	u8 val;
+
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	val = inb(port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+
+	return val;
+}
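+
+/* The chip's configuration space is an index/data pair: the register
+ * number is written to port + 1 and the value moves through port + 2.
+ * E.g. winbond_readcfg(0x130, 0x81) fetches register 0x81 of a chip at
+ * the default base of 0x130 (see legacy_init()).
+ */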
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct legacy_data *ld_winbond = ap->host->private_data;
+	int active, recovery;
+	u8 reg;
+	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+	reg = winbond_readcfg(ld_winbond->timing, 0x81);
+
+	/* Get the timing data in cycles */
+	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
+		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+	else
+		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
+	reg = (active << 4) | recovery;
+	winbond_writecfg(ld_winbond->timing, timing, reg);
+
+	/* Load the setup timing */
+
+	reg = 0x35;
+	if (adev->class != ATA_DEV_ATA)
+		reg |= 0x08;	/* FIFO off */
+	if (!ata_pio_need_iordy(adev))
+		reg |= 0x02;	/* IORDY off */
+	reg |= (clamp_val(t.setup, 0, 3) << 6);
+	winbond_writecfg(ld_winbond->timing, timing + 1, reg);
+}
+
+static int winbond_port(struct platform_device *dev,
+			struct legacy_probe *lp, struct legacy_data *ld)
+{
+	if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
+		return -EBUSY;
+	ld->timing = lp->private;
+	return 0;
+}
+
+static struct ata_port_operations winbond_port_ops = {
+	.inherits	= &legacy_base_port_ops,
+	.set_piomode	= winbond_set_piomode,
+	.sff_data_xfer	= vlb32_data_xfer,
+};
+
+static struct legacy_controller controllers[] = {
+	{"BIOS",	&legacy_port_ops, 	ATA_PIO4,
+			ATA_FLAG_NO_IORDY,	0,			NULL },
+	{"Snooping", 	&simple_port_ops, 	ATA_PIO4,
+			0,			0,			NULL },
+	{"PDC20230",	&pdc20230_port_ops,	ATA_PIO2,
+			ATA_FLAG_NO_IORDY,
+			ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,	NULL },
+	{"HT6560A",	&ht6560a_port_ops,	ATA_PIO2,
+			ATA_FLAG_NO_IORDY,	0,			NULL },
+	{"HT6560B",	&ht6560b_port_ops,	ATA_PIO4,
+			ATA_FLAG_NO_IORDY,	0,			NULL },
+	{"OPTI82C611A",	&opti82c611a_port_ops,	ATA_PIO3,
+			0,			0,			NULL },
+	{"OPTI82C46X",	&opti82c46x_port_ops,	ATA_PIO3,
+			0,			0,			NULL },
+	{"QDI6500",	&qdi6500_port_ops,	ATA_PIO2,
+			ATA_FLAG_NO_IORDY,
+			ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,    qdi_port },
+	{"QDI6580",	&qdi6580_port_ops,	ATA_PIO4,
+			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
+	{"QDI6580DP",	&qdi6580dp_port_ops,	ATA_PIO4,
+			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
+	{"W83759A",	&winbond_port_ops,	ATA_PIO4,
+			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,
+								winbond_port }
+};
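+
+/* Note that this table is indexed directly by enum controller (see
+ * legacy_init_one()), so entries must stay in the same order as the
+ * BIOS..W83759A enumerators above.
+ */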
+
+/**
+ *	probe_chip_type		-	Discover controller
+ *	@probe: Probe entry to check
+ *
+ *	Probe an ATA port and identify the type of controller. We don't
+ *	check if the controller appears to be driveless at this point.
+ */
+
+static __init int probe_chip_type(struct legacy_probe *probe)
+{
+	int mask = 1 << probe->slot;
+
+	if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
+		u8 reg = winbond_readcfg(winbond, 0x81);
+		reg |= 0x80;	/* jumpered mode off */
+		winbond_writecfg(winbond, 0x81, reg);
+		reg = winbond_readcfg(winbond, 0x83);
+		reg |= 0xF0;	/* local control */
+		winbond_writecfg(winbond, 0x83, reg);
+		reg = winbond_readcfg(winbond, 0x85);
+		reg |= 0xF0;	/* programmable timing */
+		winbond_writecfg(winbond, 0x85, reg);
+
+		reg = winbond_readcfg(winbond, 0x81);
+
+		if (reg & mask)
+			return W83759A;
+	}
+	if (probe->port == 0x1F0) {
+		unsigned long flags;
+		local_irq_save(flags);
+		/* Probes */
+		outb(inb(0x1F2) | 0x80, 0x1F2);
+		inb(0x1F5);
+		inb(0x1F2);
+		inb(0x3F6);
+		inb(0x3F6);
+		inb(0x1F2);
+		inb(0x1F2);
+
+		if ((inb(0x1F2) & 0x80) == 0) {
+			/* PDC20230c or 20630 ? */
+			printk(KERN_INFO  "PDC20230-C/20630 VLB ATA controller"
+							" detected.\n");
+			udelay(100);
+			inb(0x1F5);
+			local_irq_restore(flags);
+			return PDC20230;
+		} else {
+			outb(0x55, 0x1F2);
+			inb(0x1F2);
+			inb(0x1F2);
+			if (inb(0x1F2) == 0x00)
+				printk(KERN_INFO "PDC20230-B VLB ATA "
+						     "controller detected.\n");
+			local_irq_restore(flags);
+			return BIOS;
+		}
+	}
+
+	if (ht6560a & mask)
+		return HT6560A;
+	if (ht6560b & mask)
+		return HT6560B;
+	if (opti82c611a & mask)
+		return OPTI611A;
+	if (opti82c46x & mask)
+		return OPTI46X;
+	if (autospeed & mask)
+		return SNOOP;
+	return BIOS;
+}
+
+
+/**
+ *	legacy_init_one		-	attach a legacy interface
+ *	@probe: probe record
+ *
+ *	Register an ISA bus IDE interface. Such interfaces are PIO and we
+ *	assume they do not support IRQ sharing.
+ */
+
+static __init int legacy_init_one(struct legacy_probe *probe)
+{
+	struct legacy_controller *controller = &controllers[probe->type];
+	int pio_modes = controller->pio_mask;
+	unsigned long io = probe->port;
+	u32 mask = (1 << probe->slot);
+	struct ata_port_operations *ops = controller->ops;
+	struct legacy_data *ld = &legacy_data[probe->slot];
+	struct ata_host *host = NULL;
+	struct ata_port *ap;
+	struct platform_device *pdev;
+	struct ata_device *dev;
+	void __iomem *io_addr, *ctrl_addr;
+	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
+	int ret;
+
+	iordy |= controller->flags;
+
+	pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	ret = -EBUSY;
+	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
+	    devm_request_region(&pdev->dev, io + 0x0206, 1,
+							"pata_legacy") == NULL)
+		goto fail;
+
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io, 8);
+	ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
+	if (!io_addr || !ctrl_addr)
+		goto fail;
+	ld->type = probe->type;
+	if (controller->setup)
+		if (controller->setup(pdev, probe, ld) < 0)
+			goto fail;
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto fail;
+	ap = host->ports[0];
+
+	ap->ops = ops;
+	ap->pio_mask = pio_modes;
+	ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
+	ap->pflags |= controller->pflags;
+	ap->ioaddr.cmd_addr = io_addr;
+	ap->ioaddr.altstatus_addr = ctrl_addr;
+	ap->ioaddr.ctl_addr = ctrl_addr;
+	ata_sff_std_ports(&ap->ioaddr);
+	ap->host->private_data = ld;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
+
+	ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
+				&legacy_sht);
+	if (ret)
+		goto fail;
+	async_synchronize_full();
+	ld->platform_dev = pdev;
+
+	/* Nothing found means we drop the port as it's probably not there */
+
+	ret = -ENODEV;
+	ata_for_each_dev(dev, &ap->link, ALL) {
+		if (!ata_dev_absent(dev)) {
+			legacy_host[probe->slot] = host;
+			ld->platform_dev = pdev;
+			return 0;
+		}
+	}
+	ata_host_detach(host);
+fail:
+	platform_device_unregister(pdev);
+	return ret;
+}
+
+/**
+ *	legacy_check_special_cases	-	ATA special cases
+ *	@p: PCI device to check
+ *	@primary: set this if we find an ATA primary
+ *	@secondary: set this if we find an ATA secondary
+ *
+ *	A small number of vendors implemented early PCI ATA interfaces
+ *	on bridge logic without the ATA interface being PCI visible.
+ *	Where we have a matching PCI driver we must skip the relevant
+ *	device here. If we don't know about it then the legacy driver
+ *	is the right driver anyway.
+ */
+
+static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
+								int *secondary)
+{
+	/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
+	if (p->vendor == 0x1078 && p->device == 0x0000) {
+		*primary = *secondary = 1;
+		return;
+	}
+	/* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
+	if (p->vendor == 0x1078 && p->device == 0x0002) {
+		*primary = *secondary = 1;
+		return;
+	}
+	/* Intel MPIIX - PIO ATA on non PCI side of bridge */
+	if (p->vendor == 0x8086 && p->device == 0x1234) {
+		u16 r;
+		pci_read_config_word(p, 0x6C, &r);
+		if (r & 0x8000) {
+			/* ATA port enabled */
+			if (r & 0x4000)
+				*secondary = 1;
+			else
+				*primary = 1;
+		}
+		return;
+	}
+}
+
+static __init void probe_opti_vlb(void)
+{
+	/* If an OPTI 82C46X is present find out where the channels are */
+	static const char *optis[4] = {
+		"3/463MV", "5MV",
+		"5MVA", "5MVB"
+	};
+	u8 chans = 1;
+	u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
+
+	opti82c46x = 3;	/* Assume master and slave first */
+	printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
+								optis[ctrl]);
+	if (ctrl == 3)
+		chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
+	ctrl = opti_syscfg(0xAC);
+	/* Check enabled and this port is the 465MV port. On the
+	   MVB we may have two channels */
+	if (ctrl & 8) {
+		if (chans == 2) {
+			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+			legacy_probe_add(0x170, 15, OPTI46X, 0);
+		}
+		if (ctrl & 4)
+			legacy_probe_add(0x170, 15, OPTI46X, 0);
+		else
+			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+	} else
+		legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+}
+
+static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
+{
+	static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
+	/* Check card type */
+	if ((r & 0xF0) == 0xC0) {
+		/* QD6500: single channel */
+		if (r & 8)
+			/* Disabled ? */
+			return;
+		legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
+								QDI6500, port);
+	}
+	if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
+		/* QD6580: dual channel */
+		if (!request_region(port + 2, 2, "pata_qdi")) {
+			release_region(port, 2);
+			return;
+		}
+		res = inb(port + 3);
+		/* Single channel mode ? */
+		if (res & 1)
+			legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
+								QDI6580, port);
+		else { /* Dual channel mode */
+			legacy_probe_add(0x1F0, 14, QDI6580DP, port);
+			/* port + 0x02, r & 0x04 */
+			legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
+		}
+		release_region(port + 2, 2);
+	}
+}
+
+static __init void probe_qdi_vlb(void)
+{
+	unsigned long flags;
+	static const unsigned long qd_port[2] = { 0x30, 0xB0 };
+	int i;
+
+	/*
+	 *	Check each possible QD65xx base address
+	 */
+
+	for (i = 0; i < 2; i++) {
+		unsigned long port = qd_port[i];
+		u8 r, res;
+
+		if (request_region(port, 2, "pata_qdi")) {
+			/* Check for a card */
+			local_irq_save(flags);
+			/* I have no h/w that needs this delay but it
+			   is present in the historic code */
+			r = inb(port);
+			udelay(1);
+			outb(0x19, port);
+			udelay(1);
+			res = inb(port);
+			udelay(1);
+			outb(r, port);
+			udelay(1);
+			local_irq_restore(flags);
+
+			/* Fail */
+			if (res == 0x19) {
+				release_region(port, 2);
+				continue;
+			}
+			/* Passes the presence test */
+			r = inb(port + 1);
+			udelay(1);
+			/* Check port agrees with port set */
+			if ((r & 2) >> 1 == i)
+				qdi65_identify_port(r, res, port);
+			release_region(port, 2);
+		}
+	}
+}
+
+/**
+ *	legacy_init		-	attach legacy interfaces
+ *
+ *	Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
+ *	Right now we do not scan the ide0 and ide1 addresses but should do so
+ *	for non-PCI systems or systems with no PCI IDE legacy mode devices.
+ *	If you fix that note there are special cases to consider like VLB
+ *	drivers and CS5510/20.
+ */
+
+static __init int legacy_init(void)
+{
+	int i;
+	int ct = 0;
+	int primary = 0;
+	int secondary = 0;
+	int pci_present = 0;
+	struct legacy_probe *pl = &probe_list[0];
+	int slot = 0;
+
+	struct pci_dev *p = NULL;
+
+	for_each_pci_dev(p) {
+		int r;
+		/* Check for any overlap of the system ATA mappings. Native
+		   mode controllers stuck on these addresses or some devices
+		   in 'raid' mode won't be found by the storage class test */
+		for (r = 0; r < 6; r++) {
+			if (pci_resource_start(p, r) == 0x1f0)
+				primary = 1;
+			if (pci_resource_start(p, r) == 0x170)
+				secondary = 1;
+		}
+		/* Check for special cases */
+		legacy_check_special_cases(p, &primary, &secondary);
+
+		/* If PCI bus is present then don't probe for tertiary
+		   legacy ports */
+		pci_present = 1;
+	}
+
+	if (winbond == 1)
+		winbond = 0x130;	/* Default port, alt is 1B0 */
+
+	if (primary == 0 || all)
+		legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
+	if (secondary == 0 || all)
+		legacy_probe_add(0x170, 15, UNKNOWN, 0);
+
+	if (probe_all || !pci_present) {
+		/* ISA/VLB extra ports */
+		legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
+		legacy_probe_add(0x168, 10, UNKNOWN, 0);
+		legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
+		legacy_probe_add(0x160, 12, UNKNOWN, 0);
+	}
+
+	if (opti82c46x)
+		probe_opti_vlb();
+	if (qdi)
+		probe_qdi_vlb();
+
+	for (i = 0; i < NR_HOST; i++, pl++) {
+		if (pl->port == 0)
+			continue;
+		if (pl->type == UNKNOWN)
+			pl->type = probe_chip_type(pl);
+		pl->slot = slot++;
+		if (legacy_init_one(pl) == 0)
+			ct++;
+	}
+	if (ct != 0)
+		return 0;
+	return -ENODEV;
+}
+
+static __exit void legacy_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_legacy_host; i++) {
+		struct legacy_data *ld = &legacy_data[i];
+		ata_host_detach(legacy_host[i]);
+		platform_device_unregister(ld->platform_dev);
+	}
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for legacy ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_qdi");
+MODULE_ALIAS("pata_winbond");
+
+module_param(probe_all, int, 0);
+module_param(autospeed, int, 0);
+module_param(ht6560a, int, 0);
+module_param(ht6560b, int, 0);
+module_param(opti82c611a, int, 0);
+module_param(opti82c46x, int, 0);
+module_param(qdi, int, 0);
+module_param(winbond, int, 0);
+module_param(pio_mask, int, 0);
+module_param(iordy_mask, int, 0);
+
+module_init(legacy_init);
+module_exit(legacy_exit);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
new file mode 100644
index 0000000..9588e68
--- /dev/null
+++ b/drivers/ata/pata_macio.c
@@ -0,0 +1,1418 @@
+/*
+ * Libata based driver for Apple "macio" family of PATA controllers
+ *
+ * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
+ *                     <benh@kernel.crashing.org>
+ *
+ * Some bits and pieces from drivers/ide/ppc/pmac.c
+ *
+ */
+
+#undef DEBUG
+#undef DEBUG_DMA
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+
+#include <asm/macio.h>
+#include <asm/io.h>
+#include <asm/dbdma.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/mediabay.h>
+
+#ifdef DEBUG_DMA
+#define dev_dbgdma(dev, format, arg...)		\
+	dev_printk(KERN_DEBUG , dev , format , ## arg)
+#else
+#define dev_dbgdma(dev, format, arg...)		\
+	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#endif
+
+#define DRV_NAME	"pata_macio"
+#define DRV_VERSION	"0.9"
+
+/* Models of macio ATA controller */
+enum {
+	controller_ohare,	/* OHare based */
+	controller_heathrow,	/* Heathrow/Paddington */
+	controller_kl_ata3,	/* KeyLargo ATA-3 */
+	controller_kl_ata4,	/* KeyLargo ATA-4 */
+	controller_un_ata6,	/* UniNorth2 ATA-6 */
+	controller_k2_ata6,	/* K2 ATA-6 */
+	controller_sh_ata6,	/* Shasta ATA-6 */
+};
+
+static const char* macio_ata_names[] = {
+	"OHare ATA",		/* OHare based */
+	"Heathrow ATA",		/* Heathrow/Paddington */
+	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
+	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
+	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
+	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
+	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
+};
+
+/*
+ * Extra registers, both 32-bit little-endian
+ */
+#define IDE_TIMING_CONFIG	0x200
+#define IDE_INTERRUPT		0x300
+
+/* Kauai (U2) ATA has different register setup */
+#define IDE_KAUAI_PIO_CONFIG	0x200
+#define IDE_KAUAI_ULTRA_CONFIG	0x210
+#define IDE_KAUAI_POLL_CONFIG	0x220
+
+/*
+ * Timing configuration register definitions
+ */
+
+/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
+#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
+#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
+#define IDE_SYSCLK_NS		30	/* 33MHz cell */
+#define IDE_SYSCLK_66_NS	15	/* 66MHz cell */
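+/* E.g. a 120ns interval needs SYSCLK_TICKS(120) = (120 + 29) / 30 = 4
+ * ticks on a 33MHz cell and SYSCLK_TICKS_66(120) = (120 + 14) / 15 = 8
+ * ticks on a 66MHz one.
+ */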
+
+/* 133MHz cell, found in Shasta.
+ * See comments about the 100MHz Uninorth 2...
+ * Note that PIO_MASK and MDMA_MASK seem to overlap; that's just
+ * weird and I don't know why, at this stage.
+ */
+#define TR_133_PIOREG_PIO_MASK		0xff000fff
+#define TR_133_PIOREG_MDMA_MASK		0x00fff800
+#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
+#define TR_133_UDMAREG_UDMA_EN		0x00000001
+
+/* 100MHz cell, found in Uninorth 2 and K2. It appears as a PCI device
+ * (106b/0033) on the Uninorth or K2 internal PCI bus and its clock is
+ * controlled like gem or fw. It appears to be an evolution of the
+ * KeyLargo ATA4 with the timing register extended to two 32-bit
+ * registers (one for PIO & MWDMA and one for UDMA) and a similar
+ * DBDMA channel. It has its own local feature control register as well.
+ *
+ * After scratching my mind over the timing values, at least for PIO
+ * and MDMA, I think I've figured out the format of the timing register,
+ * though I use pre-calculated tables for UDMA as usual...
+ */
+#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
+#define TR_100_PIO_ADDRSETUP_SHIFT	24
+#define TR_100_MDMA_MASK		0x00fff000
+#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
+#define TR_100_MDMA_RECOVERY_SHIFT	18
+#define TR_100_MDMA_ACCESS_MASK		0x0003f000
+#define TR_100_MDMA_ACCESS_SHIFT	12
+#define TR_100_PIO_MASK			0xff000fff
+#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
+#define TR_100_PIO_RECOVERY_SHIFT	6
+#define TR_100_PIO_ACCESS_MASK		0x0000003f
+#define TR_100_PIO_ACCESS_SHIFT		0
+
+#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
+#define TR_100_UDMAREG_UDMA_EN		0x00000001
+
+
+/* 66MHz cell, found in KeyLargo. Can do ultra modes 0 to 2 on a
+ * 40-conductor cable and up to mode 4 on an 80-conductor one.
+ * Clock unit is 15ns (66MHz).
+ *
+ * 3 values can be programmed:
+ *  - Write data setup, which appears to match the cycle time. They
+ *    also call it DIOW setup.
+ *  - Ready to pause time (from spec)
+ *  - Address setup. That one is weird. I don't see where exactly
+ *    it fits in UDMA cycles; I got its name from an obscure piece
+ *    of commented out code in Darwin. They leave it at 0, and we do
+ *    as well, despite a comment that would lead one to think it has
+ *    a min value of 45ns.
+ * Apple also adds 60ns to the write data setup (or cycle time?) on
+ * reads.
+ */
+#define TR_66_UDMA_MASK			0xfff00000
+#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
+#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
+#define TR_66_PIO_ADDRSETUP_SHIFT	29
+#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
+#define TR_66_UDMA_RDY2PAUS_SHIFT	25
+#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
+#define TR_66_UDMA_WRDATASETUP_SHIFT	21
+#define TR_66_MDMA_MASK			0x000ffc00
+#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
+#define TR_66_MDMA_RECOVERY_SHIFT	15
+#define TR_66_MDMA_ACCESS_MASK		0x00007c00
+#define TR_66_MDMA_ACCESS_SHIFT		10
+#define TR_66_PIO_MASK			0xe00003ff
+#define TR_66_PIO_RECOVERY_MASK		0x000003e0
+#define TR_66_PIO_RECOVERY_SHIFT	5
+#define TR_66_PIO_ACCESS_MASK		0x0000001f
+#define TR_66_PIO_ACCESS_SHIFT		0
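+
+/* A hypothetical PIO timing word for this cell packs recovery and access
+ * clocks as (recovery << TR_66_PIO_RECOVERY_SHIFT) | access; e.g.
+ * access = 5 and recovery = 4 ticks give (4 << 5) | 5 = 0x85
+ * (illustrative numbers, not vetted against real hardware).
+ */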
+
+/* 33MHz cell, found in OHare, Heathrow (& Paddington) and KeyLargo.
+ * Can do PIO & MDMA modes, clock unit is 30ns (33MHz).
+ *
+ * The access time and recovery time can be programmed. Some older
+ * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
+ * the same here for safety against broken old hardware ;)
+ * The HalfTick bit, when set, adds half a clock (15ns) to the access
+ * time and removes one from recovery. It's not supported on the KeyLargo
+ * implementation afaik. The E bit appears to be set for PIO mode 0 and
+ * is used to reach the long timings used in that mode.
+ */
+#define TR_33_MDMA_MASK			0x003ff800
+#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
+#define TR_33_MDMA_RECOVERY_SHIFT	16
+#define TR_33_MDMA_ACCESS_MASK		0x0000f800
+#define TR_33_MDMA_ACCESS_SHIFT		11
+#define TR_33_MDMA_HALFTICK		0x00200000
+#define TR_33_PIO_MASK			0x000007ff
+#define TR_33_PIO_E			0x00000400
+#define TR_33_PIO_RECOVERY_MASK		0x000003e0
+#define TR_33_PIO_RECOVERY_SHIFT	5
+#define TR_33_PIO_ACCESS_MASK		0x0000001f
+#define TR_33_PIO_ACCESS_SHIFT		0
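+
+/* Worked decode, using the masks above, of the 33MHz default word
+ * 0x00074526 loaded by pata_macio_default_timings(): PIO access = 6,
+ * PIO recovery = 9 with the E bit set; MDMA access = 8, MDMA
+ * recovery = 7.
+ */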
+
+/*
+ * Interrupt register definitions. Only present on newer cells
+ * (Keylargo and later afaik) so we don't use it.
+ */
+#define IDE_INTR_DMA			0x80000000
+#define IDE_INTR_DEVICE			0x40000000
+
+/*
+ * FCR Register on Kauai. Not sure what bit 0x4 is ...
+ */
+#define KAUAI_FCR_UATA_MAGIC		0x00000004
+#define KAUAI_FCR_UATA_RESET_N		0x00000002
+#define KAUAI_FCR_UATA_ENABLE		0x00000001
+
+
+/* Allow up to 256 DBDMA commands per xfer */
+#define MAX_DCMDS		256
+
+/* Don't let a DMA segment go all the way to 64K */
+#define MAX_DBDMA_SEG		0xff00
+
+
+/*
+ * Wait 1s for disk to answer on IDE bus after a hard reset
+ * of the device (via GPIO/FCR).
+ *
+ * Some devices seem to "pollute" the bus even after dropping
+ * the BSY bit (typically some combo drives slave on the UDMA
+ * bus) after a hard reset. Since we hard reset all drives on
+ * KeyLargo ATA66, we have to keep that delay around. I may end
+ * up not hard resetting anymore on these and keep the delay only
+ * for older interfaces instead (we have to reset when coming
+ * from MacOS...) --BenH.
+ */
+#define IDE_WAKEUP_DELAY_MS	1000
+
+struct pata_macio_timing;
+
+struct pata_macio_priv {
+	int				kind;
+	int				aapl_bus_id;
+	int				mediabay : 1;
+	struct device_node		*node;
+	struct macio_dev		*mdev;
+	struct pci_dev			*pdev;
+	struct device			*dev;
+	int				irq;
+	u32				treg[2][2];
+	void __iomem			*tfregs;
+	void __iomem			*kauai_fcr;
+	struct dbdma_cmd *		dma_table_cpu;
+	dma_addr_t			dma_table_dma;
+	struct ata_host			*host;
+	const struct pata_macio_timing	*timings;
+};
+
+/* Previous variants of this driver used to calculate timings
+ * for various variants of the chip and use tables for others.
+ *
+ * Not only was this confusing, but in addition, it isn't clear
+ * whether our calculation code was correct. It didn't entirely
+ * match the Darwin code and whatever documentation I could find
+ * on these cells.
+ *
+ * I decided to entirely rely on a table instead for this version
+ * of the driver. Also, because I don't really care about derated
+ * modes and really old HW other than making it work, I'm not going
+ * to calculate / snoop timing values for something else than the
+ * standard modes.
+ */
+struct pata_macio_timing {
+	int	mode;
+	u32	reg1;	/* Bits to set in first timing reg */
+	u32	reg2;	/* Bits to set in second timing reg */
+};
+
+static const struct pata_macio_timing pata_macio_ohare_timings[] = {
+	{ XFER_PIO_0,		0x00000526,	0, },
+	{ XFER_PIO_1,		0x00000085,	0, },
+	{ XFER_PIO_2,		0x00000025,	0, },
+	{ XFER_PIO_3,		0x00000025,	0, },
+	{ XFER_PIO_4,		0x00000025,	0, },
+	{ XFER_MW_DMA_0,	0x00074000,	0, },
+	{ XFER_MW_DMA_1,	0x00221000,	0, },
+	{ XFER_MW_DMA_2,	0x00211000,	0, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
+	{ XFER_PIO_0,		0x00000526,	0, },
+	{ XFER_PIO_1,		0x00000085,	0, },
+	{ XFER_PIO_2,		0x00000025,	0, },
+	{ XFER_PIO_3,		0x00000025,	0, },
+	{ XFER_PIO_4,		0x00000025,	0, },
+	{ XFER_MW_DMA_0,	0x00074000,	0, },
+	{ XFER_MW_DMA_1,	0x00221000,	0, },
+	{ XFER_MW_DMA_2,	0x00211000,	0, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kl33_timings[] = {
+	{ XFER_PIO_0,		0x00000526,	0, },
+	{ XFER_PIO_1,		0x00000085,	0, },
+	{ XFER_PIO_2,		0x00000025,	0, },
+	{ XFER_PIO_3,		0x00000025,	0, },
+	{ XFER_PIO_4,		0x00000025,	0, },
+	{ XFER_MW_DMA_0,	0x00084000,	0, },
+	{ XFER_MW_DMA_1,	0x00021800,	0, },
+	{ XFER_MW_DMA_2,	0x00011800,	0, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kl66_timings[] = {
+	{ XFER_PIO_0,		0x0000038c,	0, },
+	{ XFER_PIO_1,		0x0000020a,	0, },
+	{ XFER_PIO_2,		0x00000127,	0, },
+	{ XFER_PIO_3,		0x000000c6,	0, },
+	{ XFER_PIO_4,		0x00000065,	0, },
+	{ XFER_MW_DMA_0,	0x00084000,	0, },
+	{ XFER_MW_DMA_1,	0x00029800,	0, },
+	{ XFER_MW_DMA_2,	0x00019400,	0, },
+	{ XFER_UDMA_0,		0x19100000,	0, },
+	{ XFER_UDMA_1,		0x14d00000,	0, },
+	{ XFER_UDMA_2,		0x10900000,	0, },
+	{ XFER_UDMA_3,		0x0c700000,	0, },
+	{ XFER_UDMA_4,		0x0c500000,	0, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kauai_timings[] = {
+	{ XFER_PIO_0,		0x08000a92,	0, },
+	{ XFER_PIO_1,		0x0800060f,	0, },
+	{ XFER_PIO_2,		0x0800038b,	0, },
+	{ XFER_PIO_3,		0x05000249,	0, },
+	{ XFER_PIO_4,		0x04000148,	0, },
+	{ XFER_MW_DMA_0,	0x00618000,	0, },
+	{ XFER_MW_DMA_1,	0x00209000,	0, },
+	{ XFER_MW_DMA_2,	0x00148000,	0, },
+	{ XFER_UDMA_0,		         0,	0x000070c1, },
+	{ XFER_UDMA_1,		         0,	0x00005d81, },
+	{ XFER_UDMA_2,		         0,	0x00004a61, },
+	{ XFER_UDMA_3,		         0,	0x00003a51, },
+	{ XFER_UDMA_4,		         0,	0x00002a31, },
+	{ XFER_UDMA_5,		         0,	0x00002921, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_shasta_timings[] = {
+	{ XFER_PIO_0,		0x0a000c97,	0, },
+	{ XFER_PIO_1,		0x07000712,	0, },
+	{ XFER_PIO_2,		0x040003cd,	0, },
+	{ XFER_PIO_3,		0x0500028b,	0, },
+	{ XFER_PIO_4,		0x0400010a,	0, },
+	{ XFER_MW_DMA_0,	0x00820800,	0, },
+	{ XFER_MW_DMA_1,	0x0028b000,	0, },
+	{ XFER_MW_DMA_2,	0x001ca000,	0, },
+	{ XFER_UDMA_0,		         0,	0x00035901, },
+	{ XFER_UDMA_1,		         0,	0x000348b1, },
+	{ XFER_UDMA_2,		         0,	0x00033881, },
+	{ XFER_UDMA_3,		         0,	0x00033861, },
+	{ XFER_UDMA_4,		         0,	0x00033841, },
+	{ XFER_UDMA_5,		         0,	0x00033031, },
+	{ XFER_UDMA_6,		         0,	0x00033021, },
+	{ -1, 0, 0 }
+};
+
+static const struct pata_macio_timing *pata_macio_find_timing(
+					    struct pata_macio_priv *priv,
+					    int mode)
+{
+	int i;
+
+	for (i = 0; priv->timings[i].mode > 0; i++) {
+		if (priv->timings[i].mode == mode)
+			return &priv->timings[i];
+	}
+	return NULL;
+}
+
+
+static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+	void __iomem *rbase = ap->ioaddr.cmd_addr;
+
+	if (priv->kind == controller_sh_ata6 ||
+	    priv->kind == controller_un_ata6 ||
+	    priv->kind == controller_k2_ata6) {
+		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
+		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
+	} else
+		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
+}
+
+static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
+{
+	ata_sff_dev_select(ap, device);
+
+	/* Apply timings */
+	pata_macio_apply_timings(ap, device);
+}
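+/*
+ * Editor's note: the channel has a single set of timing registers shared
+ * by both devices, so per-device values are cached in priv->treg[] and
+ * re-applied on every device selection above.
+ */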
+
+static void pata_macio_set_timings(struct ata_port *ap,
+				   struct ata_device *adev)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+	const struct pata_macio_timing *t;
+
+	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
+		adev->devno,
+		adev->pio_mode,
+		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
+		adev->dma_mode,
+		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
+
+	/* First clear timings */
+	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
+
+	/* Now get the PIO timings */
+	t = pata_macio_find_timing(priv, adev->pio_mode);
+	if (t == NULL) {
+		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
+			 adev->pio_mode);
+		t = pata_macio_find_timing(priv, XFER_PIO_0);
+	}
+	BUG_ON(t == NULL);
+
+	/* PIO timings only ever use the first treg */
+	priv->treg[adev->devno][0] |= t->reg1;
+
+	/* Now get DMA timings */
+	t = pata_macio_find_timing(priv, adev->dma_mode);
+	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
+		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
+		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
+	}
+	BUG_ON(t == NULL);
+
+	/* DMA timings can use both tregs */
+	priv->treg[adev->devno][0] |= t->reg1;
+	priv->treg[adev->devno][1] |= t->reg2;
+
+	dev_dbg(priv->dev, " -> %08x %08x\n",
+		priv->treg[adev->devno][0],
+		priv->treg[adev->devno][1]);
+
+	/* Apply to hardware */
+	pata_macio_apply_timings(ap, adev->devno);
+}
+
+/*
+ * Blast some well known "safe" values to the timing registers at init or
+ * wakeup from sleep time, before we do real calculation
+ */
+static void pata_macio_default_timings(struct pata_macio_priv *priv)
+{
+	unsigned int value, value2 = 0;
+
+	switch(priv->kind) {
+		case controller_sh_ata6:
+			value = 0x0a820c97;
+			value2 = 0x00033031;
+			break;
+		case controller_un_ata6:
+		case controller_k2_ata6:
+			value = 0x08618a92;
+			value2 = 0x00002921;
+			break;
+		case controller_kl_ata4:
+			value = 0x0008438c;
+			break;
+		case controller_kl_ata3:
+			value = 0x00084526;
+			break;
+		case controller_heathrow:
+		case controller_ohare:
+		default:
+			value = 0x00074526;
+			break;
+	}
+	priv->treg[0][0] = priv->treg[1][0] = value;
+	priv->treg[0][1] = priv->treg[1][1] = value2;
+}
+
+static int pata_macio_cable_detect(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	/* Get cable type from device-tree */
+	if (priv->kind == controller_kl_ata4 ||
+	    priv->kind == controller_un_ata6 ||
+	    priv->kind == controller_k2_ata6 ||
+	    priv->kind == controller_sh_ata6) {
+		const char *cable = of_get_property(priv->node, "cable-type",
+						    NULL);
+		struct device_node *root = of_find_node_by_path("/");
+		const char *model = of_get_property(root, "model", NULL);
+
+		of_node_put(root);
+
+		if (cable && !strncmp(cable, "80-", 3)) {
+			/* Some drives fail to detect the 80c cable in a
+			 * PowerBook; these machines use a proprietary
+			 * short IDE cable anyway.
+			 */
+			if (model && !strncmp(model, "PowerBook", 9))
+				return ATA_CBL_PATA40_SHORT;
+			else
+				return ATA_CBL_PATA80;
+		}
+	}
+
+	/* G5s seem to have an incorrect cable type in the device-tree.
+	 * Assume they always have an 80-conductor cable; this seems to
+	 * always be the case unless the user mucked around.
+	 */
+	if (of_device_is_compatible(priv->node, "K2-UATA") ||
+	    of_device_is_compatible(priv->node, "shasta-ata"))
+		return ATA_CBL_PATA80;
+
+	/* Anything else is a 40-conductor cable */
+	return ATA_CBL_PATA40;
+}
+
+static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+{
+	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct scatterlist *sg;
+	struct dbdma_cmd *table;
+	unsigned int si, pi;
+
+	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
+		   __func__, qc, qc->flags, write, qc->dev->devno);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	table = (struct dbdma_cmd *) priv->dma_table_cpu;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, sg_len, len;
+
+		/* The DBDMA engine only takes 32-bit addresses, so we
+		 * unconditionally truncate dma_addr_t to u32; segments
+		 * larger than MAX_DBDMA_SEG are split below.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			/* table overflow should never happen */
+			BUG_ON (pi++ >= MAX_DCMDS);
+
+			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+			table->req_count = cpu_to_le16(len);
+			table->phy_addr = cpu_to_le32(addr);
+			table->cmd_dep = 0;
+			table->xfer_status = 0;
+			table->res_count = 0;
+			addr += len;
+			sg_len -= len;
+			++table;
+		}
+	}
+
+	/* Should never happen according to Tejun */
+	BUG_ON(!pi);
+
+	/* Convert the last command to an input/output */
+	table--;
+	table->command = cpu_to_le16(write ? OUTPUT_LAST: INPUT_LAST);
+	table++;
+
+	/* Add the stop command to the end of the list */
+	memset(table, 0, sizeof(struct dbdma_cmd));
+	table->command = cpu_to_le16(DBDMA_STOP);
+
+	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+}
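+/*
+ * Illustrative sketch (editor's, with made-up values) of the DBDMA
+ * program built above for a two-segment read:
+ *
+ *	INPUT_MORE	req_count=0xff00	phy_addr=seg0
+ *	INPUT_LAST	req_count=0x0200	phy_addr=seg1
+ *	DBDMA_STOP
+ *
+ * Every segment becomes one or more MORE commands (split at
+ * MAX_DBDMA_SEG), the final command is rewritten as LAST, and the chain
+ * is terminated with a STOP descriptor.
+ */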
+
+
+static void pata_macio_freeze(struct ata_port *ap)
+{
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+	if (dma_regs) {
+		unsigned int timeout = 1000000;
+
+		/* Make sure DMA controller is stopped */
+		writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
+		while (--timeout && (readl(&dma_regs->status) & RUN))
+			udelay(1);
+	}
+
+	ata_sff_freeze(ap);
+}
+
+
+static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	int dev = qc->dev->devno;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	/* Make sure DMA commands updates are visible */
+	writel(priv->dma_table_dma, &dma_regs->cmdptr);
+
+	/* On the KeyLargo 66MHz cell, we need to add 60ns to wrDataSetup
+	 * on UDMA reads
+	 */
+	if (priv->kind == controller_kl_ata4 &&
+	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
+		void __iomem *rbase = ap->ioaddr.cmd_addr;
+		u32 reg = priv->treg[dev][0];
+
+		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+			reg += 0x00800000;
+		writel(reg, rbase + IDE_TIMING_CONFIG);
+	}
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	writel((RUN << 16) | RUN, &dma_regs->control);
+	/* Make sure it gets to the controller right now */
+	(void)readl(&dma_regs->control);
+}
+
+static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	unsigned int timeout = 1000000;
+
+	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+	/* Stop the DMA engine and wait for it to fully halt */
+	writel(((RUN|WAKE|DEAD) << 16), &dma_regs->control);
+	while (--timeout && (readl(&dma_regs->status) & RUN))
+		udelay(1);
+}
+
+static u8 pata_macio_bmdma_status(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+	u32 dstat, rstat = ATA_DMA_INTR;
+	unsigned long timeout = 0;
+
+	dstat = readl(&dma_regs->status);
+
+	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
+
+	/* We have two things to deal with here:
+	 *
+	 * - The dbdma won't stop if the command was started
+	 * but completed with an error without transferring all
+	 * data. This happens when bad blocks are met during
+	 * a multi-block transfer.
+	 *
+	 * - The dbdma fifo hasn't yet finished flushing to
+	 * system memory when the disk interrupt occurs.
+	 */
+
+	/* First check for errors */
+	if ((dstat & (RUN|DEAD)) != RUN)
+		rstat |= ATA_DMA_ERR;
+
+	/* If ACTIVE is cleared, the STOP command has been hit and
+	 * the transfer is complete. If not, we have to flush the
+	 * channel.
+	 */
+	if ((dstat & ACTIVE) == 0)
+		return rstat;
+
+	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
+
+	/* If dbdma didn't execute the STOP command yet, the
+	 * active bit is still set. We consider that we aren't
+	 * sharing interrupts (which is hopefully the case with
+	 * those controllers) and so we just try to flush the
+	 * channel for pending data in the fifo
+	 */
+	udelay(1);
+	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
+	for (;;) {
+		udelay(1);
+		dstat = readl(&dma_regs->status);
+		if ((dstat & FLUSH) == 0)
+			break;
+		if (++timeout > 1000) {
+			dev_warn(priv->dev, "timeout flushing DMA\n");
+			rstat |= ATA_DMA_ERR;
+			break;
+		}
+	}
+	return rstat;
+}
+
+/* port_start is when we allocate the DMA command list */
+static int pata_macio_port_start(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	if (ap->ioaddr.bmdma_addr == NULL)
+		return 0;
+
+	/* Allocate space for the DBDMA commands.
+	 *
+	 * The +2 is +1 for the stop command and +1 to allow for
+	 * aligning the start address to a multiple of 16 bytes.
+	 */
+	priv->dma_table_cpu =
+		dmam_alloc_coherent(priv->dev,
+				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
+				    &priv->dma_table_dma, GFP_KERNEL);
+	if (priv->dma_table_cpu == NULL) {
+		dev_err(priv->dev, "Unable to allocate DMA command list\n");
+		ap->ioaddr.bmdma_addr = NULL;
+		ap->mwdma_mask = 0;
+		ap->udma_mask = 0;
+	}
+	return 0;
+}
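+/*
+ * Note: a failed DMA table allocation does not fail the port; clearing
+ * bmdma_addr and the DMA mode masks above simply degrades the port to
+ * PIO-only operation.
+ */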
+
+static void pata_macio_irq_clear(struct ata_port *ap)
+{
+	struct pata_macio_priv *priv = ap->private_data;
+
+	/* Nothing to do here */
+
+	dev_dbgdma(priv->dev, "%s\n", __func__);
+}
+
+static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
+{
+	dev_dbg(priv->dev, "Enabling & resetting...\n");
+
+	if (priv->mediabay)
+		return;
+
+	if (priv->kind == controller_ohare && !resume) {
+		/* The code below has trouble on some ohare machines
+		 * (timing related?). Until I can get my hands on one of
+		 * these units, I'll keep the old way.
+		 */
+		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
+	} else {
+		int rc;
+
+		/* Reset and enable controller */
+		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+					 priv->node, priv->aapl_bus_id, 1);
+		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
+				    priv->node, priv->aapl_bus_id, 1);
+		msleep(10);
+		/* Only bother waiting if there's a reset control */
+		if (rc == 0) {
+			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+					    priv->node, priv->aapl_bus_id, 0);
+			msleep(IDE_WAKEUP_DELAY_MS);
+		}
+	}
+
+	/* If resuming a PCI device, restore the config space here */
+	if (priv->pdev && resume) {
+		int rc;
+
+		pci_restore_state(priv->pdev);
+		rc = pcim_enable_device(priv->pdev);
+		if (rc)
+			dev_err(&priv->pdev->dev,
+				"Failed to enable device after resume (%d)\n",
+				rc);
+		else
+			pci_set_master(priv->pdev);
+	}
+
+	/* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
+	 * really seem necessary, and skipping it speeds up the boot process
+	 */
+	if (priv->kauai_fcr)
+		writel(KAUAI_FCR_UATA_MAGIC |
+		       KAUAI_FCR_UATA_RESET_N |
+		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
+}
+
+/* Hook the standard slave config to fix up some HW-related alignment
+ * restrictions
+ */
+static int pata_macio_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct pata_macio_priv *priv = ap->private_data;
+	struct ata_device *dev;
+	u16 cmd;
+	int rc;
+
+	/* First call original */
+	rc = ata_scsi_slave_config(sdev);
+	if (rc)
+		return rc;
+
+	/* This is lifted from sata_nv */
+	dev = &ap->link.device[sdev->id];
+
+	/* OHare has issues with non-cache-aligned DMA on some chipsets */
+	if (priv->kind == controller_ohare) {
+		blk_queue_update_dma_alignment(sdev->request_queue, 31);
+		blk_queue_update_dma_pad(sdev->request_queue, 31);
+
+		/* Tell the world about it */
+		ata_dev_info(dev, "OHare alignment limits applied\n");
+		return 0;
+	}
+
+	/* We only have issues with ATAPI */
+	if (dev->class != ATA_DEV_ATAPI)
+		return 0;
+
+	/* Shasta and K2 seem to have "issues" with reads ... */
+	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
+		/* Alright, these are bad; apply restrictions */
+		blk_queue_update_dma_alignment(sdev->request_queue, 15);
+		blk_queue_update_dma_pad(sdev->request_queue, 15);
+
+		/* We enable MWI and hack the cache line size directly here;
+		 * these are values specific to this chipset, not the normal
+		 * ones. We happen to somewhat know what we are doing here
+		 * (which is basically to do the same thing Apple does and
+		 * pray they did not get it wrong :-)
+		 */
+		BUG_ON(!priv->pdev);
+		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
+		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
+		pci_write_config_word(priv->pdev, PCI_COMMAND,
+				      cmd | PCI_COMMAND_INVALIDATE);
+
+		/* Tell the world about it */
+		ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
+	}
+
+	return 0;
+}
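+/*
+ * Editor's note: blk_queue_update_dma_alignment() with a mask of 31 (or
+ * 15) requires buffers to be 32-byte (16-byte) aligned, and the matching
+ * blk_queue_update_dma_pad() rounds transfer lengths up to the same
+ * boundary; together these enforce the OHare and K2/Shasta restrictions
+ * above.
+ */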
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
+{
+	int rc;
+
+	/* First, core libata suspend to do most of the work */
+	rc = ata_host_suspend(priv->host, mesg);
+	if (rc)
+		return rc;
+
+	/* Restore to default timings */
+	pata_macio_default_timings(priv);
+
+	/* Mask interrupt. Not strictly necessary, but the old driver did
+	 * it and I'd rather not change that here */
+	disable_irq(priv->irq);
+
+	/* The media bay will handle itself just fine */
+	if (priv->mediabay)
+		return 0;
+
+	/* Kauai has bus control FCRs directly here */
+	if (priv->kauai_fcr) {
+		u32 fcr = readl(priv->kauai_fcr);
+		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
+		writel(fcr, priv->kauai_fcr);
+	}
+
+	/* For PCI, save state and disable DMA. No need to call
+	 * pci_set_power_state(), the HW doesn't do D states that
+	 * way, the platform code will take care of suspending the
+	 * ASIC properly
+	 */
+	if (priv->pdev) {
+		pci_save_state(priv->pdev);
+		pci_disable_device(priv->pdev);
+	}
+
+	/* Disable the bus on older machines and the cell on kauai */
+	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
+			    priv->aapl_bus_id, 0);
+
+	return 0;
+}
+
+static int pata_macio_do_resume(struct pata_macio_priv *priv)
+{
+	/* Reset and re-enable the HW */
+	pata_macio_reset_hw(priv, 1);
+
+	/* Sanitize drive timings */
+	pata_macio_apply_timings(priv->host->ports[0], 0);
+
+	/* We want our IRQ back! */
+	enable_irq(priv->irq);
+
+	/* Let the libata core take it from there */
+	ata_host_resume(priv->host);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static struct scsi_host_template pata_macio_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= MAX_DCMDS,
+	/* We may not need one this strict */
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= pata_macio_slave_config,
+};
+
+static struct ata_port_operations pata_macio_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.freeze			= pata_macio_freeze,
+	.set_piomode		= pata_macio_set_timings,
+	.set_dmamode		= pata_macio_set_timings,
+	.cable_detect		= pata_macio_cable_detect,
+	.sff_dev_select		= pata_macio_dev_select,
+	.qc_prep		= pata_macio_qc_prep,
+	.bmdma_setup		= pata_macio_bmdma_setup,
+	.bmdma_start		= pata_macio_bmdma_start,
+	.bmdma_stop		= pata_macio_bmdma_stop,
+	.bmdma_status		= pata_macio_bmdma_status,
+	.port_start		= pata_macio_port_start,
+	.sff_irq_clear		= pata_macio_irq_clear,
+};
+
+static void pata_macio_invariants(struct pata_macio_priv *priv)
+{
+	const int *bidp;
+
+	/* Identify the type of controller */
+	if (of_device_is_compatible(priv->node, "shasta-ata")) {
+		priv->kind = controller_sh_ata6;
+	        priv->timings = pata_macio_shasta_timings;
+	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
+		priv->kind = controller_un_ata6;
+	        priv->timings = pata_macio_kauai_timings;
+	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
+		priv->kind = controller_k2_ata6;
+	        priv->timings = pata_macio_kauai_timings;
+	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
+		if (strcmp(priv->node->name, "ata-4") == 0) {
+			priv->kind = controller_kl_ata4;
+			priv->timings = pata_macio_kl66_timings;
+		} else {
+			priv->kind = controller_kl_ata3;
+			priv->timings = pata_macio_kl33_timings;
+		}
+	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
+		priv->kind = controller_heathrow;
+		priv->timings = pata_macio_heathrow_timings;
+	} else {
+		priv->kind = controller_ohare;
+		priv->timings = pata_macio_ohare_timings;
+	}
+
+	/* XXX FIXME --- setup priv->mediabay here */
+
+	/* Get Apple bus ID (for clock and ASIC control) */
+	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
+	priv->aapl_bus_id = bidp ? *bidp : 0;
+
+	/* Fixup missing Apple bus ID in case of media-bay */
+	if (priv->mediabay && bidp == NULL)
+		priv->aapl_bus_id = 1;
+}
+
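+/*
+ * Resulting layout (editor's note, derived from the shifts below): the
+ * MacIO cell spaces the taskfile registers 16 bytes apart, i.e. data at
+ * base + 0x00, error/feature at base + 0x10, ... command/status at
+ * base + 0x70, with control/altstatus at base + 0x160.
+ */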
+static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
+				 void __iomem * base, void __iomem * dma)
+{
+	/* cmd_addr is the base of regs for that port */
+	ioaddr->cmd_addr	= base;
+
+	/* taskfile registers */
+	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
+	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
+	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
+	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
+	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
+	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
+	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
+	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
+	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
+	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
+	ioaddr->altstatus_addr	= base + 0x160;
+	ioaddr->ctl_addr	= base + 0x160;
+	ioaddr->bmdma_addr	= dma;
+}
+
+static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
+					 struct ata_port_info *pinfo)
+{
+	int i = 0;
+
+	pinfo->pio_mask		= 0;
+	pinfo->mwdma_mask	= 0;
+	pinfo->udma_mask	= 0;
+
+	while (priv->timings[i].mode > 0) {
+		unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
+		switch(priv->timings[i].mode & 0xf0) {
+		case 0x00: /* PIO */
+			pinfo->pio_mask |= (mask >> 8);
+			break;
+		case 0x20: /* MWDMA */
+			pinfo->mwdma_mask |= mask;
+			break;
+		case 0x40: /* UDMA */
+			pinfo->udma_mask |= mask;
+			break;
+		}
+		i++;
+	}
+	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
+		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
+}
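+/*
+ * Editor's note on the decoding above: libata XFER_* numbers encode the
+ * class in the high bits (XFER_PIO_0 = 0x08, XFER_MW_DMA_0 = 0x20,
+ * XFER_UDMA_0 = 0x40).  PIO modes start at a low nibble of 8, hence the
+ * ">> 8" so that XFER_PIO_0 lands on bit 0 of pio_mask; MWDMA and UDMA
+ * start at a low nibble of 0 and need no shift.
+ */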
+
+static int pata_macio_common_init(struct pata_macio_priv *priv,
+				  resource_size_t tfregs,
+				  resource_size_t dmaregs,
+				  resource_size_t fcregs,
+				  unsigned long irq)
+{
+	struct ata_port_info		pinfo;
+	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
+	void __iomem			*dma_regs = NULL;
+
+	/* Fill up privates with various invariants collected from the
+	 * device-tree
+	 */
+	pata_macio_invariants(priv);
+
+	/* Make sure we have sane initial timings in the cache */
+	pata_macio_default_timings(priv);
+
+	/* Not sure what the real max is but we know it's less than 64K, let's
+	 * use 64K minus 256
+	 */
+	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
+
+	/* Allocate libata host for 1 port */
+	memset(&pinfo, 0, sizeof(struct ata_port_info));
+	pmac_macio_calc_timing_masks(priv, &pinfo);
+	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
+	pinfo.port_ops		= &pata_macio_ops;
+	pinfo.private_data	= priv;
+
+	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
+	if (priv->host == NULL) {
+		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
+		return -ENOMEM;
+	}
+
+	/* Setup the private data in host too */
+	priv->host->private_data = priv;
+
+	/* Map base registers */
+	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
+	if (priv->tfregs == NULL) {
+		dev_err(priv->dev, "Failed to map ATA ports\n");
+		return -ENOMEM;
+	}
+	priv->host->iomap = &priv->tfregs;
+
+	/* Map DMA regs */
+	if (dmaregs != 0) {
+		dma_regs = devm_ioremap(priv->dev, dmaregs,
+					sizeof(struct dbdma_regs));
+		if (dma_regs == NULL)
+			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
+	}
+
+	/* If chip has local feature control, map those regs too */
+	if (fcregs != 0) {
+		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
+		if (priv->kauai_fcr == NULL) {
+			dev_err(priv->dev, "Failed to map ATA FCR register\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* Setup port data structure */
+	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
+			     priv->tfregs, dma_regs);
+	priv->host->ports[0]->private_data = priv;
+
+	/* hard-reset the controller */
+	pata_macio_reset_hw(priv, 0);
+	pata_macio_apply_timings(priv->host->ports[0], 0);
+
+	/* Enable bus master if necessary */
+	if (priv->pdev && dma_regs)
+		pci_set_master(priv->pdev);
+
+	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
+		 macio_ata_names[priv->kind], priv->aapl_bus_id);
+
+	/* Start it up */
+	priv->irq = irq;
+	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
+				 &pata_macio_sht);
+}
+
+static int pata_macio_attach(struct macio_dev *mdev,
+			     const struct of_device_id *match)
+{
+	struct pata_macio_priv	*priv;
+	resource_size_t		tfregs, dmaregs = 0;
+	unsigned long		irq;
+	int			rc;
+
+	/* Check for broken device-trees */
+	if (macio_resource_count(mdev) == 0) {
+		dev_err(&mdev->ofdev.dev,
+			"No addresses for controller\n");
+		return -ENXIO;
+	}
+
+	/* Enable managed resources */
+	macio_enable_devres(mdev);
+
+	/* Allocate and init private data structure */
+	priv = devm_kzalloc(&mdev->ofdev.dev,
+			    sizeof(struct pata_macio_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->node = of_node_get(mdev->ofdev.dev.of_node);
+	priv->mdev = mdev;
+	priv->dev = &mdev->ofdev.dev;
+
+	/* Request memory resource for taskfile registers */
+	if (macio_request_resource(mdev, 0, "pata-macio")) {
+		dev_err(&mdev->ofdev.dev,
+			"Cannot obtain taskfile resource\n");
+		return -EBUSY;
+	}
+	tfregs = macio_resource_start(mdev, 0);
+
+	/* Request resources for DMA registers if any */
+	if (macio_resource_count(mdev) >= 2) {
+		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
+			dev_err(&mdev->ofdev.dev,
+				"Cannot obtain DMA resource\n");
+		else
+			dmaregs = macio_resource_start(mdev, 1);
+	}
+
+	/*
+	 * Fixup missing IRQ for some old implementations with broken
+	 * device-trees.
+	 *
+	 * This is a bit bogus, it should be fixed in the device-tree itself,
+	 * via the existing macio fixups, based on the type of interrupt
+	 * controller in the machine. However, I have no test HW for this case,
+	 * and this trick works well enough on those old machines...
+	 */
+	if (macio_irq_count(mdev) == 0) {
+		dev_warn(&mdev->ofdev.dev,
+			 "No interrupts for controller, using 13\n");
+		irq = irq_create_mapping(NULL, 13);
+	} else
+		irq = macio_irq(mdev, 0);
+
+	/* Prevent media bay callbacks until fully registered */
+	lock_media_bay(priv->mdev->media_bay);
+
+	/* Get register addresses and call common initialization */
+	rc = pata_macio_common_init(priv,
+				    tfregs,		/* Taskfile regs */
+				    dmaregs,		/* DBDMA regs */
+				    0,			/* Feature control */
+				    irq);
+	unlock_media_bay(priv->mdev->media_bay);
+
+	return rc;
+}
+
+static int pata_macio_detach(struct macio_dev *mdev)
+{
+	struct ata_host *host = macio_get_drvdata(mdev);
+	struct pata_macio_priv *priv = host->private_data;
+
+	lock_media_bay(priv->mdev->media_bay);
+
+	/* Make sure the mediabay callback doesn't try to access
+	 * dead stuff
+	 */
+	priv->host->private_data = NULL;
+
+	ata_host_detach(host);
+
+	unlock_media_bay(priv->mdev->media_bay);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
+{
+	struct ata_host *host = macio_get_drvdata(mdev);
+
+	return pata_macio_do_suspend(host->private_data, mesg);
+}
+
+static int pata_macio_resume(struct macio_dev *mdev)
+{
+	struct ata_host *host = macio_get_drvdata(mdev);
+
+	return pata_macio_do_resume(host->private_data);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PMAC_MEDIABAY
+static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
+{
+	struct ata_host *host = macio_get_drvdata(mdev);
+	struct ata_port *ap;
+	struct ata_eh_info *ehi;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	if (!host || !host->private_data)
+		return;
+	ap = host->ports[0];
+	spin_lock_irqsave(ap->lock, flags);
+	ehi = &ap->link.eh_info;
+	if (mb_state == MB_CD) {
+		ata_ehi_push_desc(ehi, "mediabay plug");
+		ata_ehi_hotplugged(ehi);
+		ata_port_freeze(ap);
+	} else {
+		ata_ehi_push_desc(ehi, "mediabay unplug");
+		ata_for_each_dev(dev, &ap->link, ALL)
+			dev->flags |= ATA_DFLAG_DETACH;
+		ata_port_abort(ap);
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+}
+#endif /* CONFIG_PMAC_MEDIABAY */
+
+
+static int pata_macio_pci_attach(struct pci_dev *pdev,
+				 const struct pci_device_id *id)
+{
+	struct pata_macio_priv	*priv;
+	struct device_node	*np;
+	resource_size_t		rbase;
+
+	/* We cannot use a MacIO controller without its OF device node */
+	np = pci_device_to_OF_node(pdev);
+	if (np == NULL) {
+		dev_err(&pdev->dev,
+			"Cannot find OF device node for controller\n");
+		return -ENODEV;
+	}
+
+	/* Check that it can be enabled */
+	if (pcim_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot enable controller PCI device\n");
+		return -ENXIO;
+	}
+
+	/* Allocate and init private data structure */
+	priv = devm_kzalloc(&pdev->dev,
+			    sizeof(struct pata_macio_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->node = of_node_get(np);
+	priv->pdev = pdev;
+	priv->dev = &pdev->dev;
+
+	/* Get MMIO regions */
+	if (pci_request_regions(pdev, "pata-macio")) {
+		dev_err(&pdev->dev,
+			"Cannot obtain PCI resources\n");
+		return -EBUSY;
+	}
+
+	/* Get register addresses and call common initialization */
+	rbase = pci_resource_start(pdev, 0);
+	if (pata_macio_common_init(priv,
+				   rbase + 0x2000,	/* Taskfile regs */
+				   rbase + 0x1000,	/* DBDMA regs */
+				   rbase,		/* Feature control */
+				   pdev->irq))
+		return -ENXIO;
+
+	return 0;
+}
+
+static void pata_macio_pci_detach(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+
+	ata_host_detach(host);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+
+	return pata_macio_do_suspend(host->private_data, mesg);
+}
+
+static int pata_macio_pci_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+
+	return pata_macio_do_resume(host->private_data);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id pata_macio_match[] =
+{
+	{
+	.name 		= "IDE",
+	},
+	{
+	.name 		= "ATA",
+	},
+	{
+	.type		= "ide",
+	},
+	{
+	.type		= "ata",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, pata_macio_match);
+
+static struct macio_driver pata_macio_driver =
+{
+	.driver = {
+		.name 		= "pata-macio",
+		.owner		= THIS_MODULE,
+		.of_match_table	= pata_macio_match,
+	},
+	.probe		= pata_macio_attach,
+	.remove		= pata_macio_detach,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= pata_macio_suspend,
+	.resume		= pata_macio_resume,
+#endif
+#ifdef CONFIG_PMAC_MEDIABAY
+	.mediabay_event	= pata_macio_mb_event,
+#endif
+};
+
+static const struct pci_device_id pata_macio_pci_match[] = {
+	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
+	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
+	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
+	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
+	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
+	{},
+};
+
+static struct pci_driver pata_macio_pci_driver = {
+	.name		= "pata-pci-macio",
+	.id_table	= pata_macio_pci_match,
+	.probe		= pata_macio_pci_attach,
+	.remove		= pata_macio_pci_detach,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= pata_macio_pci_suspend,
+	.resume		= pata_macio_pci_resume,
+#endif
+	.driver = {
+		.owner		= THIS_MODULE,
+	},
+};
+MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
+
+
+static int __init pata_macio_init(void)
+{
+	int rc;
+
+	if (!machine_is(powermac))
+		return -ENODEV;
+
+	rc = pci_register_driver(&pata_macio_pci_driver);
+	if (rc)
+		return rc;
+	rc = macio_register_driver(&pata_macio_driver);
+	if (rc) {
+		pci_unregister_driver(&pata_macio_pci_driver);
+		return rc;
+	}
+	return 0;
+}
+
+static void __exit pata_macio_exit(void)
+{
+	macio_unregister_driver(&pata_macio_driver);
+	pci_unregister_driver(&pata_macio_pci_driver);
+}
+
+module_init(pata_macio_init);
+module_exit(pata_macio_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt");
+MODULE_DESCRIPTION("Apple MacIO PATA driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
new file mode 100644
index 0000000..ff468a6
--- /dev/null
+++ b/drivers/ata/pata_marvell.c
@@ -0,0 +1,186 @@
+/*
+ *	Marvell PATA driver.
+ *
+ *	For the moment we drive the PATA port in legacy mode. That
+ *	isn't making full use of the device functionality but it is
+ *	easy to get working.
+ *
+ *	(c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_marvell"
+#define DRV_VERSION	"0.1.6"
+
+/**
+ *	marvell_pata_active	-	check if PATA is active
+ *	@pdev: PCI device
+ *
+ *	Returns 1 if the PATA port may be active. We know how to check this
+ *	for the 6145 but not for the other devices.
+ */
+
+static int marvell_pata_active(struct pci_dev *pdev)
+{
+	int i;
+	u32 devices;
+	void __iomem *barp;
+
+	/* We don't yet know how to do this for other devices */
+	if (pdev->device != 0x6145)
+		return 1;
+
+	barp = pci_iomap(pdev, 5, 0x10);
+	if (barp == NULL)
+		return -ENOMEM;
+
+	printk("BAR5:");
+	for(i = 0; i <= 0x0F; i++)
+		printk("%02X:%02X ", i, ioread8(barp + i));
+	printk("\n");
+
+	devices = ioread32(barp + 0x0C);
+	pci_iounmap(pdev, barp);
+
+	if (devices & 0x10)
+		return 1;
+	return 0;
+}
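+/*
+ * Editor's note: on the 6145 the word at BAR5 + 0x0C appears to be a
+ * device/port presence register, with bit 4 (0x10) meaning "PATA port
+ * in use".  The hex dump above is debug output left in to help decode
+ * the remaining bits on the devices we don't yet understand.
+ */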
+
+/**
+ *	marvell_pre_reset	-	probe begin
+ *	@link: link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->device == 0x6145 && ap->port_no == 0 &&
+	    !marvell_pata_active(pdev))		/* PATA enable ? */
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+static int marvell_cable_detect(struct ata_port *ap)
+{
+	/* Cable type */
+	switch(ap->port_no)
+	{
+	case 0:
+		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+			return ATA_CBL_PATA40;
+		return ATA_CBL_PATA80;
+	case 1: /* Legacy SATA port */
+		return ATA_CBL_SATA;
+	}
+
+	BUG();
+	return 0;	/* Our BUG macro needs the right markup */
+}
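+/*
+ * Cable note (editor's): on port 0 the driver samples bit 0 of the
+ * register at bmdma_addr + 1; a set bit is taken to mean a 40-wire
+ * cable, clear means 80-wire.  Port 1 is the legacy-mapped SATA port,
+ * where the cable type question does not arise.
+ */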
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template marvell_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations marvell_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= marvell_cable_detect,
+	.prereset		= marvell_pre_reset,
+};
+
+
+/**
+ *	marvell_init_one - Register Marvell ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in marvell_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA5,
+
+		.port_ops	= &marvell_ops,
+	};
+	static const struct ata_port_info info_sata = {
+		/* Slave possible as it's magically mapped, not real */
+		.flags		= ATA_FLAG_SLAVE_POSS,
+
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask 	= ATA_UDMA6,
+
+		.port_ops	= &marvell_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, &info_sata };
+
+	if (pdev->device == 0x6101)
+		ppi[1] = &ata_dummy_port_info;
+
+#if IS_ENABLED(CONFIG_SATA_AHCI)
+	if (!marvell_pata_active(pdev)) {
+		printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
+		return -ENODEV;
+	}
+#endif
+	return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
+}
+
+static const struct pci_device_id marvell_pci_tbl[] = {
+	{ PCI_DEVICE(0x11AB, 0x6101), },
+	{ PCI_DEVICE(0x11AB, 0x6121), },
+	{ PCI_DEVICE(0x11AB, 0x6123), },
+	{ PCI_DEVICE(0x11AB, 0x6145), },
+	{ PCI_DEVICE(0x1B4B, 0x91A0), },
+	{ PCI_DEVICE(0x1B4B, 0x91A4), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver marvell_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= marvell_pci_tbl,
+	.probe			= marvell_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(marvell_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
new file mode 100644
index 0000000..f1d352d
--- /dev/null
+++ b/drivers/ata/pata_mpc52xx.c
@@ -0,0 +1,875 @@
+/*
+ * drivers/ata/pata_mpc52xx.c
+ *
+ * libata driver for the Freescale MPC52xx on-chip IDE interface
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
+ *
+ * UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby),
+ * Domen Puncer and Tim Yamin.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+#define DRV_NAME	"mpc52xx_ata"
+
+/* Private structures used by the driver */
+struct mpc52xx_ata_timings {
+	u32	pio1;
+	u32	pio2;
+	u32	mdma1;
+	u32	mdma2;
+	u32	udma1;
+	u32	udma2;
+	u32	udma3;
+	u32	udma4;
+	u32	udma5;
+	int	using_udma;
+};
+
+struct mpc52xx_ata_priv {
+	unsigned int			ipb_period;
+	struct mpc52xx_ata __iomem	*ata_regs;
+	phys_addr_t			ata_regs_pa;
+	int				ata_irq;
+	struct mpc52xx_ata_timings	timings[2];
+	int				csel;
+
+	/* DMA */
+	struct bcom_task		*dmatsk;
+	const struct udmaspec		*udmaspec;
+	const struct mdmaspec		*mdmaspec;
+	int 				mpc52xx_ata_dma_last_write;
+	int				waiting_for_dma;
+};
+
+
+/* ATAPI-4 PIO specs (in ns) */
+static const u16 ataspec_t0[5]		= {600, 383, 240, 180, 120};
+static const u16 ataspec_t1[5]		= { 70,  50,  30,  30,  25};
+static const u16 ataspec_t2_8[5]	= {290, 290, 290,  80,  70};
+static const u16 ataspec_t2_16[5]	= {165, 125, 100,  80,  70};
+static const u16 ataspec_t2i[5]		= {  0,   0,   0,  70,  25};
+static const u16 ataspec_t4[5]		= { 30,  20,  15,  10,  10};
+static const u16 ataspec_ta[5]		= { 35,  35,  35,  35,  35};
+
+#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
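+
+/* CALC_CLKCYC(c, v) is a ceiling division: the number of clock cycles of
+ * period c needed to cover duration v.  Worked example (editor's): with a
+ * 132 MHz IPB clock, ipb_period is ~7575 ps, so the 120 ns (120000 ps)
+ * PIO4 t0 needs CALC_CLKCYC(7575, 120000) = 16 cycles.
+ */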
+
+/* ======================================================================== */
+
+/* ATAPI-4 MDMA specs (in clocks) */
+struct mdmaspec {
+	u8 t0M;
+	u8 td;
+	u8 th;
+	u8 tj;
+	u8 tkw;
+	u8 tm;
+	u8 tn;
+};
+
+static const struct mdmaspec mdmaspec66[3] = {
+	{ .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 },
+	{ .t0M = 10, .td = 6,  .th = 1, .tj = 1, .tkw = 4,  .tm = 2, .tn = 1 },
+	{ .t0M = 8,  .td = 5,  .th = 1, .tj = 1, .tkw = 2,  .tm = 2, .tn = 1 },
+};
+
+static const struct mdmaspec mdmaspec132[3] = {
+	{ .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 },
+	{ .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7,  .tm = 4, .tn = 1 },
+	{ .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4,  .tm = 4, .tn = 1 },
+};
+
+/* ATAPI-4 UDMA specs (in clocks) */
+struct udmaspec {
+	u8 tcyc;
+	u8 t2cyc;
+	u8 tds;
+	u8 tdh;
+	u8 tdvs;
+	u8 tdvh;
+	u8 tfs;
+	u8 tli;
+	u8 tmli;
+	u8 taz;
+	u8 tzah;
+	u8 tenv;
+	u8 tsr;
+	u8 trfs;
+	u8 trp;
+	u8 tack;
+	u8 tss;
+};
+
+static const struct udmaspec udmaspec66[6] = {
+	{ .tcyc = 8,  .t2cyc = 16, .tds  = 1,  .tdh  = 1, .tdvs = 5,  .tdvh = 1,
+	  .tfs  = 16, .tli   = 10, .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 3,  .trfs  = 5,  .trp  = 11, .tack = 2, .tss  = 4,
+	},
+	{ .tcyc = 5,  .t2cyc = 11, .tds  = 1,  .tdh  = 1, .tdvs = 4,  .tdvh = 1,
+	  .tfs  = 14, .tli   = 10, .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 5,  .trp  = 9,  .tack = 2, .tss  = 4,
+	},
+	{ .tcyc = 4,  .t2cyc = 8,  .tds  = 1,  .tdh  = 1, .tdvs = 3,  .tdvh = 1,
+	  .tfs  = 12, .tli   = 10, .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 4,  .trp  = 7,  .tack = 2, .tss  = 4,
+	},
+	{ .tcyc = 3,  .t2cyc = 6,  .tds  = 1,  .tdh  = 1, .tdvs = 2,  .tdvh = 1,
+	  .tfs  = 9,  .tli   = 7,  .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 4,  .trp  = 7,  .tack = 2, .tss  = 4,
+	},
+	{ .tcyc = 2,  .t2cyc = 4,  .tds  = 1,  .tdh  = 1, .tdvs = 1,  .tdvh = 1,
+	  .tfs  = 8,  .tli   = 8,  .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 4,  .trp  = 7,  .tack = 2, .tss  = 4,
+	},
+	{ .tcyc = 2,  .t2cyc = 2,  .tds  = 1,  .tdh  = 1, .tdvs = 1,  .tdvh = 1,
+	  .tfs  = 6,  .tli   = 5,  .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 4,  .trp  = 6,  .tack = 2, .tss  = 4,
+	},
+};
+
+static const struct udmaspec udmaspec132[6] = {
+	{ .tcyc = 15, .t2cyc = 31, .tds  = 2,  .tdh  = 1, .tdvs = 10, .tdvh = 1,
+	  .tfs  = 30, .tli   = 20, .tmli = 3,  .taz  = 2, .tzah = 3,  .tenv = 3,
+	  .tsr  = 7,  .trfs  = 10, .trp  = 22, .tack = 3, .tss  = 7,
+	},
+	{ .tcyc = 10, .t2cyc = 21, .tds  = 2,  .tdh  = 1, .tdvs = 7,  .tdvh = 1,
+	  .tfs  = 27, .tli   = 20, .tmli = 3,  .taz  = 2, .tzah = 3,  .tenv = 3,
+	  .tsr  = 4,  .trfs  = 10, .trp  = 17, .tack = 3, .tss  = 7,
+	},
+	{ .tcyc = 6,  .t2cyc = 12, .tds  = 1,  .tdh  = 1, .tdvs = 5,  .tdvh = 1,
+	  .tfs  = 23, .tli   = 20, .tmli = 3,  .taz  = 2, .tzah = 3,  .tenv = 3,
+	  .tsr  = 3,  .trfs  = 8,  .trp  = 14, .tack = 3, .tss  = 7,
+	},
+	{ .tcyc = 7,  .t2cyc = 12, .tds  = 1,  .tdh  = 1, .tdvs = 3,  .tdvh = 1,
+	  .tfs  = 15, .tli   = 13, .tmli = 3,  .taz  = 2, .tzah = 3,  .tenv = 3,
+	  .tsr  = 3,  .trfs  = 8,  .trp  = 14, .tack = 3, .tss  = 7,
+	},
+	{ .tcyc = 2,  .t2cyc = 5,  .tds  = 0,  .tdh  = 0, .tdvs = 1,  .tdvh = 1,
+	  .tfs  = 16, .tli   = 14, .tmli = 2,  .taz  = 1, .tzah = 2,  .tenv = 2,
+	  .tsr  = 2,  .trfs  = 7,  .trp  = 13, .tack = 2, .tss  = 6,
+	},
+	{ .tcyc = 3,  .t2cyc = 6,  .tds  = 1,  .tdh  = 1, .tdvs = 1,  .tdvh = 1,
+	  .tfs  = 12, .tli   = 10, .tmli = 3,  .taz  = 2, .tzah = 3,  .tenv = 3,
+	  .tsr  = 3,  .trfs  = 7,  .trp  = 12, .tack = 3, .tss  = 7,
+	},
+};
+
+/* ======================================================================== */
+
+/* Bit definitions inside the registers */
+#define MPC52xx_ATA_HOSTCONF_SMR	0x80000000UL /* State machine reset */
+#define MPC52xx_ATA_HOSTCONF_FR		0x40000000UL /* FIFO Reset */
+#define MPC52xx_ATA_HOSTCONF_IE		0x02000000UL /* Enable interrupt in PIO */
+#define MPC52xx_ATA_HOSTCONF_IORDY	0x01000000UL /* Drive supports IORDY protocol */
+
+#define MPC52xx_ATA_HOSTSTAT_TIP	0x80000000UL /* Transaction in progress */
+#define MPC52xx_ATA_HOSTSTAT_UREP	0x40000000UL /* UDMA Read Extended Pause */
+#define MPC52xx_ATA_HOSTSTAT_RERR	0x02000000UL /* Read Error */
+#define MPC52xx_ATA_HOSTSTAT_WERR	0x01000000UL /* Write Error */
+
+#define MPC52xx_ATA_FIFOSTAT_EMPTY	0x01 /* FIFO Empty */
+#define MPC52xx_ATA_FIFOSTAT_ERROR	0x40 /* FIFO Error */
+
+#define MPC52xx_ATA_DMAMODE_WRITE	0x01 /* Write DMA */
+#define MPC52xx_ATA_DMAMODE_READ	0x02 /* Read DMA */
+#define MPC52xx_ATA_DMAMODE_UDMA	0x04 /* UDMA enabled */
+#define MPC52xx_ATA_DMAMODE_IE		0x08 /* Enable drive interrupt to CPU in DMA mode */
+#define MPC52xx_ATA_DMAMODE_FE		0x10 /* FIFO Flush enable in Rx mode */
+#define MPC52xx_ATA_DMAMODE_FR		0x20 /* FIFO Reset */
+#define MPC52xx_ATA_DMAMODE_HUT		0x40 /* Host UDMA burst terminate */
+
+#define MAX_DMA_BUFFERS 128
+#define MAX_DMA_BUFFER_SIZE 0x20000u
+
+/* Structure of the hardware registers */
+struct mpc52xx_ata {
+
+	/* Host interface registers */
+	u32 config;		/* ATA + 0x00 Host configuration */
+	u32 host_status;	/* ATA + 0x04 Host controller status */
+	u32 pio1;		/* ATA + 0x08 PIO Timing 1 */
+	u32 pio2;		/* ATA + 0x0c PIO Timing 2 */
+	u32 mdma1;		/* ATA + 0x10 MDMA Timing 1 */
+	u32 mdma2;		/* ATA + 0x14 MDMA Timing 2 */
+	u32 udma1;		/* ATA + 0x18 UDMA Timing 1 */
+	u32 udma2;		/* ATA + 0x1c UDMA Timing 2 */
+	u32 udma3;		/* ATA + 0x20 UDMA Timing 3 */
+	u32 udma4;		/* ATA + 0x24 UDMA Timing 4 */
+	u32 udma5;		/* ATA + 0x28 UDMA Timing 5 */
+	u32 share_cnt;		/* ATA + 0x2c ATA share counter */
+	u32 reserved0[3];
+
+	/* FIFO registers */
+	u32 fifo_data;		/* ATA + 0x3c */
+	u8  fifo_status_frame;	/* ATA + 0x40 */
+	u8  fifo_status;	/* ATA + 0x41 */
+	u16 reserved7[1];
+	u8  fifo_control;	/* ATA + 0x44 */
+	u8  reserved8[5];
+	u16 fifo_alarm;		/* ATA + 0x4a */
+	u16 reserved9;
+	u16 fifo_rdp;		/* ATA + 0x4e */
+	u16 reserved10;
+	u16 fifo_wrp;		/* ATA + 0x52 */
+	u16 reserved11;
+	u16 fifo_lfrdp;		/* ATA + 0x56 */
+	u16 reserved12;
+	u16 fifo_lfwrp;		/* ATA + 0x5a */
+
+	/* Drive TaskFile registers */
+	u8  tf_control;		/* ATA + 0x5c TASKFILE Control/Alt Status */
+	u8  reserved13[3];
+	u16 tf_data;		/* ATA + 0x60 TASKFILE Data */
+	u16 reserved14;
+	u8  tf_features;	/* ATA + 0x64 TASKFILE Features/Error */
+	u8  reserved15[3];
+	u8  tf_sec_count;	/* ATA + 0x68 TASKFILE Sector Count */
+	u8  reserved16[3];
+	u8  tf_sec_num;		/* ATA + 0x6c TASKFILE Sector Number */
+	u8  reserved17[3];
+	u8  tf_cyl_low;		/* ATA + 0x70 TASKFILE Cylinder Low */
+	u8  reserved18[3];
+	u8  tf_cyl_high;	/* ATA + 0x74 TASKFILE Cylinder High */
+	u8  reserved19[3];
+	u8  tf_dev_head;	/* ATA + 0x78 TASKFILE Device/Head */
+	u8  reserved20[3];
+	u8  tf_command;		/* ATA + 0x7c TASKFILE Command/Status */
+	u8  dma_mode;		/* ATA + 0x7d ATA Host DMA Mode configuration */
+	u8  reserved21[2];
+};
+
+
+/* ======================================================================== */
+/* Aux fns                                                                  */
+/* ======================================================================== */
+
+
+/* MPC52xx low level hw control */
+static int
+mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
+{
+	struct mpc52xx_ata_timings *timing = &priv->timings[dev];
+	unsigned int ipb_period = priv->ipb_period;
+	u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
+
+	if ((pio < 0) || (pio > 4))
+		return -EINVAL;
+
+	t0	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
+	t1	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
+	t2_8	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
+	t2_16	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
+	t2i	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
+	t4	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
+	ta	= CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
+
+	timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
+	timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
+
+	return 0;
+}
+
+static int
+mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
+				 int speed)
+{
+	struct mpc52xx_ata_timings *t = &priv->timings[dev];
+	const struct mdmaspec *s = &priv->mdmaspec[speed];
+
+	if (speed < 0 || speed > 2)
+		return -EINVAL;
+
+	t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
+	t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
+	t->using_udma = 0;
+
+	return 0;
+}
+
+static int
+mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
+				 int speed)
+{
+	struct mpc52xx_ata_timings *t = &priv->timings[dev];
+	const struct udmaspec *s = &priv->udmaspec[speed];
+
+	if (speed < 0 || speed > 2)
+		return -EINVAL;
+
+	t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
+	t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
+	t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
+	t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
+	t->udma5 = (u32)s->tzah << 24;
+	t->using_udma = 1;
+
+	return 0;
+}
+
+static void
+mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
+{
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+	struct mpc52xx_ata_timings *timing = &priv->timings[device];
+
+	out_be32(&regs->pio1,  timing->pio1);
+	out_be32(&regs->pio2,  timing->pio2);
+	out_be32(&regs->mdma1, timing->mdma1);
+	out_be32(&regs->mdma2, timing->mdma2);
+	out_be32(&regs->udma1, timing->udma1);
+	out_be32(&regs->udma2, timing->udma2);
+	out_be32(&regs->udma3, timing->udma3);
+	out_be32(&regs->udma4, timing->udma4);
+	out_be32(&regs->udma5, timing->udma5);
+	priv->csel = device;
+}
+
+static int
+mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
+{
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+	int tslot;
+
+	/* Clear share_cnt (all the sample code does this ...) */
+	out_be32(&regs->share_cnt, 0);
+
+	/* Configure and reset host */
+	out_be32(&regs->config,
+			MPC52xx_ATA_HOSTCONF_IE |
+			MPC52xx_ATA_HOSTCONF_IORDY |
+			MPC52xx_ATA_HOSTCONF_SMR |
+			MPC52xx_ATA_HOSTCONF_FR);
+
+	udelay(10);
+
+	out_be32(&regs->config,
+			MPC52xx_ATA_HOSTCONF_IE |
+			MPC52xx_ATA_HOSTCONF_IORDY);
+
+	/* Set the time slot to 1us */
+	tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
+	out_be32(&regs->share_cnt, tslot << 16);
+
+	/* Init timings to PIO0 */
+	memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
+
+	mpc52xx_ata_compute_pio_timings(priv, 0, 0);
+	mpc52xx_ata_compute_pio_timings(priv, 1, 0);
+
+	mpc52xx_ata_apply_timings(priv, 0);
+
+	return 0;
+}
+
+
+/* ======================================================================== */
+/* libata driver                                                            */
+/* ======================================================================== */
+
+static void
+mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	int pio, rv;
+
+	pio = adev->pio_mode - XFER_PIO_0;
+
+	rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
+
+	if (rv) {
+		dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
+		return;
+	}
+
+	mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	int rv;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int dma = adev->dma_mode - XFER_UDMA_0;
+		rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma);
+	} else {
+		int dma = adev->dma_mode - XFER_MW_DMA_0;
+		rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma);
+	}
+
+	if (rv) {
+		dev_alert(ap->dev,
+			"Trying to select invalid DMA mode %d\n",
+			adev->dma_mode);
+		return;
+	}
+
+	mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	if (device != priv->csel)
+		mpc52xx_ata_apply_timings(priv, device);
+
+	ata_sff_dev_select(ap, device);
+}
+
+static int
+mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	struct bcom_ata_bd *bd;
+	unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
+	struct scatterlist *sg;
+	int count = 0;
+
+	if (read)
+		bcom_ata_rx_prepare(priv->dmatsk);
+	else
+		bcom_ata_tx_prepare(priv->dmatsk);
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t cur_addr = sg_dma_address(sg);
+		u32 cur_len = sg_dma_len(sg);
+
+		while (cur_len) {
+			unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
+			bd = (struct bcom_ata_bd *)
+				bcom_prepare_next_buffer(priv->dmatsk);
+
+			if (read) {
+				bd->status = tc;
+				bd->src_pa = (__force u32) priv->ata_regs_pa +
+					offsetof(struct mpc52xx_ata, fifo_data);
+				bd->dst_pa = (__force u32) cur_addr;
+			} else {
+				bd->status = tc;
+				bd->src_pa = (__force u32) cur_addr;
+				bd->dst_pa = (__force u32) priv->ata_regs_pa +
+					offsetof(struct mpc52xx_ata, fifo_data);
+			}
+
+			bcom_submit_next_buffer(priv->dmatsk, NULL);
+
+			cur_addr += tc;
+			cur_len -= tc;
+			count++;
+
+			if (count > MAX_DMA_BUFFERS) {
+				dev_alert(ap->dev,
+					  "dma table too small\n");
+				goto use_pio_instead;
+			}
+		}
+	}
+	return 1;
+
+ use_pio_instead:
+	bcom_ata_reset_bd(priv->dmatsk);
+	return 0;
+}
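+/*
+ * Editor's note: mpc52xx_ata_build_dmatable() follows the classic bmdma
+ * convention of returning 1 on success and 0 when the transfer overflows
+ * the descriptor table (the BDs are reset via use_pio_instead).  Note
+ * that mpc52xx_bmdma_setup() below only logs an alert on failure rather
+ * than actually falling back to PIO.
+ */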
+
+static void
+mpc52xx_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+
+	unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dma_mode;
+
+	if (!mpc52xx_ata_build_dmatable(qc))
+		dev_alert(ap->dev, "%s: failed to build DMA table\n",
+			__func__);
+
+	/* Check FIFO is OK... */
+	if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
+		dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+			__func__, in_8(&priv->ata_regs->fifo_status));
+
+	if (read) {
+		dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ |
+				MPC52xx_ATA_DMAMODE_FE;
+
+		/* Setup FIFO if direction changed */
+		if (priv->mpc52xx_ata_dma_last_write != 0) {
+			priv->mpc52xx_ata_dma_last_write = 0;
+
+			/* Configure FIFO with granularity to 7 */
+			out_8(&regs->fifo_control, 7);
+			out_be16(&regs->fifo_alarm, 128);
+
+			/* Set FIFO Reset bit (FR) */
+			out_8(&regs->dma_mode, MPC52xx_ATA_DMAMODE_FR);
+		}
+	} else {
+		dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE;
+
+		/* Setup FIFO if direction changed */
+		if (priv->mpc52xx_ata_dma_last_write != 1) {
+			priv->mpc52xx_ata_dma_last_write = 1;
+
+			/* Configure FIFO with granularity to 4 */
+			out_8(&regs->fifo_control, 4);
+			out_be16(&regs->fifo_alarm, 128);
+		}
+	}
+
+	if (priv->timings[qc->dev->devno].using_udma)
+		dma_mode |= MPC52xx_ATA_DMAMODE_UDMA;
+
+	out_8(&regs->dma_mode, dma_mode);
+	priv->waiting_for_dma = ATA_DMA_ACTIVE;
+
+	ata_wait_idle(ap);
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void
+mpc52xx_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum);
+	bcom_enable(priv->dmatsk);
+}
+
+static void
+mpc52xx_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	bcom_disable(priv->dmatsk);
+	bcom_ata_reset_bd(priv->dmatsk);
+	priv->waiting_for_dma = 0;
+
+	/* Check FIFO is OK... */
+	if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
+		dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+			__func__, in_8(&priv->ata_regs->fifo_status));
+}
+
+static u8
+mpc52xx_bmdma_status(struct ata_port *ap)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	/* Check FIFO is OK... */
+	if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) {
+		dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+			__func__, in_8(&priv->ata_regs->fifo_status));
+		return priv->waiting_for_dma | ATA_DMA_ERR;
+	}
+
+	return priv->waiting_for_dma;
+}
+
+static irqreturn_t
+mpc52xx_ata_task_irq(int irq, void *vpriv)
+{
+	struct mpc52xx_ata_priv *priv = vpriv;
+	while (bcom_buffer_done(priv->dmatsk))
+		bcom_retrieve_buffer(priv->dmatsk, NULL, NULL);
+
+	priv->waiting_for_dma |= ATA_DMA_INTR;
+
+	return IRQ_HANDLED;
+}
+
+static struct scsi_host_template mpc52xx_ata_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations mpc52xx_ata_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.sff_dev_select		= mpc52xx_ata_dev_select,
+	.set_piomode		= mpc52xx_ata_set_piomode,
+	.set_dmamode		= mpc52xx_ata_set_dmamode,
+	.bmdma_setup		= mpc52xx_bmdma_setup,
+	.bmdma_start		= mpc52xx_bmdma_start,
+	.bmdma_stop		= mpc52xx_bmdma_stop,
+	.bmdma_status		= mpc52xx_bmdma_status,
+	.qc_prep		= ata_noop_qc_prep,
+};
+
+static int mpc52xx_ata_init_one(struct device *dev,
+				struct mpc52xx_ata_priv *priv,
+				unsigned long raw_ata_regs,
+				int mwdma_mask, int udma_mask)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct ata_ioports *aio;
+
+	host = ata_host_alloc(dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+	ap->flags		|= ATA_FLAG_SLAVE_POSS;
+	ap->pio_mask		= ATA_PIO4;
+	ap->mwdma_mask		= mwdma_mask;
+	ap->udma_mask		= udma_mask;
+	ap->ops			= &mpc52xx_ata_port_ops;
+	host->private_data	= priv;
+
+	aio = &ap->ioaddr;
+	aio->cmd_addr		= NULL;	/* Don't have a classic reg block */
+	aio->altstatus_addr	= &priv->ata_regs->tf_control;
+	aio->ctl_addr		= &priv->ata_regs->tf_control;
+	aio->data_addr		= &priv->ata_regs->tf_data;
+	aio->error_addr		= &priv->ata_regs->tf_features;
+	aio->feature_addr	= &priv->ata_regs->tf_features;
+	aio->nsect_addr		= &priv->ata_regs->tf_sec_count;
+	aio->lbal_addr		= &priv->ata_regs->tf_sec_num;
+	aio->lbam_addr		= &priv->ata_regs->tf_cyl_low;
+	aio->lbah_addr		= &priv->ata_regs->tf_cyl_high;
+	aio->device_addr	= &priv->ata_regs->tf_dev_head;
+	aio->status_addr	= &priv->ata_regs->tf_command;
+	aio->command_addr	= &priv->ata_regs->tf_command;
+
+	ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
+
+	/* activate host */
+	return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
+				 &mpc52xx_ata_sht);
+}
+
+/* ======================================================================== */
+/* OF Platform driver                                                       */
+/* ======================================================================== */
+
+static int mpc52xx_ata_probe(struct platform_device *op)
+{
+	unsigned int ipb_freq;
+	struct resource res_mem;
+	int ata_irq = 0;
+	struct mpc52xx_ata __iomem *ata_regs;
+	struct mpc52xx_ata_priv *priv = NULL;
+	int rv, task_irq;
+	int mwdma_mask = 0, udma_mask = 0;
+	const __be32 *prop;
+	int proplen;
+	struct bcom_task *dmatsk;
+
+	/* Get ipb frequency */
+	ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+	if (!ipb_freq) {
+		dev_err(&op->dev, "could not determine IPB bus frequency\n");
+		return -ENODEV;
+	}
+
+	/* Get device base address from device tree, request the region
+	 * and ioremap it. */
+	rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
+	if (rv) {
+		dev_err(&op->dev, "could not determine device base address\n");
+		return rv;
+	}
+
+	if (!devm_request_mem_region(&op->dev, res_mem.start,
+				     sizeof(*ata_regs), DRV_NAME)) {
+		dev_err(&op->dev, "error requesting register region\n");
+		return -EBUSY;
+	}
+
+	ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs));
+	if (!ata_regs) {
+		dev_err(&op->dev, "error mapping device registers\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * By default, all DMA modes are disabled for the MPC5200.  Some
+	 * boards don't have the required signals routed to make DMA work.
+	 * Also, the MPC5200B has a silicon bug that causes data corruption
+	 * with UDMA if it is used at the same time as the LocalPlus bus.
+	 *
+	 * Instead of trying to guess what modes are usable, check the
+	 * ATA device tree node to find out what DMA modes work on the board.
+	 * UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>"
+	 * to the kernel boot parameters.
+	 *
+	 * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
+	 * UDMA modes 0, 1 and 2.
+	 */
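+	/*
+	 * Example: an mwdma-mode property of 2 yields (1 << 3) - 1 = 0x07,
+	 * which ANDed with ATA_MWDMA2 enables MWDMA modes 0, 1 and 2.
+	 */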
+	prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
+	if ((prop) && (proplen >= 4))
+		mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
+	prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
+	if ((prop) && (proplen >= 4))
+		udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
+
+	ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
+	if (ata_irq == NO_IRQ) {
+		dev_err(&op->dev, "error mapping irq\n");
+		return -EINVAL;
+	}
+
+	/* Prepare our private structure */
+	priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC);
+	if (!priv) {
+		rv = -ENOMEM;
+		goto err1;
+	}
+
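+	/* 1000000000 / (Hz / 1000) = 1e12 / Hz: the IPB period in picoseconds */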
+	priv->ipb_period = 1000000000 / (ipb_freq / 1000);
+	priv->ata_regs = ata_regs;
+	priv->ata_regs_pa = res_mem.start;
+	priv->ata_irq = ata_irq;
+	priv->csel = -1;
+	priv->mpc52xx_ata_dma_last_write = -1;
+
+	if (ipb_freq/1000000 == 66) {
+		priv->mdmaspec = mdmaspec66;
+		priv->udmaspec = udmaspec66;
+	} else {
+		priv->mdmaspec = mdmaspec132;
+		priv->udmaspec = udmaspec132;
+	}
+
+	/* Allocate a BestComm task for DMA */
+	dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE);
+	if (!dmatsk) {
+		dev_err(&op->dev, "bestcomm initialization failed\n");
+		rv = -ENOMEM;
+		goto err1;
+	}
+
+	task_irq = bcom_get_task_irq(dmatsk);
+	rv = devm_request_irq(&op->dev, task_irq, &mpc52xx_ata_task_irq, 0,
+				"ATA task", priv);
+	if (rv) {
+		dev_err(&op->dev, "error requesting DMA IRQ\n");
+		goto err2;
+	}
+	priv->dmatsk = dmatsk;
+
+	/* Init the hw */
+	rv = mpc52xx_ata_hw_init(priv);
+	if (rv) {
+		dev_err(&op->dev, "error initializing hardware\n");
+		goto err2;
+	}
+
+	/* Register ourselves to libata */
+	rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start,
+				  mwdma_mask, udma_mask);
+	if (rv) {
+		dev_err(&op->dev, "error registering with ATA layer\n");
+		goto err2;
+	}
+
+	return 0;
+
+ err2:
+	irq_dispose_mapping(task_irq);
+	bcom_ata_release(dmatsk);
+ err1:
+	irq_dispose_mapping(ata_irq);
+	return rv;
+}
+
+static int
+mpc52xx_ata_remove(struct platform_device *op)
+{
+	struct ata_host *host = platform_get_drvdata(op);
+	struct mpc52xx_ata_priv *priv = host->private_data;
+	int task_irq;
+
+	/* Deregister the ATA interface */
+	ata_platform_remove_one(op);
+
+	/* Clean up DMA */
+	task_irq = bcom_get_task_irq(priv->dmatsk);
+	irq_dispose_mapping(task_irq);
+	bcom_ata_release(priv->dmatsk);
+	irq_dispose_mapping(priv->ata_irq);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int
+mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
+{
+	struct ata_host *host = platform_get_drvdata(op);
+
+	return ata_host_suspend(host, state);
+}
+
+static int
+mpc52xx_ata_resume(struct platform_device *op)
+{
+	struct ata_host *host = platform_get_drvdata(op);
+	struct mpc52xx_ata_priv *priv = host->private_data;
+	int rv;
+
+	rv = mpc52xx_ata_hw_init(priv);
+	if (rv) {
+		dev_err(host->dev, "error initializing hardware\n");
+		return rv;
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static const struct of_device_id mpc52xx_ata_of_match[] = {
+	{ .compatible = "fsl,mpc5200-ata", },
+	{ .compatible = "mpc5200-ata", },
+	{},
+};
+
+static struct platform_driver mpc52xx_ata_of_platform_driver = {
+	.probe		= mpc52xx_ata_probe,
+	.remove		= mpc52xx_ata_remove,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= mpc52xx_ata_suspend,
+	.resume		= mpc52xx_ata_resume,
+#endif
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = mpc52xx_ata_of_match,
+	},
+};
+
+module_platform_driver(mpc52xx_ata_of_platform_driver);
+
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
+
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
new file mode 100644
index 0000000..202b4d6
--- /dev/null
+++ b/drivers/ata/pata_mpiix.c
@@ -0,0 +1,238 @@
+/*
+ * pata_mpiix.c 	- Intel MPIIX PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * The MPIIX is different enough from the PIIX4 and friends that we give it
+ * a separate driver. The old ide/pci code handles this by just not tuning
+ * MPIIX at all.
+ *
+ * The MPIIX also differs in another important way from the majority of PIIX
+ * devices. The chip is a bridge (pardon the pun) between the old world of
+ * ISA IDE and PCI IDE. Although the ATA timings are PCI-configured, the actual
+ * IDE controller is not decoded in PCI space and the chip does not claim to
+ * be IDE class PCI. This requires slightly non-standard probe logic compared
+ * with PCI IDE and also that we do not disable the device when our driver is
+ * unloaded (as it has many other functions).
+ *
+ * The driver consciously keeps this logic internally to avoid pushing quirky
+ * PATA history into the clean libata layer.
+ *
+ * ThinkPad specific note: if you boot an MPIIX-based ThinkPad with a PCMCIA
+ * hard disk present, this driver will not detect it. This is not a bug. In this
+ * configuration the secondary port of the MPIIX is disabled, and the addresses
+ * are decoded by the PCMCIA bridge, which leaves them for a generic IDE driver
+ * to operate.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_mpiix"
+#define DRV_VERSION "0.7.7"
+
+enum {
+	IDETIM = 0x6C,		/* IDE control register */
+	IORDY = (1 << 1),
+	PPE = (1 << 2),
+	FTIM = (1 << 0),
+	ENABLED = (1 << 15),
+	SECONDARY = (1 << 14)
+};
+
+static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
+
+	if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	mpiix_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. The MPIIX allows us to program the
+ *	IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
+ *	prefetching or IORDY are used.
+ *
+ *	This would get very ugly because we can only program timing for one
+ *	device at a time; the other gets PIO0. Fortunately libata calls
+ *	our qc_issue method before a command is issued, so we can flip the
+ *	timings back and forth to reduce the pain.
+ */
+
+static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int control = 0;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 idetim;
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(pdev, IDETIM, &idetim);
+
+	/* Mask the IORDY/TIME/PPE for this device */
+	if (adev->class == ATA_DEV_ATA)
+		control |= PPE;		/* Enable prefetch/posting for disk */
+	if (ata_pio_need_iordy(adev))
+		control |= IORDY;
+	if (pio > 1)
+		control |= FTIM;	/* This drive is on the fast timing bank */
+
+	/* Mask out timing and clear both TIME bank selects */
+	idetim &= 0xCCEE;
+	idetim &= ~(0x07  << (4 * adev->devno));
+	idetim |= control << (4 * adev->devno);
+
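+	/* The table values land as ISP in bits 13:12 and RTC in bits 9:8 */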
+	idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+	pci_write_config_word(pdev, IDETIM, idetim);
+
+	/* We use ap->private_data as a pointer to the device currently
+	   loaded for timing */
+	ap->private_data = adev;
+}
+
+/**
+ *	mpiix_qc_issue		-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If modes have been configured and the channel data is not loaded
+	   then load it. We have to check if pio_mode is set as the core code
+	   does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+	   logical */
+
+	if (adev->pio_mode && adev != ap->private_data)
+		mpiix_set_piomode(ap, adev);
+
+	return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template mpiix_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations mpiix_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.qc_issue	= mpiix_qc_issue,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= mpiix_set_piomode,
+	.prereset	= mpiix_pre_reset,
+	.sff_data_xfer	= ata_sff_data_xfer32,
+};
+
+static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* Single threaded by the PCI probe logic */
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *cmd_addr, *ctl_addr;
+	u16 idetim;
+	int cmd, ctl, irq;
+
+	ata_print_version_once(&dev->dev, DRV_VERSION);
+
+	host = ata_host_alloc(&dev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	/* MPIIX has many functions which can be turned on or off according
+	   to other devices present. Make sure IDE is enabled before we try
+	   and use it */
+
+	pci_read_config_word(dev, IDETIM, &idetim);
+	if (!(idetim & ENABLED))
+		return -ENODEV;
+
+	/* See if it's primary or secondary channel... */
+	if (!(idetim & SECONDARY)) {
+		cmd = 0x1F0;
+		ctl = 0x3F6;
+		irq = 14;
+	} else {
+		cmd = 0x170;
+		ctl = 0x376;
+		irq = 15;
+	}
+
+	cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
+	ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
+	if (!cmd_addr || !ctl_addr)
+		return -ENOMEM;
+
+	ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
+
+	/* We do our own plumbing to avoid leaking special cases for whacko
+	   ancient hardware into the core code. There are two issues to
+	   worry about.  #1 The chip is a bridge, so being in legacy mode
+	   with no BARs set fools the setup.  #2 If you pci_disable_device()
+	   the MPIIX, your box goes castors up */
+
+	ap->ops = &mpiix_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.cmd_addr = cmd_addr;
+	ap->ioaddr.ctl_addr = ctl_addr;
+	ap->ioaddr.altstatus_addr = ctl_addr;
+
+	/* Let libata fill in the port details */
+	ata_sff_std_ports(&ap->ioaddr);
+
+	/* activate host */
+	return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
+				 &mpiix_sht);
+}
+
+static const struct pci_device_id mpiix[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
+
+	{ },
+};
+
+static struct pci_driver mpiix_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= mpiix,
+	.probe 		= mpiix_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(mpiix_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, mpiix);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
new file mode 100644
index 0000000..0ea1833
--- /dev/null
+++ b/drivers/ata/pata_netcell.c
@@ -0,0 +1,107 @@
+/*
+ *    pata_netcell.c - Netcell PATA driver
+ *
+ *	(c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_netcell"
+#define DRV_VERSION	"0.1.7"
+
+/* No PIO or DMA methods needed for this device */
+
+static unsigned int netcell_read_id(struct ata_device *adev,
+					struct ata_taskfile *tf, u16 *id)
+{
+	unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+	/* Firmware forgets to mark words 85-87 valid */
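+	/* ATA_ID_CSF_DEFAULT is word 87; bit 14 (0x4000) is its validity signature */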
+	if (err_mask == 0)
+		id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+	return err_mask;
+}
+
+static struct scsi_host_template netcell_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations netcell_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_80wire,
+	.read_id	= netcell_read_id,
+};
+
+/**
+ *	netcell_init_one - Register Netcell ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in netcell_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		/* Actually we don't really care about these as the
+		   firmware deals with it */
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5, /* UDMA 100 (modes 0-5) */
+		.port_ops	= &netcell_ops,
+	};
+	const struct ata_port_info *port_info[] = { &info, NULL };
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* Any chip specific setup/optimisation/messages here */
+	ata_pci_bmdma_clear_simplex(pdev);
+
+	/* And let the library code do the work */
+	return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
+}
+
+static const struct pci_device_id netcell_pci_tbl[] = {
+	{ PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver netcell_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= netcell_pci_tbl,
+	.probe			= netcell_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(netcell_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
new file mode 100644
index 0000000..44f97ad
--- /dev/null
+++ b/drivers/ata/pata_ninja32.c
@@ -0,0 +1,197 @@
+/*
+ * pata_ninja32.c 	- Ninja32 PATA for new ATA layer
+ *			  (C) 2007 Red Hat Inc
+ *
+ * Note: The controller, like many controllers, has shared timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
+ * in the dma_stop function. Thus we actually don't need a set_dmamode
+ * method as the PIO method is always called and will set the right PIO
+ * timing parameters.
+ *
+ * The Ninja32 Cardbus is not a generic SFF controller. Instead it is
+ * laid out as follows off BAR 0. This is based upon Mark Lord's delkin
+ * driver and the extensive analysis done by the BSD developers, notably
+ * ITOH Yasufumi.
+ *
+ *	Base + 0x00 IRQ Status
+ *	Base + 0x01 IRQ control
+ *	Base + 0x02 Chipset control
+ *	Base + 0x03 Unknown
+ *	Base + 0x04 VDMA and reset control + wait bits
+ *	Base + 0x08 BMIMBA
+ *	Base + 0x0C DMA Length
+ *	Base + 0x10 Taskfile
+ *	Base + 0x18 BMDMA Status ?
+ *	Base + 0x1C
+ *	Base + 0x1D Bus master control
+ *		bit 0 = enable
+ *		bit 1 = 0 write/1 read
+ *		bit 2 = 1 sgtable
+ *		bit 3 = go
+ *		bit 4-6 wait bits
+ *		bit 7 = done
+ *	Base + 0x1E AltStatus
+ *	Base + 0x1F timing register
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_ninja32"
+#define DRV_VERSION "0.1.5"
+
+/**
+ *	ninja32_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	but we want to set the PIO timing by default.
+ */
+
+static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u16 pio_timing[5] = {
+		0xd6, 0x85, 0x44, 0x33, 0x13
+	};
+	iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
+		 ap->ioaddr.bmdma_addr + 0x1f);
+	ap->private_data = adev;
+}
+
+static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
+{
+	struct ata_device *adev = &ap->link.device[device];
+	if (ap->private_data != adev) {
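+		/* 0xd6 is pio_timing[0], a safe PIO0 setting while we switch */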
+		iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
+		ata_sff_dev_select(ap, device);
+		ninja32_set_piomode(ap, adev);
+	}
+}
+
+static struct scsi_host_template ninja32_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ninja32_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.sff_dev_select = ninja32_dev_select,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= ninja32_set_piomode,
+	.sff_data_xfer	= ata_sff_data_xfer32
+};
+
+static void ninja32_program(void __iomem *base)
+{
+	iowrite8(0x05, base + 0x01);	/* Enable interrupt lines */
+	iowrite8(0xBE, base + 0x02);	/* Burst, ?? setup */
+	iowrite8(0x01, base + 0x03);	/* Unknown */
+	iowrite8(0x20, base + 0x04);	/* WAIT0 */
+	iowrite8(0x8f, base + 0x05);	/* Unknown */
+	iowrite8(0xa4, base + 0x1c);	/* Unknown */
+	iowrite8(0x83, base + 0x1d);	/* BMDMA control: WAIT0 */
+}
+
+static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *base;
+	int rc;
+
+	host = ata_host_alloc(&dev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	/* Set up the PCI device */
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+	rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(dev);
+	if (rc)
+		return rc;
+
+	host->iomap = pcim_iomap_table(dev);
+	rc = dma_set_mask(&dev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	pci_set_master(dev);
+
+	/* Set up the register mappings. We use the I/O mapping; only the
+	   older chips also expose MMIO on BAR 1 */
+	base = host->iomap[0];
+	if (!base)
+		return -ENOMEM;
+	ap->ops = &ninja32_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.cmd_addr = base + 0x10;
+	ap->ioaddr.ctl_addr = base + 0x1E;
+	ap->ioaddr.altstatus_addr = base + 0x1E;
+	ap->ioaddr.bmdma_addr = base;
+	ata_sff_std_ports(&ap->ioaddr);
+	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+
+	ninja32_program(base);
+	/* FIXME: Should we disable them at remove ? */
+	return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &ninja32_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ninja32_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+	ninja32_program(host->iomap[0]);
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id ninja32[] = {
+	{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ },
+};
+
+static struct pci_driver ninja32_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ninja32,
+	.probe 		= ninja32_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ninja32_reinit_one,
+#endif
+};
+
+module_pci_driver(ninja32_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ninja32);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
new file mode 100644
index 0000000..200e1eb
--- /dev/null
+++ b/drivers/ata/pata_ns87410.c
@@ -0,0 +1,176 @@
+/*
+ * pata_ns87410.c 	- National Semiconductor 87410 PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_ns87410"
+#define DRV_VERSION "0.4.6"
+
+/**
+ *	ns87410_pre_reset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Check enabled ports
+ */
+
+static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits ns87410_enable_bits[] = {
+		{ 0x43, 1, 0x08, 0x08 },
+		{ 0x47, 1, 0x08, 0x08 }
+	};
+
+	if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	ns87410_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program timing data. This is kept per channel not per device,
+ *	and only affects the data port.
+ */
+
+static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x40 + 4 * ap->port_no;
+	u8 idetcr, idefr;
+	struct ata_timing at;
+
+	static const u8 activebits[15] = {
+		0, 1, 2, 3, 4,
+		5, 5, 6, 6, 6,
+		6, 7, 7, 7, 7
+	};
+
+	static const u8 recoverbits[12] = {
+		0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
+	};
+
+	pci_read_config_byte(pdev, port + 3, &idefr);
+
+	if (ata_pio_need_iordy(adev))
+		idefr |= 0x04;	/* IORDY enable */
+	else
+		idefr &= ~0x04;
+
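+	/* 30303 is one period of the ~33 MHz clock, expressed in picoseconds */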
+	if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
+		dev_err(&pdev->dev, "unknown mode %d\n", adev->pio_mode);
+		return;
+	}
+
+	at.active = clamp_val(at.active, 2, 16) - 2;
+	at.setup = clamp_val(at.setup, 1, 4) - 1;
+	at.recover = clamp_val(at.recover, 1, 12) - 1;
+
+	idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
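+	/* setup lands in bits 7:6, recovery in bits 5:3, active in bits 2:0 */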
+
+	pci_write_config_byte(pdev, port, idetcr);
+	pci_write_config_byte(pdev, port + 3, idefr);
+	/* We use ap->private_data as a pointer to the device currently
+	   loaded for timing */
+	ap->private_data = adev;
+}
+
+/**
+ *	ns87410_qc_issue	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.
+ */
+
+static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If modes have been configured and the channel data is not loaded
+	   then load it. We have to check if pio_mode is set as the core code
+	   does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+	   logical */
+
+	if (adev->pio_mode && adev != ap->private_data)
+		ns87410_set_piomode(ap, adev);
+
+	return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template ns87410_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ns87410_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.qc_issue	= ns87410_qc_issue,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= ns87410_set_piomode,
+	.prereset	= ns87410_pre_reset,
+};
+
+static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO3,
+		.port_ops = &ns87410_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL, 0);
+}
+
+static const struct pci_device_id ns87410[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), },
+
+	{ },
+};
+
+static struct pci_driver ns87410_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ns87410,
+	.probe 		= ns87410_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(ns87410_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87410);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
new file mode 100644
index 0000000..84c6b22
--- /dev/null
+++ b/drivers/ata/pata_ns87415.c
@@ -0,0 +1,422 @@
+/*
+ *    pata_ns87415.c - NS87415 (non PARISC) PATA
+ *
+ *	(C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk>
+ *
+ *    This is a fairly generic MWDMA controller. It has some limitations,
+ *    as it requires timing reloads on PIO/DMA transitions, but it is
+ *    otherwise fairly well designed.
+ *
+ *    This driver assumes the firmware has left the chip in a valid ST506
+ *    compliant state, either legacy IRQ 14/15 or native INTA shared. You
+ *    may need to add platform code if your system fails to do this.
+ *
+ *    The same cell appears in the 87560 controller used by some PARISC
+ *    systems. This has its own special mountain of errata.
+ *
+ *    TODO:
+ *	Test PARISC SuperIO
+ *	Get someone to test on SPARC
+ *	Implement lazy pio/dma switching for better performance
+ *	8bit shared timing.
+ *	See if we need to kill the FIFO for ATAPI
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_ns87415"
+#define DRV_VERSION	"0.0.1"
+
+/**
+ *	ns87415_set_mode - Initialize host controller mode timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *	@mode: Mode to set
+ *
+ *	Program the mode registers for this controller, channel and
+ *	device. Because the chip is quite an old design we have to do this
+ *	for PIO/DMA switches.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	int unit		= 2 * ap->port_no + adev->devno;
+	int timing		= 0x44 + 2 * unit;
+	unsigned long T		= 1000000000 / 33333;	/* one 33 MHz PCI clock, in ps */
+	struct ata_timing t;
+	u16 clocking;
+	u8 iordy;
+	u8 status;
+
+	/* Timing register format: the low nybble is 17 - active time and
+	   the high nybble is 16 - recovery time, both in PCI clocks */
+
+	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
+
+	clocking = 17 - clamp_val(t.active, 2, 17);
+	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
+	/* Use the same timing for read and write bytes */
+	clocking |= (clocking << 8);
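+	/*
+	 * Example: 3 active clocks and 2 recovery clocks encode as
+	 * (17 - 3) | ((16 - 2) << 4) = 0xEE, duplicated to 0xEEEE.
+	 */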
+	pci_write_config_word(dev, timing, clocking);
+
+	/* Set the IORDY enable versus DMA enable on or off properly */
+	pci_read_config_byte(dev, 0x42, &iordy);
+	iordy &= ~(1 << (4 + unit));
+	if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
+		iordy |= (1 << (4 + unit));
+
+	/* Paranoia: We shouldn't ever get here with busy write buffers
+	   but if so wait */
+
+	pci_read_config_byte(dev, 0x43, &status);
+	while (status & 0x03) {
+		udelay(1);
+		pci_read_config_byte(dev, 0x43, &status);
+	}
+	/* Flip the IORDY/DMA bits now we are sure the write buffers are
+	   clear */
+	pci_write_config_byte(dev, 0x42, iordy);
+
+	/* TODO: Set byte 54 command timing to the best 8bit
+	   mode shared by all four devices */
+}
+
+/**
+ *	ns87415_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	ns87415_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	ns87415_bmdma_setup		-	Set up DMA
+ *	@qc: Command block
+ *
+ *	Set up for bus mastering DMA. We have to do this ourselves
+ *	rather than use the helper due to a chip erratum
+ */
+
+static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	/* Due to an erratum we need to write these bits to the wrong
+	   place, which bizarrely saves us an I/O */
+	dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ns87415_bmdma_start		-	Begin DMA transfer
+ *	@qc: Command block
+ *
+ *	Switch the timings for the chip and set up for a DMA transfer
+ *	before the DMA burst begins.
+ *
+ *	FIXME: We should do lazy switching on bmdma_start versus
+ *	ata_pio_data_xfer for better performance.
+ */
+
+static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
+{
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	ns87415_bmdma_stop		-	End DMA transfer
+ *	@qc: Command block
+ *
+ *	End DMA mode and switch the controller back into PIO mode
+ */
+
+static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+/**
+ *	ns87415_irq_clear		-	Clear interrupt
+ *	@ap: Channel to clear
+ *
+ *	Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the
+ *	error bits) are reset by writing to register 00 or 08.
+ */
+
+static void ns87415_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+	iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
+			mmio + ATA_DMA_CMD);
+}
+
+/**
+ *	ns87415_check_atapi_dma		-	ATAPI DMA filter
+ *	@qc: Command block
+ *
+ *	Disable ATAPI DMA (for now). We may be able to do DMA if we
+ *	kill the prefetching. This isn't clear.
+ */
+
+static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return -EOPNOTSUPP;
+}
+
+#if defined(CONFIG_SUPERIO)
+
+/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
+ * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
+ * which use the integrated NS87514 cell for CD-ROM support.
+ * i.e. we have to support it for CD-ROM installs.
+ * See drivers/parisc/superio.c for more gory details.
+ *
+ * Workarounds taken from drivers/ide/pci/ns87415.c
+ */
+
+#include <asm/superio.h>
+
+#define SUPERIO_IDE_MAX_RETRIES 25
+
+/**
+ *	ns87560_read_buggy	-	workaround buggy Super I/O chip
+ *	@port: Port to read
+ *
+ *	Work around chipset problems in the 87560 SuperIO chip
+ */
+
+static u8 ns87560_read_buggy(void __iomem *port)
+{
+	u8 tmp;
+	int retries = SUPERIO_IDE_MAX_RETRIES;
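+	/* Up to 25 attempts 50 us apart: a worst case wait of ~1.25 ms */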
+	do {
+		tmp = ioread8(port);
+		if (tmp != 0)
+			return tmp;
+		udelay(50);
+	} while(retries-- > 0);
+	return tmp;
+}
+
+/**
+ *	ns87560_check_status
+ *	@ap: channel to check
+ *
+ *	Return the status of the channel working around the
+ *	87560 flaws.
+ */
+
+static u8 ns87560_check_status(struct ata_port *ap)
+{
+	return ns87560_read_buggy(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ns87560_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf. Work around the 87560 bugs.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ns87560_check_status(ap);
+	tf->feature = ioread8(ioaddr->error_addr);
+	tf->nsect = ioread8(ioaddr->nsect_addr);
+	tf->lbal = ioread8(ioaddr->lbal_addr);
+	tf->lbam = ioread8(ioaddr->lbam_addr);
+	tf->lbah = ioread8(ioaddr->lbah_addr);
+	tf->device = ns87560_read_buggy(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ioread8(ioaddr->error_addr);
+		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/**
+ *	ns87560_bmdma_status
+ *	@ap: channel to check
+ *
+ *	Return the DMA status of the channel working around the
+ *	87560 flaws.
+ */
+
+static u8 ns87560_bmdma_status(struct ata_port *ap)
+{
+	return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+#endif		/* 87560 SuperIO Support */
+
+static struct ata_port_operations ns87415_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.check_atapi_dma	= ns87415_check_atapi_dma,
+	.bmdma_setup		= ns87415_bmdma_setup,
+	.bmdma_start		= ns87415_bmdma_start,
+	.bmdma_stop		= ns87415_bmdma_stop,
+	.sff_irq_clear		= ns87415_irq_clear,
+
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= ns87415_set_piomode,
+};
+
+#if defined(CONFIG_SUPERIO)
+static struct ata_port_operations ns87560_pata_ops = {
+	.inherits		= &ns87415_pata_ops,
+	.sff_tf_read		= ns87560_tf_read,
+	.sff_check_status	= ns87560_check_status,
+	.bmdma_status		= ns87560_bmdma_status,
+};
+#endif
+
+static struct scsi_host_template ns87415_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static void ns87415_fixup(struct pci_dev *pdev)
+{
+	/* Select 512 byte sectors */
+	pci_write_config_byte(pdev, 0x55, 0xEE);
+	/* Select PIO0 8bit clocking */
+	pci_write_config_byte(pdev, 0x54, 0xB7);
+}
+
+/**
+ *	ns87415_init_one - Register 87415 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in ns87415_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.port_ops	= &ns87415_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	int rc;
+#if defined(CONFIG_SUPERIO)
+	static const struct ata_port_info info87560 = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.port_ops	= &ns87560_pata_ops,
+	};
+
+	if (PCI_SLOT(pdev->devfn) == 0x0E)
+		ppi[0] = &info87560;
+#endif
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	ns87415_fixup(pdev);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
+}
+
+static const struct pci_device_id ns87415_pci_tbl[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), },
+
+	{ }	/* terminate list */
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int ns87415_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	ns87415_fixup(pdev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static struct pci_driver ns87415_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= ns87415_pci_tbl,
+	.probe			= ns87415_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ns87415_reinit_one,
+#endif
+};
+
+module_pci_driver(ns87415_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
new file mode 100644
index 0000000..d3d851b
--- /dev/null
+++ b/drivers/ata/pata_octeon_cf.c
@@ -0,0 +1,1068 @@
+/*
+ * Driver for the Octeon bootbus compact flash.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 - 2012 Cavium Inc.
+ * Copyright (C) 2008 Wind River Systems
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/byteorder.h>
+#include <asm/octeon/octeon.h>
+
+/*
+ * The Octeon bootbus compact flash interface is connected in at least
+ * 3 different configurations on various evaluation boards:
+ *
+ * -- 8  bits no irq, no DMA
+ * -- 16 bits no irq, no DMA
+ * -- 16 bits True IDE mode with DMA, but no irq.
+ *
+ * In the last case the DMA engine can generate an interrupt when the
+ * transfer is complete.  For the first two cases only PIO is supported.
+ *
+ */
+
+#define DRV_NAME	"pata_octeon_cf"
+#define DRV_VERSION	"2.2"
+
+/* Poll interval in ns. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
+
+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50
+
+struct octeon_cf_port {
+	struct hrtimer delayed_finish;
+	struct ata_port *ap;
+	int dma_finished;
+	void		*c0;
+	unsigned int cs0;
+	unsigned int cs1;
+	bool is_true_ide;
+	u64 dma_base;
+};
+
+static struct scsi_host_template octeon_cf_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+		 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
+/**
+ * Convert a nanosecond-based time to the setting used in the
+ * boot bus timing register, based on the timing multiple.
+ */
+static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
+{
+	unsigned int val;
+
+	/*
+	 * Compute # of eclock periods to get desired duration in
+	 * nanoseconds.
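+	 * For example, at an 800 MHz I/O clock with tim_mult 4, 20 ns is
+	 * DIV_ROUND_UP(20 * 800, 1000 * 4) = 4 periods.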
+	 */
+	val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
+			  1000 * tim_mult);
+
+	return val;
+}
+
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
+{
+	union cvmx_mio_boot_reg_cfgx reg_cfg;
+	unsigned int tim_mult;
+
+	switch (multiplier) {
+	case 8:
+		tim_mult = 3;
+		break;
+	case 4:
+		tim_mult = 0;
+		break;
+	case 2:
+		tim_mult = 2;
+		break;
+	default:
+		tim_mult = 1;
+		break;
+	}
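+	/* tim_mult field encoding: 0 = 4x, 1 = 1x, 2 = 2x, 3 = 8x */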
+
+	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
+	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
+	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
+	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
+	reg_cfg.s.we_ext = 0;	/* No write enable extension */
+	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
+	reg_cfg.s.en = 1;	/* Enable this region */
+	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
+	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
+}
+
+/**
+ * Called after libata determines the needed PIO mode. This
+ * function programs the Octeon bootbus regions to support the
+ * timing requirements of the PIO mode.
+ *
+ * @ap:     ATA port information
+ * @dev:    ATA device
+ */
+static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_port *cf_port = ap->private_data;
+	union cvmx_mio_boot_reg_timx reg_tim;
+	int T;
+	struct ata_timing timing;
+
+	unsigned int div;
+	int use_iordy;
+	int trh;
+	int pause;
+	/* These names are timing parameters from the ATA spec */
+	int t2;
+
+	/*
+	 * A divisor value of four will overflow the timing fields at
+	 * clock rates greater than 800MHz
+	 */
+	if (octeon_get_io_clock_rate() <= 800000000)
+		div = 4;
+	else
+		div = 8;
+	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
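+	/* e.g. 800 MHz and div = 4 give T = (4 * 10^12) / (8 * 10^8) = 5000 ps */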
+
+	BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
+
+	t2 = timing.active;
+	if (t2)
+		t2--;
+
+	trh = ns_to_tim_reg(div, 20);
+	if (trh)
+		trh--;
+
+	pause = (int)timing.cycle - (int)timing.active -
+		(int)timing.setup - trh;
+	if (pause < 0)
+		pause = 0;
+	if (pause)
+		pause--;
+
+	octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+	if (cf_port->is_true_ide)
+		/* True IDE mode, program both chip selects.  */
+		octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
+
+	use_iordy = ata_pio_need_iordy(dev);
+
+	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
+	/* Disable page mode */
+	reg_tim.s.pagem = 0;
+	/* Enable dynamic timing */
+	reg_tim.s.waitm = use_iordy;
+	/* Pages are disabled */
+	reg_tim.s.pages = 0;
+	/* We don't use multiplexed address mode */
+	reg_tim.s.ale = 0;
+	/* Not used */
+	reg_tim.s.page = 0;
+	/* Time after IORDY to continue to assert the data */
+	reg_tim.s.wait = 0;
+	/* Time to wait to complete the cycle. */
+	reg_tim.s.pause = pause;
+	/* How long to hold after a write to de-assert CE. */
+	reg_tim.s.wr_hld = trh;
+	/* How long to wait after a read to de-assert CE. */
+	reg_tim.s.rd_hld = trh;
+	/* How long write enable is asserted */
+	reg_tim.s.we = t2;
+	/* How long read enable is asserted */
+	reg_tim.s.oe = t2;
+	/* Time after CE that read/write starts */
+	reg_tim.s.ce = ns_to_tim_reg(div, 5);
+	/* Time before CE that address is valid */
+	reg_tim.s.adr = 0;
+
+	/* Program the bootbus region timing for the data port chip select. */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+	if (cf_port->is_true_ide)
+		/* True IDE mode, program both chip selects.  */
+		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+			       reg_tim.u64);
+}
+
+static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_port *cf_port = ap->private_data;
+	union cvmx_mio_boot_pin_defs pin_defs;
+	union cvmx_mio_boot_dma_timx dma_tim;
+	unsigned int oe_a;
+	unsigned int oe_n;
+	unsigned int dma_ackh;
+	unsigned int dma_arq;
+	unsigned int pause;
+	unsigned int T0, Tkr, Td;
+	unsigned int tim_mult;
+	int c;
+
+	const struct ata_timing *timing;
+
+	timing = ata_timing_find_mode(dev->dma_mode);
+	T0	= timing->cycle;
+	Td	= timing->active;
+	Tkr	= timing->recover;
+	dma_ackh = timing->dmack_hold;
+
+	dma_tim.u64 = 0;
+	/* dma_tim.s.tim_mult = 0 --> 4x */
+	tim_mult = 4;
+
+	/* not spec'ed, value in eclocks, not affected by tim_mult */
+	dma_arq = 8;
+	pause = 25 - dma_arq * 1000 /
+		(octeon_get_io_clock_rate() / 1000000); /* Tz */
+
+	oe_a = Td;
+	/* Tkr from cf spec, lengthened to meet T0 */
+	oe_n = max(T0 - oe_a, Tkr);
+
+	pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+	/* DMA channel number. */
+	c = (cf_port->dma_base & 8) >> 3;
+
+	/* Invert the polarity if the default is 0*/
+	dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;
+
+	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
+	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
+
+	/*
+	 * This is tI; the CF spec says 0, but Sony CF cards require
+	 * more, so we use 20 ns.
+	 */
+	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
+	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
+
+	dma_tim.s.dmarq = dma_arq;
+	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
+
+	dma_tim.s.rd_dly = 0;	/* Sample right on edge */
+
+	/*  writes only */
+	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
+	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
+
+	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
+		 ns_to_tim_reg(tim_mult, 60));
+	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
+		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
+		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
+
+	cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
+}
+
+/**
+ * Handle an 8 bit I/O request.
+ *
+ * @qc:         Queued command
+ * @buffer:     Data buffer
+ * @buflen:     Length of the buffer.
+ * @rw:         True to write.
+ */
+static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
+					 unsigned char *buffer,
+					 unsigned int buflen,
+					 int rw)
+{
+	struct ata_port *ap		= qc->dev->link->ap;
+	void __iomem *data_addr		= ap->ioaddr.data_addr;
+	unsigned long words;
+	int count;
+
+	words = buflen;
+	if (rw) {
+		count = 16;
+		while (words--) {
+			iowrite8(*buffer, data_addr);
+			buffer++;
+			/*
+			 * Every 16 writes do a read so the bootbus
+			 * FIFO doesn't fill up.
+			 */
+			if (--count == 0) {
+				ioread8(ap->ioaddr.altstatus_addr);
+				count = 16;
+			}
+		}
+	} else {
+		ioread8_rep(data_addr, buffer, words);
+	}
+	return buflen;
+}
+
+/**
+ * Handle a 16 bit I/O request.
+ *
+ * @qc:         Queued command
+ * @buffer:     Data buffer
+ * @buflen:     Length of the buffer.
+ * @rw:         True to write.
+ */
+static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
+					  unsigned char *buffer,
+					  unsigned int buflen,
+					  int rw)
+{
+	struct ata_port *ap		= qc->dev->link->ap;
+	void __iomem *data_addr		= ap->ioaddr.data_addr;
+	unsigned long words;
+	int count;
+
+	words = buflen / 2;
+	if (rw) {
+		count = 16;
+		while (words--) {
+			iowrite16(*(uint16_t *)buffer, data_addr);
+			buffer += sizeof(uint16_t);
+			/*
+			 * Every 16 writes do a read so the bootbus
+			 * FIFO doesn't fill up.
+			 */
+			if (--count == 0) {
+				ioread8(ap->ioaddr.altstatus_addr);
+				count = 16;
+			}
+		}
+	} else {
+		while (words--) {
+			*(uint16_t *)buffer = ioread16(data_addr);
+			buffer += sizeof(uint16_t);
+		}
+	}
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		__le16 align_buf[1] = { 0 };
+
+		if (rw == READ) {
+			align_buf[0] = cpu_to_le16(ioread16(data_addr));
+			memcpy(buffer, align_buf, 1);
+		} else {
+			memcpy(align_buf, buffer, 1);
+			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+		}
+		words++;
+	}
+	return buflen;
+}
+
+/**
+ * Read the taskfile for 16bit non-True IDE only.
+ */
+static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	u16 blob;
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
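+	/* Registers are packed two per 16-bit word; e.g. the word at
+	   base + 2 holds nsect in the low byte and lbal in the high byte. */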
+
+	blob = __raw_readw(base + 0xc);
+	tf->feature = blob >> 8;
+
+	blob = __raw_readw(base + 2);
+	tf->nsect = blob & 0xff;
+	tf->lbal = blob >> 8;
+
+	blob = __raw_readw(base + 4);
+	tf->lbam = blob & 0xff;
+	tf->lbah = blob >> 8;
+
+	blob = __raw_readw(base + 6);
+	tf->device = blob & 0xff;
+	tf->command = blob >> 8;
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		if (likely(ap->ioaddr.ctl_addr)) {
+			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
+
+			blob = __raw_readw(base + 0xc);
+			tf->hob_feature = blob >> 8;
+
+			blob = __raw_readw(base + 2);
+			tf->hob_nsect = blob & 0xff;
+			tf->hob_lbal = blob >> 8;
+
+			blob = __raw_readw(base + 4);
+			tf->hob_lbam = blob & 0xff;
+			tf->hob_lbah = blob >> 8;
+
+			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
+			ap->last_ctl = tf->ctl;
+		} else {
+			WARN_ON(1);
+		}
+	}
+}
+
+static u8 octeon_cf_check_status16(struct ata_port *ap)
+{
+	u16 blob;
+	void __iomem *base = ap->ioaddr.data_addr;
+
+	blob = __raw_readw(base + 6);
+	return blob >> 8;
+}
+
+static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
+				 unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *base = ap->ioaddr.data_addr;
+	int rc;
+	u8 err;
+
+	DPRINTK("about to softreset\n");
+	__raw_writew(ap->ctl, base + 0xe);
+	udelay(20);
+	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
+	udelay(20);
+	__raw_writew(ap->ctl, base + 0xe);
+
+	rc = ata_sff_wait_after_reset(link, 1, deadline);
+	if (rc) {
+		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
+	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+	return 0;
+}
+
+/**
+ * Load the taskfile for 16bit non-True IDE only.  The device_addr is
+ * not loaded; we do this as part of octeon_cf_exec_command16.
+ */
+static void octeon_cf_tf_load16(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
+
+	if (tf->ctl != ap->last_ctl) {
+		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		__raw_writew(tf->hob_feature << 8, base + 0xc);
+		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
+		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+	if (is_addr) {
+		__raw_writew(tf->feature << 8, base + 0xc);
+		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
+		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
+		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+	ata_wait_idle(ap);
+}
+
+static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
+{
+	/* There is only one device; do nothing. */
+}
+
+/*
+ * Issue ATA command to host controller.  The device_addr is also sent
+ * as it must be written in a combined write with the command.
+ */
+static void octeon_cf_exec_command16(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
+	u16 blob;
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		VPRINTK("device 0x%X\n", tf->device);
+		blob = tf->device;
+	} else {
+		blob = 0;
+	}
+
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+	blob |= (tf->command << 8);
+	__raw_writew(blob, base + 6);
+
+	ata_wait_idle(ap);
+}
+
+static void octeon_cf_ata_port_noaction(struct ata_port *ap)
+{
+}
+
+static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct octeon_cf_port *cf_port;
+
+	cf_port = ap->private_data;
+	DPRINTK("ENTER\n");
+	/* issue r/w command */
+	qc->cursg = qc->sg;
+	cf_port->dma_finished = 0;
+	ap->ops->sff_exec_command(ap, &qc->tf);
+	DPRINTK("EXIT\n");
+}
+
+/**
+ * Start a DMA transfer that was already setup
+ *
+ * @qc:     Information about the DMA
+ */
+static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
+{
+	struct octeon_cf_port *cf_port = qc->ap->private_data;
+	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
+	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
+	struct scatterlist *sg;
+
+	VPRINTK("%d scatterlists\n", qc->n_elem);
+
+	/* Get the scatter list entry we need to DMA into */
+	sg = qc->cursg;
+	BUG_ON(!sg);
+
+	/*
+	 * Clear the DMA complete status.
+	 */
+	mio_boot_dma_int.u64 = 0;
+	mio_boot_dma_int.s.done = 1;
+	cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);
+
+	/* Enable the interrupt.  */
+	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);
+
+	/* Set the direction of the DMA */
+	mio_boot_dma_cfg.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+	mio_boot_dma_cfg.s.endian = 1;
+#endif
+	mio_boot_dma_cfg.s.en = 1;
+	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
+
+	/*
+	 * Don't stop the DMA if the device deasserts DMARQ. Many
+	 * compact flashes deassert DMARQ for a short time between
+	 * sectors. Instead of stopping and restarting the DMA, we'll
+	 * let the hardware do it. If the DMA is really stopped early
+	 * due to an error condition, a later timeout will force us to
+	 * stop.
+	 */
+	mio_boot_dma_cfg.s.clr = 0;
+
+	/* Size is specified in 16-bit words, in minus-one notation */
+	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;
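+	/* e.g. a 512 byte segment is programmed as 512 / 2 - 1 = 255 */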
+
+	/* We need to swap the high and low bytes of every 16 bits */
+	mio_boot_dma_cfg.s.swap8 = 1;
+
+	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
+
+	VPRINTK("%s %d bytes address=%p\n",
+		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
+		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);
+
+	cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
+}
+
+/**
+ *	octeon_cf_dma_finished	-	handle the end of a DMA transfer
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
+					struct ata_queued_cmd *qc)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct octeon_cf_port *cf_port = ap->private_data;
+	union cvmx_mio_boot_dma_cfgx dma_cfg;
+	union cvmx_mio_boot_dma_intx dma_int;
+	u8 status;
+
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+	if (ap->hsm_task_state != HSM_ST_LAST)
+		return 0;
+
+	dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
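+	/* The size field counts down and wraps to 0xfffff (all ones)
+	   once the programmed transfer has fully completed. */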
+	if (dma_cfg.s.size != 0xfffff) {
+		/* Error, the transfer was not complete.  */
+		qc->err_mask |= AC_ERR_HOST_BUS;
+		ap->hsm_task_state = HSM_ST_ERR;
+	}
+
+	/* Stop and clear the dma engine.  */
+	dma_cfg.u64 = 0;
+	dma_cfg.s.size = -1;
+	cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+	/* Disable the interrupt.  */
+	dma_int.u64 = 0;
+	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+	/* Clear the DMA complete status */
+	dma_int.s.done = 1;
+	cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+	status = ap->ops->sff_check_status(ap);
+
+	ata_sff_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
+		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);
+
+	return 1;
+}
+
+/*
+ * Check if any queued commands have more DMAs; if so, start the next
+ * transfer, else do end-of-transfer handling.
+ */
+static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct octeon_cf_port *cf_port;
+	int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	DPRINTK("ENTER\n");
+	for (i = 0; i < host->n_ports; i++) {
+		u8 status;
+		struct ata_port *ap;
+		struct ata_queued_cmd *qc;
+		union cvmx_mio_boot_dma_intx dma_int;
+		union cvmx_mio_boot_dma_cfgx dma_cfg;
+
+		ap = host->ports[i];
+		cf_port = ap->private_data;
+
+		dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
+		dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+		if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
+			continue;
+
+		if (dma_int.s.done && !dma_cfg.s.en) {
+			if (!sg_is_last(qc->cursg)) {
+				qc->cursg = sg_next(qc->cursg);
+				handled = 1;
+				octeon_cf_dma_start(qc);
+				continue;
+			} else {
+				cf_port->dma_finished = 1;
+			}
+		}
+		if (!cf_port->dma_finished)
+			continue;
+		status = ioread8(ap->ioaddr.altstatus_addr);
+		if (status & (ATA_BUSY | ATA_DRQ)) {
+			/*
+			 * We are busy, try to handle it later.  This
+			 * is the DMA finished interrupt, and it could
+			 * take a little while for the card to be
+			 * ready for more commands.
+			 */
+			/* Clear DMA irq. */
+			dma_int.u64 = 0;
+			dma_int.s.done = 1;
+			cvmx_write_csr(cf_port->dma_base + DMA_INT,
+				       dma_int.u64);
+			hrtimer_start_range_ns(&cf_port->delayed_finish,
+					       ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
+					       OCTEON_CF_BUSY_POLL_INTERVAL / 5,
+					       HRTIMER_MODE_REL);
+			handled = 1;
+		} else {
+			handled |= octeon_cf_dma_finished(ap, qc);
+		}
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+	DPRINTK("EXIT\n");
+	return IRQ_RETVAL(handled);
+}
+
+static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
+{
+	struct octeon_cf_port *cf_port = container_of(hrt,
+						      struct octeon_cf_port,
+						      delayed_finish);
+	struct ata_port *ap = cf_port->ap;
+	struct ata_host *host = ap->host;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	u8 status;
+	enum hrtimer_restart rv = HRTIMER_NORESTART;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/*
+	 * If the port is not waiting for completion, it must have
+	 * handled it previously.  The hsm_task_state is
+	 * protected by host->lock.
+	 */
+	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
+		goto out;
+
+	status = ioread8(ap->ioaddr.altstatus_addr);
+	if (status & (ATA_BUSY | ATA_DRQ)) {
+		/* Still busy, try again. */
+		hrtimer_forward_now(hrt,
+				    ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
+		rv = HRTIMER_RESTART;
+		goto out;
+	}
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+		octeon_cf_dma_finished(ap, qc);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+	return rv;
+}
+
+static void octeon_cf_dev_config(struct ata_device *dev)
+{
+	/*
+	 * The bootbus DMA can do at most 2^20 - 1 16-bit transfers per
+	 * request.  A 512-byte sector is 256 16-bit words, so throttling
+	 * max_sectors to 2^12 - 1 == 4095 (4095 * 256 = 1048320 words)
+	 * guarantees the limit can never be exceeded.
+	 */
+	dev->max_sectors = min(dev->max_sectors, 4095U);
+}
+
+/*
+ * We don't do ATAPI DMA so return 0.
+ */
+static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 0;
+}
+
+static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		octeon_cf_dma_setup(qc);	    /* set up dma */
+		octeon_cf_dma_start(qc);	    /* initiate dma */
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+
+	case ATAPI_PROT_DMA:
+		dev_err(ap->dev, "Error, ATAPI not supported\n");
+		BUG();
+
+	default:
+		return ata_sff_qc_issue(qc);
+	}
+
+	return 0;
+}
+
+static struct ata_port_operations octeon_cf_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.check_atapi_dma	= octeon_cf_check_atapi_dma,
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= octeon_cf_qc_issue,
+	.sff_dev_select		= octeon_cf_dev_select,
+	.sff_irq_on		= octeon_cf_ata_port_noaction,
+	.sff_irq_clear		= octeon_cf_ata_port_noaction,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= octeon_cf_set_piomode,
+	.set_dmamode		= octeon_cf_set_dmamode,
+	.dev_config		= octeon_cf_dev_config,
+};
+
+static int octeon_cf_probe(struct platform_device *pdev)
+{
+	struct resource *res_cs0, *res_cs1;
+
+	bool is_16bit;
+	const __be32 *cs_num;
+	struct property *reg_prop;
+	int n_addr, n_size, reg_len;
+	struct device_node *node;
+	void __iomem *cs0;
+	void __iomem *cs1 = NULL;
+	struct ata_host *host;
+	struct ata_port *ap;
+	int irq = 0;
+	irq_handler_t irq_handler = NULL;
+	void __iomem *base;
+	struct octeon_cf_port *cf_port;
+	int rv = -ENOMEM;
+	u32 bus_width;
+
+	node = pdev->dev.of_node;
+	if (node == NULL)
+		return -EINVAL;
+
+	cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
+	if (!cf_port)
+		return -ENOMEM;
+
+	cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
+
+	if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
+		is_16bit = (bus_width == 16);
+	else
+		is_16bit = false;
+
+	n_addr = of_n_addr_cells(node);
+	n_size = of_n_size_cells(node);
+
+	reg_prop = of_find_property(node, "reg", &reg_len);
+	if (!reg_prop || reg_len < sizeof(__be32))
+		return -EINVAL;
+
+	cs_num = reg_prop->value;
+	cf_port->cs0 = be32_to_cpup(cs_num);
+
+	if (cf_port->is_true_ide) {
+		struct device_node *dma_node;
+		dma_node = of_parse_phandle(node,
+					    "cavium,dma-engine-handle", 0);
+		if (dma_node) {
+			struct platform_device *dma_dev;
+			dma_dev = of_find_device_by_node(dma_node);
+			if (dma_dev) {
+				struct resource *res_dma;
+				int i;
+				res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+				if (!res_dma) {
+					of_node_put(dma_node);
+					return -EINVAL;
+				}
+				cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+									 resource_size(res_dma));
+				if (!cf_port->dma_base) {
+					of_node_put(dma_node);
+					return -EINVAL;
+				}
+
+				irq_handler = octeon_cf_interrupt;
+				i = platform_get_irq(dma_dev, 0);
+				if (i > 0)
+					irq = i;
+			}
+			of_node_put(dma_node);
+		}
+		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!res_cs1)
+			return -EINVAL;
+
+		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+					   resource_size(res_cs1));
+		if (!cs1)
+			return rv;
+
+		if (reg_len < (n_addr + n_size + 1) * sizeof(__be32))
+			return -EINVAL;
+
+		cs_num += n_addr + n_size;
+		cf_port->cs1 = be32_to_cpup(cs_num);
+	}
+
+	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_cs0)
+		return -EINVAL;
+
+	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+				   resource_size(res_cs0));
+	if (!cs0)
+		return rv;
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return rv;
+
+	ap = host->ports[0];
+	ap->private_data = cf_port;
+	pdev->dev.platform_data = cf_port;
+	cf_port->ap = ap;
+	ap->ops = &octeon_cf_ops;
+	ap->pio_mask = ATA_PIO6;
+	ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
+
+	if (!is_16bit) {
+		base = cs0 + 0x800;
+		ap->ioaddr.cmd_addr	= base;
+		ata_sff_std_ports(&ap->ioaddr);
+
+		ap->ioaddr.altstatus_addr = base + 0xe;
+		ap->ioaddr.ctl_addr	= base + 0xe;
+		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
+	} else if (cf_port->is_true_ide) {
+		base = cs0;
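+		/*
+		 * In True IDE mode the taskfile registers appear on the
+		 * odd byte addresses of the 16-bit bus, hence the
+		 * (reg << 1) + 1 arithmetic below.
+		 */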
+		ap->ioaddr.cmd_addr	= base + (ATA_REG_CMD << 1) + 1;
+		ap->ioaddr.data_addr	= base + (ATA_REG_DATA << 1);
+		ap->ioaddr.error_addr	= base + (ATA_REG_ERR << 1) + 1;
+		ap->ioaddr.feature_addr	= base + (ATA_REG_FEATURE << 1) + 1;
+		ap->ioaddr.nsect_addr	= base + (ATA_REG_NSECT << 1) + 1;
+		ap->ioaddr.lbal_addr	= base + (ATA_REG_LBAL << 1) + 1;
+		ap->ioaddr.lbam_addr	= base + (ATA_REG_LBAM << 1) + 1;
+		ap->ioaddr.lbah_addr	= base + (ATA_REG_LBAH << 1) + 1;
+		ap->ioaddr.device_addr	= base + (ATA_REG_DEVICE << 1) + 1;
+		ap->ioaddr.status_addr	= base + (ATA_REG_STATUS << 1) + 1;
+		ap->ioaddr.command_addr	= base + (ATA_REG_CMD << 1) + 1;
+		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
+		ap->ioaddr.ctl_addr	= cs1 + (6 << 1) + 1;
+		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
+
+		ap->mwdma_mask	= enable_dma ? ATA_MWDMA4 : 0;
+
+		/* True IDE mode needs a timer to poll for not-busy.  */
+		hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+	} else {
+		/* 16 bit but not True IDE */
+		base = cs0 + 0x800;
+		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
+		octeon_cf_ops.softreset		= octeon_cf_softreset16;
+		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
+		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
+		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
+		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;
+
+		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
+		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
+		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
+		ap->ioaddr.ctl_addr	= base + 0xe;
+		ap->ioaddr.altstatus_addr = base + 0xe;
+	}
+	cf_port->c0 = ap->ioaddr.ctl_addr;
+
+	rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rv)
+		return rv;
+
+	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
+
+	dev_info(&pdev->dev, "version " DRV_VERSION " %d bit%s.\n",
+		 is_16bit ? 16 : 8,
+		 cf_port->is_true_ide ? ", True IDE" : "");
+
+	return ata_host_activate(host, irq, irq_handler,
+				 IRQF_SHARED, &octeon_cf_sht);
+}
+
+static void octeon_cf_shutdown(struct device *dev)
+{
+	union cvmx_mio_boot_dma_cfgx dma_cfg;
+	union cvmx_mio_boot_dma_intx dma_int;
+
+	struct octeon_cf_port *cf_port = dev_get_platdata(dev);
+
+	if (cf_port->dma_base) {
+		/* Stop and clear the dma engine.  */
+		dma_cfg.u64 = 0;
+		dma_cfg.s.size = -1;
+		cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+		/* Disable the interrupt.  */
+		dma_int.u64 = 0;
+		cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+		/* Clear the DMA complete status */
+		dma_int.s.done = 1;
+		cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+		__raw_writeb(0, cf_port->c0);
+		udelay(20);
+		__raw_writeb(ATA_SRST, cf_port->c0);
+		udelay(20);
+		__raw_writeb(0, cf_port->c0);
+		mdelay(100);
+	}
+}
+
+static const struct of_device_id octeon_cf_match[] = {
+	{
+		.compatible = "cavium,ebt3000-compact-flash",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
+static struct platform_driver octeon_cf_driver = {
+	.probe		= octeon_cf_probe,
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = octeon_cf_match,
+		.shutdown = octeon_cf_shutdown
+	},
+};
+
+static int __init octeon_cf_init(void)
+{
+	return platform_driver_register(&octeon_cf_driver);
+}
+
+MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
+MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(octeon_cf_init);
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
new file mode 100644
index 0000000..01161c1
--- /dev/null
+++ b/drivers/ata/pata_of_platform.c
@@ -0,0 +1,89 @@
+/*
+ * OF-platform PATA driver
+ *
+ * Copyright (c) 2007  MontaVista Software, Inc.
+ *                     Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_of_platform"
+
+static struct scsi_host_template pata_platform_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static int pata_of_platform_probe(struct platform_device *ofdev)
+{
+	int ret;
+	struct device_node *dn = ofdev->dev.of_node;
+	struct resource io_res;
+	struct resource ctl_res;
+	struct resource *irq_res;
+	unsigned int reg_shift = 0;
+	int pio_mode = 0;
+	int pio_mask;
+
+	ret = of_address_to_resource(dn, 0, &io_res);
+	if (ret) {
+		dev_err(&ofdev->dev, "can't get IO address from "
+			"device tree\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(dn, 1, &ctl_res);
+	if (ret) {
+		dev_err(&ofdev->dev, "can't get CTL address from "
+			"device tree\n");
+		return -EINVAL;
+	}
+
+	irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+
+	of_property_read_u32(dn, "reg-shift", &reg_shift);
+
+	if (!of_property_read_u32(dn, "pio-mode", &pio_mode)) {
+		if (pio_mode > 6) {
+			dev_err(&ofdev->dev, "invalid pio-mode\n");
+			return -EINVAL;
+		}
+	} else {
+		dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n");
+	}
+
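+	/*
+	 * Build a mask of the requested mode and every mode below it,
+	 * e.g. pio-mode = 4 yields pio_mask = 0x10 | 0x0f = 0x1f (PIO0-4).
+	 */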
+	pio_mask = 1 << pio_mode;
+	pio_mask |= (1 << pio_mode) - 1;
+
+	return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
+				     reg_shift, pio_mask, &pata_platform_sht);
+}
+
+static const struct of_device_id pata_of_platform_match[] = {
+	{ .compatible = "ata-generic", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pata_of_platform_match);
+
+static struct platform_driver pata_of_platform_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = pata_of_platform_match,
+	},
+	.probe		= pata_of_platform_probe,
+	.remove		= ata_platform_remove_one,
+};
+
+module_platform_driver(pata_of_platform_driver);
+
+MODULE_DESCRIPTION("OF-platform PATA driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
new file mode 100644
index 0000000..b9bf78b
--- /dev/null
+++ b/drivers/ata/pata_oldpiix.c
@@ -0,0 +1,273 @@
+/*
+ *    pata_oldpiix.c - Intel early PIIX PATA controllers
+ *
+ *	(C) 2005 Red Hat
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    Early PIIX differs significantly from the later PIIX as it lacks
+ *    SITRE and the slave timing registers. This means that you have to
+ *    set timing per channel, or be clever. Libata tells us whenever it
+ *    does drive selection and we use this to reload the timings.
+ *
+ *    Because of these behaviour differences the early PIIX gets its
+ *    own driver module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_oldpiix"
+#define DRV_VERSION	"0.5.5"
+
+/**
+ *	oldpiix_pre_reset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits oldpiix_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+
+	if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	oldpiix_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void oldpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. Note that the early PIIX does not have the slave
+	 *	timing port at 0x44.
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio > 1)
+		control |= 1;	/* TIME */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE */
+
+	/* Intel specifies that the prefetch/posting is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE */
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	/*
+	 * Set PPE, IE and TIME as appropriate.
+	 * Clear the other drive's timing bits.
+	 */
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCE0;
+		idetm_data |= control;
+	} else {
+		idetm_data &= 0xCC0E;
+		idetm_data |= (control << 4);
+	}
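+	/*
+	 * ISP goes in bits 13:12 and RTC in bits 9:8 of IDETIM;
+	 * e.g. PIO4 ({2, 3} in the table above) programs 0x2300.
+	 */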
+	idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	pci_write_config_word(dev, idetm_port, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	oldpiix_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set MWDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void oldpiix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u8 idetm_port		= ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	/*
+	 * MWDMA is driven by the PIO timings. We must also enable
+	 * IORDY unconditionally along with TIME1. PPE has already
+	 * been set when the PIO timing was set.
+	 */
+
+	unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+	unsigned int control;
+	const unsigned int needed_pio[3] = {
+		XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+	};
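+	/* Standard pairing: MWDMA0 needs PIO0 timing, MWDMA1 PIO3, MWDMA2 PIO4 */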
+	int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	control = 3;	/* IORDY|TIME0 */
+	/* Intel specifies that the PPE functionality is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+
+	/* If the drive's MWDMA mode is faster than its best PIO mode then
+	   we must force PIO into PIO0 */
+
+	if (adev->pio_mode < needed_pio[mwdma])
+		/* Enable DMA timing only */
+		control |= 8;	/* PIO cycles in PIO0 */
+
+	/* Mask out the relevant control and timing bits we will load. Also
+	   clear the other drive TIME register as a precaution */
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCE0;
+		idetm_data |= control;
+	} else {
+		idetm_data &= 0xCC0E;
+		idetm_data |= (control << 4);
+	}
+	idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+	pci_write_config_word(dev, idetm_port, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	oldpiix_qc_issue	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	if (adev != ap->private_data) {
+		oldpiix_set_piomode(ap, adev);
+		if (ata_dma_enabled(adev))
+			oldpiix_set_dmamode(ap, adev);
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+static struct scsi_host_template oldpiix_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations oldpiix_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= oldpiix_qc_issue,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= oldpiix_set_piomode,
+	.set_dmamode		= oldpiix_set_dmamode,
+	.prereset		= oldpiix_pre_reset,
+};
+
+/**
+ *	oldpiix_init_one - Register PIIX ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in oldpiix_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int oldpiix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.port_ops	= &oldpiix_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
+}
+
+static const struct pci_device_id oldpiix_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, 0x1230), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver oldpiix_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= oldpiix_pci_tbl,
+	.probe			= oldpiix_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(oldpiix_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
new file mode 100644
index 0000000..3a944a0
--- /dev/null
+++ b/drivers/ata/pata_opti.c
@@ -0,0 +1,199 @@
+/*
+ * pata_opti.c	- Opti PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *
+ * Based on
+ *  linux/drivers/ide/pci/opti621.c		Version 0.7	Sept 10, 2002
+ *
+ *  Copyright (C) 1996-1998  Linus Torvalds & authors (see below)
+ *
+ * Authors:
+ * Jaromir Koutek <miri@punknet.cz>,
+ * Jan Harkes <jaharkes@cwi.nl>,
+ * Mark Lord <mlord@pobox.com>
+ * Some parts of code are from ali14xx.c and from rz1000.c.
+ *
+ * Also consulted the FreeBSD prototype driver by Kevin Day to try
+ * and resolve some confusions. Further documentation can be found in
+ * Ralf Brown's interrupt list
+ *
+ * If you have other variants of the Opti range (Viper/Vendetta) please
+ * try this driver with those PCI idents and report back. For the later
+ * chips see the pata_optidma driver.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_opti"
+#define DRV_VERSION "0.2.9"
+
+enum {
+	READ_REG	= 0,	/* index of Read cycle timing register */
+	WRITE_REG 	= 1,	/* index of Write cycle timing register */
+	CNTRL_REG 	= 3,	/* index of Control register */
+	STRAP_REG 	= 5,	/* index of Strap register */
+	MISC_REG 	= 6	/* index of Miscellaneous register */
+};
+
+/**
+ *	opti_pre_reset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits opti_enable_bits[] = {
+		{ 0x45, 1, 0x80, 0x00 },
+		{ 0x40, 1, 0x08, 0x00 }
+	};
+
+	if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	opti_write_reg		-	control register setup
+ *	@ap: ATA port
+ *	@value: value
+ *	@reg: control register number
+ *
+ *	The Opti uses magic 'trapdoor' register accesses to do configuration
+ *	rather than using PCI space as other controllers do. The double inw
+ *	on the error register activates configuration mode. We can then write
+ *	the control register
+ */
+
+static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* These 3 unlock the control register access */
+	ioread16(regio + 1);
+	ioread16(regio + 1);
+	iowrite8(3, regio + 2);
+
+	/* Do the I/O */
+	iowrite8(val, regio + reg);
+
+	/* Relock */
+	iowrite8(0x83, regio + 2);
+}
+
+/**
+ *	opti_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Timing numbers are taken from
+ *	the FreeBSD driver then pre computed to keep the code clean. There
+ *	are two tables depending on the hardware clock speed.
+ */
+
+static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+	u8 addr;
+
+	/* Address table precomputed with prefetch off and a DCLK of 2 */
+	static const u8 addr_timing[2][5] = {
+		{ 0x30, 0x20, 0x20, 0x10, 0x10 },
+		{ 0x20, 0x20, 0x10, 0x10, 0x10 }
+	};
+	static const u8 data_rec_timing[2][5] = {
+		{ 0x6B, 0x56, 0x42, 0x32, 0x31 },
+		{ 0x58, 0x44, 0x32, 0x22, 0x21 }
+	};
+
+	iowrite8(0xff, regio + 5);
+	clock = ioread16(regio + 5) & 1;
+
+	/*
+	 *	As with many controllers the address setup time is shared
+	 *	and must suit both devices if present.
+	 */
+
+	addr = addr_timing[clock][pio];
+	if (pair) {
+		/* Hardware constraint */
+		u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
+		if (pair_addr > addr)
+			addr = pair_addr;
+	}
+
+	/* Commence primary programming sequence */
+	opti_write_reg(ap, adev->devno, MISC_REG);
+	opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
+	opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
+	opti_write_reg(ap, addr, MISC_REG);
+
+	/* Programming sequence complete, override strapping */
+	opti_write_reg(ap, 0x85, CNTRL_REG);
+}
+
+static struct scsi_host_template opti_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations opti_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= opti_set_piomode,
+	.prereset	= opti_pre_reset,
+};
+
+static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &opti_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	ata_print_version_once(&dev->dev, DRV_VERSION);
+
+	return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0);
+}
+
+static const struct pci_device_id opti[] = {
+	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
+	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
+
+	{ },
+};
+
+static struct pci_driver opti_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= opti,
+	.probe 		= opti_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(opti_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, opti);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
new file mode 100644
index 0000000..bdec7ef
--- /dev/null
+++ b/drivers/ata/pata_optidma.c
@@ -0,0 +1,455 @@
+/*
+ * pata_optidma.c 	- Opti DMA PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *
+ *	The Opti DMA controllers are related to the older PIO PCI controllers
+ *	and indeed the VLB ones. The main differences are that the timing
+ *	numbers are now based off PCI clocks not VLB and differ, and that
+ *	MWDMA is supported.
+ *
+ *	This driver should support Viper-N+, FireStar, FireStar Plus.
+ *
+ *	These devices support virtual DMA for read (aka the CS5520). Later
+ *	chips support UDMA33, but only if the rest of the board logic does,
+ *	so you have to get this right. We don't support the virtual DMA
+ *	but we do handle UDMA.
+ *
+ *	Bits that are worth knowing
+ *		Most control registers are shadowed into I/O registers
+ *		0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25 MHz
+ *		Virtual DMA registers *move* between rev 0x02 and rev 0x10
+ *		UDMA requires a 66MHz FSB
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_optidma"
+#define DRV_VERSION "0.3.2"
+
+enum {
+	READ_REG	= 0,	/* index of Read cycle timing register */
+	WRITE_REG 	= 1,	/* index of Write cycle timing register */
+	CNTRL_REG 	= 3,	/* index of Control register */
+	STRAP_REG 	= 5,	/* index of Strap register */
+	MISC_REG 	= 6	/* index of Miscellaneous register */
+};
+
+static int pci_clock;	/* 0 = 33 MHz, 1 = 25 MHz */
+
+/**
+ *	optidma_pre_reset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int optidma_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits optidma_enable_bits = {
+		0x40, 1, 0x08, 0x00
+	};
+
+	if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	optidma_unlock		-	unlock control registers
+ *	@ap: ATA port
+ *
+ *	Unlock the control register block for this adapter. Registers must not
+ *	be unlocked in a situation where libata might look at them.
+ */
+
+static void optidma_unlock(struct ata_port *ap)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* These 3 unlock the control register access */
+	ioread16(regio + 1);
+	ioread16(regio + 1);
+	iowrite8(3, regio + 2);
+}
+
+/**
+ *	optidma_lock		-	issue temporary relock
+ *	@ap: ATA port
+ *
+ *	Re-lock the configuration register settings.
+ */
+
+static void optidma_lock(struct ata_port *ap)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* Relock */
+	iowrite8(0x83, regio + 2);
+}
+
+/**
+ *	optidma_mode_setup	-	set mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@mode: Mode to set
+ *
+ *	Called to do the DMA or PIO mode setup. Timing numbers are all
+ *	pre computed to keep the code clean. There are two tables depending
+ *	on the hardware clock speed.
+ *
+ *	WARNING: While we do this the IDE registers vanish. If we take an
+ *	IRQ here we depend on the host set locking to avoid catastrophe.
+ */
+
+static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+	int dma = adev->dma_mode - XFER_MW_DMA_0;
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+	u8 addr;
+
+	/* Address table precomputed with a DCLK of 2 */
+	static const u8 addr_timing[2][5] = {
+		{ 0x30, 0x20, 0x20, 0x10, 0x10 },
+		{ 0x20, 0x20, 0x10, 0x10, 0x10 }
+	};
+	static const u8 data_rec_timing[2][5] = {
+		{ 0x59, 0x46, 0x30, 0x20, 0x20 },
+		{ 0x46, 0x32, 0x20, 0x20, 0x10 }
+	};
+	static const u8 dma_data_rec_timing[2][3] = {
+		{ 0x76, 0x20, 0x20 },
+		{ 0x54, 0x20, 0x10 }
+	};
+
+	/* Switch from IDE to control mode */
+	optidma_unlock(ap);
+
+	/*
+	 *	As with many controllers the address setup time is shared
+	 *	and must suit both devices if present. FIXME: Check if we
+	 *	need to look at slowest of PIO/DMA mode of either device
+	 */
+
+	if (mode >= XFER_MW_DMA_0)
+		addr = 0;
+	else
+		addr = addr_timing[pci_clock][pio];
+
+	if (pair) {
+		u8 pair_addr;
+		/* Hardware constraint */
+		if (pair->dma_mode)
+			pair_addr = 0;
+		else
+			pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
+		if (pair_addr > addr)
+			addr = pair_addr;
+	}
+
+	/* Commence primary programming sequence */
+	/* First we load the device number into the timing select */
+	iowrite8(adev->devno, regio + MISC_REG);
+	/* Now we load the data timings into read data/write data */
+	if (mode < XFER_MW_DMA_0) {
+		iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
+		iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
+	} else if (mode < XFER_UDMA_0) {
+		iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
+		iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
+	}
+	/* Finally we load the address setup into the misc register */
+	iowrite8(addr | adev->devno, regio + MISC_REG);
+
+	/* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
+	iowrite8(0x85, regio + CNTRL_REG);
+
+	/* Switch back to IDE mode */
+	optidma_lock(ap);
+
+	/* Note: at this point our programming is incomplete. We are
+	   not supposed to program PCI 0x43 "things we hacked onto the chip"
+	   until we've done both sets of PIO/DMA timings */
+}
+
+/**
+ *	optiplus_mode_setup	-	DMA setup for Firestar Plus
+ *	@ap: ATA port
+ *	@adev: device
+ *	@mode: desired mode
+ *
+ *	The Firestar plus has additional UDMA functionality for UDMA0-2 and
+ *	requires we do some additional work. Because the base work we must do
+ *	is mostly shared we wrap the Firestar setup functionality in this
+ *	one function.
+ */
+
+static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 udcfg;
+	u8 udslave;
+	int dev2 = 2 * adev->devno;
+	int unit = 2 * ap->port_no + adev->devno;
+	int udma = mode - XFER_UDMA_0;
+
+	pci_read_config_byte(pdev, 0x44, &udcfg);
+	if (mode <= XFER_UDMA_0) {
+		udcfg &= ~(1 << unit);
+		optidma_mode_setup(ap, adev, adev->dma_mode);
+	} else {
+		udcfg |=  (1 << unit);
+		if (ap->port_no) {
+			pci_read_config_byte(pdev, 0x45, &udslave);
+			udslave &= ~(0x03 << dev2);
+			udslave |= (udma << dev2);
+			pci_write_config_byte(pdev, 0x45, udslave);
+		} else {
+			udcfg &= ~(0x30 << dev2);
+			udcfg |= (udma << dev2);
+		}
+	}
+	pci_write_config_byte(pdev, 0x44, udcfg);
+}
+
+/**
+ *	optidma_set_pio_mode	-	PIO setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optidma_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	optidma_set_dma_mode	-	DMA setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optidma_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	optiplus_set_pio_mode	-	PIO setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optiplus_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	optiplus_set_dma_mode	-	DMA setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optiplus_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	optidma_make_bits43	-	PCI setup helper
+ *	@adev: ATA device
+ *
+ *	Turn the ATA device setup into PCI configuration bits
+ *	for register 0x43 and return the two bits needed.
+ */
+
+static u8 optidma_make_bits43(struct ata_device *adev)
+{
+	static const u8 bits43[5] = {
+		0, 0, 0, 1, 2
+	};
+	if (!ata_dev_enabled(adev))
+		return 0;
+	if (adev->dma_mode)
+		return adev->dma_mode - XFER_MW_DMA_0;
+	return bits43[adev->pio_mode - XFER_PIO_0];
+}
+
+/**
+ *	optidma_set_mode	-	mode setup
+ *	@link: link to set up
+ *
+ *	Use the standard setup to tune the chipset and then finalise the
+ *	configuration by writing the nibble of extra bits of data into
+ *	the chip.
+ */
+
+static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+	struct ata_port *ap = link->ap;
+	u8 r;
+	int nybble = 4 * ap->port_no;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int rc  = ata_do_set_mode(link, r_failed);
+	if (rc == 0) {
+		pci_read_config_byte(pdev, 0x43, &r);
+
+		/* Clear this channel's nybble, keep the other channel's bits */
+		r &= ~(0x0F << nybble);
+		r |= (optidma_make_bits43(&link->device[0]) |
+		     (optidma_make_bits43(&link->device[1]) << 2)) << nybble;
+		pci_write_config_byte(pdev, 0x43, r);
+	}
+	return rc;
+}
+
+static struct scsi_host_template optidma_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations optidma_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= optidma_set_pio_mode,
+	.set_dmamode	= optidma_set_dma_mode,
+	.set_mode	= optidma_set_mode,
+	.prereset	= optidma_pre_reset,
+};
+
+static struct ata_port_operations optiplus_port_ops = {
+	.inherits	= &optidma_port_ops,
+	.set_piomode	= optiplus_set_pio_mode,
+	.set_dmamode	= optiplus_set_dma_mode,
+};
+
+/**
+ *	optiplus_with_udma	-	Look for UDMA capable setup
+ *	@pdev: ATA controller
+ */
+
+static int optiplus_with_udma(struct pci_dev *pdev)
+{
+	u8 r;
+	int ret = 0;
+	int ioport = 0x22;
+	struct pci_dev *dev1;
+
+	/* Find function 1 */
+	dev1 = pci_get_device(0x1045, 0xC701, NULL);
+	if (dev1 == NULL)
+		return 0;
+
+	/* Rev must be >= 0x10 */
+	pci_read_config_byte(dev1, 0x08, &r);
+	if (r < 0x10)
+		goto done_nomsg;
+	/* Read the chipset system configuration to check our mode */
+	pci_read_config_byte(dev1, 0x5F, &r);
+	ioport |= (r << 8);
+	outb(0x10, ioport);
+	/* Must be 66 MHz sync */
+	if ((inb(ioport + 2) & 1) == 0)
+		goto done;
+
+	/* Check the ATA arbitration/timing is suitable */
+	pci_read_config_byte(pdev, 0x42, &r);
+	if ((r & 0x36) != 0x36)
+		goto done;
+	pci_read_config_byte(dev1, 0x52, &r);
+	if (r & 0x80)	/* IDEDIR disabled */
+		ret = 1;
+done:
+	if (!ret)
+		printk(KERN_WARNING "UDMA not supported in this configuration.\n");
+done_nomsg:		/* Wrong chip revision */
+	pci_dev_put(dev1);
+	return ret;
+}
+
+static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_82c700 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &optidma_port_ops
+	};
+	static const struct ata_port_info info_82c700_udma = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &optiplus_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info_82c700, NULL };
+	int rc;
+
+	ata_print_version_once(&dev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
+	/* Fixed location chipset magic */
+	inw(0x1F1);
+	inw(0x1F1);
+	pci_clock = inb(0x1F5) & 1;		/* 0 = 33 MHz, 1 = 25 MHz */
+
+	if (optiplus_with_udma(dev))
+		ppi[0] = &info_82c700_udma;
+
+	return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
+}
+
+static const struct pci_device_id optidma[] = {
+	{ PCI_VDEVICE(OPTI, 0xD568), },		/* Opti 82C700 */
+
+	{ },
+};
+
+static struct pci_driver optidma_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= optidma,
+	.probe 		= optidma_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(optidma_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, optidma);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
new file mode 100644
index 0000000..d071ab6
--- /dev/null
+++ b/drivers/ata/pata_palmld.c
@@ -0,0 +1,137 @@
+/*
+ * drivers/ata/pata_palmld.c
+ *
+ * Driver for IDE channel in Palm LifeDrive
+ *
+ * Based on research of:
+ *		Alex Osborne <ato@meshy.org>
+ *
+ * Rewrite for mainline:
+ *		Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Rewritten version based on pata_ixp4xx_cf.c:
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <scsi/scsi_host.h>
+#include <mach/palmld.h>
+
+#define DRV_NAME "pata_palmld"
+
+static struct gpio palmld_hdd_gpios[] = {
+	{ GPIO_NR_PALMLD_IDE_PWEN,	GPIOF_INIT_HIGH,	"HDD Power" },
+	{ GPIO_NR_PALMLD_IDE_RESET,	GPIOF_INIT_LOW,		"HDD Reset" },
+};
+
+static struct scsi_host_template palmld_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations palmld_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.cable_detect		= ata_cable_40wire,
+};
+
+static int palmld_pata_probe(struct platform_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *mem;
+	int ret;
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	/* remap drive's physical memory address */
+	mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
+	if (!mem) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	/* request and activate power GPIO, IRQ GPIO */
+	ret = gpio_request_array(palmld_hdd_gpios,
+				ARRAY_SIZE(palmld_hdd_gpios));
+	if (ret)
+		goto err1;
+
+	/* reset the drive */
+	gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
+	msleep(30);
+	gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1);
+	msleep(30);
+
+	/* setup the ata port */
+	ap = host->ports[0];
+	ap->ops	= &palmld_port_ops;
+	ap->pio_mask = ATA_PIO4;
+	ap->flags |= ATA_FLAG_PIO_POLLING;
+
+	/* memory mapping voodoo */
+	ap->ioaddr.cmd_addr = mem + 0x10;
+	ap->ioaddr.altstatus_addr = mem + 0xe;
+	ap->ioaddr.ctl_addr = mem + 0xe;
+
+	/* start the port */
+	ata_sff_std_ports(&ap->ioaddr);
+
+	/* activate host */
+	ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
+					&palmld_sht);
+	if (ret)
+		goto err2;
+
+	return ret;
+
+err2:
+	gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
+err1:
+	return ret;
+}
+
+static int palmld_pata_remove(struct platform_device *dev)
+{
+	ata_platform_remove_one(dev);
+
+	/* power down the HDD */
+	gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
+
+	gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
+
+	return 0;
+}
+
+static struct platform_driver palmld_pata_platform_driver = {
+	.driver	 = {
+		.name   = DRV_NAME,
+	},
+	.probe		= palmld_pata_probe,
+	.remove		= palmld_pata_remove,
+};
+
+module_platform_driver(palmld_pata_platform_driver);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("PalmLD PATA driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
new file mode 100644
index 0000000..9b0e6c7
--- /dev/null
+++ b/drivers/ata/pata_pcmcia.c
@@ -0,0 +1,394 @@
+/*
+ *   pata_pcmcia.c - PCMCIA PATA controller driver.
+ *   Copyright 2005-2006 Red Hat Inc, all rights reserved.
+ *   PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
+ *						<openembedded@hrw.one.pl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *   Heavily based upon ide-cs.c
+ *   The initial developer of the original code is David A. Hinds
+ *   <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
+ *   are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+#define DRV_NAME "pata_pcmcia"
+#define DRV_VERSION "0.3.5"
+
+/**
+ *	pcmcia_set_mode	-	PCMCIA specific mode setup
+ *	@link: link
+ *	@r_failed_dev: Return pointer for failed device
+ *
+ *	Perform the tuning and setup of the devices and timings, which
+ *	for PCMCIA is the same as any other controller. We wrap it however
+ *	as we need to spot hardware with incorrect or missing master/slave
+ *	decode, which alas is embarrassingly common in the PC world
+ */
+
+static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+	struct ata_device *master = &link->device[0];
+	struct ata_device *slave = &link->device[1];
+
+	if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
+		return ata_do_set_mode(link, r_failed_dev);
+
+	if (memcmp(master->id + ATA_ID_FW_REV,  slave->id + ATA_ID_FW_REV,
+			   ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
+		/* Suspicious match, but could be two cards from
+		   the same vendor - check serial */
+		if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
+			   ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
+			ata_dev_warn(slave, "is a ghost device, ignoring\n");
+			ata_dev_disable(slave);
+		}
+	}
+	return ata_do_set_mode(link, r_failed_dev);
+}
+
+/**
+ *	pcmcia_set_mode_8bit	-	PCMCIA specific mode setup
+ *	@link: link
+ *	@r_failed_dev: Return pointer for failed device
+ *
+ *	For the simple emulated 8bit stuff the less we do the better.
+ */
+
+static int pcmcia_set_mode_8bit(struct ata_link *link,
+				struct ata_device **r_failed_dev)
+{
+	return 0;
+}
+
+/**
+ *	ata_data_xfer_8bit	 -	Transfer data by 8bit PIO
+ *	@qc: queued command
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by 8 bit PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static unsigned int ata_data_xfer_8bit(struct ata_queued_cmd *qc,
+				unsigned char *buf, unsigned int buflen, int rw)
+{
+	struct ata_port *ap = qc->dev->link->ap;
+
+	if (rw == READ)
+		ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
+	else
+		iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
+
+	return buflen;
+}
+
+/**
+ *	pcmcia_8bit_drain_fifo - FIFO drain logic for 8-bit PCMCIA controllers
+ *	@qc: command
+ *
+ *	Drain the FIFO and device of any stuck data following a command
+ *	failing to complete. In some cases this is necessary before a
+ *	reset will recover the device.
+ *
+ */
+
+static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
+{
+	int count;
+	struct ata_port *ap;
+
+	/* We only need to flush incoming data when a command was running */
+	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+		return;
+
+	ap = qc->ap;
+
+	/* Drain up to 64K of data before we give up this recovery method */
+	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
+							&& count++ < 65536;)
+		ioread8(ap->ioaddr.data_addr);
+
+	if (count)
+		ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static struct scsi_host_template pcmcia_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pcmcia_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer32,
+	.cable_detect	= ata_cable_40wire,
+	.set_mode	= pcmcia_set_mode,
+};
+
+static struct ata_port_operations pcmcia_8bit_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.sff_data_xfer	= ata_data_xfer_8bit,
+	.cable_detect	= ata_cable_40wire,
+	.set_mode	= pcmcia_set_mode_8bit,
+	.sff_drain_fifo	= pcmcia_8bit_drain_fifo,
+};
+
+static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
+{
+	int *is_kme = priv_data;
+
+	if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+	    != IO_DATA_PATH_WIDTH_8) {
+		pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+		pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+	}
+	pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+	pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+
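+	/*
+	 * If a second I/O window exists it carries the control register;
+	 * KME cards appear to need a two-byte window there.
+	 */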
+	if (pdev->resource[1]->end) {
+		pdev->resource[0]->end = 8;
+		pdev->resource[1]->end = (*is_kme) ? 2 : 1;
+	} else {
+		if (pdev->resource[0]->end < 16)
+			return -ENODEV;
+	}
+
+	return pcmcia_request_io(pdev);
+}
+
+/**
+ *	pcmcia_init_one		-	attach a PCMCIA interface
+ *	@pdev: pcmcia device
+ *
+ *	Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
+ *	shared IRQ.
+ */
+
+static int pcmcia_init_one(struct pcmcia_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	int is_kme = 0, ret = -ENOMEM, p;
+	unsigned long io_base, ctl_base;
+	void __iomem *io_addr, *ctl_addr;
+	int n_ports = 1;
+	struct ata_port_operations *ops = &pcmcia_port_ops;
+
+	/* Set up attributes in order to probe card and get resources */
+	pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
+		CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+
+	/* See if we have a manufacturer identifier. Use it to set is_kme for
+	   vendor quirks */
+	is_kme = ((pdev->manf_id == MANFID_KME) &&
+		  ((pdev->card_id == PRODID_KME_KXLC005_A) ||
+		   (pdev->card_id == PRODID_KME_KXLC005_B)));
+
+	if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) {
+		pdev->config_flags &= ~CONF_AUTO_CHECK_VCC;
+		if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme))
+			goto failed; /* No suitable config found */
+	}
+	io_base = pdev->resource[0]->start;
+	if (pdev->resource[1]->end)
+		ctl_base = pdev->resource[1]->start;
+	else
+		ctl_base = pdev->resource[0]->start + 0x0e;
+
+	if (!pdev->irq)
+		goto failed;
+
+	ret = pcmcia_enable_device(pdev);
+	if (ret)
+		goto failed;
+
+	/* iomap */
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
+	ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
+	if (!io_addr || !ctl_addr)
+		goto failed;
+
+	/* Success. Disable the IRQ nIEN line, do quirks */
+	iowrite8(0x02, ctl_addr);
+	if (is_kme)
+		iowrite8(0x81, ctl_addr + 0x01);
+
+	/* FIXME: Could be more ports at base + 0x10 but we only deal with
+	   one right now */
+	if (resource_size(pdev->resource[0]) >= 0x20)
+		n_ports = 2;
+
+	if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
+		ops = &pcmcia_8bit_port_ops;
+	/*
+	 *	Having done the PCMCIA plumbing the ATA side is relatively
+	 *	sane.
+	 */
+	ret = -ENOMEM;
+	host = ata_host_alloc(&pdev->dev, n_ports);
+	if (!host)
+		goto failed;
+
+	for (p = 0; p < n_ports; p++) {
+		ap = host->ports[p];
+
+		ap->ops = ops;
+		ap->pio_mask = ATA_PIO0;	/* ISA so PIO 0 cycles */
+		ap->flags |= ATA_FLAG_SLAVE_POSS;
+		ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
+		ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
+		ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
+		ata_sff_std_ports(&ap->ioaddr);
+
+		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
+	}
+
+	/* activate */
+	ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+				IRQF_SHARED, &pcmcia_sht);
+	if (ret)
+		goto failed;
+
+	pdev->priv = host;
+	return 0;
+
+failed:
+	pcmcia_disable_device(pdev);
+	return ret;
+}
+
+/**
+ *	pcmcia_remove_one	-	unplug a PCMCIA interface
+ *	@pdev: pcmcia device
+ *
+ *	A PCMCIA ATA device has been unplugged. Perform the needed
+ *	cleanup. Also called on module unload for any active devices.
+ */
+
+static void pcmcia_remove_one(struct pcmcia_device *pdev)
+{
+	struct ata_host *host = pdev->priv;
+
+	if (host)
+		ata_host_detach(host);
+
+	pcmcia_disable_device(pdev);
+}
+
+static const struct pcmcia_device_id pcmcia_devices[] = {
+	PCMCIA_DEVICE_FUNC_ID(4),
+	PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000),	/* Corsair */
+	PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000),	/* Hitachi */
+	PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000),	/* I-O Data CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001),	/* Mitsubishi CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
+	PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),	/* SanDisk CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000),	/* Kingston */
+	PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), 	/* TI emulated */
+	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
+	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
+	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
+	PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),	/* Hitachi */
+	PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100),	/* Viking CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200),	/* Lexar, Viking CFA */
+	PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
+	PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
+	PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
+	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("CNF   ", "CD-ROM", 0x46d7db81, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
+	PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
+	PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
+	PCMCIA_DEVICE_PROD_ID12("EXP   ", "CD-ROM", 0x0a5c52fd, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("EXP   ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
+	PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
+	PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
+	PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
+	PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
+	PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
+	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
+	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
+	PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2      ", 0xe37be2b5, 0x8671043b),
+	PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
+	PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
+	PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
+	PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
+	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
+	PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
+	PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
+	PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
+	PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+	PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
+	PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
+	PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
+	PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
+	PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
+
+static struct pcmcia_driver pcmcia_driver = {
+	.owner		= THIS_MODULE,
+	.name		= DRV_NAME,
+	.id_table	= pcmcia_devices,
+	.probe		= pcmcia_init_one,
+	.remove		= pcmcia_remove_one,
+};
+module_pcmcia_driver(pcmcia_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
new file mode 100644
index 0000000..1a18e67
--- /dev/null
+++ b/drivers/ata/pata_pdc2027x.c
@@ -0,0 +1,785 @@
+/*
+ *  Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Ported to libata by:
+ *  Albert Lee <albertcc@tw.ibm.com> IBM Corporation
+ *
+ *  Copyright (C) 1998-2002		Andre Hedrick <andre@linux-ide.org>
+ *  Portions Copyright (C) 1999 Promise Technology, Inc.
+ *
+ *  Author: Frank Tiernan (frankt@promise.com)
+ *  Released under terms of General Public License
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware information only available under NDA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_pdc2027x"
+#define DRV_VERSION	"1.0"
+#undef PDC_DEBUG
+
+#ifdef PDC_DEBUG
+#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+#else
+#define PDPRINTK(fmt, args...)
+#endif
+
+enum {
+	PDC_MMIO_BAR		= 5,
+
+	PDC_UDMA_100		= 0,
+	PDC_UDMA_133		= 1,
+
+	PDC_100_MHZ		= 100000000,
+	PDC_133_MHZ		= 133333333,
+
+	PDC_SYS_CTL		= 0x1100,
+	PDC_ATA_CTL		= 0x1104,
+	PDC_GLOBAL_CTL		= 0x1108,
+	PDC_CTCR0		= 0x110C,
+	PDC_CTCR1		= 0x1110,
+	PDC_BYTE_COUNT		= 0x1120,
+	PDC_PLL_CTL		= 0x1202,
+};
+
+static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int pdc2027x_reinit_one(struct pci_dev *pdev);
+#endif
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
+static int pdc2027x_cable_detect(struct ata_port *ap);
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
+
+/*
+ * ATA timing tables based on a 133 MHz controller clock.
+ * These tables are used only when the controller runs at 133 MHz.
+ * With a 100 MHz controller clock, the ASIC hardware sets the timing
+ * registers automatically when a "set features" command is issued to
+ * the device; at 133 MHz, however, the tables below must be used.
+ */
+static const struct pdc2027x_pio_timing {
+	u8 value0, value1, value2;
+} pdc2027x_pio_timing_tbl[] = {
+	{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
+	{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
+	{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
+	{ 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
+	{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
+};
+
+static const struct pdc2027x_mdma_timing {
+	u8 value0, value1;
+} pdc2027x_mdma_timing_tbl[] = {
+	{ 0xdf, 0x5f }, /* MDMA mode 0 */
+	{ 0x6b, 0x27 }, /* MDMA mode 1 */
+	{ 0x69, 0x25 }, /* MDMA mode 2 */
+};
+
+static const struct pdc2027x_udma_timing {
+	u8 value0, value1, value2;
+} pdc2027x_udma_timing_tbl[] = {
+	{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
+	{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
+	{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
+	{ 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
+	{ 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
+	{ 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
+	{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
+};
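+
+/*
+ * How the table values map onto the timing registers (as programmed by
+ * pdc2027x_set_piomode()/pdc2027x_set_dmamode() below):
+ *   PIO:  value0 -> CTCR0[7:0],   value1 -> CTCR0[15:8], value2 -> CTCR1[31:24]
+ *   MDMA: value0 -> CTCR0[23:16], value1 -> CTCR0[31:24]
+ *   UDMA: value0 -> CTCR1[7:0],   value1 -> CTCR1[15:8], value2 -> CTCR1[23:16]
+ */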
+
+static const struct pci_device_id pdc2027x_pci_tbl[] = {
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pdc2027x_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pdc2027x_pci_tbl,
+	.probe			= pdc2027x_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= pdc2027x_reinit_one,
+#endif
+};
+
+static struct scsi_host_template pdc2027x_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pdc2027x_pata100_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.check_atapi_dma	= pdc2027x_check_atapi_dma,
+	.cable_detect		= pdc2027x_cable_detect,
+	.prereset		= pdc2027x_prereset,
+};
+
+static struct ata_port_operations pdc2027x_pata133_ops = {
+	.inherits		= &pdc2027x_pata100_ops,
+	.mode_filter		= pdc2027x_mode_filter,
+	.set_piomode		= pdc2027x_set_piomode,
+	.set_dmamode		= pdc2027x_set_dmamode,
+	.set_mode		= pdc2027x_set_mode,
+};
+
+static struct ata_port_info pdc2027x_port_info[] = {
+	/* PDC_UDMA_100 */
+	{
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &pdc2027x_pata100_ops,
+	},
+	/* PDC_UDMA_133 */
+	{
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc2027x_pata133_ops,
+	},
+};
+
+MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
+MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
+
+/**
+ *	port_mmio - Get the MMIO address of PDC2027x extended registers
+ *	@ap: Port
+ *	@offset: offset from mmio base
+ */
+static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
+{
+	return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
+}
+
+/**
+ *	dev_mmio - Get the MMIO address of PDC2027x extended registers for a device
+ *	@ap: Port
+ *	@adev: device
+ *	@offset: offset from mmio base
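+ *
+ *	For example, PDC_CTCR0 (0x110C) for device 1 on port 1 resolves to
+ *	BAR5 + 0x100 + 0x110C + 0x08 = BAR5 + 0x1214.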
+ */
+static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
+{
+	u8 adj = (adev->devno) ? 0x08 : 0x00;
+	return port_mmio(ap, offset) + adj;
+}
+
+/**
+ *	pdc2027x_pata_cable_detect - Probe host controller cable detect info
+ *	@ap: Port for which cable detect info is desired
+ *
+ *	Read 80c cable indicator from Promise extended register.
+ *      This register is latched when the system is reset.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int pdc2027x_cable_detect(struct ata_port *ap)
+{
+	u32 cgcr;
+
+	/* check cable detect results */
+	cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL));
+	if (cgcr & (1 << 26))
+		goto cbl40;
+
+	PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
+
+	return ATA_CBL_PATA80;
+cbl40:
+	printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
+	return ATA_CBL_PATA40;
+}
+
+/**
+ * pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
+ * @ap: Port to check
+ */
+static inline int pdc2027x_port_enabled(struct ata_port *ap)
+{
+	return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
+}
+
+/**
+ *	pdc2027x_prereset - prereset for PATA host controller
+ *	@link: Target link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Probe init, including cable detection.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
+{
+	/* Check whether port enabled */
+	if (!pdc2027x_port_enabled(link->ap))
+		return -ENOENT;
+	return ata_sff_prereset(link, deadline);
+}
+
+/**
+ *	pdc2027x_mode_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: list of modes proposed
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
+		return mask;
+
+	/* Check for slave of a Maxtor at UDMA6 */
+	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
+			  ATA_ID_PROD_LEN + 1);
+	/* If the master is a Maxtor in UDMA6 then the slave should not use UDMA6 */
+	if (strstr(model_num, "Maxtor") != NULL && pair->dma_mode == XFER_UDMA_6)
+		mask &= ~(1 << (6 + ATA_SHIFT_UDMA));
+
+	return mask;
+}
+
+/**
+ *	pdc2027x_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port to configure
+ *	@adev: device to configure
+ *
+ *	Set PIO mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
+	u32 ctcr0, ctcr1;
+
+	PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
+
+	/* Sanity check */
+	if (pio > 4) {
+		printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
+		return;
+	}
+
+	/* Set the PIO timing registers using value table for 133MHz */
+	PDPRINTK("Set pio regs... \n");
+
+	ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
+	ctcr0 &= 0xffff0000;
+	ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
+		(pdc2027x_pio_timing_tbl[pio].value1 << 8);
+	iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+
+	ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+	ctcr1 &= 0x00ffffff;
+	ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
+	iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+	PDPRINTK("Set pio regs done\n");
+
+	PDPRINTK("Set to pio mode[%u] \n", pio);
+}
+
+/**
+ *	pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
+ *	@ap: Port to configure
+ *	@adev: device to configure
+ *
+ *	Set UDMA mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode = adev->dma_mode;
+	u32 ctcr0, ctcr1;
+
+	if ((dma_mode >= XFER_UDMA_0) &&
+	   (dma_mode <= XFER_UDMA_6)) {
+		/* Set the UDMA timing registers with value table for 133MHz */
+		unsigned int udma_mode = dma_mode & 0x07;
+
+		if (dma_mode == XFER_UDMA_2) {
+			/*
+			 * Turn off tHOLD.
+			 * If tHOLD is '1', the hardware adds half a clock of
+			 * data hold time. This segment seems to have no
+			 * effect, though: tHOLD is overwritten below.
+			 */
+			ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+			iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
+		}
+
+		PDPRINTK("Set udma regs... \n");
+
+		ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+		ctcr1 &= 0xff000000;
+		ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
+			(pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
+			(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
+		iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+		PDPRINTK("Set udma regs done\n");
+
+		PDPRINTK("Set to udma mode[%u] \n", udma_mode);
+
+	} else  if ((dma_mode >= XFER_MW_DMA_0) &&
+		   (dma_mode <= XFER_MW_DMA_2)) {
+		/* Set the MDMA timing registers with value table for 133MHz */
+		unsigned int mdma_mode = dma_mode & 0x07;
+
+		PDPRINTK("Set mdma regs... \n");
+		ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
+
+		ctcr0 &= 0x0000ffff;
+		ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
+			(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
+
+		iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+		PDPRINTK("Set mdma regs done\n");
+
+		PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
+	} else {
+		printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
+	}
+}
+
+/**
+ *	pdc2027x_set_mode - Set the timing registers back to correct values.
+ *	@link: link to configure
+ *	@r_failed: Returned device for failure
+ *
+ *	The pdc2027x hardware will look at "SET FEATURES" and change the
+ *	timing registers automatically. The values set by the hardware might
+ *	be incorrect when the PLL runs at 133 MHz, so this function overwrites
+ *	any possibly incorrect values with the correct ones.
+ */
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+	int rc;
+
+	rc = ata_do_set_mode(link, r_failed);
+	if (rc < 0)
+		return rc;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		pdc2027x_set_piomode(ap, dev);
+
+		/*
+		 * Enable prefetch if the device supports PIO only.
+		 */
+		if (dev->xfer_shift == ATA_SHIFT_PIO) {
+			u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
+			ctcr1 |= (1 << 25);
+			iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
+
+			PDPRINTK("Turn on prefetch\n");
+		} else {
+			pdc2027x_set_dmamode(ap, dev);
+		}
+	}
+	return 0;
+}
+
+/**
+ *	pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *		 1 otherwise
+ */
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	u8 *scsicmd = cmd->cmnd;
+	int rc = 1; /* atapi dma off by default */
+
+	/*
+	 * This workaround is from Promise's GPL driver.
+	 * If ATAPI DMA is used for commands not in the
+	 * following white list, say MODE_SENSE and REQUEST_SENSE,
+	 * pdc2027x might hit the irq lost problem.
+	 */
+	switch (scsicmd[0]) {
+	case READ_10:
+	case WRITE_10:
+	case READ_12:
+	case WRITE_12:
+	case READ_6:
+	case WRITE_6:
+	case 0xad: /* READ_DVD_STRUCTURE */
+	case 0xbe: /* READ_CD */
+		/* ATAPI DMA is ok */
+		rc = 0;
+		break;
+	default:
+		;
+	}
+
+	return rc;
+}
+
+/**
+ * pdc_read_counter - Read the 30-bit test counter
+ * @host: target ATA host
+ */
+
+static long pdc_read_counter(struct ata_host *host)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	long counter;
+	int retry = 1;
+	u32 bccrl, bccrh, bccrlv, bccrhv;
+
+retry:
+	bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+	bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
+
+	/* Read the counter values again for verification */
+	bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+	bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
+
+	counter = (bccrh << 15) | bccrl;
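+	/* e.g. bccrh = 0x0001, bccrl = 0x2345 -> counter = 0xA345 */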
+
+	PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh,  bccrl);
+	PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
+
+	/*
+	 * The 30-bit decreasing counter is read in two pieces, so an
+	 * incorrect value may be seen when bccrh and bccrl are both
+	 * changing, e.g. when the counter decreases from 0x7900 to 0x78FF
+	 * the wrong value 0x7800 might be read.
+	 */
+	if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
+		retry--;
+		PDPRINTK("rereading counter\n");
+		goto retry;
+	}
+
+	return counter;
+}
+
+/**
+ * pdc_adjust_pll - Adjust the PLL input clock in Hz.
+ * @host: target ATA host
+ * @pll_clock: the input of the PLL in Hz
+ * @board_idx: board identifier
+ */
+static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int board_idx)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	u16 pll_ctl;
+	long pll_clock_khz = pll_clock / 1000;
+	long pout_required = board_idx ? PDC_133_MHZ : PDC_100_MHZ;
+	long ratio = pout_required / pll_clock_khz;
+	int F, R;
+
+	/* Sanity check */
+	if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
+		printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
+		return;
+	}
+
+#ifdef PDC_DEBUG
+	PDPRINTK("pout_required is %ld\n", pout_required);
+
+	/* Show the current clock value of PLL control register
+	 * (maybe already configured by the firmware)
+	 */
+	pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
+
+	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+
+	/*
+	 * Calculate F, R and NO (the output divider, also written OD):
+	 * POUT = (F + 2) / ((R + 2) * NO)
+	 */
+	if (ratio < 8600L) { /* 8.6x */
+		/* Using NO = 0x01, R = 0x0D */
+		R = 0x0d;
+	} else if (ratio < 12900L) { /* 12.9x */
+		/* Using NO = 0x01, R = 0x08 */
+		R = 0x08;
+	} else if (ratio < 16100L) { /* 16.1x */
+		/* Using NO = 0x01, R = 0x06 */
+		R = 0x06;
+	} else if (ratio < 64000L) { /* 64x */
+		R = 0x00;
+	} else {
+		/* Invalid ratio */
+		printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
+		return;
+	}
+
+	F = (ratio * (R+2)) / 1000 - 2;
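+	/*
+	 * Worked example from the code above: with the ~16.949 MHz PLL
+	 * input mentioned below and a 133 MHz target, ratio ~= 7866, so
+	 * R = 0x0d and F = (7866 * 15) / 1000 - 2 = 115, giving
+	 * pll_ctl = 0x0d73.
+	 */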
+
+	if (unlikely(F < 0 || F > 127)) {
+		/* Invalid F */
+		printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
+		return;
+	}
+
+	PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
+
+	pll_ctl = (R << 8) | F;
+
+	PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
+
+	iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
+	ioread16(mmio_base + PDC_PLL_CTL); /* flush */
+
+	/* Wait for the PLL circuit to stabilize */
+	msleep(30);
+
+#ifdef PDC_DEBUG
+	/*
+	 *  Show the current clock value of PLL control register
+	 * (maybe configured by the firmware)
+	 */
+	pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
+
+	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+}
+
+/**
+ * pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
+ * @host: target ATA host
+ *
+ * E.g. 16949000 Hz (half the PCI clock) on a 33 MHz PCI bus for the
+ * pdc20275.
+ */
+static long pdc_detect_pll_input_clock(struct ata_host *host)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	u32 scr;
+	long start_count, end_count;
+	ktime_t start_time, end_time;
+	long pll_clock, usec_elapsed;
+
+	/* Start the test mode */
+	scr = ioread32(mmio_base + PDC_SYS_CTL);
+	PDPRINTK("scr[%X]\n", scr);
+	iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
+	ioread32(mmio_base + PDC_SYS_CTL); /* flush */
+
+	/* Read current counter value */
+	start_count = pdc_read_counter(host);
+	start_time = ktime_get();
+
+	/* Let the counter run for 100 ms. */
+	msleep(100);
+
+	/* Read the counter values again */
+	end_count = pdc_read_counter(host);
+	end_time = ktime_get();
+
+	/* Stop the test mode */
+	scr = ioread32(mmio_base + PDC_SYS_CTL);
+	PDPRINTK("scr[%X]\n", scr);
+	iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
+	ioread32(mmio_base + PDC_SYS_CTL); /* flush */
+
+	/* calculate the input clock in Hz */
+	usec_elapsed = (long) ktime_us_delta(end_time, start_time);
+
+	pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
+		(100000000 / usec_elapsed);
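+
+	/*
+	 * I.e. counts consumed per second: e.g. 1,694,900 counts over
+	 * ~100,000 us gives 16949 * 1000 = 16,949,000 Hz.
+	 */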
+
+	PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
+	PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
+
+	return pll_clock;
+}
+
+/**
+ * pdc_hardware_init - Initialize the hardware.
+ * @host: target ATA host
+ * @board_idx: board identifier
+ */
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+{
+	long pll_clock;
+
+	/*
+	 * Detect the PLL input clock rate.
+	 * On some systems the PCI bus runs at a non-standard clock rate,
+	 * e.g. 25 MHz or 40 MHz, so the cycle time must be adjusted.
+	 * The pdc20275 controller employs a PLL circuit to help derive
+	 * correct timing register settings.
+	 */
+	pll_clock = pdc_detect_pll_input_clock(host);
+
+	dev_info(host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
+
+	/* Adjust PLL control register */
+	pdc_adjust_pll(host, pll_clock, board_idx);
+}
+
+/**
+ * pdc_ata_setup_port - setup the mmio address
+ * @port: ata ioports to setup
+ * @base: base address
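+ *
+ * Note that the PDC2027x scatters the shadow taskfile registers through
+ * its MMIO window (data at +0x00, error/feature at +0x05, ..., status
+ * at +0x1f), with the control/altstatus register much higher at +0x81a,
+ * rather than using the classic contiguous layout.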
+ */
+static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		=
+	port->data_addr		= base;
+	port->feature_addr	=
+	port->error_addr	= base + 0x05;
+	port->nsect_addr	= base + 0x0a;
+	port->lbal_addr		= base + 0x0f;
+	port->lbam_addr		= base + 0x10;
+	port->lbah_addr		= base + 0x15;
+	port->device_addr	= base + 0x1a;
+	port->command_addr	=
+	port->status_addr	= base + 0x1f;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + 0x81a;
+}
+
+/**
+ * pdc2027x_init_one - PCI probe function
+ * @pdev: instance of pci_dev found
+ * @ent:  matching entry in the id_tbl[]
+ *
+ * Called when an instance of the PCI adapter is inserted.
+ * This function checks whether the hardware is supported, initializes
+ * the hardware and registers an instance of ata_host with libata.
+ * (Implements struct pci_driver.probe().)
+ */
+static int pdc2027x_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
+	static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] =
+		{ &pdc2027x_port_info[board_idx], NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int i, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	mmio_base = host->iomap[PDC_MMIO_BAR];
+
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]);
+		ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i];
+
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd");
+	}
+
+	/* initialize adapter */
+	pdc_hardware_init(host, board_idx);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &pdc2027x_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pdc2027x_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	unsigned int board_idx;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->device == PCI_DEVICE_ID_PROMISE_20268 ||
+	    pdev->device == PCI_DEVICE_ID_PROMISE_20270)
+		board_idx = PDC_UDMA_100;
+	else
+		board_idx = PDC_UDMA_133;
+
+	pdc_hardware_init(host, board_idx);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+module_pci_driver(pdc2027x_pci_driver);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
new file mode 100644
index 0000000..9001991
--- /dev/null
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -0,0 +1,392 @@
+/*
+ * pata_pdc202xx_old.c 	- Promise PDC202xx PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *			  (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
+ *
+ * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
+ *
+ * First cut with LBA48/ATAPI
+ *
+ * TODO:
+ *	Is channel interlock/reset required on both channels?
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_pdc202xx_old"
+#define DRV_VERSION "0.4.3"
+
+static int pdc2026x_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 cis;
+
+	pci_read_config_word(pdev, 0x50, &cis);
+	if (cis & (1 << (10 + ap->port_no)))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static void pdc202xx_exec_command(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	ndelay(400);
+}
+
+static bool pdc202xx_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	unsigned long master	= pci_resource_start(pdev, 4);
+	u8 sc1d			= inb(master + 0x1d);
+
+	if (ap->port_no) {
+		/*
+		 * bit 7: error, bit 6: interrupting,
+		 * bit 5: FIFO full, bit 4: FIFO empty
+		 */
+		return sc1d & 0x40;
+	} else	{
+		/*
+		 * bit 3: error, bit 2: interrupting,
+		 * bit 1: FIFO full, bit 0: FIFO empty
+		 */
+		return sc1d & 0x04;
+	}
+}
+
+/**
+ *	pdc202xx_configure_piomode	-	set chip PIO timing
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@pio: PIO mode
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared,
+ *	so a configure_dmamode call will undo any work we do here, and
+ *	vice versa.
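+ *
+ *	For example, PIO mode 4 loads 0x0104: 0x01 goes into the low bits
+ *	of register A and 0x04 into register B (a reading of the packing
+ *	in the code below, not documented behaviour).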
+ */
+
+static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+	static u16 pio_timing[5] = {
+		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
+	};
+	u8 r_ap, r_bp;
+
+	pci_read_config_byte(pdev, port, &r_ap);
+	pci_read_config_byte(pdev, port + 1, &r_bp);
+	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
+	r_bp &= ~0x1F;
+	r_ap |= (pio_timing[pio] >> 8);
+	r_bp |= (pio_timing[pio] & 0xFF);
+
+	if (ata_pio_need_iordy(adev))
+		r_ap |= 0x20;	/* IORDY enable */
+	if (adev->class == ATA_DEV_ATA)
+		r_ap |= 0x10;	/* FIFO enable */
+	pci_write_config_byte(pdev, port, r_ap);
+	pci_write_config_byte(pdev, port + 1, r_bp);
+}
+
+/**
+ *	pdc202xx_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	but we want to set the PIO timing by default.
+ */
+
+static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	pdc202xx_set_dmamode	-	set DMA mode in chip
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Load DMA cycle times into the chip ready for a DMA transfer
+ *	to occur.
+ */
+
+static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+	static u8 udma_timing[6][2] = {
+		{ 0x60, 0x03 },	/* 33 MHz clock */
+		{ 0x40, 0x02 },
+		{ 0x20, 0x01 },
+		{ 0x40, 0x02 },	/* 66 MHz clock */
+		{ 0x20, 0x01 },
+		{ 0x20, 0x01 }
+	};
+	static u8 mdma_timing[3][2] = {
+		{ 0xe0, 0x0f },
+		{ 0x60, 0x04 },
+		{ 0x60, 0x03 },
+	};
+	u8 r_bp, r_cp;
+
+	pci_read_config_byte(pdev, port + 1, &r_bp);
+	pci_read_config_byte(pdev, port + 2, &r_cp);
+
+	r_bp &= ~0xE0;
+	r_cp &= ~0x0F;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int speed = adev->dma_mode - XFER_UDMA_0;
+		r_bp |= udma_timing[speed][0];
+		r_cp |= udma_timing[speed][1];
+
+	} else {
+		int speed = adev->dma_mode - XFER_MW_DMA_0;
+		r_bp |= mdma_timing[speed][0];
+		r_cp |= mdma_timing[speed][1];
+	}
+	pci_write_config_byte(pdev, port + 1, r_bp);
+	pci_write_config_byte(pdev, port + 2, r_cp);
+}
+
+/**
+ *	pdc2026x_bmdma_start		-	DMA engine begin
+ *	@qc: ATA command
+ *
+ *	In UDMA3 or higher we have to switch clocks for the duration of the
+ *	DMA transfer sequence.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
+ */
+
+static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_taskfile *tf = &qc->tf;
+	int sel66 = ap->port_no ? 0x08 : 0x02;
+
+	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+	void __iomem *clock = master + 0x11;
+	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+	u32 len;
+
+	/* Check we keep host level locking here */
+	if (adev->dma_mode > XFER_UDMA_2)
+		iowrite8(ioread8(clock) | sel66, clock);
+	else
+		iowrite8(ioread8(clock) & ~sel66, clock);
+
+	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
+	   and move to qc_issue ? */
+	pdc202xx_set_dmamode(ap, qc->dev);
+
+	/* Cases the state machine will not complete correctly without help */
+	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATAPI_PROT_DMA) {
+		len = qc->nbytes / 2;
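+		/*
+		 * Length is in 16-bit words; the top byte of the ATAPI
+		 * register apparently selects the direction (0x06 write,
+		 * 0x05 read).
+		 */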
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			len |= 0x06000000;
+		else
+			len |= 0x05000000;
+
+		iowrite32(len, atapi_reg);
+	}
+
+	/* Activate DMA */
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	pdc2026x_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	After a DMA completes we need to put the clock back to 33MHz for
+ *	PIO timings.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
+ */
+
+static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_taskfile *tf = &qc->tf;
+
+	int sel66 = ap->port_no ? 0x08 : 0x02;
+	/* The clock bits are in the same register for both channels */
+	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+	void __iomem *clock = master + 0x11;
+	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+	/* Cases the state machine will not complete correctly */
+	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
+		iowrite32(0, atapi_reg);
+		iowrite8(ioread8(clock) & ~sel66, clock);
+	}
+	/* Flip back to 33 MHz for PIO */
+	if (adev->dma_mode > XFER_UDMA_2)
+		iowrite8(ioread8(clock) & ~sel66, clock);
+	ata_bmdma_stop(qc);
+	pdc202xx_set_piomode(ap, adev);
+}
+
+/**
+ *	pdc2026x_dev_config	-	device setup hook
+ *	@adev: newly found device
+ *
+ *	Perform chip specific early setup. We need to lock the transfer
+ *	sizes to 8bit to avoid making the state engine on the 2026x cards
+ *	barf.
+ */
+
+static void pdc2026x_dev_config(struct ata_device *adev)
+{
+	adev->max_sectors = 256;
+}
+
+static int pdc2026x_port_start(struct ata_port *ap)
+{
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+	if (bmdma) {
+		/* Enable burst mode */
+		u8 burst = ioread8(bmdma + 0x1f);
+		iowrite8(burst | 0x01, bmdma + 0x1f);
+	}
+	return ata_bmdma_port_start(ap);
+}
+
+/**
+ *	pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	Just say no - not supported on older Promise.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *		 1 otherwise
+ */
+
+static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;
+}
+
+static struct scsi_host_template pdc202xx_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pdc2024x_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= pdc202xx_set_piomode,
+	.set_dmamode		= pdc202xx_set_dmamode,
+
+	.sff_exec_command	= pdc202xx_exec_command,
+	.sff_irq_check		= pdc202xx_irq_check,
+};
+
+static struct ata_port_operations pdc2026x_port_ops = {
+	.inherits		= &pdc2024x_port_ops,
+
+	.check_atapi_dma	= pdc2026x_check_atapi_dma,
+	.bmdma_start		= pdc2026x_bmdma_start,
+	.bmdma_stop		= pdc2026x_bmdma_stop,
+
+	.cable_detect		= pdc2026x_cable_detect,
+	.dev_config		= pdc2026x_dev_config,
+
+	.port_start		= pdc2026x_port_start,
+
+	.sff_exec_command	= pdc202xx_exec_command,
+	.sff_irq_check		= pdc202xx_irq_check,
+};
+
+static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[3] = {
+		{
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &pdc2024x_port_ops
+		},
+		{
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &pdc2026x_port_ops
+		},
+		{
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &pdc2026x_port_ops
+		}
+
+	};
+	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+
+	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
+		struct pci_dev *bridge = dev->bus->self;
+		/* Don't grab anything behind a Promise I2O RAID */
+		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
+				return -ENODEV;
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
+				return -ENODEV;
+		}
+	}
+	return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
+}
+
+static const struct pci_device_id pdc202xx[] = {
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
+
+	{ },
+};
+
+static struct pci_driver pdc202xx_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pdc202xx,
+	.probe 		= pdc202xx_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(pdc202xx_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc202xx);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
new file mode 100644
index 0000000..35cb0e2
--- /dev/null
+++ b/drivers/ata/pata_piccolo.c
@@ -0,0 +1,125 @@
+/*
+ *  pata_piccolo.c - Toshiba Piccolo PATA/SATA controller driver.
+ *
+ *  This is basically an update to ata_generic.c to add Toshiba Piccolo
+ *  support, split out to keep ata_generic "clean".
+ *
+ *  Copyright 2005 Red Hat Inc, all rights reserved.
+ *
+ *  Elements from ide/pci/generic.c
+ *	    Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
+ *	    Portions (C) Copyright 2002  Red Hat Inc <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  The timing data tables/programming info are courtesy of the NetBSD driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_piccolo"
+#define DRV_VERSION "0.0.1"
+
+static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 pio[6] = {	/* For reg 0x50 low word & E088 */
+		0x0566, 0x0433, 0x0311, 0x0201, 0x0200, 0x0100
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 conf;
+	pci_read_config_word(pdev, 0x50, &conf);
+	conf &= 0xE088;
+	conf |= pio[adev->pio_mode - XFER_PIO_0];
+	pci_write_config_word(pdev, 0x50, conf);
+}
+
+static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 conf;
+	pci_read_config_dword(pdev, 0x5C, &conf);
+	conf &= 0x78FFE088;	/* Keep the other bits */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int udma = adev->dma_mode - XFER_UDMA_0;
+		conf |= 0x80000000;
+		conf |= (udma + 2) << 28;
+		conf |= (2 - udma) * 0x111;	/* spread into three nibbles */
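+		/*
+		 * E.g. UDMA0 ORs in 0xA0000222: enable bit 31, mode field
+		 * 2 in bits 30:28 and cycle value 2 replicated into three
+		 * timing nibbles (a reading of the code, not a datasheet).
+		 */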
+	} else {
+		static const u32 mwdma[4] = {
+			0x0655, 0x0200, 0x0200, 0x0100
+		};
+		conf |= mwdma[adev->dma_mode - XFER_MW_DMA_0];
+	}
+	pci_write_config_dword(pdev, 0x5C, conf);
+}
+
+static struct scsi_host_template tosh_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations tosh_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= tosh_set_piomode,
+	.set_dmamode	= tosh_set_dmamode
+};
+
+/**
+ *	ata_tosh_init_one	-	attach generic IDE
+ *	@dev: PCI device found
+ *	@id: match entry
+ *
+ *	Called each time a matching IDE interface is found. We check if the
+ *	interface is one we wish to claim and, if so, perform any chip
+ *	specific hacks before letting the ATA layer do the heavy lifting.
+ */
+
+static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO5,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &tosh_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+	/* Just one port for the moment */
+	return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
+}
+
+static struct pci_device_id ata_tosh[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
+	{ 0, },
+};
+
+static struct pci_driver ata_tosh_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ata_tosh,
+	.probe 		= ata_tosh_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(ata_tosh_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ata_tosh);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
new file mode 100644
index 0000000..d6f8f54
--- /dev/null
+++ b/drivers/ata/pata_platform.c
@@ -0,0 +1,240 @@
+/*
+ * Generic platform device PATA driver
+ *
+ * Copyright (C) 2006 - 2007  Paul Mundt
+ *
+ * Based on pata_pcmcia:
+ *
+ *   Copyright 2005-2006 Red Hat Inc, all rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#define DRV_NAME "pata_platform"
+#define DRV_VERSION "1.2"
+
+static int pio_mask = 1;
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured..
+ */
+static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+		ata_dev_info(dev, "configured for PIO\n");
+	}
+	return 0;
+}
+
+static struct scsi_host_template pata_platform_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_platform_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ata_sff_data_xfer32,
+	.cable_detect		= ata_cable_unknown,
+	.set_mode		= pata_platform_set_mode,
+};
+
+static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+				     unsigned int shift)
+{
+	/* Fixup the port shift for platforms that need it */
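+	/*
+	 * E.g. with shift == 1 the taskfile registers sit two bytes apart:
+	 * data at +0, error/feature at +2, nsect at +4, ..., status at +14.
+	 */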
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << shift);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << shift);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT   << shift);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL    << shift);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM    << shift);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH    << shift);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE  << shift);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS  << shift);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD     << shift);
+}
+
+/**
+ *	__pata_platform_probe		-	attach a platform interface
+ *	@dev: device
+ *	@io_res: Resource representing I/O base
+ *	@ctl_res: Resource representing CTL base
+ *	@irq_res: Resource representing IRQ and its flags
+ *	@ioport_shift: I/O port shift
+ *	@__pio_mask: PIO mask
+ *	@sht: scsi_host_template to use when registering
+ *
+ *	Register a platform bus IDE interface. Such interfaces are PIO-only
+ *	and we assume they do not support IRQ sharing.
+ *
+ *	Platform devices are expected to contain at least 2 resources per port:
+ *
+ *		- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *		- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *
+ *	and optionally:
+ *
+ *		- IRQ	   (IORESOURCE_IRQ)
+ *
+ *	If the base resources are both mem types, the ioremap() is handled
+ *	here. For IORESOURCE_IO, it's assumed that there's no remapping
+ *	necessary.
+ *
+ *	If no IRQ resource is present, PIO polling mode is used instead.
+ */
+int __pata_platform_probe(struct device *dev, struct resource *io_res,
+			  struct resource *ctl_res, struct resource *irq_res,
+			  unsigned int ioport_shift, int __pio_mask,
+			  struct scsi_host_template *sht)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	unsigned int mmio;
+	int irq = 0;
+	int irq_flags = 0;
+
+	/*
+	 * Check for MMIO
+	 */
+	mmio = (( io_res->flags == IORESOURCE_MEM) &&
+		(ctl_res->flags == IORESOURCE_MEM));
+
+	/*
+	 * And the IRQ
+	 */
+	if (irq_res && irq_res->start > 0) {
+		irq = irq_res->start;
+		irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
+	}
+
+	/*
+	 * Now that that's out of the way, wire up the port..
+	 */
+	host = ata_host_alloc(dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	ap->ops = &pata_platform_port_ops;
+	ap->pio_mask = __pio_mask;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	/*
+	 * Use polling mode if there's no IRQ
+	 */
+	if (!irq) {
+		ap->flags |= ATA_FLAG_PIO_POLLING;
+		ata_port_desc(ap, "no IRQ, using PIO polling");
+	}
+
+	/*
+	 * Handle the MMIO case
+	 */
+	if (mmio) {
+		ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
+				resource_size(io_res));
+		ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
+				resource_size(ctl_res));
+	} else {
+		ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
+				resource_size(io_res));
+		ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
+				resource_size(ctl_res));
+	}
+	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
+		dev_err(dev, "failed to map IO/CTL base\n");
+		return -ENOMEM;
+	}
+
+	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+	pata_platform_setup_port(&ap->ioaddr, ioport_shift);
+
+	ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
+		      (unsigned long long)io_res->start,
+		      (unsigned long long)ctl_res->start);
+
+	/* activate */
+	return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
+				 irq_flags, sht);
+}
+EXPORT_SYMBOL_GPL(__pata_platform_probe);
+
+static int pata_platform_probe(struct platform_device *pdev)
+{
+	struct resource *io_res;
+	struct resource *ctl_res;
+	struct resource *irq_res;
+	struct pata_platform_info *pp_info = dev_get_platdata(&pdev->dev);
+
+	/*
+	 * Simple resource validation ..
+	 */
+	if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the I/O base first
+	 */
+	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (io_res == NULL) {
+		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (unlikely(io_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * Then the CTL base
+	 */
+	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+	if (ctl_res == NULL) {
+		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (unlikely(ctl_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * And the IRQ
+	 */
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
+				     pp_info ? pp_info->ioport_shift : 0,
+				     pio_mask, &pata_platform_sht);
+}
+
+static struct platform_driver pata_platform_driver = {
+	.probe		= pata_platform_probe,
+	.remove		= ata_platform_remove_one,
+	.driver = {
+		.name		= DRV_NAME,
+	},
+};
+
+module_platform_driver(pata_platform_driver);
+
+module_param(pio_mask, int, 0);
+
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("low-level driver for platform device ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
new file mode 100644
index 0000000..e8b6a2e
--- /dev/null
+++ b/drivers/ata/pata_pxa.c
@@ -0,0 +1,336 @@
+/*
+ * Generic PXA PATA driver
+ *
+ * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_host.h>
+
+#include <linux/platform_data/ata-pxa.h>
+
+#define DRV_NAME	"pata_pxa"
+#define DRV_VERSION	"0.1"
+
+struct pata_pxa_data {
+	struct dma_chan		*dma_chan;
+	dma_cookie_t		dma_cookie;
+	struct completion	dma_done;
+};
+
+/*
+ * DMA interrupt handler.
+ */
+static void pxa_ata_dma_irq(void *d)
+{
+	struct pata_pxa_data *pd = d;
+	enum dma_status status;
+
+	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
+	if (status == DMA_ERROR || status == DMA_COMPLETE)
+		complete(&pd->dma_done);
+}
+
+/*
+ * Prepare taskfile for submission.
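+ *
+ * The slave-DMA descriptor is prepared and submitted here; the transfer
+ * itself is only started later, in pxa_bmdma_start(), via
+ * dma_async_issue_pending(). Completion is signalled through the
+ * dmaengine callback (pxa_ata_dma_irq() above).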
+ */
+static void pxa_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+	struct dma_async_tx_descriptor *tx;
+	enum dma_transfer_direction dir;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
+	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
+				     DMA_PREP_INTERRUPT);
+	if (!tx) {
+		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
+		return;
+	}
+	tx->callback = pxa_ata_dma_irq;
+	tx->callback_param = pd;
+	pd->dma_cookie = dmaengine_submit(tx);
+}
+
+/*
+ * Configure the DMA controller, load the DMA descriptors, but don't start the
+ * DMA controller yet. Only issue the ATA command.
+ */
+static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
+}
+
+/*
+ * Execute the DMA transfer.
+ */
+static void pxa_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+	init_completion(&pd->dma_done);
+	dma_async_issue_pending(pd->dma_chan);
+}
+
+/*
+ * Wait until the DMA transfer completes, then stop the DMA controller.
+ */
+static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct pata_pxa_data *pd = qc->ap->private_data;
+	enum dma_status status;
+
+	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
+	/* wait_for_completion_timeout() returns 0 only on timeout */
+	if (status != DMA_ERROR && status != DMA_COMPLETE &&
+	    !wait_for_completion_timeout(&pd->dma_done, HZ))
+		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");
+
+	dmaengine_terminate_all(pd->dma_chan);
+}
+
+/*
+ * Read DMA status. bmdma_stop() takes care of properly finishing the
+ * DMA transfer, so the DMA-complete interrupt is always pending here.
+ */
+static unsigned char pxa_bmdma_status(struct ata_port *ap)
+{
+	struct pata_pxa_data *pd = ap->private_data;
+	unsigned char ret = ATA_DMA_INTR;
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
+	if (status != DMA_COMPLETE)
+		ret |= ATA_DMA_ERR;
+
+	return ret;
+}
+
+/*
+ * No IRQ register present so we do nothing.
+ */
+static void pxa_irq_clear(struct ata_port *ap)
+{
+}
+
+/*
+ * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
+ * unclear why ATAPI has DMA issues.
+ */
+static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct scsi_host_template pxa_ata_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pxa_ata_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= pxa_bmdma_setup,
+	.bmdma_start		= pxa_bmdma_start,
+	.bmdma_stop		= pxa_bmdma_stop,
+	.bmdma_status		= pxa_bmdma_status,
+
+	.check_atapi_dma	= pxa_check_atapi_dma,
+
+	.sff_irq_clear		= pxa_irq_clear,
+
+	.qc_prep		= pxa_qc_prep,
+};
+
+static int pxa_ata_probe(struct platform_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct pata_pxa_data *data;
+	struct resource *cmd_res;
+	struct resource *ctl_res;
+	struct resource *dma_res;
+	struct resource *irq_res;
+	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct dma_slave_config	config;
+	int ret = 0;
+
+	/*
+	 * Resource validation, four resources are needed:
+	 *  - CMD port base address
+	 *  - CTL port base address
+	 *  - DMA port base address
+	 *  - IRQ pin
+	 */
+	if (pdev->num_resources != 4) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * CMD port base address
+	 */
+	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(cmd_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * CTL port base address
+	 */
+	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (unlikely(ctl_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * DMA port base address
+	 */
+	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (unlikely(dma_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * IRQ pin
+	 */
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (unlikely(irq_res == NULL))
+		return -EINVAL;
+
+	/*
+	 * Allocate the host
+	 */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap		= host->ports[0];
+	ap->ops		= &pxa_ata_port_ops;
+	ap->pio_mask	= ATA_PIO4;
+	ap->mwdma_mask	= ATA_MWDMA2;
+
+	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
+						resource_size(cmd_res));
+	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
+						resource_size(ctl_res));
+	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
+						resource_size(dma_res));
+
+	/*
+	 * Adjust register offsets
+	 */
+	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_DATA << pdata->reg_shift);
+	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_ERR << pdata->reg_shift);
+	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_FEATURE << pdata->reg_shift);
+	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_NSECT << pdata->reg_shift);
+	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_LBAL << pdata->reg_shift);
+	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_LBAM << pdata->reg_shift);
+	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_LBAH << pdata->reg_shift);
+	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_DEVICE << pdata->reg_shift);
+	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_STATUS << pdata->reg_shift);
+	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
+					(ATA_REG_CMD << pdata->reg_shift);
+
+	/*
+	 * Allocate and load driver's internal data structure
+	 */
+	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
+								GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ap->private_data = data;
+
+	memset(&config, 0, sizeof(config));
+	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	config.src_addr = dma_res->start;
+	config.dst_addr = dma_res->start;
+	config.src_maxburst = 32;
+	config.dst_maxburst = 32;
+
+	/*
+	 * Request the DMA channel
+	 */
+	data->dma_chan =
+		dma_request_slave_channel(&pdev->dev, "data");
+	if (!data->dma_chan)
+		return -EBUSY;
+	ret = dmaengine_slave_config(data->dma_chan, &config);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Activate the ATA host
+	 */
+	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
+				pdata->irq_flags, &pxa_ata_sht);
+	if (ret)
+		dma_release_channel(data->dma_chan);
+
+	return ret;
+}
+
+static int pxa_ata_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct pata_pxa_data *data = host->ports[0]->private_data;
+
+	dma_release_channel(data->dma_chan);
+
+	ata_host_detach(host);
+
+	return 0;
+}
+
+static struct platform_driver pxa_ata_driver = {
+	.probe		= pxa_ata_probe,
+	.remove		= pxa_ata_remove,
+	.driver		= {
+		.name		= DRV_NAME,
+	},
+};
+
+module_platform_driver(pxa_ata_driver);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
new file mode 100644
index 0000000..a3f1123
--- /dev/null
+++ b/drivers/ata/pata_radisys.c
@@ -0,0 +1,252 @@
+/*
+ *    pata_radisys.c - Intel PATA/SATA controllers
+ *
+ *	(C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk>
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    A PIIX relative, this device has a single ATA channel and no
+ *    slave timings, SITRE or PPE. In that sense it is a close relative
+ *    of the original PIIX. It does however support UDMA 33/66 per channel
+ *    although no other modes/timings. Also lacking is 32bit I/O on the ATA
+ *    port.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_radisys"
+#define DRV_VERSION	"0.4.4"
+
+/**
+ *	radisys_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: ATA port
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. Note that the early PIIX does not have the slave
+	 *	timing port at 0x44. The Radisys is a relative of the PIIX
+	 *	but not the same so be careful.
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },	/* Check me */
+			    { 0, 0 },
+			    { 1, 1 },
+			    { 2, 2 },
+			    { 3, 3 }, };
+
+	if (pio > 0)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE IORDY */
+
+	pci_read_config_word(dev, 0x40, &idetm_data);
+
+	/* Enable IE and TIME as appropriate. Clear the other
+	   drive timing bits */
+	idetm_data &= 0xCCCC;
+	idetm_data |= (control << (4 * adev->devno));
+	idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	pci_write_config_word(dev, 0x40, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	radisys_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set MWDMA/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 idetm_data;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 1 },
+			    { 2, 2 },
+			    { 3, 3 }, };
+
+	/*
+	 * MWDMA is driven by the PIO timings. We must also enable
+	 * IORDY unconditionally.
+	 */
+
+	pci_read_config_word(dev, 0x40, &idetm_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
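+		/*
+		 * i.e. MWDMA0 requires PIO0 timings, MWDMA1 requires PIO3
+		 * and MWDMA2 requires PIO4 on this chip.
+		 */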
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+		int control = 3;	/* IORDY|TIME0 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO0 for PIO cycles. */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			control = 1;
+
+		/* Mask out the relevant control and timing bits we will load. Also
+		   clear the other drive TIME register as a precaution */
+
+		idetm_data &= 0xCCCC;
+		idetm_data |= control << (4 * adev->devno);
+		idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+
+		udma_enable &= ~(1 << adev->devno);
+	} else {
+		u8 udma_mode;
+
+		/* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
+
+		pci_read_config_byte(dev, 0x4A, &udma_mode);
+
+		if (adev->xfer_mode == XFER_UDMA_2)
+			udma_mode &= ~(2 << (adev->devno * 4));
+		else /* UDMA 4 */
+			udma_mode |= (2 << (adev->devno * 4));
+
+		pci_write_config_byte(dev, 0x4A, udma_mode);
+
+		udma_enable |= (1 << adev->devno);
+	}
+	pci_write_config_word(dev, 0x40, idetm_data);
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	radisys_qc_issue	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	if (adev != ap->private_data) {
+		/* UDMA timing is not shared */
+		if (adev->dma_mode < XFER_UDMA_0) {
+			if (adev->dma_mode)
+				radisys_set_dmamode(ap, adev);
+			else if (adev->pio_mode)
+				radisys_set_piomode(ap, adev);
+		}
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+
+static struct scsi_host_template radisys_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations radisys_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.qc_issue		= radisys_qc_issue,
+	.cable_detect		= ata_cable_unknown,
+	.set_piomode		= radisys_set_piomode,
+	.set_dmamode		= radisys_set_dmamode,
+};
+
+
+/**
+ *	radisys_init_one - Register PIIX ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in radisys_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA12_ONLY,
+		.udma_mask	= ATA_UDMA24_ONLY,
+		.port_ops	= &radisys_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
+}
+
+static const struct pci_device_id radisys_pci_tbl[] = {
+	{ PCI_VDEVICE(RADISYS, 0x8201), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver radisys_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= radisys_pci_tbl,
+	.probe			= radisys_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(radisys_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
new file mode 100644
index 0000000..653b9a0
--- /dev/null
+++ b/drivers/ata/pata_rb532_cf.c
@@ -0,0 +1,214 @@
+/*
+ *  A low-level PATA driver to handle a Compact Flash card connected to
+ *  Mikrotik's RouterBoard 532 board.
+ *
+ *  Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
+ *  Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ *  This file was based on: drivers/ata/pata_ixp4xx_cf.c
+ *	Copyright (C) 2006-07 Tower Technologies
+ *	Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ *  Also based on the driver for Linux 2.4.xx published by Mikrotik for
+ *  their RouterBoard 1xx and 5xx series devices. The original Mikrotik code
+ *  seems not to have a license.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/mach-rc32434/rb.h>
+
+#define DRV_NAME	"pata-rb532-cf"
+#define DRV_VERSION	"0.1.0"
+#define DRV_DESC	"PATA driver for RouterBOARD 532 Compact Flash"
+
+#define RB500_CF_MAXPORTS	1
+#define RB500_CF_IO_DELAY	400
+
+#define RB500_CF_REG_BASE	0x0800
+#define RB500_CF_REG_ERR	0x080D
+#define RB500_CF_REG_CTRL	0x080E
+/* 32bit buffered data register offset */
+#define RB500_CF_REG_DBUF32	0x0C00
+
+struct rb532_cf_info {
+	void __iomem	*iobase;
+	unsigned int	gpio_line;
+	unsigned int	irq;
+};
+
+/* ------------------------------------------------------------------------ */
+
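+/*
+ * The CF interrupt arrives on a plain GPIO line, so the handler below
+ * emulates an edge trigger by flipping the level type on each call: a
+ * high line is passed to ata_sff_interrupt() and re-armed for low, a
+ * low line is simply re-armed for high.
+ */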
+static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
+{
+	struct ata_host *ah = dev_instance;
+	struct rb532_cf_info *info = ah->private_data;
+
+	if (gpio_get_value(info->gpio_line)) {
+		irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
+		ata_sff_interrupt(info->irq, dev_instance);
+	} else {
+		irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct ata_port_operations rb532_pata_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_data_xfer		= ata_sff_data_xfer32,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static struct scsi_host_template rb532_pata_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+/* ------------------------------------------------------------------------ */
+
+static void rb532_pata_setup_ports(struct ata_host *ah)
+{
+	struct rb532_cf_info *info = ah->private_data;
+	struct ata_port *ap;
+
+	ap = ah->ports[0];
+
+	ap->ops		= &rb532_pata_port_ops;
+	ap->pio_mask	= ATA_PIO4;
+
+	ap->ioaddr.cmd_addr	= info->iobase + RB500_CF_REG_BASE;
+	ap->ioaddr.ctl_addr	= info->iobase + RB500_CF_REG_CTRL;
+	ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
+
+	ata_sff_std_ports(&ap->ioaddr);
+
+	ap->ioaddr.data_addr	= info->iobase + RB500_CF_REG_DBUF32;
+	ap->ioaddr.error_addr	= info->iobase + RB500_CF_REG_ERR;
+}
+
+static int rb532_pata_driver_probe(struct platform_device *pdev)
+{
+	int irq;
+	int gpio;
+	struct resource *res;
+	struct ata_host *ah;
+	struct cf_device *pdata;
+	struct rb532_cf_info *info;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no IOMEM resource found\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev, "no IRQ resource found\n");
+		return -ENOENT;
+	}
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data specified\n");
+		return -EINVAL;
+	}
+
+	gpio = pdata->gpio_pin;
+	if (gpio < 0) {
+		dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
+		return -ENOENT;
+	}
+
+	ret = gpio_request(gpio, DRV_NAME);
+	if (ret) {
+		dev_err(&pdev->dev, "GPIO request failed\n");
+		return ret;
+	}
+
+	/* allocate host */
+	ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
+	if (!ah) {
+		ret = -ENOMEM;
+		goto err_free_gpio;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_free_gpio;
+	}
+
+	ah->private_data = info;
+	info->gpio_line = gpio;
+	info->irq = irq;
+
+	info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+				resource_size(res));
+	if (!info->iobase) {
+		ret = -ENOMEM;
+		goto err_free_gpio;
+	}
+
+	ret = gpio_direction_input(gpio);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
+				ret);
+		goto err_free_gpio;
+	}
+
+	rb532_pata_setup_ports(ah);
+
+	ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
+				IRQF_TRIGGER_LOW, &rb532_pata_sht);
+	if (ret)
+		goto err_free_gpio;
+
+	return 0;
+
+err_free_gpio:
+	gpio_free(gpio);
+
+	return ret;
+}
+
+static int rb532_pata_driver_remove(struct platform_device *pdev)
+{
+	struct ata_host *ah = platform_get_drvdata(pdev);
+	struct rb532_cf_info *info = ah->private_data;
+
+	ata_host_detach(ah);
+	gpio_free(info->gpio_line);
+
+	return 0;
+}
+
+static struct platform_driver rb532_pata_platform_driver = {
+	.probe		= rb532_pata_driver_probe,
+	.remove		= rb532_pata_driver_remove,
+	.driver	 = {
+		.name   = DRV_NAME,
+	},
+};
+
+#define DRV_INFO DRV_DESC " version " DRV_VERSION
+
+module_platform_driver(rb532_pata_platform_driver);
+
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
new file mode 100644
index 0000000..959bb54
--- /dev/null
+++ b/drivers/ata/pata_rdc.c
@@ -0,0 +1,398 @@
+/*
+ *  pata_rdc		-	Driver for later RDC PATA controllers
+ *
+ *  This is actually a driver for hardware meeting
+ *  INCITS 370-2004 (1510D): ATA Host Adapter Standards
+ *
+ *  Based on ata_piix.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_rdc"
+#define DRV_VERSION	"0.01"
+
+struct rdc_host_priv {
+	u32 saved_iocfg;
+};
+
+/**
+ *	rdc_pata_cable_detect - Probe host controller cable detect info
+ *	@ap: Port for which cable detect info is desired
+ *
+ *	Read 80c cable indicator from ATA PCI device's PCI config
+ *	register.  This register is normally set by firmware (BIOS).
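+ *	Per the mask computed below, IOCFG bits 4-5 carry the indicator
+ *	for port 0 and bits 6-7 for port 1; a zero field means 40-wire.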
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static int rdc_pata_cable_detect(struct ata_port *ap)
+{
+	struct rdc_host_priv *hpriv = ap->host->private_data;
+	u8 mask;
+
+	/* check BIOS cable detect results */
+	mask = 0x30 << (2 * ap->port_no);
+	if ((hpriv->saved_iocfg & mask) == 0)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	rdc_pata_prereset - prereset for PATA host controller
+ *	@link: Target link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	static const struct pci_bits rdc_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+
+	if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_sff_prereset(link, deadline);
+}
+
+static DEFINE_SPINLOCK(rdc_lock);
+
+/**
+ *	rdc_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	unsigned int is_slave	= (adev->devno != 0);
+	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
+	unsigned int slave_port	= 0x44;
+	u16 master_data;
+	u8 slave_data;
+	u8 udma_enable;
+	int control = 0;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio >= 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE enable */
+
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+
+	spin_lock_irqsave(&rdc_lock, flags);
+
+	/* PIO configuration clears DTE unconditionally.  It will be
+	 * programmed in set_dmamode which is guaranteed to be called
+	 * after set_piomode if any DMA mode is available.
+	 */
+	pci_read_config_word(dev, master_port, &master_data);
+	if (is_slave) {
+		/* clear TIME1|IE1|PPE1|DTE1 */
+		master_data &= 0xff0f;
+		/* Enable SITRE (separate slave timing register) */
+		master_data |= 0x4000;
+		/* enable PPE1, IE1 and TIME1 as needed */
+		master_data |= (control << 4);
+		pci_read_config_byte(dev, slave_port, &slave_data);
+		slave_data &= (ap->port_no ? 0x0f : 0xf0);
+		/* Load the timing nibble for this slave */
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+						<< (ap->port_no ? 4 : 0);
+	} else {
+		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+		master_data &= 0xccf0;
+		/* Enable PPE, IE and TIME as appropriate */
+		master_data |= control;
+		/* load ISP and RCT */
+		master_data |=
+			(timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	}
+	pci_write_config_word(dev, master_port, master_data);
+	if (is_slave)
+		pci_write_config_byte(dev, slave_port, slave_data);
+
+	/* Ensure the UDMA bit is off - it will be turned back on if
+	   UDMA is selected */
+
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&rdc_lock, flags);
+}
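+
+/*
+ * Worked example (illustrative): PIO4 on the master of port 0, for an
+ * ATA disk needing IORDY, yields control = PPE|IE|TIME1 = 7 and
+ * ISP/RCT = {2, 3}, so register 0x40 becomes
+ * (old & 0xccf0) | 7 | (2 << 12) | (3 << 8).
+ */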
+
+/**
+ *	rdc_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	u8 master_port		= ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	u8 udma_enable		= 0;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	spin_lock_irqsave(&rdc_lock, flags);
+
+	pci_read_config_word(dev, master_port, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		/*
+		 * UDMA is handled by a combination of clock switching and
+		 * selection of dividers
+		 *
+		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
+		 *	       except UDMA0 which is 00
+		 */
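+		/* e.g. udma 0..5 maps to u_speed 0, 1, 2, 1, 2, 1 */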
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma == 5)
+			u_clock = 0x1000;	/* 100 MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66 MHz */
+		else
+			u_clock = 0;		/* 33 MHz */
+
+		udma_enable |= (1 << devid);
+
+		/* Load the CT/RP selection */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= u_speed << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		/* Select a 33/66/100 MHz clock */
+		pci_read_config_word(dev, 0x54, &ideconf);
+		ideconf &= ~(0x1001 << devid);
+		ideconf |= u_clock << devid;
+		pci_write_config_word(dev, 0x54, ideconf);
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive's MWDMA mode is faster than its best PIO mode
+		   then we must force the PIO cycles into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (adev->devno) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= (ap->port_no ? 0x0f : 0xf0);
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, master_port, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&rdc_lock, flags);
+}
+
+static struct ata_port_operations rdc_pata_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.cable_detect		= rdc_pata_cable_detect,
+	.set_piomode		= rdc_set_piomode,
+	.set_dmamode		= rdc_set_dmamode,
+	.prereset		= rdc_pata_prereset,
+};
+
+static const struct ata_port_info rdc_port_info = {
+
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA12_ONLY,
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &rdc_pata_ops,
+};
+
+static struct scsi_host_template rdc_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/**
+ *	rdc_init_one - Register PIIX ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in rdc_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_port_info port_info[2];
+	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+	struct ata_host *host;
+	struct rdc_host_priv *hpriv;
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	port_info[0] = rdc_port_info;
+	port_info[1] = rdc_port_info;
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	/* Save IOCFG, this will be used for cable detection, quirk
+	 * detection and restoration on detach.
+	 */
+	pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
+
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	host->private_data = hpriv;
+
+	pci_intx(pdev, 1);
+
+	host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
+}
+
+static void rdc_remove_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct rdc_host_priv *hpriv = host->private_data;
+
+	pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
+
+	ata_pci_remove_one(pdev);
+}
+
+static const struct pci_device_id rdc_pci_tbl[] = {
+	{ PCI_DEVICE(0x17F3, 0x1011), },
+	{ PCI_DEVICE(0x17F3, 0x1012), },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver rdc_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= rdc_pci_tbl,
+	.probe			= rdc_init_one,
+	.remove			= rdc_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+
+module_pci_driver(rdc_pci_driver);
+
+MODULE_AUTHOR("Alan Cox (based on ata_piix)");
+MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
new file mode 100644
index 0000000..b3ec18c
--- /dev/null
+++ b/drivers/ata/pata_rz1000.c
@@ -0,0 +1,148 @@
+/*
+ *  RZ1000/1001 driver based upon
+ *
+ *  linux/drivers/ide/pci/rz1000.c	Version 0.06	January 12, 2003
+ *  Copyright (C) 1995-1998  Linus Torvalds & author (see below)
+ *  Principal Author:  mlord@pobox.com (Mark Lord)
+ *
+ *  See linux/MAINTAINERS for address of current maintainer.
+ *
+ *  This file provides support for disabling the buggy read-ahead
+ *  mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_rz1000"
+#define DRV_VERSION	"0.2.4"
+
+
+/**
+ *	rz1000_set_mode		-	mode setting function
+ *	@link: ATA link
+ *	@unused: returned device on set_mode failure
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned. We
+ *	would prefer to be BIOS generic but for the fact our hardware is
+ *	whacked out.
+ */
+
+static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+	struct ata_device *dev;
+
+	ata_for_each_dev(dev, link, ENABLED) {
+		/* We don't really care */
+		dev->pio_mode = XFER_PIO_0;
+		dev->xfer_mode = XFER_PIO_0;
+		dev->xfer_shift = ATA_SHIFT_PIO;
+		dev->flags |= ATA_DFLAG_PIO;
+		ata_dev_info(dev, "configured for PIO\n");
+	}
+	return 0;
+}
+
+
+static struct scsi_host_template rz1000_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations rz1000_port_ops = {
+	.inherits	= &ata_sff_port_ops,
+	.cable_detect	= ata_cable_40wire,
+	.set_mode	= rz1000_set_mode,
+};
+
+static int rz1000_fifo_disable(struct pci_dev *pdev)
+{
+	u16 reg;
+	/* Be exceptionally paranoid as we must be sure to apply the fix */
+	if (pci_read_config_word(pdev, 0x40, &reg) != 0)
+		return -1;
+	reg &= 0xDFFF;	/* clear bit 13, the read-ahead enable */
+	if (pci_write_config_word(pdev, 0x40, reg) != 0)
+		return -1;
+	printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+	return 0;
+}
+
+/**
+ *	rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in rz1000_pci_tbl matching with @pdev
+ *
+ *	Configure an RZ1000 interface. This doesn't require much special
+ *	handling except that we *MUST* kill the chipset readahead or the
+ *	user may experience data corruption.
+ */
+
+static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &rz1000_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	if (rz1000_fifo_disable(pdev) == 0)
+		return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
+
+	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
+	/* Not safe to use so skip */
+	return -ENODEV;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rz1000_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	/* If this fails on resume (which is a "can't happen" case), we
+	   must stop as any progress risks data loss */
+	if (rz1000_fifo_disable(pdev))
+		panic("rz1000 fifo");
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id pata_rz1000[] = {
+	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
+	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
+
+	{ },
+};
+
+static struct pci_driver rz1000_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pata_rz1000,
+	.probe 		= rz1000_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= rz1000_reinit_one,
+#endif
+};
+
+module_pci_driver(rz1000_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_rz1000);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
new file mode 100644
index 0000000..f5bd44b
--- /dev/null
+++ b/drivers/ata/pata_samsung_cf.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * PATA driver for Samsung SoCs.
+ * Supports CF Interface in True IDE mode. Currently only PIO mode has been
+ * implemented; UDMA support has to be added.
+ *
+ * Based on:
+ *	PATA driver for AT91SAM9260 Static Memory Controller
+ *	PATA driver for Toshiba SCC controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/ata-samsung_cf.h>
+
+#define DRV_NAME "pata_samsung_cf"
+#define DRV_VERSION "0.1"
+
+#define S3C_CFATA_REG(x)	(x)
+#define S3C_CFATA_MUX		S3C_CFATA_REG(0x0)
+#define S3C_ATA_CTRL		S3C_CFATA_REG(0x0)
+#define S3C_ATA_CMD		S3C_CFATA_REG(0x8)
+#define S3C_ATA_IRQ		S3C_CFATA_REG(0x10)
+#define S3C_ATA_IRQ_MSK		S3C_CFATA_REG(0x14)
+#define S3C_ATA_CFG		S3C_CFATA_REG(0x18)
+
+#define S3C_ATA_PIO_TIME	S3C_CFATA_REG(0x2c)
+#define S3C_ATA_PIO_DTR		S3C_CFATA_REG(0x54)
+#define S3C_ATA_PIO_FED		S3C_CFATA_REG(0x58)
+#define S3C_ATA_PIO_SCR		S3C_CFATA_REG(0x5c)
+#define S3C_ATA_PIO_LLR		S3C_CFATA_REG(0x60)
+#define S3C_ATA_PIO_LMR		S3C_CFATA_REG(0x64)
+#define S3C_ATA_PIO_LHR		S3C_CFATA_REG(0x68)
+#define S3C_ATA_PIO_DVR		S3C_CFATA_REG(0x6c)
+#define S3C_ATA_PIO_CSD		S3C_CFATA_REG(0x70)
+#define S3C_ATA_PIO_DAD		S3C_CFATA_REG(0x74)
+#define S3C_ATA_PIO_RDATA	S3C_CFATA_REG(0x7c)
+
+#define S3C_CFATA_MUX_TRUEIDE	0x01
+#define S3C_ATA_CFG_SWAP	0x40
+#define S3C_ATA_CFG_IORDYEN	0x02
+
+enum s3c_cpu_type {
+	TYPE_S3C64XX,
+	TYPE_S5PV210,
+};
+
+/*
+ * struct s3c_ide_info - S3C PATA instance.
+ * @clk: The clock resource for this controller.
+ * @ide_addr: The area mapped for the hardware registers.
+ * @sfr_addr: The area mapped for the special function registers.
+ * @irq: The IRQ number we are using.
+ * @cpu_type: The exact type of this controller.
+ * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
+ */
+struct s3c_ide_info {
+	struct clk *clk;
+	void __iomem *ide_addr;
+	void __iomem *sfr_addr;
+	int irq;
+	enum s3c_cpu_type cpu_type;
+	unsigned int fifo_status_reg;
+};
+
+static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
+{
+	u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
+	reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
+	writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
+}
+
+static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
+{
+	/* Select true-ide as the internal operating mode */
+	writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
+		s3c_ide_sfrbase + S3C_CFATA_MUX);
+}
+
+static unsigned long
+pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
+{
+	int t1 = ata->setup;
+	int t2 = ata->act8b;
+	int t2i = ata->rec8b;
+	ulong piotime;
+
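+	/*
+	 * PIO_TIME register layout, per the packing below:
+	 * bits [19:12] = recovery (t2i), [11:4] = active (t2),
+	 * [3:0] = address setup (t1).
+	 */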
+	piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
+
+	return piotime;
+}
+
+static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct s3c_ide_info *info = ap->host->private_data;
+	struct ata_timing timing;
+	int cycle_time;
+	ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
+	ulong piotime;
+
+	/* Enables IORDY if mode requires it */
+	if (ata_pio_need_iordy(adev))
+		ata_cfg |= S3C_ATA_CFG_IORDYEN;
+	else
+		ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
+
+	cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
+
+	ata_timing_compute(adev, adev->pio_mode, &timing,
+					cycle_time * 1000, 0);
+
+	piotime = pata_s3c_setup_timing(info, &timing);
+
+	writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
+	writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
+}
+
+/*
+ * Waits until the IDE controller is able to perform the next read/write
+ * operation to the disk. Needed for the 64XX series boards only.
+ */
+static int wait_for_host_ready(struct s3c_ide_info *info)
+{
+	ulong timeout;
+	void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
+
+	/* wait for maximum of 20 msec */
+	timeout = jiffies + msecs_to_jiffies(20);
+	while (time_before(jiffies, timeout)) {
+		if ((readl(fifo_reg) >> 28) == 0)
+			return 0;
+	}
+	return -EBUSY;
+}
+
+/*
+ * Writes to one of the task file registers.
+ */
+static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
+{
+	struct s3c_ide_info *info = host->private_data;
+
+	wait_for_host_ready(info);
+	writeb(addr, reg);
+}
+
+/*
+ * Reads from one of the task file registers.
+ */
+static u8 ata_inb(struct ata_host *host, void __iomem *reg)
+{
+	struct s3c_ide_info *info = host->private_data;
+	u8 temp;
+
+	wait_for_host_ready(info);
+	/* a dummy read of the taskfile register starts the bus cycle */
+	(void) readb(reg);
+	wait_for_host_ready(info);
+	/* the latched value is then picked up from the read-data register */
+	temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
+	return temp;
+}
+
+/*
+ * pata_s3c_tf_load - send taskfile registers to host controller
+ */
+static void pata_s3c_tf_load(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
+		ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
+		ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
+		ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
+		ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
+	}
+
+	if (is_addr) {
+		ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
+		ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
+		ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
+		ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
+		ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		ata_outb(ap->host, tf->device, ioaddr->device_addr);
+
+	ata_wait_idle(ap);
+}
+
+/*
+ * pata_s3c_tf_read - input device's ATA taskfile shadow registers
+ */
+static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->feature = ata_inb(ap->host, ioaddr->error_addr);
+	tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+	tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+	tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+	tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+	tf->device = ata_inb(ap->host, ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
+		tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+		tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+		tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
+		tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
+		ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/*
+ * pata_s3c_exec_command - issue ATA command to host controller
+ */
+static void pata_s3c_exec_command(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
+	ata_sff_pause(ap);
+}
+
+/*
+ * pata_s3c_check_status - Read device status register
+ */
+static u8 pata_s3c_check_status(struct ata_port *ap)
+{
+	return ata_inb(ap->host, ap->ioaddr.status_addr);
+}
+
+/*
+ * pata_s3c_check_altstatus - Read alternate device status register
+ */
+static u8 pata_s3c_check_altstatus(struct ata_port *ap)
+{
+	return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
+}
+
+/*
+ * pata_s3c_data_xfer - Transfer data by PIO
+ */
+static unsigned int pata_s3c_data_xfer(struct ata_queued_cmd *qc,
+				unsigned char *buf, unsigned int buflen, int rw)
+{
+	struct ata_port *ap = qc->dev->link->ap;
+	struct s3c_ide_info *info = ap->host->private_data;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1, i;
+	u16 *data_ptr = (u16 *)buf;
+
+	/* Requires the same wait as in ata_inb/ata_outb */
+	if (rw == READ)
+		for (i = 0; i < words; i++, data_ptr++) {
+			wait_for_host_ready(info);
+			(void) readw(data_addr);
+			wait_for_host_ready(info);
+			*data_ptr = readw(info->ide_addr
+					+ S3C_ATA_PIO_RDATA);
+		}
+	else
+		for (i = 0; i < words; i++, data_ptr++) {
+			wait_for_host_ready(info);
+			writew(*data_ptr, data_addr);
+		}
+
+	if (buflen & 0x01)
+		dev_err(ap->dev, "unexpected trailing data\n");
+
+	return words << 1;
+}
+
+/*
+ * pata_s3c_dev_select - Select device on ATA bus
+ */
+static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
+{
+	u8 tmp = ATA_DEVICE_OBS;
+
+	if (device != 0)
+		tmp |= ATA_DEV1;
+
+	ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
+	ata_sff_pause(ap);
+}
+
+/*
+ * pata_s3c_devchk - PATA device presence detection
+ */
+static unsigned int pata_s3c_devchk(struct ata_port *ap,
+				unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	pata_s3c_dev_select(ap, device);
+
+	ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
+	ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
+
+	ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
+	ata_outb(ap->host, 0x55, ioaddr->lbal_addr);
+
+	ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
+	ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
+
+	nsect = ata_inb(ap->host, ioaddr->nsect_addr);
+	lbal = ata_inb(ap->host, ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/*
+ * pata_s3c_wait_after_reset - wait for devices to become ready after reset
+ */
+static int pata_s3c_wait_after_reset(struct ata_link *link,
+		unsigned long deadline)
+{
+	int rc;
+
+	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
+
+	/* always check readiness of the master device */
+	rc = ata_sff_wait_ready(link, deadline);
+	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
+	 * and TF status is 0xff, bail out on it too.
+	 */
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * pata_s3c_bus_softreset - PATA device software reset
+ */
+static int pata_s3c_bus_softreset(struct ata_port *ap,
+		unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	/* software reset.  causes dev0 to be selected */
+	ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
+	udelay(20);
+	ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+	udelay(20);
+	ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
+	ap->last_ctl = ap->ctl;
+
+	return pata_s3c_wait_after_reset(&ap->link, deadline);
+}
+
+/*
+ * pata_s3c_softreset - reset host port via ATA SRST
+ */
+static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
+			 unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned int devmask = 0;
+	int rc;
+	u8 err;
+
+	/* determine if device 0 is present */
+	if (pata_s3c_devchk(ap, 0))
+		devmask |= (1 << 0);
+
+	/* select device 0 again */
+	pata_s3c_dev_select(ap, 0);
+
+	/* issue bus reset */
+	rc = pata_s3c_bus_softreset(ap, deadline);
+	/* if link is occupied, -ENODEV too is an error */
+	if (rc && rc != -ENODEV) {
+		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&ap->link.device[0],
+					  devmask & (1 << 0), &err);
+
+	return 0;
+}
+
+/*
+ * pata_s3c_set_devctl - Write device control register
+ */
+static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
+}
+
+static struct scsi_host_template pata_s3c_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_s3c_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.sff_check_status	= pata_s3c_check_status,
+	.sff_check_altstatus    = pata_s3c_check_altstatus,
+	.sff_tf_load		= pata_s3c_tf_load,
+	.sff_tf_read		= pata_s3c_tf_read,
+	.sff_data_xfer		= pata_s3c_data_xfer,
+	.sff_exec_command	= pata_s3c_exec_command,
+	.sff_dev_select         = pata_s3c_dev_select,
+	.sff_set_devctl         = pata_s3c_set_devctl,
+	.softreset		= pata_s3c_softreset,
+	.set_piomode		= pata_s3c_set_piomode,
+};
+
+static struct ata_port_operations pata_s5p_port_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.set_piomode		= pata_s3c_set_piomode,
+};
+
+static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state)
+{
+	u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
+	temp = state ? (temp | 1) : (temp & ~1);
+	writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
+}
+
+static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct s3c_ide_info *info = host->private_data;
+	u32 reg;
+
+	reg = readl(info->ide_addr + S3C_ATA_IRQ);
+	writel(reg, info->ide_addr + S3C_ATA_IRQ);
+
+	return ata_sff_interrupt(irq, dev_instance);
+}
+
+static void pata_s3c_hwinit(struct s3c_ide_info *info,
+				struct s3c_ide_platdata *pdata)
+{
+	switch (info->cpu_type) {
+	case TYPE_S3C64XX:
+		/* Configure as big endian */
+		pata_s3c_cfg_mode(info->sfr_addr);
+		pata_s3c_set_endian(info->ide_addr, 1);
+		pata_s3c_enable(info->ide_addr, true);
+		msleep(100);
+
+		/* Remove IRQ Status */
+		writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
+		writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
+		break;
+
+	case TYPE_S5PV210:
+		/* Configure as little endian */
+		pata_s3c_set_endian(info->ide_addr, 0);
+		pata_s3c_enable(info->ide_addr, true);
+		msleep(100);
+
+		/* Remove IRQ Status */
+		writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
+		writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+static int __init pata_s3c_probe(struct platform_device *pdev)
+{
+	struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
+	struct device *dev = &pdev->dev;
+	struct s3c_ide_info *info;
+	struct resource *res;
+	struct ata_port *ap;
+	struct ata_host *host;
+	enum s3c_cpu_type cpu_type;
+	int ret;
+
+	cpu_type = platform_get_device_id(pdev)->driver_data;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->irq = platform_get_irq(pdev, 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	info->ide_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(info->ide_addr))
+		return PTR_ERR(info->ide_addr);
+
+	info->clk = devm_clk_get(&pdev->dev, "cfcon");
+	if (IS_ERR(info->clk)) {
+		dev_err(dev, "failed to get access to cf controller clock\n");
+		ret = PTR_ERR(info->clk);
+		info->clk = NULL;
+		return ret;
+	}
+
+	clk_enable(info->clk);
+
+	/* init ata host */
+	host = ata_host_alloc(dev, 1);
+	if (!host) {
+		dev_err(dev, "failed to allocate ide host\n");
+		ret = -ENOMEM;
+		goto stop_clk;
+	}
+
+	ap = host->ports[0];
+	ap->pio_mask = ATA_PIO4;
+
+	if (cpu_type == TYPE_S3C64XX) {
+		ap->ops = &pata_s3c_port_ops;
+		info->sfr_addr = info->ide_addr + 0x1800;
+		info->ide_addr += 0x1900;
+		info->fifo_status_reg = 0x94;
+	} else {
+		ap->ops = &pata_s5p_port_ops;
+		info->fifo_status_reg = 0x84;
+	}
+
+	info->cpu_type = cpu_type;
+
+	if (info->irq <= 0) {
+		ap->flags |= ATA_FLAG_PIO_POLLING;
+		info->irq = 0;
+		ata_port_desc(ap, "no IRQ, using PIO polling");
+	}
+
+	ap->ioaddr.cmd_addr =  info->ide_addr + S3C_ATA_CMD;
+	ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
+	ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
+	ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
+	ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
+	ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
+	ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
+	ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
+	ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
+	ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
+	ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
+	ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
+	ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
+
+	ata_port_desc(ap, "mmio cmd 0x%llx ",
+			(unsigned long long)res->start);
+
+	host->private_data = info;
+
+	if (pdata && pdata->setup_gpio)
+		pdata->setup_gpio();
+
+	/* Set endianness and enable the interface */
+	pata_s3c_hwinit(info, pdata);
+
+	ret = ata_host_activate(host, info->irq,
+				info->irq ? pata_s3c_irq : NULL,
+				0, &pata_s3c_sht);
+	if (ret)
+		goto stop_clk;
+
+	return 0;
+
+stop_clk:
+	clk_disable(info->clk);
+	return ret;
+}
+
+static int __exit pata_s3c_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct s3c_ide_info *info = host->private_data;
+
+	ata_host_detach(host);
+
+	clk_disable(info->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pata_s3c_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ata_host *host = platform_get_drvdata(pdev);
+
+	return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int pata_s3c_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
+	struct s3c_ide_info *info = host->private_data;
+
+	pata_s3c_hwinit(info, pdata);
+	ata_host_resume(host);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pata_s3c_pm_ops = {
+	.suspend	= pata_s3c_suspend,
+	.resume		= pata_s3c_resume,
+};
+#endif
+
+/* driver device registration */
+static const struct platform_device_id pata_s3c_driver_ids[] = {
+	{
+		.name		= "s3c64xx-pata",
+		.driver_data	= TYPE_S3C64XX,
+	}, {
+		.name		= "s5pv210-pata",
+		.driver_data	= TYPE_S5PV210,
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
+
+static struct platform_driver pata_s3c_driver = {
+	.remove		= __exit_p(pata_s3c_remove),
+	.id_table	= pata_s3c_driver_ids,
+	.driver		= {
+		.name	= DRV_NAME,
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &pata_s3c_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
+
+MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
+MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
new file mode 100644
index 0000000..c71de5d
--- /dev/null
+++ b/drivers/ata/pata_sc1200.c
@@ -0,0 +1,269 @@
+/*
+ * New ATA layer SC1200 driver		Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * TODO: Mode selection filtering
+ * TODO: Needs custom DMA cleanup code
+ *
+ * Based very heavily on
+ *
+ * linux/drivers/ide/pci/sc1200.c		Version 0.91	28-Jan-2003
+ *
+ * Copyright (C) 2000-2002		Mark Lord <mlord@pobox.com>
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Development of this chipset driver was funded
+ * by the nice folks at National Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_sc1200"
+#define DRV_VERSION	"0.2.6"
+
+#define SC1200_REV_A	0x00
+#define SC1200_REV_B1	0x01
+#define SC1200_REV_B3	0x02
+#define SC1200_REV_C1	0x03
+#define SC1200_REV_D1	0x04
+
+/**
+ *	sc1200_clock	-	PCI clock
+ *
+ *	Return the PCI bus clocking for the SC1200 chipset configuration
+ *	in use. We return 0 for 33 MHz, 1 for 48 MHz and 2 for 66 MHz.
+ */
+
+static int sc1200_clock(void)
+{
+	/* Magic registers that give us the chipset data */
+	u8 chip_id = inb(0x903C);
+	u8 silicon_rev = inb(0x903D);
+	u16 pci_clock;
+
+	if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
+		return 0;	/* 33 MHz mode */
+
+	/* Clock generator configuration 0x901E: bits 8/9 give the PCI
+	   clocking, where 0/3 is 33 MHz, 1 is 48 MHz and 2 is 66 MHz */
+
+	pci_clock = inw(0x901E);
+	pci_clock >>= 8;
+	pci_clock &= 0x03;
+	if (pci_clock == 3)
+		pci_clock = 0;
+	return pci_clock;
+}
+
+/**
+ *	sc1200_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the SC1200
+ */
+
+static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 pio_timings[4][5] = {
+		/* format0, 33 MHz */
+		{ 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
+		/* format1, 33 MHz */
+		{ 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
+		/* format1, 48 MHz */
+		{ 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
+		/* format1, 66 MHz */
+		{ 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 format;
+	unsigned int reg = 0x40 + 0x10 * ap->port_no;
+	int mode = adev->pio_mode - XFER_PIO_0;
+
+	pci_read_config_dword(pdev, reg + 4, &format);
+	format >>= 31;
+	format += sc1200_clock();
+	pci_write_config_dword(pdev, reg + 8 * adev->devno,
+				pio_timings[format][mode]);
+}
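+
+/*
+ * Example (illustrative): with the format1 bit set and a 48 MHz PCI
+ * clock, format = 1 + sc1200_clock() = 2, selecting the "format1,
+ * 48 MHz" row above; PIO3 on the slave then writes pio_timings[2][3]
+ * to reg + 8.
+ */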
+
+/**
+ *	sc1200_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	We cannot mix MWDMA and UDMA without reloading the timings on
+ *	each switch between the master and slave devices.
+ */
+
+static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 udma_timing[3][3] = {
+		{ 0x00921250, 0x00911140, 0x00911030 },
+		{ 0x00932470, 0x00922260, 0x00922140 },
+		{ 0x009436A1, 0x00933481, 0x00923261 }
+	};
+
+	static const u32 mwdma_timing[3][3] = {
+		{ 0x00077771, 0x00012121, 0x00002020 },
+		{ 0x000BBBB2, 0x00024241, 0x00013131 },
+		{ 0x000FFFF3, 0x00035352, 0x00015151 }
+	};
+
+	int clock = sc1200_clock();
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int reg = 0x40 + 0x10 * ap->port_no;
+	int mode = adev->dma_mode;
+	u32 format;
+
+	if (mode >= XFER_UDMA_0)
+		format = udma_timing[clock][mode - XFER_UDMA_0];
+	else
+		format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
+
+	if (adev->devno == 0) {
+		u32 timings;
+
+		pci_read_config_dword(pdev, reg + 4, &timings);
+		timings &= 0x80000000UL;
+		timings |= format;
+		pci_write_config_dword(pdev, reg + 4, timings);
+	} else
+		pci_write_config_dword(pdev, reg + 12, format);
+}
+
+/**
+ *	sc1200_qc_issue		-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.  Specifically we have a problem that there is only
+ *	one MWDMA/UDMA bit.
+ */
+
+static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_device *prev = ap->private_data;
+
+	/* See if the DMA settings could be wrong */
+	if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
+		/* Maybe, but do the channels match MWDMA/UDMA ? */
+		if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+		    (ata_using_udma(prev) && !ata_using_udma(adev)))
+			/* Switch the mode bits */
+			sc1200_set_dmamode(ap, adev);
+	}
+
+	return ata_bmdma_qc_issue(qc);
+}
+
+/**
+ *	sc1200_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	Serialize command issue on this controller.
+ */
+
+static int sc1200_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
+static struct scsi_host_template sc1200_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations sc1200_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.qc_prep 	= ata_bmdma_dumb_qc_prep,
+	.qc_issue	= sc1200_qc_issue,
+	.qc_defer	= sc1200_qc_defer,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= sc1200_set_piomode,
+	.set_dmamode	= sc1200_set_dmamode,
+};
+
+/**
+ *	sc1200_init_one		-	Initialise an SC1200
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Just throw the needed data at the libata helper and it does all
+ *	our work.
+ */
+
+static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &sc1200_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
+}
+
+static const struct pci_device_id sc1200[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
+
+	{ },
+};
+
+static struct pci_driver sc1200_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sc1200,
+	.probe 		= sc1200_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(sc1200_pci_driver);
+
+MODULE_AUTHOR("Alan Cox, Mark Lord");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sc1200);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 0000000..1b80a66
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,180 @@
+/*
+ *  pata_sch.c - Intel SCH PATA controllers
+ *
+ *  Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ *  Supports:
+ *    Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ *    http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_sch"
+#define DRV_VERSION	"0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+	D0TIM	= 0x80,		/* Device 0 Timing Register */
+	D1TIM	= 0x84,		/* Device 1 Timing Register */
+	PM	= 0x07,		/* PIO Mode Bit Mask */
+	MDM	= (0x03 << 8),	/* Multi-word DMA Mode Bit Mask */
+	UDM	= (0x07 << 16), /* Ultra DMA Mode Bit Mask */
+	PPE	= (1 << 30),	/* Prefetch/Post Enable */
+	USD	= (1 << 31),	/* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+			 const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+	/* Intel SCH PATA Controller */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sch_pci_tbl,
+	.probe			= sch_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_unknown,
+	.set_piomode		= sch_set_piomode,
+	.set_dmamode		= sch_set_dmamode,
+};
+
+static const struct ata_port_info sch_port_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	sch_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	/* set PIO mode */
+	data &= ~(PM | PPE);
+	data |= pio;
+	/* enable PPE for block device */
+	if (adev->class == ATA_DEV_ATA)
+		data |= PPE;
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode	= adev->dma_mode;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	if (dma_mode >= XFER_UDMA_0) {
+		/* enable Synchronous DMA mode */
+		data |= USD;
+		data &= ~UDM;
+		data |= (dma_mode - XFER_UDMA_0) << 16;
+	} else { /* must be MWDMA mode, since we masked SWDMA already */
+		data &= ~(USD | MDM);
+		data |= (dma_mode - XFER_MW_DMA_0) << 8;
+	}
+	pci_write_config_dword(dev, port, data);
+}
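+
+/*
+ * Example (illustrative): selecting UDMA5 on device 0 yields
+ * D0TIM = (old & ~UDM) | USD | (5 << 16), while MWDMA2 instead clears
+ * USD and loads (2 << 8) into the MDM field.
+ */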
+
+/**
+ *	sch_init_one - Register SCH ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
+}
+
+module_pci_driver(sch_pci_driver);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
new file mode 100644
index 0000000..57de021
--- /dev/null
+++ b/drivers/ata/pata_serverworks.c
@@ -0,0 +1,492 @@
+/*
+ * pata_serverworks.c 	- Serverworks PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  (C) 2010 Bartlomiej Zolnierkiewicz
+ *
+ * based upon
+ *
+ * serverworks.c
+ *
+ * Copyright (C) 1998-2000 Michel Aubry
+ * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Portions copyright (c) 2001 Sun Microsystems
+ *
+ *
+ * RCC/ServerWorks IDE driver for Linux
+ *
+ *   OSB4: `Open South Bridge' IDE Interface (fn 1)
+ *         supports UDMA mode 2 (33 MB/s)
+ *
+ *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
+ *         all revisions support UDMA mode 4 (66 MB/s)
+ *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
+ *
+ *         *** The CSB5 does not provide ANY register ***
+ *         *** to detect 80-conductor cable presence. ***
+ *
+ *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
+ *
+ * Documentation:
+ *	Available under NDA only. Errata info very hard to get.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_serverworks"
+#define DRV_VERSION "0.4.3"
+
+#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
+#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
+
+/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5 */
+
+static const char *csb_bad_ata100[] = {
+	"ST320011A",
+	"ST340016A",
+	"ST360021A",
+	"ST380021A",
+	NULL
+};
+
+/**
+ *	oem_cable	-	Dell/Sun serverworks cable detection
+ *	@ap: ATA port to do cable detect
+ *
+ *	Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select
+ *	for their interfaces in the top two bits of the subsystem ID.
+ */
+
+static int oem_cable(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
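+
+/*
+ * Editorial note on the arithmetic above: port 0 tests subsystem-ID bit
+ * 14 (1 << (0 + 14)) and port 1 tests bit 15, which are exactly the "top
+ * two bits" mentioned in the kernel-doc.
+ */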
+
+struct sv_cable_table {
+	int device;
+	int subvendor;
+	int (*cable_detect)(struct ata_port *ap);
+};
+
+static struct sv_cable_table cable_detect[] = {
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,   PCI_VENDOR_ID_DELL, oem_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,   PCI_VENDOR_ID_DELL, oem_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,   PCI_VENDOR_ID_SUN,  oem_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_OSB4IDE,   PCI_ANY_ID, ata_cable_40wire  },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,   PCI_ANY_ID, ata_cable_unknown },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,   PCI_ANY_ID, ata_cable_unknown },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2,  PCI_ANY_ID, ata_cable_unknown },
+	{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown },
+	{ }
+};
+
+/**
+ *	serverworks_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection according to the device and subvendor
+ *	identifications
+ */
+
+static int serverworks_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct sv_cable_table *cb = cable_detect;
+
+	while (cb->device) {
+		if (cb->device == pdev->device &&
+		    (cb->subvendor == pdev->subsystem_vendor ||
+		      cb->subvendor == PCI_ANY_ID)) {
+			return cb->cable_detect(ap);
+		}
+		cb++;
+	}
+
+	BUG();
+	return -1;	/* kill compiler warning */
+}
+
+/**
+ *	serverworks_is_csb	-	Check for CSB or OSB
+ *	@pdev: PCI device to check
+ *
+ *	Returns true if the device being checked is known to be a CSB
+ *	series device.
+ */
+
+static u8 serverworks_is_csb(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+			return 1;
+		default:
+			break;
+	}
+	return 0;
+}
+
+/**
+ *	serverworks_osb4_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: Mask of proposed modes
+ *
+ *	Filter the offered modes for the device to apply controller
+ *	specific rules. OSB4 requires no UDMA for disks due to a FIFO
+ *	bug we hit.
+ */
+
+static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA)
+		mask &= ~ATA_MASK_UDMA;
+	return mask;
+}
+
+
+/**
+ *	serverworks_csb_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: Mask of proposed modes
+ *
+ *	Check the blacklist and disable UDMA5 if matched
+ */
+
+static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask)
+{
+	const char *p;
+	char model_num[ATA_ID_PROD_LEN + 1];
+	int i;
+
+	/* Disk, UDMA */
+	if (adev->class != ATA_DEV_ATA)
+		return mask;
+
+	/* Actually do need to check */
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
+		if (!strcmp(p, model_num))
+			mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+	}
+	return mask;
+}
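+
+/*
+ * Editorial note: 0xE0 << ATA_SHIFT_UDMA clears UDMA5, UDMA6 and UDMA7
+ * from the offered mask, so a blacklisted Barracuda is capped at UDMA4
+ * on the CSB5.
+ */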
+
+/**
+ *	serverworks_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the OSB4/CSB5 timing registers for PIO. The PIO register
+ *	load is done as a simple lookup.
+ */
+static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+	int offset = 1 + 2 * ap->port_no - adev->devno;
+	int devbits = (2 * ap->port_no + adev->devno) * 4;
+	u16 csb5_pio;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+
+	pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
+
+	/* The OSB4 just requires the timing but the CSB series want the
+	   mode number as well */
+	if (serverworks_is_csb(pdev)) {
+		pci_read_config_word(pdev, 0x4A, &csb5_pio);
+		csb5_pio &= ~(0x0F << devbits);
+		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
+	}
+}
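+
+/*
+ * Editorial worked example: for port 1, device 0 the timing byte above
+ * lands at 0x40 + (1 + 2*1 - 0) = 0x43 and devbits = (2*1 + 0) * 4 = 8,
+ * so the CSB mode nibble occupies bits 11:8 of the word at 0x4A.
+ */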
+
+/**
+ *	serverworks_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
+ *	chipset. The MWDMA mode values are pulled from a lookup table
+ *	while the chipset uses mode number for UDMA.
+ */
+
+static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
+	int offset = 1 + 2 * ap->port_no - adev->devno;
+	int devbits = 2 * ap->port_no + adev->devno;
+	u8 ultra;
+	u8 ultra_cfg;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, 0x54, &ultra_cfg);
+	pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
+	ultra &= ~(0x0F << (adev->devno * 4));
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		pci_write_config_byte(pdev, 0x44 + offset,  0x20);
+
+		ultra |= (adev->dma_mode - XFER_UDMA_0)
+					<< (adev->devno * 4);
+		ultra_cfg |=  (1 << devbits);
+	} else {
+		pci_write_config_byte(pdev, 0x44 + offset,
+			dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
+		ultra_cfg &= ~(1 << devbits);
+	}
+	pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
+	pci_write_config_byte(pdev, 0x54, ultra_cfg);
+}
+
+static struct scsi_host_template serverworks_osb4_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
+};
+
+static struct scsi_host_template serverworks_csb_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations serverworks_osb4_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.qc_prep	= ata_bmdma_dumb_qc_prep,
+	.cable_detect	= serverworks_cable_detect,
+	.mode_filter	= serverworks_osb4_filter,
+	.set_piomode	= serverworks_set_piomode,
+	.set_dmamode	= serverworks_set_dmamode,
+};
+
+static struct ata_port_operations serverworks_csb_port_ops = {
+	.inherits	= &serverworks_osb4_port_ops,
+	.qc_prep	= ata_bmdma_qc_prep,
+	.mode_filter	= serverworks_csb_filter,
+};
+
+static int serverworks_fixup_osb4(struct pci_dev *pdev)
+{
+	u32 reg;
+	struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+		  PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+	if (isa_dev) {
+		pci_read_config_dword(isa_dev, 0x64, &reg);
+		reg &= ~0x00002000; /* disable 600ns interrupt mask */
+		if (!(reg & 0x00004000))
+			printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
+		reg |=  0x00004000; /* enable UDMA/33 support */
+		pci_write_config_dword(isa_dev, 0x64, reg);
+		pci_dev_put(isa_dev);
+		return 0;
+	}
+	printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
+	return -ENODEV;
+}
+
+static int serverworks_fixup_csb(struct pci_dev *pdev)
+{
+	u8 btr;
+
+	/* Third Channel Test */
+	if (!(PCI_FUNC(pdev->devfn) & 1)) {
+		struct pci_dev * findev = NULL;
+		u32 reg4c = 0;
+		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+			PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
+		if (findev) {
+			pci_read_config_dword(findev, 0x4C, &reg4c);
+			reg4c &= ~0x000007FF;
+			reg4c |=  0x00000040;
+			reg4c |=  0x00000020;
+			pci_write_config_dword(findev, 0x4C, reg4c);
+			pci_dev_put(findev);
+		}
+	} else {
+		struct pci_dev * findev = NULL;
+		u8 reg41 = 0;
+
+		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+				PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
+		if (findev) {
+			pci_read_config_byte(findev, 0x41, &reg41);
+			reg41 &= ~0x40;
+			pci_write_config_byte(findev, 0x41, reg41);
+			pci_dev_put(findev);
+		}
+	}
+	/* setup the UDMA Control register
+	 *
+	 * 1. clear bit 6 to enable DMA
+	 * 2. enable DMA modes with bits 0-1
+	 * 	00 : legacy
+	 * 	01 : udma2
+	 * 	10 : udma2/udma4
+	 * 	11 : udma2/udma4/udma5
+	 */
+	pci_read_config_byte(pdev, 0x5A, &btr);
+	btr &= ~0x40;
+	if (!(PCI_FUNC(pdev->devfn) & 1))
+		btr |= 0x2;
+	else
+		btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+	pci_write_config_byte(pdev, 0x5A, btr);
+
+	return btr;
+}
+
+static void serverworks_fixup_ht1000(struct pci_dev *pdev)
+{
+	u8 btr;
+	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
+	pci_read_config_byte(pdev, 0x5A, &btr);
+	btr &= ~0x40;
+	btr |= 0x3;
+	pci_write_config_byte(pdev, 0x5A, btr);
+}
+
+static int serverworks_fixup(struct pci_dev *pdev)
+{
+	int rc = 0;
+
+	/* Force master latency timer to 64 PCI clocks */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+		rc = serverworks_fixup_osb4(pdev);
+		break;
+	case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+		ata_pci_bmdma_clear_simplex(pdev);
+		/* fall through */
+	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+	case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+		rc = serverworks_fixup_csb(pdev);
+		break;
+	case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+		serverworks_fixup_ht1000(pdev);
+		break;
+	}
+
+	return rc;
+}
+
+static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[4] = {
+		{ /* OSB4 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &serverworks_osb4_port_ops
+		}, { /* OSB4 no UDMA */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			/* No UDMA */
+			.port_ops = &serverworks_osb4_port_ops
+		}, { /* CSB5 */
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &serverworks_csb_port_ops
+		}, { /* CSB5 - later revisions*/
+			.flags = ATA_FLAG_SLAVE_POSS,
+			.pio_mask = ATA_PIO4,
+			.mwdma_mask = ATA_MWDMA2,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &serverworks_csb_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+	struct scsi_host_template *sht = &serverworks_csb_sht;
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = serverworks_fixup(pdev);
+
+	/* OSB4 : South Bridge and IDE */
+	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
+		/* Select non UDMA capable OSB4 if we can't do fixups */
+		if (rc < 0)
+			ppi[0] = &info[1];
+		sht = &serverworks_osb4_sht;
+	}
+	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+	else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
+		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
+
+		/* If the returned btr is the newer revision then
+		   select the right info block */
+		if (rc == 3)
+			ppi[0] = &info[3];
+
+		/* Is this the 3rd channel CSB6 IDE ? */
+		if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
+			ppi[1] = &ata_dummy_port_info;
+	}
+
+	return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int serverworks_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	(void)serverworks_fixup(pdev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id serverworks[] = {
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
+
+	{ },
+};
+
+static struct pci_driver serverworks_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= serverworks,
+	.probe 		= serverworks_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= serverworks_reinit_one,
+#endif
+};
+
+module_pci_driver(serverworks_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, serverworks);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
new file mode 100644
index 0000000..c14071b
--- /dev/null
+++ b/drivers/ata/pata_sil680.c
@@ -0,0 +1,444 @@
+/*
+ * pata_sil680.c 	- SIL680 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *
+ * based upon
+ *
+ * linux/drivers/ide/pci/siimage.c		Version 1.07	Nov 30, 2003
+ *
+ * Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003		Red Hat <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  Documentation publicly available.
+ *
+ *	If you have strange problems with nVidia chipset systems please
+ *	see the SI support documentation and update your system BIOS
+ *	if necessary
+ *
+ * TODO
+ *	If we know all our devices are LBA28 (or LBA28 sized)  we could use
+ *	the command fifo mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sil680"
+#define DRV_VERSION "0.4.9"
+
+#define SIL680_MMIO_BAR		5
+
+/**
+ *	sil680_selreg		-	return register base
+ *	@ap: ATA interface
+ *	@r: config offset
+ *
+ *	Turn a config register offset into the right address in PCI space
+ *	to access the control register in question.
+ *
+ *	Thankfully this is a configuration operation so isn't performance
+ *	critical.
+ */
+
+static unsigned long sil680_selreg(struct ata_port *ap, int r)
+{
+	unsigned long base = 0xA0 + r;
+	base += (ap->port_no << 4);
+	return base;
+}
+
+/**
+ *	sil680_seldev		-	return register base
+ *	@ap: ATA interface
+ *	@r: config offset
+ *
+ *	Turn a config register offset into the right address in PCI space
+ *	to access the control register in question including accounting for
+ *	the unit shift.
+ */
+
+static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
+{
+	unsigned long base = 0xA0 + r;
+	base += (ap->port_no << 4);
+	base |= adev->devno ? 2 : 0;
+	return base;
+}
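+
+/*
+ * Editorial worked example: sil680_seldev(ap, adev, 0x04) for port 1,
+ * device 1 computes 0xA0 + 0x04 + (1 << 4) = 0xB4, then ORs in 2 for the
+ * slave, giving 0xB6.
+ */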
+
+
+/**
+ *	sil680_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection. The SIL680 stores this in PCI config
+ *	space for us.
+ */
+
+static int sil680_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long addr = sil680_selreg(ap, 0);
+	u8 ata66;
+	pci_read_config_byte(pdev, addr, &ata66);
+	if (ata66 & 1)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	sil680_set_piomode	-	set PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the SIL680 registers for PIO mode. Note that the task speed
+ *	registers are shared between the devices so we must pick the lowest
+ *	mode for command work.
+ */
+
+static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 speed_p[5] = {
+		0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1
+	};
+	static const u16 speed_t[5] = {
+		0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
+	};
+
+	unsigned long tfaddr = sil680_selreg(ap, 0x02);
+	unsigned long addr = sil680_seldev(ap, adev, 0x04);
+	unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+	int lowest_pio = pio;
+	int port_shift = 4 * adev->devno;
+	u16 reg;
+	u8 mode;
+
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (pair != NULL && adev->pio_mode > pair->pio_mode)
+		lowest_pio = pair->pio_mode - XFER_PIO_0;
+
+	pci_write_config_word(pdev, addr, speed_p[pio]);
+	pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
+
+	pci_read_config_word(pdev, tfaddr-2, &reg);
+	pci_read_config_byte(pdev, addr_mask, &mode);
+
+	reg &= ~0x0200;			/* Clear IORDY */
+	mode &= ~(3 << port_shift);	/* Clear IORDY and DMA bits */
+
+	if (ata_pio_need_iordy(adev)) {
+		reg |= 0x0200;		/* Enable IORDY */
+		mode |= 1 << port_shift;
+	}
+	pci_write_config_word(pdev, tfaddr-2, reg);
+	pci_write_config_byte(pdev, addr_mask, mode);
+}
+
+/**
+ *	sil680_set_dmamode	-	set DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the sil680 chipset.
+ *
+ *	The MWDMA mode values are pulled from a lookup table
+ *	while the chipset uses mode number for UDMA.
+ */
+
+static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 ultra_table[2][7] = {
+		{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF },	/* 100MHz */
+		{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 },	/* 133MHz */
+	};
+	static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long ma = sil680_seldev(ap, adev, 0x08);
+	unsigned long ua = sil680_seldev(ap, adev, 0x0C);
+	unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+	int port_shift = adev->devno * 4;
+	u8 scsc, mode;
+	u16 multi, ultra;
+
+	pci_read_config_byte(pdev, 0x8A, &scsc);
+	pci_read_config_byte(pdev, addr_mask, &mode);
+	pci_read_config_word(pdev, ma, &multi);
+	pci_read_config_word(pdev, ua, &ultra);
+
+	/* Mask timing bits */
+	ultra &= ~0x3F;
+	mode &= ~(0x03 << port_shift);
+
+	/* Extract scsc */
+	scsc = (scsc & 0x30) ? 1 : 0;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		multi = 0x10C1;
+		ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
+		mode |= (0x03 << port_shift);
+	} else {
+		multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
+		mode |= (0x02 << port_shift);
+	}
+	pci_write_config_byte(pdev, addr_mask, mode);
+	pci_write_config_word(pdev, ma, multi);
+	pci_write_config_word(pdev, ua, ultra);
+}
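+
+/*
+ * Editorial note: the scsc test above folds the clock field of config
+ * register 0x8A down to a row index; any non-zero value in bits 5:4
+ * selects the 133MHz row of ultra_table, zero selects the 100MHz row.
+ */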
+
+/**
+ *	sil680_sff_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues ATA command, with proper synchronization with interrupt
+ *	handler / other threads. Use our MMIO space for PCI posting to avoid
+ *	a hideously slow cycle all the way to the device.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static void sil680_sff_exec_command(struct ata_port *ap,
+				    const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+static bool sil680_sff_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	unsigned long addr	= sil680_selreg(ap, 1);
+	u8 val;
+
+	pci_read_config_byte(pdev, addr, &val);
+
+	return val & 0x08;
+}
+
+static struct scsi_host_template sil680_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+static struct ata_port_operations sil680_port_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.sff_exec_command	= sil680_sff_exec_command,
+	.sff_irq_check		= sil680_sff_irq_check,
+	.cable_detect		= sil680_cable_detect,
+	.set_piomode		= sil680_set_piomode,
+	.set_dmamode		= sil680_set_dmamode,
+};
+
+/**
+ *	sil680_init_chip		-	chip setup
+ *	@pdev: PCI device
+ *
+ *	Perform all the chip setup which must be done both when the device
+ *	is powered up on boot and when we resume in case we resumed from RAM.
+ *	Returns the final clock settings.
+ */
+
+static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
+{
+	u8 tmpbyte	= 0;
+
+	/* FIXME: double check */
+	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+			      pdev->revision ? 1 : 255);
+
+	pci_write_config_byte(pdev, 0x80, 0x00);
+	pci_write_config_byte(pdev, 0x84, 0x00);
+
+	pci_read_config_byte(pdev, 0x8A, &tmpbyte);
+
+	dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+		tmpbyte & 1, tmpbyte & 0x30);
+
+	*try_mmio = 0;
+#ifdef CONFIG_PPC
+	if (machine_is(cell))
+		*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
+#endif
+
+	switch (tmpbyte & 0x30) {
+	case 0x00:
+		/* 133 clock attempt to force it on */
+		pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
+		break;
+	case 0x30:
+		/* if clocking is disabled */
+		/* 133 clock attempt to force it on */
+		pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
+		break;
+	case 0x10:
+		/* 133 already */
+		break;
+	case 0x20:
+		/* BIOS set PCI x2 clocking */
+		break;
+	}
+
+	pci_read_config_byte(pdev,   0x8A, &tmpbyte);
+	dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+		tmpbyte & 1, tmpbyte & 0x30);
+
+	pci_write_config_byte(pdev,  0xA1, 0x72);
+	pci_write_config_word(pdev,  0xA2, 0x328A);
+	pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
+	pci_write_config_dword(pdev, 0xA8, 0x43924392);
+	pci_write_config_dword(pdev, 0xAC, 0x40094009);
+	pci_write_config_byte(pdev,  0xB1, 0x72);
+	pci_write_config_word(pdev,  0xB2, 0x328A);
+	pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
+	pci_write_config_dword(pdev, 0xB8, 0x43924392);
+	pci_write_config_dword(pdev, 0xBC, 0x40094009);
+
+	switch (tmpbyte & 0x30) {
+	case 0x00:
+		printk(KERN_INFO "sil680: 100MHz clock.\n");
+		break;
+	case 0x10:
+		printk(KERN_INFO "sil680: 133MHz clock.\n");
+		break;
+	case 0x20:
+		printk(KERN_INFO "sil680: Using PCI clock.\n");
+		break;
+	/* This last case is _NOT_ ok */
+	case 0x30:
+		printk(KERN_ERR "sil680: Clock disabled ?\n");
+	}
+	return tmpbyte & 0x30;
+}
+
+static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &sil680_port_ops
+	};
+	static const struct ata_port_info info_slow = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &sil680_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int rc, try_mmio;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	switch (sil680_init_chip(pdev, &try_mmio)) {
+		case 0:
+			ppi[0] = &info_slow;
+			break;
+		case 0x30:
+			return -ENODEV;
+	}
+
+	if (!try_mmio)
+		goto use_ioports;
+
+	/* Try to acquire MMIO resources and fallback to PIO if
+	 * that fails
+	 */
+	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
+	if (rc)
+		goto use_ioports;
+
+	/* Allocate host and set it up */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+	host->iomap = pcim_iomap_table(pdev);
+
+	/* Setup DMA masks */
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	pci_set_master(pdev);
+
+	/* Get MMIO base and initialize port addresses */
+	mmio_base = host->iomap[SIL680_MMIO_BAR];
+	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
+	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
+	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
+	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
+	ata_sff_std_ports(&host->ports[0]->ioaddr);
+	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
+	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
+	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
+	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
+	ata_sff_std_ports(&host->ports[1]->ioaddr);
+
+	/* Register & activate */
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &sil680_sht);
+
+use_ioports:
+	return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sil680_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int try_mmio, rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+	sil680_init_chip(pdev, &try_mmio);
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id sil680[] = {
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
+
+	{ },
+};
+
+static struct pci_driver sil680_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sil680,
+	.probe 		= sil680_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= sil680_reinit_one,
+#endif
+};
+
+module_pci_driver(sil680_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for SiI680 PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil680);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
new file mode 100644
index 0000000..626f989
--- /dev/null
+++ b/drivers/ata/pata_sis.c
@@ -0,0 +1,914 @@
+/*
+ *    pata_sis.c - SiS ATA driver
+ *
+ *	(C) 2005 Red Hat
+ *	(C) 2007,2009 Bartlomiej Zolnierkiewicz
+ *
+ *    Based upon linux/drivers/ide/pci/sis5513.c
+ * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2002		Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
+ * Copyright (C) 2003		Vojtech Pavlik <vojtech@suse.cz>
+ * SiS Taiwan		: for direct support and hardware.
+ * Daniela Engert	: for initial ATA100 advices and numerous others.
+ * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt	:
+ *			  for checking code correctness, providing patches.
+ * Original tests and design on the SiS620 chipset.
+ * ATA100 tests and design on the SiS735 chipset.
+ * ATA16/33 support from specs
+ * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
+ *
+ *
+ *	TODO
+ *	Check MWDMA on drives that don't support MWDMA speed PIO cycles?
+ *	More Testing
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+#include "sis.h"
+
+#define DRV_NAME	"pata_sis"
+#define DRV_VERSION	"0.5.2"
+
+struct sis_chipset {
+	u16 device;				/* PCI host ID */
+	const struct ata_port_info *info;	/* Info block */
+	/* Probably add family, cable detect type etc here to clean
+	   up code later */
+};
+
+struct sis_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+static const struct sis_laptop sis_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
+	{ 0x5513, 0x1734, 0x105F },	/* FSC Amilo A1630 */
+	{ 0x5513, 0x1071, 0x8640 },	/* EasyNote K5305 */
+	/* end marker */
+	{ 0, }
+};
+
+static int sis_short_ata40(struct pci_dev *dev)
+{
+	const struct sis_laptop *lap = &sis_laptop[0];
+
+	while (lap->device) {
+		if (lap->device == dev->device &&
+		    lap->subvendor == dev->subsystem_vendor &&
+		    lap->subdevice == dev->subsystem_device)
+			return 1;
+		lap++;
+	}
+
+	return 0;
+}
+
+/**
+ *	sis_old_port_base - return PCI configuration base for dev
+ *	@adev: device
+ *
+ *	Returns the base of the PCI configuration registers for this port
+ *	number.
+ */
+
+static int sis_old_port_base(struct ata_device *adev)
+{
+	return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
+}
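+
+/*
+ * Editorial worked example: port 1, device 1 maps to 0x40 + 4 + 2 = 0x46;
+ * each legacy channel owns four bytes, two per device.
+ */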
+
+/**
+ *	sis_port_base - return PCI configuration base for dev
+ *	@adev: device
+ *
+ *	Returns the base of the PCI configuration registers for this port
+ *	number.
+ */
+
+static int sis_port_base(struct ata_device *adev)
+{
+	struct ata_port *ap = adev->link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x40;
+	u32 reg54;
+
+	/* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
+	pci_read_config_dword(pdev, 0x54, &reg54);
+	if (reg54 & 0x40000000)
+		port = 0x70;
+
+	return port + (8 * ap->port_no) + (4 * adev->devno);
+}
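+
+/*
+ * Editorial worked example: with bit 30 of register 0x54 set, the port 1,
+ * device 1 slot moves from 0x40 + 8 + 4 = 0x4C to 0x70 + 8 + 4 = 0x7C;
+ * here each channel owns eight bytes, four per device.
+ */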
+
+/**
+ *	sis_133_cable_detect - check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection for the later UDMA133 capable
+ *	SiS chipset.
+ */
+
+static int sis_133_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 tmp;
+
+	/* The top bit of this register is the cable detect bit */
+	pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
+	if ((tmp & 0x8000) && !sis_short_ata40(pdev))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	sis_66_cable_detect - check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection on the UDMA66, UDMA100 and early UDMA133
+ *	SiS IDE controllers.
+ */
+
+static int sis_66_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	/* Older chips keep cable detect in bits 4/5 of reg 0x48 */
+	pci_read_config_byte(pdev, 0x48, &tmp);
+	tmp >>= ap->port_no;
+	if ((tmp & 0x10) && !sis_short_ata40(pdev))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+
+/**
+ *	sis_pre_reset - probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits sis_enable_bits[] = {
+		{ 0x4aU, 1U, 0x02UL, 0x02UL },	/* port 0 */
+		{ 0x4aU, 1U, 0x04UL, 0x04UL },	/* port 1 */
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	/* Clear the FIFO settings. We can't enable the FIFO until
+	   we know we are poking at a disk */
+	pci_write_config_byte(pdev, 0x4B, 0);
+	return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ *	sis_set_fifo - Set RWP fifo bits for this device
+ *	@ap: Port
+ *	@adev: Device
+ *
+ *	SIS chipsets implement prefetch/postwrite bits for each device
+ *	on both channels. This functionality is not ATAPI compatible and
+ *	must be configured according to the class of device present
+ */
+
+static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 fifoctrl;
+	u8 mask = 0x11;
+
+	mask <<= (2 * ap->port_no);
+	mask <<= adev->devno;
+
+	/* This holds various bits including the FIFO control */
+	pci_read_config_byte(pdev, 0x4B, &fifoctrl);
+	fifoctrl &= ~mask;
+
+	/* Enable for ATA (disk) only */
+	if (adev->class == ATA_DEV_ATA)
+		fifoctrl |= mask;
+	pci_write_config_byte(pdev, 0x4B, fifoctrl);
+}
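+
+/*
+ * Editorial note on the mask arithmetic: 0x11 selects the prefetch and
+ * postwrite bits for channel 0, device 0; shifting by 2 * port_no and
+ * then devno walks it across the four device slots, e.g. port 1,
+ * device 1 uses 0x11 << 3 = 0x88.
+ */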
+
+/**
+ *	sis_old_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for all chips that are pre ATA100 and
+ *	also early ATA100 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = sis_old_port_base(adev);
+	u8 t1, t2;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	static const u8 active[]   = { 0x00, 0x07, 0x04, 0x03, 0x01 };
+	static const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
+
+	sis_set_fifo(ap, adev);
+
+	pci_read_config_byte(pdev, port, &t1);
+	pci_read_config_byte(pdev, port + 1, &t2);
+
+	t1 &= ~0x0F;	/* Clear active/recovery timings */
+	t2 &= ~0x07;
+
+	t1 |= active[speed];
+	t2 |= recovery[speed];
+
+	pci_write_config_byte(pdev, port, t1);
+	pci_write_config_byte(pdev, port + 1, t2);
+}
+
+/**
+ *	sis_100_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for ATA100 devices and early ATA133.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = sis_old_port_base(adev);
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	static const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
+
+	sis_set_fifo(ap, adev);
+
+	pci_write_config_byte(pdev, port, actrec[speed]);
+}
+
+/**
+ *	sis_133_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for the later ATA133 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port;
+	u32 t1;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	static const u32 timing133[] = {
+		0x28269000,	/* Recovery << 24 | Act << 16 | Ini << 12 */
+		0x0C266000,
+		0x04263000,
+		0x0C0A3000,
+		0x05093000
+	};
+	static const u32 timing100[] = {
+		0x1E1C6000,	/* Recovery << 24 | Act << 16 | Ini << 12 */
+		0x091C4000,
+		0x031C2000,
+		0x09072000,
+		0x04062000
+	};
+
+	sis_set_fifo(ap, adev);
+
+	port = sis_port_base(adev);
+	pci_read_config_dword(pdev, port, &t1);
+	t1 &= 0xC0C00FFF;	/* Mask out timing */
+
+	if (t1 & 0x08)		/* 100 or 133 ? */
+		t1 |= timing133[speed];
+	else
+		t1 |= timing100[speed];
+	/* t1 was read and assembled as a dword; write all of it back */
+	pci_write_config_dword(pdev, port, t1);
+}
+
+/**
+ *	sis_old_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles pre UDMA and UDMA33 devices. Supports MWDMA as well unlike
+ *	the old ide/pci driver.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u16 timing;
+
+	static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+	static const u16 udma_bits[]  = { 0xE000, 0xC000, 0xA000 };
+
+	pci_read_config_word(pdev, drive_pci, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* bits 3-0 hold recovery timing, bits 8-10 active timing;
+		   the higher bits are dependent on the device */
+		timing &= ~0x870F;
+		timing |= mwdma_bits[speed];
+	} else {
+		/* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x6000;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_word(pdev, drive_pci, timing);
+}
+
+/**
+ *	sis_66_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles UDMA66 and early UDMA100 devices. Supports MWDMA as well unlike
+ *	the old ide/pci driver.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u16 timing;
+
+	/* MWDMA 0-2 and UDMA 0-5 */
+	static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+	static const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
+
+	pci_read_config_word(pdev, drive_pci, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* bits 3-0 hold recovery timing, bits 8-10 active timing;
+		   the higher bits are dependent on the device, bit 15 is UDMA */
+		timing &= ~0x870F;
+		timing |= mwdma_bits[speed];
+	} else {
+		/* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0xF000;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_word(pdev, drive_pci, timing);
+}
+
+/**
+ *	sis_100_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles UDMA66 and early UDMA100 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u8 timing;
+
+	static const u8 udma_bits[]  = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+	pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+	} else {
+		/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x8F;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ *	sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles early SiS 961 bridges.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u8 timing;
+	/* Low 4 bits are timing */
+	static const u8 udma_bits[]  = { 0x8F, 0x8A, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+	pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+	} else {
+		/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x8F;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ *	sis_133_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port;
+	u32 t1;
+
+	port = sis_port_base(adev);
+	pci_read_config_dword(pdev, port, &t1);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* Recovery << 24 | Act << 16 | Ini << 12, like PIO modes */
+		static const u32 timing_u100[] = { 0x19154000, 0x06072000, 0x04062000 };
+		static const u32 timing_u133[] = { 0x221C6000, 0x0C0A3000, 0x05093000 };
+		int speed = adev->dma_mode - XFER_MW_DMA_0;
+
+		t1 &= 0xC0C00FFF;
+		/* disable UDMA */
+		t1 &= ~0x00000004;
+		if (t1 & 0x08)
+			t1 |= timing_u133[speed];
+		else
+			t1 |= timing_u100[speed];
+	} else {
+		/* bits 4-7 hold the cycle time, bits 8-11 the cvs time */
+		static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
+		static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
+		int speed = adev->dma_mode - XFER_UDMA_0;
+
+		t1 &= ~0x00000FF0;
+		/* enable UDMA */
+		t1 |= 0x00000004;
+		if (t1 & 0x08)
+			t1 |= timing_u133[speed];
+		else
+			t1 |= timing_u100[speed];
+	}
+	pci_write_config_dword(pdev, port, t1);
+}
+
+/**
+ *	sis_133_mode_filter - mode selection filter
+ *	@adev: ATA device
+ *
+ *	Block UDMA6 on devices that do not support it.
+ */
+
+static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	struct ata_port *ap = adev->link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = sis_port_base(adev);
+	u32 t1;
+
+	pci_read_config_dword(pdev, port, &t1);
+	/* if ATA133 is disabled, mask it out */
+	if (!(t1 & 0x08))
+		mask &= ~(0xC0 << ATA_SHIFT_UDMA);
+	return mask;
+}
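+
+/*
+ * Editorial note: 0xC0 << ATA_SHIFT_UDMA clears the UDMA6 and UDMA7 bits,
+ * so a port with ATA133 disabled is capped at UDMA5.
+ */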
+
+static struct scsi_host_template sis_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sis_133_for_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.set_piomode		= sis_133_set_piomode,
+	.set_dmamode		= sis_133_set_dmamode,
+	.cable_detect		= sis_133_cable_detect,
+};
+
+static struct ata_port_operations sis_base_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.prereset		= sis_pre_reset,
+};
+
+static struct ata_port_operations sis_133_ops = {
+	.inherits		= &sis_base_ops,
+	.set_piomode		= sis_133_set_piomode,
+	.set_dmamode		= sis_133_set_dmamode,
+	.cable_detect		= sis_133_cable_detect,
+	.mode_filter		= sis_133_mode_filter,
+};
+
+static struct ata_port_operations sis_133_early_ops = {
+	.inherits		= &sis_base_ops,
+	.set_piomode		= sis_100_set_piomode,
+	.set_dmamode		= sis_133_early_set_dmamode,
+	.cable_detect		= sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_100_ops = {
+	.inherits		= &sis_base_ops,
+	.set_piomode		= sis_100_set_piomode,
+	.set_dmamode		= sis_100_set_dmamode,
+	.cable_detect		= sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_66_ops = {
+	.inherits		= &sis_base_ops,
+	.set_piomode		= sis_old_set_piomode,
+	.set_dmamode		= sis_66_set_dmamode,
+	.cable_detect		= sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_old_ops = {
+	.inherits		= &sis_base_ops,
+	.set_piomode		= sis_old_set_piomode,
+	.set_dmamode		= sis_old_set_dmamode,
+	.cable_detect		= ata_cable_40wire,
+};
+
+static const struct ata_port_info sis_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	/* No UDMA */
+	.port_ops	= &sis_old_ops,
+};
+static const struct ata_port_info sis_info33 = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA2,
+	.port_ops	= &sis_old_ops,
+};
+static const struct ata_port_info sis_info66 = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA4,
+	.port_ops	= &sis_66_ops,
+};
+static const struct ata_port_info sis_info100 = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &sis_100_ops,
+};
+static const struct ata_port_info sis_info100_early = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &sis_66_ops,
+};
+static const struct ata_port_info sis_info133 = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_133_ops,
+};
+const struct ata_port_info sis_info133_for_sata = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_133_for_sata_ops,
+};
+static const struct ata_port_info sis_info133_early = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_133_early_ops,
+};
+
+/* Privately shared with the SiS180 SATA driver, not for use elsewhere */
+EXPORT_SYMBOL_GPL(sis_info133_for_sata);
+
+static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
+{
+	u16 regw;
+	u8 reg;
+
+	if (sis->info == &sis_info133) {
+		pci_read_config_word(pdev, 0x50, &regw);
+		if (regw & 0x08)
+			pci_write_config_word(pdev, 0x50, regw & ~0x08);
+		pci_read_config_word(pdev, 0x52, &regw);
+		if (regw & 0x08)
+			pci_write_config_word(pdev, 0x52, regw & ~0x08);
+		return;
+	}
+
+	if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
+		/* Fix up latency */
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+		/* Set compatibility bit */
+		pci_read_config_byte(pdev, 0x49, &reg);
+		if (!(reg & 0x01))
+			pci_write_config_byte(pdev, 0x49, reg | 0x01);
+		return;
+	}
+
+	if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
+		/* Fix up latency */
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+		/* Set compatibility bit */
+		pci_read_config_byte(pdev, 0x52, &reg);
+		if (!(reg & 0x04))
+			pci_write_config_byte(pdev, 0x52, reg | 0x04);
+		return;
+	}
+
+	if (sis->info == &sis_info33) {
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
+		if ((reg & 0x0F) != 0x00)
+			pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
+		/* Fall through to ATA16 fixup below */
+	}
+
+	if (sis->info == &sis_info || sis->info == &sis_info33) {
+		/* force per drive recovery and active timings
+		   needed on ATA_33 and below chips */
+		pci_read_config_byte(pdev, 0x52, &reg);
+		if (!(reg & 0x08))
+			pci_write_config_byte(pdev, 0x52, reg|0x08);
+		return;
+	}
+
+	BUG();
+}
+
+/**
+ *	sis_init_one - Register SiS ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sis_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer. We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	struct pci_dev *host = NULL;
+	struct sis_chipset *chipset = NULL;
+	struct sis_chipset *sets;
+	int rc;
+
+	static struct sis_chipset sis_chipsets[] = {
+
+		{ 0x0968, &sis_info133 },
+		{ 0x0966, &sis_info133 },
+		{ 0x0965, &sis_info133 },
+		{ 0x0745, &sis_info100 },
+		{ 0x0735, &sis_info100 },
+		{ 0x0733, &sis_info100 },
+		{ 0x0635, &sis_info100 },
+		{ 0x0633, &sis_info100 },
+
+		{ 0x0730, &sis_info100_early },	/* 100 with ATA 66 layout */
+		{ 0x0550, &sis_info100_early },	/* 100 with ATA 66 layout */
+
+		{ 0x0640, &sis_info66 },
+		{ 0x0630, &sis_info66 },
+		{ 0x0620, &sis_info66 },
+		{ 0x0540, &sis_info66 },
+		{ 0x0530, &sis_info66 },
+
+		{ 0x5600, &sis_info33 },
+		{ 0x5598, &sis_info33 },
+		{ 0x5597, &sis_info33 },
+		{ 0x5591, &sis_info33 },
+		{ 0x5582, &sis_info33 },
+		{ 0x5581, &sis_info33 },
+
+		{ 0x5596, &sis_info },
+		{ 0x5571, &sis_info },
+		{ 0x5517, &sis_info },
+		{ 0x5511, &sis_info },
+
+		{0}
+	};
+	static struct sis_chipset sis133_early = {
+		0x0, &sis_info133_early
+	};
+	static struct sis_chipset sis133 = {
+		0x0, &sis_info133
+	};
+	static struct sis_chipset sis100_early = {
+		0x0, &sis_info100_early
+	};
+	static struct sis_chipset sis100 = {
+		0x0, &sis_info100
+	};
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* We have to find the bridge first */
+	for (sets = &sis_chipsets[0]; sets->device; sets++) {
+		host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
+		if (host != NULL) {
+			chipset = sets;			/* Match found */
+			if (sets->device == 0x630) {	/* SIS630 */
+				if (host->revision >= 0x30)	/* 630 ET */
+					chipset = &sis100_early;
+			}
+			break;
+		}
+	}
+
+	/* Look for concealed bridges */
+	if (chipset == NULL) {
+		/* Second check */
+		u32 idemisc;
+		u16 trueid;
+
+		/* Disable ID masking and register remapping then
+		   see what the real ID is */
+
+		pci_read_config_dword(pdev, 0x54, &idemisc);
+		pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
+		pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+		pci_write_config_dword(pdev, 0x54, idemisc);
+
+		switch (trueid) {
+		case 0x5518:	/* SIS 962/963 */
+			dev_info(&pdev->dev,
+				 "SiS 962/963 MuTIOL IDE UDMA133 controller\n");
+			chipset = &sis133;
+			if ((idemisc & 0x40000000) == 0) {
+				pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
+				dev_info(&pdev->dev,
+					 "Switching to 5513 register mapping\n");
+			}
+			break;
+		case 0x0180:	/* SIS 965/965L */
+			chipset = &sis133;
+			break;
+		case 0x1180:	/* SIS 966/966L */
+			chipset = &sis133;
+			break;
+		}
+	}
+
+	/* Further check */
+	if (chipset == NULL) {
+		struct pci_dev *lpc_bridge;
+		u16 trueid;
+		u8 prefctl;
+		u8 idecfg;
+		u8 rev;
+
+		/* Try the second unmasking technique */
+		pci_read_config_byte(pdev, 0x4a, &idecfg);
+		pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
+		pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+		pci_write_config_byte(pdev, 0x4a, idecfg);
+
+		switch (trueid) {
+		case 0x5517:
+			lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
+			if (lpc_bridge == NULL)
+				break;
+			pci_read_config_byte(pdev, 0x49, &prefctl);
+			/* cache the revision before dropping the reference */
+			rev = lpc_bridge->revision;
+			pci_dev_put(lpc_bridge);
+
+			if (rev == 0x10 && (prefctl & 0x80)) {
+				chipset = &sis133_early;
+				break;
+			}
+			chipset = &sis100;
+			break;
+		}
+	}
+	pci_dev_put(host);
+
+	/* No chipset info, no support */
+	if (chipset == NULL)
+		return -ENODEV;
+
+	ppi[0] = chipset->info;
+
+	sis_fixup(pdev, chipset);
+
+	return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sis_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	sis_fixup(pdev, host->private_data);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id sis_pci_tbl[] = {
+	{ PCI_VDEVICE(SI, 0x5513), },	/* SiS 5513 */
+	{ PCI_VDEVICE(SI, 0x5518), },	/* SiS 5518 */
+	{ PCI_VDEVICE(SI, 0x1180), },	/* SiS 1180 */
+
+	{ }
+};
+
+static struct pci_driver sis_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sis_pci_tbl,
+	.probe			= sis_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= sis_reinit_one,
+#endif
+};
+
+module_pci_driver(sis_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
new file mode 100644
index 0000000..4935f61
--- /dev/null
+++ b/drivers/ata/pata_sl82c105.c
@@ -0,0 +1,380 @@
+/*
+ * pata_sl82c105.c 	- SL82C105 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  (C) 2011 Bartlomiej Zolnierkiewicz
+ *
+ * Based in part on linux/drivers/ide/pci/sl82c105.c
+ * 		SL82C105/Winbond 553 IDE driver
+ *
+ * and in part on the documentation and errata sheet
+ *
+ *
+ * Note: The controller, like many controllers, has shared timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
+ * in the dma_stop function. Thus we actually don't need a set_dmamode
+ * method as the PIO method is always called and will set the right PIO
+ * timing parameters.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sl82c105"
+#define DRV_VERSION "0.3.3"
+
+enum {
+	/*
+	 * SL82C105 PCI config register 0x40 bits.
+	 */
+	CTRL_IDE_IRQB	=	(1 << 30),
+	CTRL_IDE_IRQA   =	(1 << 28),
+	CTRL_LEGIRQ     =	(1 << 11),
+	CTRL_P1F16      =	(1 << 5),
+	CTRL_P1EN       =	(1 << 4),
+	CTRL_P0F16      =	(1 << 1),
+	CTRL_P0EN       =	(1 << 0)
+};
+
+/**
+ *	sl82c105_pre_reset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits sl82c105_enable_bits[] = {
+		{ 0x40, 1, 0x01, 0x01 },
+		{ 0x40, 1, 0x10, 0x10 }
+	};
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ *	sl82c105_configure_piomode	-	set chip PIO timing
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@pio: PIO mode
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	so a configure_dmamode call will undo any work we do here and vice
+ *	versa
+ */
+
+static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static u16 pio_timing[5] = {
+		0x50D, 0x407, 0x304, 0x242, 0x240
+	};
+	u16 dummy;
+	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+
+	pci_write_config_word(pdev, timing, pio_timing[pio]);
+	/* Can we lose this oddity of the old driver */
+	pci_read_config_word(pdev, timing, &dummy);
+}
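+
+/*
+ * Editorial worked example: the shared timing word for port 1, device 0
+ * sits at 0x44 + 8*1 + 4*0 = 0x4C, and a PIO4 request writes 0x240 there;
+ * the dummy read-back is carried over from the old driver.
+ */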
+
+/**
+ *	sl82c105_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	but we want to set the PIO timing by default.
+ */
+
+static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	sl82c105_configure_dmamode	-	set DMA mode in chip
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Load DMA cycle times into the chip ready for a DMA transfer
+ *	to occur.
+ */
+
+static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static u16 dma_timing[3] = {
+		0x707, 0x201, 0x200
+	};
+	u16 dummy;
+	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+	int dma = adev->dma_mode - XFER_MW_DMA_0;
+
+	pci_write_config_word(pdev, timing, dma_timing[dma]);
+	/* Can we lose this oddity of the old driver */
+	pci_read_config_word(pdev, timing, &dummy);
+}
+
+/**
+ *	sl82c105_reset_engine	-	Reset the DMA engine
+ *	@ap: ATA interface
+ *
+ *	The sl82c105 has some serious problems with the DMA engine
+ *	when transfers don't run as expected or ATAPI is used. The
+ *	recommended fix is to reset the engine each use using a chip
+ *	test register.
+ */
+
+static void sl82c105_reset_engine(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 val;
+
+	pci_read_config_word(pdev, 0x7E, &val);
+	pci_write_config_word(pdev, 0x7E, val | 4);
+	pci_write_config_word(pdev, 0x7E, val & ~4);
+}
+
+/**
+ *	sl82c105_bmdma_start		-	DMA engine begin
+ *	@qc: ATA command
+ *
+ *	Reset the DMA engine each use as recommended by the errata
+ *	document.
+ *
+ *	FIXME: if we switch clock at BMDMA start/end we might get better
+ *	PIO performance on DMA capable devices.
+ */
+
+static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	udelay(100);
+	sl82c105_reset_engine(ap);
+	udelay(100);
+
+	/* Set the clocks for DMA */
+	sl82c105_configure_dmamode(ap, qc->dev);
+	/* Activate DMA */
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	sl82c105_bmdma_end		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Reset the DMA engine each use as recommended by the errata
+ *	document.
+ *
+ *	This function is also called to turn off DMA when a timeout occurs
+ *	during DMA operation. In both cases we need to reset the engine,
+ *	so no actual eng_timeout handler is required.
+ *
+ *	We assume bmdma_stop is always called if bmdma_start was called. If
+ *	not then we may need to wrap qc_issue.
+ */
+
+static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ata_bmdma_stop(qc);
+	sl82c105_reset_engine(ap);
+	udelay(100);
+
+	/* This will redo the initial setup of the DMA device to matching
+	   PIO timings */
+	sl82c105_set_piomode(ap, qc->dev);
+}
+
+/**
+ *	sl82c105_qc_defer	-	implement serialization
+ *	@qc: command
+ *
+ *	We must issue one command per host not per channel because
+ *	of the reset bug.
+ *
+ *	Q: is the scsi host lock sufficient ?
+ */
+
+static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_host *host = qc->ap->host;
+	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+	int rc;
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	/* Now apply serialization rules. Only allow a command if the
+	   other channel state machine is idle */
+	if (alt && alt->qc_active)
+		return	ATA_DEFER_PORT;
+	return 0;
+}
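+
+/*
+ * Editorial note: host->ports[1 ^ qc->ap->port_no] is simply the other
+ * channel (XOR with 1 maps 0 to 1 and 1 to 0), so a command is deferred
+ * whenever its sibling channel still has one active.
+ */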
+
+static bool sl82c105_sff_irq_check(struct ata_port *ap)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	u32 val, mask		= ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
+
+	pci_read_config_dword(pdev, 0x40, &val);
+
+	return val & mask;
+}
+
+static struct scsi_host_template sl82c105_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sl82c105_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.qc_defer	= sl82c105_qc_defer,
+	.bmdma_start 	= sl82c105_bmdma_start,
+	.bmdma_stop	= sl82c105_bmdma_stop,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= sl82c105_set_piomode,
+	.prereset	= sl82c105_pre_reset,
+	.sff_irq_check	= sl82c105_sff_irq_check,
+};
+
+/**
+ *	sl82c105_bridge_revision	-	find bridge version
+ *	@pdev: PCI device for the ATA function
+ *
+ *	Locates the PCI bridge associated with the ATA function and,
+ *	provided it is a Winbond 553 ISA bridge, reports the revision. If
+ *	it cannot find the right device or a revision it returns -1.
+ */
+
+static int sl82c105_bridge_revision(struct pci_dev *pdev)
+{
+	struct pci_dev *bridge;
+	u8 rev;
+
+	/*
+	 * The bridge should be part of the same device, but function 0.
+	 */
+	bridge = pci_get_slot(pdev->bus,
+			       PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+	if (!bridge)
+		return -1;
+
+	/*
+	 * Make sure it is a Winbond 553 and is an ISA bridge.
+	 */
+	if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
+	    bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
+	    bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
+		pci_dev_put(bridge);
+		return -1;
+	}
+	/*
+	 * We need function 0's revision, not function 1's. Read it
+	 * before dropping our reference to the bridge.
+	 */
+	rev = bridge->revision;
+	pci_dev_put(bridge);
+	return rev;
+}
+
+static void sl82c105_fixup(struct pci_dev *pdev)
+{
+	u32 val;
+
+	pci_read_config_dword(pdev, 0x40, &val);
+	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+	pci_write_config_dword(pdev, 0x40, val);
+}
+
+static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_dma = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &sl82c105_port_ops
+	};
+	static const struct ata_port_info info_early = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.port_ops = &sl82c105_port_ops
+	};
+	/* for now use only the first port */
+	const struct ata_port_info *ppi[] = { &info_early,
+					       NULL };
+	int rev;
+	int rc;
+
+	rc = pcim_enable_device(dev);
+	if (rc)
+		return rc;
+
+	rev = sl82c105_bridge_revision(dev);
+
+	if (rev == -1)
+		dev_warn(&dev->dev,
+			 "pata_sl82c105: Unable to find bridge, disabling DMA\n");
+	else if (rev <= 5)
+		dev_warn(&dev->dev,
+			 "pata_sl82c105: Early bridge revision, no DMA available\n");
+	else
+		ppi[0] = &info_dma;
+
+	sl82c105_fixup(dev);
+
+	return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sl82c105_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	sl82c105_fixup(pdev);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id sl82c105[] = {
+	{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
+
+	{ },
+};
+
+static struct pci_driver sl82c105_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sl82c105,
+	.probe 		= sl82c105_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= sl82c105_reinit_one,
+#endif
+};
+
+module_pci_driver(sl82c105_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for SL82C105");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sl82c105);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
new file mode 100644
index 0000000..d9364af
--- /dev/null
+++ b/drivers/ata/pata_triflex.c
@@ -0,0 +1,248 @@
+/*
+ * pata_triflex.c 	- Compaq PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * based upon
+ *
+ * triflex.c
+ *
+ * IDE Chipset driver for the Compaq TriFlex IDE controller.
+ *
+ * Known to work with the Compaq Workstation 5x00 series.
+ *
+ * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
+ * Author: Torben Mathiasen <torben.mathiasen@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Not publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_triflex"
+#define DRV_VERSION "0.2.8"
+
+/**
+ *	triflex_prereset		-	probe begin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int triflex_prereset(struct ata_link *link, unsigned long deadline)
+{
+	static const struct pci_bits triflex_enable_bits[] = {
+		{ 0x80, 1, 0x01, 0x01 },
+		{ 0x80, 1, 0x02, 0x02 }
+	};
+
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_sff_prereset(link, deadline);
+}
+
+
+
+/**
+ *	triflex_load_timing		-	timing configuration
+ *	@ap: ATA interface
+ *	@adev: Device on the bus
+ *	@speed: speed to configure
+ *
+ *	The Triflex has one set of timings per device per channel. This
+ *	means we must do some switching. As the PIO and DMA timings don't
+ *	match we have to do some reloading unlike PIIX devices where tuning
+ *	tricks can avoid it.
+ */
+
+static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 timing = 0;
+	u32 triflex_timing, old_triflex_timing;
+	int channel_offset = ap->port_no ? 0x74 : 0x70;
+	unsigned int is_slave = (adev->devno != 0);
+
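+	/* Each channel has one 32-bit timing dword (0x70 primary, 0x74
+	   secondary); the slave drive's 16 bits occupy the upper half */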
+	pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
+	triflex_timing = old_triflex_timing;
+
+	switch (speed) {
+	case XFER_MW_DMA_2:
+		timing = 0x0103;
+		break;
+	case XFER_MW_DMA_1:
+		timing = 0x0203;
+		break;
+	case XFER_MW_DMA_0:
+		timing = 0x0808;
+		break;
+	case XFER_SW_DMA_2:
+	case XFER_SW_DMA_1:
+	case XFER_SW_DMA_0:
+		timing = 0x0F0F;
+		break;
+	case XFER_PIO_4:
+		timing = 0x0202;
+		break;
+	case XFER_PIO_3:
+		timing = 0x0204;
+		break;
+	case XFER_PIO_2:
+		timing = 0x0404;
+		break;
+	case XFER_PIO_1:
+		timing = 0x0508;
+		break;
+	case XFER_PIO_0:
+		timing = 0x0808;
+		break;
+	default:
+		BUG();
+	}
+	triflex_timing &= ~(0xFFFF << (16 * is_slave));
+	triflex_timing |= (timing << (16 * is_slave));
+
+	if (triflex_timing != old_triflex_timing)
+		pci_write_config_dword(pdev, channel_offset, triflex_timing);
+}
+
+/**
+ *	triflex_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Use the timing loader to set up the PIO mode. We have to do this
+ *	because DMA start/stop will only be called once DMA occurs. If there
+ *	has been no DMA then the PIO timings are still needed.
+ */
+static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	triflex_load_timing(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	triflex_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	Usually drivers set the DMA timing at the point the set_dmamode call
+ *	is made. Triflex however requires we load new timings on the
+ *	transition, or keep matching PIO/DMA pairs (e.g. MWDMA2 with PIO4).
+ *	We load the DMA timings just before starting DMA and then restore
+ *	the PIO timing when the DMA is finished.
+ */
+
+static void triflex_bmdma_start(struct ata_queued_cmd *qc)
+{
+	triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	triflex_bmdma_stop	-	DMA stop callback
+ *	@qc: ATA command
+ *
+ *	We loaded new timings in dma_start, as a result we need to restore
+ *	the PIO timings in dma_stop so that the next command issue gets the
+ *	right clock values.
+ */
+
+static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+	triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+static struct scsi_host_template triflex_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations triflex_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.bmdma_start 	= triflex_bmdma_start,
+	.bmdma_stop	= triflex_bmdma_stop,
+	.cable_detect	= ata_cable_40wire,
+	.set_piomode	= triflex_set_piomode,
+	.prereset	= triflex_prereset,
+};
+
+static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &triflex_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	ata_print_version_once(&dev->dev, DRV_VERSION);
+
+	return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
+}
+
+static const struct pci_device_id triflex[] = {
+	{ PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), },
+
+	{ },
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	/*
+	 * We must not disable or power down the device.
+	 * The APM BIOS refuses to suspend if IDE is not accessible.
+	 */
+	pci_save_state(pdev);
+
+	return 0;
+}
+
+#endif
+
+static struct pci_driver triflex_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= triflex,
+	.probe 		= triflex_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= triflex_ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+module_pci_driver(triflex_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, triflex);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
new file mode 100644
index 0000000..fd19f1c
--- /dev/null
+++ b/drivers/ata/pata_via.c
@@ -0,0 +1,719 @@
+/*
+ * pata_via.c 	- VIA PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *
+ *  Documentation
+ *	Most chipset documentation available under NDA only
+ *
+ *  VIA version guide
+ *	VIA VT82C561	-	early design, uses ata_generic currently
+ *	VIA VT82C576	-	MWDMA, 33 MHz
+ *	VIA VT82C586	-	MWDMA, 33 MHz
+ *	VIA VT82C586a	-	Added UDMA at 33 MHz
+ *	VIA VT82C586b	-	UDMA33
+ *	VIA VT82C596a	-	Nonfunctional UDMA66
+ *	VIA VT82C596b	-	Working UDMA66
+ *	VIA VT82C686	-	Nonfunctional UDMA66
+ *	VIA VT82C686a	-	Working UDMA66
+ *	VIA VT82C686b	-	Updated to UDMA100
+ *	VIA VT8231	-	UDMA100
+ *	VIA VT8233	-	UDMA100
+ *	VIA VT8233a	-	UDMA133
+ *	VIA VT8233c	-	UDMA100
+ *	VIA VT8235	-	UDMA133
+ *	VIA VT8237	-	UDMA133
+ *	VIA VT8237A	-	UDMA133
+ *	VIA VT8237S	-	UDMA133
+ *	VIA VT8251	-	UDMA133
+ *
+ *	Most registers remain compatible across chips. Others start reserved
+ *	and acquire sensible semantics if set to 1 (e.g. cable detect). A few
+ *	exceptions exist, notably around the FIFO settings.
+ *
+ *	One additional quirk of the VIA design is that like ALi they use few
+ *	PCI IDs for a lot of chips.
+ *
+ *	Based heavily on:
+ *
+ * Version 3.38
+ *
+ * VIA IDE driver for Linux. Supported southbridges:
+ *
+ *   vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
+ *   vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
+ *   vt8235, vt8237
+ *
+ * Copyright (c) 2000-2002 Vojtech Pavlik
+ *
+ * Based on the work of:
+ *	Michel Aubry
+ *	Jeff Garzik
+ *	Andre Hedrick
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_via"
+#define DRV_VERSION "0.3.4"
+
+enum {
+	VIA_BAD_PREQ	= 0x01, /* Crashes if PREQ# till DDACK# set */
+	VIA_BAD_CLK66	= 0x02, /* 66 MHz clock doesn't work correctly */
+	VIA_SET_FIFO	= 0x04, /* Needs to have FIFO split set */
+	VIA_NO_UNMASK	= 0x08, /* Doesn't work with IRQ unmasking on */
+	VIA_BAD_ID	= 0x10, /* Has wrong vendor ID (0x1107) */
+	VIA_BAD_AST	= 0x20, /* Don't touch Address Setup Timing */
+	VIA_NO_ENABLES	= 0x40, /* Has no enablebits */
+	VIA_SATA_PATA	= 0x80, /* SATA/PATA combined configuration */
+};
+
+enum {
+	VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */
+};
+
+/*
+ * VIA SouthBridge chips.
+ */
+
+static const struct via_isa_bridge {
+	const char *name;
+	u16 id;
+	u8 rev_min;
+	u8 rev_max;
+	u8 udma_mask;
+	u8 flags;
+} via_isa_bridges[] = {
+	{ "vx855",	PCI_DEVICE_ID_VIA_VX855,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+	{ "vx800",	PCI_DEVICE_ID_VIA_VX800,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+	{ "vt8261",	PCI_DEVICE_ID_VIA_8261,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+	{ "vt6410",	PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
+	{ "vt6415",	PCI_DEVICE_ID_VIA_6415,     0x00, 0xff, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
+	{ "vt8237a",	PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8237",	PCI_DEVICE_ID_VIA_8237,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8235",	PCI_DEVICE_ID_VIA_8235,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8233a",	PCI_DEVICE_ID_VIA_8233A,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8233c",	PCI_DEVICE_ID_VIA_8233C_0,  0x00, 0x2f, ATA_UDMA5, },
+	{ "vt8233",	PCI_DEVICE_ID_VIA_8233_0,   0x00, 0x2f, ATA_UDMA5, },
+	{ "vt8231",	PCI_DEVICE_ID_VIA_8231,     0x00, 0x2f, ATA_UDMA5, },
+	{ "vt82c686b",	PCI_DEVICE_ID_VIA_82C686,   0x40, 0x4f, ATA_UDMA5, },
+	{ "vt82c686a",	PCI_DEVICE_ID_VIA_82C686,   0x10, 0x2f, ATA_UDMA4, },
+	{ "vt82c686",	PCI_DEVICE_ID_VIA_82C686,   0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
+	{ "vt82c596b",	PCI_DEVICE_ID_VIA_82C596,   0x10, 0x2f, ATA_UDMA4, },
+	{ "vt82c596a",	PCI_DEVICE_ID_VIA_82C596,   0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586a",	PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586",	PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f,      0x00, VIA_SET_FIFO },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f,      0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f,      0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+	{ "vtxxxx",	PCI_DEVICE_ID_VIA_ANON,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ NULL }
+};
+
+static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
+	{
+		.ident = "AVERATEC 3200",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
+			DMI_MATCH(DMI_BOARD_NAME, "3200"),
+		},
+	},
+	{ }
+};
+
+struct via_port {
+	u8 cached_device;
+};
+
+/*
+ *	Cable special cases
+ */
+
+static const struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "Acer Ferrari 3400",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
+			DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
+		},
+	},
+	{ }
+};
+
+static int via_cable_override(struct pci_dev *pdev)
+{
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+	/* Arima W730-K8/Targa Visionary 811/... */
+	if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032)
+		return 1;
+	return 0;
+}
+
+
+/**
+ *	via_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection. Actually for the VIA case the BIOS
+ *	already did this for us. We read the values provided by the
+ *	BIOS. If you are using an 8235 in a non-PC configuration you
+ *	may need to update this code.
+ *
+ *	Hotplug also impacts this.
+ */
+
+static int via_cable_detect(struct ata_port *ap)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 ata66;
+
+	if (via_cable_override(pdev))
+		return ATA_CBL_PATA40_SHORT;
+
+	if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
+		return ATA_CBL_SATA;
+
+	/* Early chips are 40 wire */
+	if (config->udma_mask < ATA_UDMA4)
+		return ATA_CBL_PATA40;
+	/* UDMA 66 chips have only drive side logic */
+	else if (config->udma_mask < ATA_UDMA5)
+		return ATA_CBL_PATA_UNK;
+	/* UDMA 100 or later */
+	pci_read_config_dword(pdev, 0x50, &ata66);
+	/* Check both the drive cable reporting bits, we might not have
+	   two drives */
+	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
+		return ATA_CBL_PATA80;
+	/* Check with ACPI so we can spot BIOS reported SATA bridges */
+	if (ata_acpi_init_gtm(ap) &&
+	    ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+static int via_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	const struct via_isa_bridge *config = ap->host->private_data;
+
+	if (!(config->flags & VIA_NO_ENABLES)) {
+		static const struct pci_bits via_enable_bits[] = {
+			{ 0x40, 1, 0x02, 0x02 },
+			{ 0x40, 1, 0x01, 0x01 }
+		};
+		struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+		if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no]))
+			return -ENOENT;
+	}
+
+	return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ *	via_do_set_mode	-	set transfer mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@mode: ATA mode being programmed
+ *	@set_ast: Set to program address setup
+ *	@udma_type: UDMA mode/format of registers
+ *
+ *	Program the VIA registers for DMA and PIO modes. Uses the ata timing
+ *	support in order to compute modes.
+ *
+ *	FIXME: Hotplug will require we serialize multiple mode changes
+ *	on the two channels.
+ */
+
+static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev,
+			    int mode, int set_ast, int udma_type)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *peer = ata_dev_pair(adev);
+	struct ata_timing t, p;
+	static int via_clock = 33333;	/* Bus clock in kHz */
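+	/* T/UT are clock periods, in picoseconds given via_clock in kHz */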
+	unsigned long T =  1000000000 / via_clock;
+	unsigned long UT = T;
+	int ut;
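+	/* Per-drive timing bytes run in reverse device order from 0x48:
+	   offset 3 is the primary master (0x4B), 0 the secondary slave */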
+	int offset = 3 - (2*ap->port_no) - adev->devno;
+
+	switch (udma_type) {
+	case ATA_UDMA4:
+		UT = T / 2; break;
+	case ATA_UDMA5:
+		UT = T / 3; break;
+	case ATA_UDMA6:
+		UT = T / 4; break;
+	}
+
+	/* Calculate the timing values we require */
+	ata_timing_compute(adev, mode, &t, T, UT);
+
+	/* We share 8bit timing so we must merge the constraints */
+	if (peer) {
+		if (peer->pio_mode) {
+			ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
+		}
+	}
+
+	/* Address setup is programmable but breaks on UDMA133 setups */
+	if (set_ast) {
+		u8 setup;	/* 2 bits per drive */
+		int shift = 2 * offset;
+
+		pci_read_config_byte(pdev, 0x4C, &setup);
+		setup &= ~(3 << shift);
+		setup |= (clamp_val(t.setup, 1, 4) - 1) << shift;
+		pci_write_config_byte(pdev, 0x4C, setup);
+	}
+
+	/* Load the PIO mode bits */
+	pci_write_config_byte(pdev, 0x4F - ap->port_no,
+		((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
+	pci_write_config_byte(pdev, 0x48 + offset,
+		((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
+
+	/* Load the UDMA bits according to type */
+	switch (udma_type) {
+	case ATA_UDMA2:
+	default:
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
+		break;
+	case ATA_UDMA4:
+		ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
+		break;
+	case ATA_UDMA5:
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
+		break;
+	case ATA_UDMA6:
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
+		break;
+	}
+
+	/* Set UDMA unless device is not UDMA capable */
+	if (udma_type) {
+		u8 udma_etc;
+
+		pci_read_config_byte(pdev, 0x50 + offset, &udma_etc);
+
+		/* clear transfer mode bit */
+		udma_etc &= ~0x20;
+
+		if (t.udma) {
+			/* preserve 80-wire cable detection bit */
+			udma_etc &= 0x10;
+			udma_etc |= ut;
+		}
+
+		pci_write_config_byte(pdev, 0x50 + offset, udma_etc);
+	}
+}
+
+static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+	int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+
+	via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask);
+}
+
+static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+	int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+
+	via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask);
+}
+
+/**
+ *	via_mode_filter		-	filter buggy device/mode pairs
+ *	@dev: ATA device
+ *	@mask: Mode bitmask
+ *
+ *	We need to apply some minimal filtering for old controllers and at least
+ *	one breed of Transcend SSD. Return the updated mask.
+ */
+
+static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
+{
+	struct ata_host *host = dev->link->ap->host;
+	const struct via_isa_bridge *config = host->private_data;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	if (config->id == PCI_DEVICE_ID_VIA_82C586_0) {
+		ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+		if (strcmp(model_num, "TS64GSSD25-M") == 0) {
+			ata_dev_warn(dev,
+	"disabling UDMA mode due to reported lockups with this device\n");
+			mask &= ~ ATA_MASK_UDMA;
+		}
+	}
+
+	if (dev->class == ATA_DEV_ATAPI &&
+	    dmi_check_system(no_atapi_dma_dmi_table)) {
+		ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
+		mask &= ATA_MASK_PIO;
+	}
+
+	return mask;
+}
+
+/**
+ *	via_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	Note: This works around an internal bug of VIA chipsets, which
+ *	reset the device register after the IEN bit in the ctl register
+ *	is changed.
+ */
+static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	struct via_port *vp = ap->private_data;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+	int newctl = 0;
+
+	if (tf->ctl != ap->last_ctl) {
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+		newctl = 1;
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		iowrite8(tf->device, ioaddr->device_addr);
+		vp->cached_device = tf->device;
+	} else if (newctl)
+		iowrite8(vp->cached_device, ioaddr->device_addr);
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		WARN_ON_ONCE(!ioaddr->ctl_addr);
+		iowrite8(tf->hob_feature, ioaddr->feature_addr);
+		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
+		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
+		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
+		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		iowrite8(tf->feature, ioaddr->feature_addr);
+		iowrite8(tf->nsect, ioaddr->nsect_addr);
+		iowrite8(tf->lbal, ioaddr->lbal_addr);
+		iowrite8(tf->lbam, ioaddr->lbam_addr);
+		iowrite8(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	ata_wait_idle(ap);
+}
+
+static int via_port_start(struct ata_port *ap)
+{
+	struct via_port *vp;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	int ret = ata_bmdma_port_start(ap);
+	if (ret < 0)
+		return ret;
+
+	vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL);
+	if (vp == NULL)
+		return -ENOMEM;
+	ap->private_data = vp;
+	return 0;
+}
+
+static struct scsi_host_template via_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations via_port_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= via_cable_detect,
+	.set_piomode	= via_set_piomode,
+	.set_dmamode	= via_set_dmamode,
+	.prereset	= via_pre_reset,
+	.sff_tf_load	= via_tf_load,
+	.port_start	= via_port_start,
+	.mode_filter	= via_mode_filter,
+};
+
+static struct ata_port_operations via_port_ops_noirq = {
+	.inherits	= &via_port_ops,
+	.sff_data_xfer	= ata_sff_data_xfer32,
+};
+
+/**
+ *	via_config_fifo		-	set up the FIFO
+ *	@pdev: PCI device
+ *	@flags: configuration flags
+ *
+ *	Set the FIFO properties for this device if necessary. Used both at
+ *	setup time and on the resume path.
+ */
+
+static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
+{
+	u8 enable;
+
+	/* 0x40 low bits indicate enabled channels */
+	pci_read_config_byte(pdev, 0x40, &enable);
+	enable &= 3;
+
+	if (flags & VIA_SET_FIFO) {
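+		/* FIFO split value indexed by the two channel enable bits */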
+		static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
+		u8 fifo;
+
+		pci_read_config_byte(pdev, 0x43, &fifo);
+
+		/* Clear PREQ# until DDACK# for errata */
+		if (flags & VIA_BAD_PREQ)
+			fifo &= 0x7F;
+		else
+			fifo &= 0x9f;
+		/* Turn on FIFO for enabled channels */
+		fifo |= fifo_setting[enable];
+		pci_write_config_byte(pdev, 0x43, fifo);
+	}
+}
+
+static void via_fixup(struct pci_dev *pdev, const struct via_isa_bridge *config)
+{
+	u32 timing;
+
+	/* Initialise the FIFO for the enabled channels. */
+	via_config_fifo(pdev, config->flags);
+
+	if (config->udma_mask == ATA_UDMA4) {
+		/* The 66 MHz devices require we enable the clock */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing |= 0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+	if (config->flags & VIA_BAD_CLK66) {
+		/* Disable the 66MHz clock on problem devices */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing &= ~0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+}
+
+/**
+ *	via_init_one		-	discovery callback
+ *	@pdev: PCI device
+ *	@id: PCI table info
+ *
+ *	A VIA IDE interface has been discovered. Figure out what revision
+ *	and perform configuration work before handing it to the ATA layer
+ */
+
+static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	/* Early VIA without UDMA support */
+	static const struct ata_port_info via_mwdma_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &via_port_ops
+	};
+	/* Ditto with IRQ masking required */
+	static const struct ata_port_info via_mwdma_info_borked = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.port_ops = &via_port_ops_noirq,
+	};
+	/* VIA UDMA 33 devices (and borked 66) */
+	static const struct ata_port_info via_udma33_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA2,
+		.port_ops = &via_port_ops
+	};
+	/* VIA UDMA 66 devices */
+	static const struct ata_port_info via_udma66_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA4,
+		.port_ops = &via_port_ops
+	};
+	/* VIA UDMA 100 devices */
+	static const struct ata_port_info via_udma100_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &via_port_ops
+	};
+	/* UDMA133 with bad AST (All current 133) */
+	static const struct ata_port_info via_udma133_info = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,	/* FIXME: should check north bridge */
+		.port_ops = &via_port_ops
+	};
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	struct pci_dev *isa;
+	const struct via_isa_bridge *config;
+	u8 enable;
+	unsigned long flags = id->driver_data;
+	int rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (flags & VIA_IDFLAG_SINGLE)
+		ppi[1] = &ata_dummy_port_info;
+
+	/* To find out how the IDE will behave and what features we
+	   actually have, we must look at the bridge, not the IDE
+	   controller */
+	for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
+	     config++)
+		if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
+			!!(config->flags & VIA_BAD_ID),
+			config->id, NULL))) {
+			u8 rev = isa->revision;
+			pci_dev_put(isa);
+
+			if ((id->device == 0x0415 || id->device == 0x3164) &&
+			    (config->id != id->device))
+				continue;
+
+			if (rev >= config->rev_min && rev <= config->rev_max)
+				break;
+		}
+
+	if (!(config->flags & VIA_NO_ENABLES)) {
+		/* 0x40 low bits indicate enabled channels */
+		pci_read_config_byte(pdev, 0x40, &enable);
+		enable &= 3;
+		if (enable == 0)
+			return -ENODEV;
+	}
+
+	/* Clock set up */
+	switch (config->udma_mask) {
+	case 0x00:
+		if (config->flags & VIA_NO_UNMASK)
+			ppi[0] = &via_mwdma_info_borked;
+		else
+			ppi[0] = &via_mwdma_info;
+		break;
+	case ATA_UDMA2:
+		ppi[0] = &via_udma33_info;
+		break;
+	case ATA_UDMA4:
+		ppi[0] = &via_udma66_info;
+		break;
+	case ATA_UDMA5:
+		ppi[0] = &via_udma100_info;
+		break;
+	case ATA_UDMA6:
+		ppi[0] = &via_udma133_info;
+		break;
+	default:
+		WARN_ON(1);
+		return -ENODEV;
+	}
+
+	via_fixup(pdev, config);
+
+	/* We have established the device type, now fire it up */
+	return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ *	via_reinit_one		-	reinit after resume
+ *	@pdev: PCI device
+ *
+ *	Called when the VIA PATA device is resumed. We must then
+ *	reconfigure the FIFO and any other setup we may have altered. In
+ *	addition the kernel needs to support the resume methods of the
+ *	PCI quirks.
+ */
+
+static int via_reinit_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	via_fixup(pdev, host->private_data);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id via[] = {
+	{ PCI_VDEVICE(VIA, 0x0415), },
+	{ PCI_VDEVICE(VIA, 0x0571), },
+	{ PCI_VDEVICE(VIA, 0x0581), },
+	{ PCI_VDEVICE(VIA, 0x1571), },
+	{ PCI_VDEVICE(VIA, 0x3164), },
+	{ PCI_VDEVICE(VIA, 0x5324), },
+	{ PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
+	{ PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
+
+	{ },
+};
+
+static struct pci_driver via_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= via,
+	.probe 		= via_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= via_reinit_one,
+#endif
+};
+
+module_pci_driver(via_pci_driver);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for VIA PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, via);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
new file mode 100644
index 0000000..f1e873a
--- /dev/null
+++ b/drivers/ata/pdc_adma.c
@@ -0,0 +1,668 @@
+/*
+ *  pdc_adma.c - Pacific Digital Corporation ADMA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *
+ *  Copyright 2005 Mark Lord
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *
+ *  Supports ATA disks in single-packet ADMA mode.
+ *  Uses PIO for everything else.
+ *
+ *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
+ *  This requires careful attention to a number of quirks of the chip.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pdc_adma"
+#define DRV_VERSION	"1.0"
+
+/* macro to calculate base address for ATA regs */
+#define ADMA_ATA_REGS(base, port_no)	((base) + ((port_no) * 0x40))
+
+/* macro to calculate base address for ADMA regs */
+#define ADMA_REGS(base, port_no)	((base) + 0x80 + ((port_no) * 0x20))
+
+/* macro to obtain addresses from ata_port */
+#define ADMA_PORT_REGS(ap) \
+	ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
+
+enum {
+	ADMA_MMIO_BAR		= 4,
+
+	ADMA_PORTS		= 2,
+	ADMA_CPB_BYTES		= 40,
+	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
+	ADMA_PKT_BYTES		= ADMA_CPB_BYTES + ADMA_PRD_BYTES,
+
+	ADMA_DMA_BOUNDARY	= 0xffffffff,
+
+	/* global register offsets */
+	ADMA_MODE_LOCK		= 0x00c7,
+
+	/* per-channel register offsets */
+	ADMA_CONTROL		= 0x0000, /* ADMA control */
+	ADMA_STATUS		= 0x0002, /* ADMA status */
+	ADMA_CPB_COUNT		= 0x0004, /* CPB count */
+	ADMA_CPB_CURRENT	= 0x000c, /* current CPB address */
+	ADMA_CPB_NEXT		= 0x000c, /* next CPB address */
+	ADMA_CPB_LOOKUP		= 0x0010, /* CPB lookup table */
+	ADMA_FIFO_IN		= 0x0014, /* input FIFO threshold */
+	ADMA_FIFO_OUT		= 0x0016, /* output FIFO threshold */
+
+	/* ADMA_CONTROL register bits */
+	aNIEN			= (1 << 8), /* irq mask: 1==masked */
+	aGO			= (1 << 7), /* packet trigger ("Go!") */
+	aRSTADM			= (1 << 5), /* ADMA logic reset */
+	aPIOMD4			= 0x0003,   /* PIO mode 4 */
+
+	/* ADMA_STATUS register bits */
+	aPSD			= (1 << 6),
+	aUIRQ			= (1 << 4),
+	aPERR			= (1 << 0),
+
+	/* CPB bits */
+	cDONE			= (1 << 0),
+	cATERR			= (1 << 3),
+
+	cVLD			= (1 << 0),
+	cDAT			= (1 << 2),
+	cIEN			= (1 << 3),
+
+	/* PRD bits */
+	pORD			= (1 << 4),
+	pDIRO			= (1 << 5),
+	pEND			= (1 << 7),
+
+	/* ATA register flags */
+	rIGN			= (1 << 5),
+	rEND			= (1 << 7),
+
+	/* ATA register addresses */
+	ADMA_REGS_CONTROL	= 0x0e,
+	ADMA_REGS_SECTOR_COUNT	= 0x12,
+	ADMA_REGS_LBA_LOW	= 0x13,
+	ADMA_REGS_LBA_MID	= 0x14,
+	ADMA_REGS_LBA_HIGH	= 0x15,
+	ADMA_REGS_DEVICE	= 0x16,
+	ADMA_REGS_COMMAND	= 0x17,
+
+	/* PCI device IDs */
+	board_1841_idx		= 0,	/* ADMA 2-port controller */
+};
+
+typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
+
+struct adma_port_priv {
+	u8			*pkt;
+	dma_addr_t		pkt_dma;
+	adma_state_t		state;
+};
+
+static int adma_ata_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent);
+static int adma_port_start(struct ata_port *ap);
+static void adma_port_stop(struct ata_port *ap);
+static void adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void adma_freeze(struct ata_port *ap);
+static void adma_thaw(struct ata_port *ap);
+static int adma_prereset(struct ata_link *link, unsigned long deadline);
+
+static struct scsi_host_template adma_ata_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.dma_boundary		= ADMA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations adma_ata_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.lost_interrupt		= ATA_OP_NULL,
+
+	.check_atapi_dma	= adma_check_atapi_dma,
+	.qc_prep		= adma_qc_prep,
+	.qc_issue		= adma_qc_issue,
+
+	.freeze			= adma_freeze,
+	.thaw			= adma_thaw,
+	.prereset		= adma_prereset,
+
+	.port_start		= adma_port_start,
+	.port_stop		= adma_port_stop,
+};
+
+static struct ata_port_info adma_port_info[] = {
+	/* board_1841_idx */
+	{
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
+		.pio_mask	= ATA_PIO4_ONLY,
+		.udma_mask	= ATA_UDMA4,
+		.port_ops	= &adma_ata_ops,
+	},
+};
+
+static const struct pci_device_id adma_ata_pci_tbl[] = {
+	{ PCI_VDEVICE(PDC, 0x1841), board_1841_idx },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver adma_ata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= adma_ata_pci_tbl,
+	.probe			= adma_ata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;	/* ATAPI DMA not yet supported */
+}
+
+static void adma_reset_engine(struct ata_port *ap)
+{
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
+	/* reset ADMA to idle state */
+	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+	udelay(2);
+	writew(aPIOMD4, chan + ADMA_CONTROL);
+	udelay(2);
+}
+
+static void adma_reinit_engine(struct ata_port *ap)
+{
+	struct adma_port_priv *pp = ap->private_data;
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
+	/* mask/clear ATA interrupts */
+	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
+	ata_sff_check_status(ap);
+
+	/* reset the ADMA engine */
+	adma_reset_engine(ap);
+
+	/* set in-FIFO threshold to 0x100 */
+	writew(0x100, chan + ADMA_FIFO_IN);
+
+	/* set CPB pointer */
+	writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
+
+	/* set out-FIFO threshold to 0x100 */
+	writew(0x100, chan + ADMA_FIFO_OUT);
+
+	/* set CPB count */
+	writew(1, chan + ADMA_CPB_COUNT);
+
+	/* read/discard ADMA status */
+	readb(chan + ADMA_STATUS);
+}
+
+static inline void adma_enter_reg_mode(struct ata_port *ap)
+{
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
+	writew(aPIOMD4, chan + ADMA_CONTROL);
+	readb(chan + ADMA_STATUS);	/* flush */
+}
+
+static void adma_freeze(struct ata_port *ap)
+{
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
+	/* mask/clear ATA interrupts */
+	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
+	ata_sff_check_status(ap);
+
+	/* reset ADMA to idle state */
+	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+	udelay(2);
+	writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
+	udelay(2);
+}
+
+static void adma_thaw(struct ata_port *ap)
+{
+	adma_reinit_engine(ap);
+}
+
+static int adma_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct adma_port_priv *pp = ap->private_data;
+
+	if (pp->state != adma_state_idle) /* healthy paranoia */
+		pp->state = adma_state_mmio;
+	adma_reinit_engine(ap);
+
+	return ata_sff_prereset(link, deadline);
+}
+
+static int adma_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	struct ata_port *ap = qc->ap;
+	struct adma_port_priv *pp = ap->private_data;
+	u8  *buf = pp->pkt, *last_buf = NULL;
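+	/* PRD entries begin right after the CPB: (cLEN + 2) * 8 bytes in */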
+	int i = (2 + buf[3]) * 8;
+	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
+	unsigned int si;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr;
+		u32 len;
+
+		addr = (u32)sg_dma_address(sg);
+		*(__le32 *)(buf + i) = cpu_to_le32(addr);
+		i += 4;
+
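+		/* PRD byte counts are encoded in units of 8 bytes */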
+		len = sg_dma_len(sg) >> 3;
+		*(__le32 *)(buf + i) = cpu_to_le32(len);
+		i += 4;
+
+		last_buf = &buf[i];
+		buf[i++] = pFLAGS;
+		buf[i++] = qc->dev->dma_mode & 0xf;
+		buf[i++] = 0;	/* pPKLW */
+		buf[i++] = 0;	/* reserved */
+
+		*(__le32 *)(buf + i) =
+			(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
+		i += 4;
+
+		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
+					(unsigned long)addr, len);
+	}
+
+	if (likely(last_buf))
+		*last_buf |= pEND;
+
+	return i;
+}
+
+static void adma_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct adma_port_priv *pp = qc->ap->private_data;
+	u8  *buf = pp->pkt;
+	u32 pkt_dma = (u32)pp->pkt_dma;
+	int i = 0;
+
+	VPRINTK("ENTER\n");
+
+	adma_enter_reg_mode(qc->ap);
+	if (qc->tf.protocol != ATA_PROT_DMA)
+		return;
+
+	buf[i++] = 0;	/* Response flags */
+	buf[i++] = 0;	/* reserved */
+	buf[i++] = cVLD | cDAT | cIEN;
+	i++;		/* cLEN, gets filled in below */
+
+	*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);	/* cNCPB */
+	i += 4;		/* cNCPB */
+	i += 4;		/* cPRD, gets filled in below */
+
+	buf[i++] = 0;	/* reserved */
+	buf[i++] = 0;	/* reserved */
+	buf[i++] = 0;	/* reserved */
+	buf[i++] = 0;	/* reserved */
+
+	/* ATA registers; must be a multiple of 4 */
+	buf[i++] = qc->tf.device;
+	buf[i++] = ADMA_REGS_DEVICE;
+	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
+		buf[i++] = qc->tf.hob_nsect;
+		buf[i++] = ADMA_REGS_SECTOR_COUNT;
+		buf[i++] = qc->tf.hob_lbal;
+		buf[i++] = ADMA_REGS_LBA_LOW;
+		buf[i++] = qc->tf.hob_lbam;
+		buf[i++] = ADMA_REGS_LBA_MID;
+		buf[i++] = qc->tf.hob_lbah;
+		buf[i++] = ADMA_REGS_LBA_HIGH;
+	}
+	buf[i++] = qc->tf.nsect;
+	buf[i++] = ADMA_REGS_SECTOR_COUNT;
+	buf[i++] = qc->tf.lbal;
+	buf[i++] = ADMA_REGS_LBA_LOW;
+	buf[i++] = qc->tf.lbam;
+	buf[i++] = ADMA_REGS_LBA_MID;
+	buf[i++] = qc->tf.lbah;
+	buf[i++] = ADMA_REGS_LBA_HIGH;
+	buf[i++] = 0;
+	buf[i++] = ADMA_REGS_CONTROL;
+	buf[i++] = rIGN;
+	buf[i++] = 0;
+	buf[i++] = qc->tf.command;
+	buf[i++] = ADMA_REGS_COMMAND | rEND;
+
+	buf[3] = (i >> 3) - 2;				/* cLEN */
+	*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);	/* cPRD */
+
+	i = adma_fill_sg(qc);
+	wmb();	/* flush PRDs and pkt to memory */
+#if 0
+	/* dump out CPB + PRDs for debug */
+	{
+		int j, len = 0;
+		static char obuf[2048];
+		for (j = 0; j < i; ++j) {
+			len += sprintf(obuf+len, "%02x ", buf[j]);
+			if ((j & 7) == 7) {
+				printk("%s\n", obuf);
+				len = 0;
+			}
+		}
+		if (len)
+			printk("%s\n", obuf);
+	}
+#endif
+}
+
+static inline void adma_packet_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
+	VPRINTK("ENTER, ap %p\n", ap);
+
+	/* fire up the ADMA engine */
+	writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
+}
+
+static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct adma_port_priv *pp = qc->ap->private_data;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		pp->state = adma_state_pkt;
+		adma_packet_start(qc);
+		return 0;
+
+	case ATAPI_PROT_DMA:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+
+	pp->state = adma_state_mmio;
+	return ata_sff_qc_issue(qc);
+}
+
+static inline unsigned int adma_intr_pkt(struct ata_host *host)
+{
+	unsigned int handled = 0, port_no;
+
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		struct adma_port_priv *pp;
+		struct ata_queued_cmd *qc;
+		void __iomem *chan = ADMA_PORT_REGS(ap);
+		u8 status = readb(chan + ADMA_STATUS);
+
+		if (status == 0)
+			continue;
+		handled = 1;
+		adma_enter_reg_mode(ap);
+		pp = ap->private_data;
+		if (!pp || pp->state != adma_state_pkt)
+			continue;
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+			if (status & aPERR)
+				qc->err_mask |= AC_ERR_HOST_BUS;
+			else if ((status & (aPSD | aUIRQ)))
+				qc->err_mask |= AC_ERR_OTHER;
+
+			if (pp->pkt[0] & cATERR)
+				qc->err_mask |= AC_ERR_DEV;
+			else if (pp->pkt[0] != cDONE)
+				qc->err_mask |= AC_ERR_OTHER;
+
+			if (!qc->err_mask)
+				ata_qc_complete(qc);
+			else {
+				struct ata_eh_info *ehi = &ap->link.eh_info;
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi,
+					"ADMA-status 0x%02X", status);
+				ata_ehi_push_desc(ehi,
+					"pkt[0] 0x%02X", pp->pkt[0]);
+
+				if (qc->err_mask == AC_ERR_DEV)
+					ata_port_abort(ap);
+				else
+					ata_port_freeze(ap);
+			}
+		}
+	}
+	return handled;
+}
+
+static inline unsigned int adma_intr_mmio(struct ata_host *host)
+{
+	unsigned int handled = 0, port_no;
+
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		struct adma_port_priv *pp = ap->private_data;
+		struct ata_queued_cmd *qc;
+
+		if (!pp || pp->state != adma_state_mmio)
+			continue;
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+
+			/* check main status, clearing INTRQ */
+			u8 status = ata_sff_check_status(ap);
+			if ((status & ATA_BUSY))
+				continue;
+			DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+				ap->print_id, qc->tf.protocol, status);
+
+			/* complete taskfile transaction */
+			pp->state = adma_state_idle;
+			qc->err_mask |= ac_err_mask(status);
+			if (!qc->err_mask)
+				ata_qc_complete(qc);
+			else {
+				struct ata_eh_info *ehi = &ap->link.eh_info;
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+				if (qc->err_mask == AC_ERR_DEV)
+					ata_port_abort(ap);
+				else
+					ata_port_freeze(ap);
+			}
+			handled = 1;
+		}
+	}
+	return handled;
+}
+
+static irqreturn_t adma_intr(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int handled = 0;
+
+	VPRINTK("ENTER\n");
+
+	spin_lock(&host->lock);
+	handled  = adma_intr_pkt(host) | adma_intr_mmio(host);
+	spin_unlock(&host->lock);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(handled);
+}
+
+static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		=
+	port->data_addr		= base + 0x000;
+	port->error_addr	=
+	port->feature_addr	= base + 0x004;
+	port->nsect_addr	= base + 0x008;
+	port->lbal_addr		= base + 0x00c;
+	port->lbam_addr		= base + 0x010;
+	port->lbah_addr		= base + 0x014;
+	port->device_addr	= base + 0x018;
+	port->status_addr	=
+	port->command_addr	= base + 0x01c;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + 0x038;
+}
+
+static int adma_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct adma_port_priv *pp;
+
+	adma_enter_reg_mode(ap);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
+				      GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+	/* paranoia? */
+	if ((pp->pkt_dma & 7) != 0) {
+		printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
+						(u32)pp->pkt_dma);
+		return -ENOMEM;
+	}
+	memset(pp->pkt, 0, ADMA_PKT_BYTES);
+	ap->private_data = pp;
+	adma_reinit_engine(ap);
+	return 0;
+}
+
+static void adma_port_stop(struct ata_port *ap)
+{
+	adma_reset_engine(ap);
+}
+
+static void adma_host_init(struct ata_host *host, unsigned int chip_id)
+{
+	unsigned int port_no;
+
+	/* enable/lock aGO operation */
+	writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);
+
+	/* reset the ADMA logic */
+	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
+		adma_reset_engine(host->ports[port_no]);
+}
+
+static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
+{
+	int rc;
+
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+		return rc;
+	}
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
+		return rc;
+	}
+	return 0;
+}
+
+static int adma_ata_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int rc, port_no;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
+		return -ENODEV;
+
+	rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	mmio_base = host->iomap[ADMA_MMIO_BAR];
+
+	rc = adma_set_dma_masks(pdev, mmio_base);
+	if (rc)
+		return rc;
+
+	for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
+		unsigned int offset = port_base - mmio_base;
+
+		adma_ata_setup_port(&ap->ioaddr, port_base);
+
+		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
+	}
+
+	/* initialize adapter */
+	adma_host_init(host, board_idx);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
+				 &adma_ata_sht);
+}
+
+module_pci_driver(adma_ata_pci_driver);
+
+MODULE_AUTHOR("Mark Lord");
+MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
new file mode 100644
index 0000000..6f142aa
--- /dev/null
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -0,0 +1,1334 @@
+/*
+ * drivers/ata/sata_dwc_460ex.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ *          Copyright 2006 Applied Micro Circuits Corporation
+ *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define DEBUG
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define VERBOSE_DEBUG
+#define DEBUG_NCQ
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/libata.h>
+#include <linux/slab.h>
+
+#include "libata.h"
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+/* These two are defined in "libata.h" */
+#undef	DRV_NAME
+#undef	DRV_VERSION
+
+#define DRV_NAME        "sata-dwc"
+#define DRV_VERSION     "1.3"
+
+#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
+#define sata_dwc_readl(a)	readl_relaxed(a)
+
+#ifndef NO_IRQ
+#define NO_IRQ		0
+#endif
+
+#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */
+
+enum {
+	SATA_DWC_MAX_PORTS = 1,
+
+	SATA_DWC_SCR_OFFSET = 0x24,
+	SATA_DWC_REG_OFFSET = 0x64,
+};
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+	u32 fptagr;		/* 1st party DMA tag */
+	u32 fpbor;		/* 1st party DMA buffer offset */
+	u32 fptcr;		/* 1st party DMA Xfr count */
+	u32 dmacr;		/* DMA Control */
+	u32 dbtsr;		/* DMA Burst Transaction size */
+	u32 intpr;		/* Interrupt Pending */
+	u32 intmr;		/* Interrupt Mask */
+	u32 errmr;		/* Error Mask */
+	u32 llcr;		/* Link Layer Control */
+	u32 phycr;		/* PHY Control */
+	u32 physr;		/* PHY Status */
+	u32 rxbistpd;		/* Recvd BIST pattern def register */
+	u32 rxbistpd1;		/* Recvd BIST data dword1 */
+	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
+	u32 txbistpd;		/* Trans BIST pattern def register */
+	u32 txbistpd1;		/* Trans BIST data dword1 */
+	u32 txbistpd2;		/* Trans BIST data dword2 */
+	u32 bistcr;		/* BIST Control Register */
+	u32 bistfctr;		/* BIST FIS Count Register */
+	u32 bistsr;		/* BIST Status Register */
+	u32 bistdecr;		/* BIST Dword Error count register */
+	u32 res[15];		/* Reserved locations */
+	u32 testr;		/* Test Register */
+	u32 versionr;		/* Version Register */
+	u32 idr;		/* ID Register */
+	u32 unimpl[192];	/* Unimplemented */
+	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
+};
+
+enum {
+	SCR_SCONTROL_DET_ENABLE	=	0x00000001,
+	SCR_SSTATUS_DET_PRESENT	=	0x00000001,
+	SCR_SERROR_DIAG_X	=	0x04000000,
+/* DWC SATA Register Operations */
+	SATA_DWC_TXFIFO_DEPTH	=	0x01FF,
+	SATA_DWC_RXFIFO_DEPTH	=	0x01FF,
+	SATA_DWC_DMACR_TMOD_TXCHEN =	0x00000004,
+	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
+	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
+	SATA_DWC_DMACR_TXRXCH_CLEAR =	SATA_DWC_DMACR_TMOD_TXCHEN,
+	SATA_DWC_INTPR_DMAT	=	0x00000001,
+	SATA_DWC_INTPR_NEWFP	=	0x00000002,
+	SATA_DWC_INTPR_PMABRT	=	0x00000004,
+	SATA_DWC_INTPR_ERR	=	0x00000008,
+	SATA_DWC_INTPR_NEWBIST	=	0x00000010,
+	SATA_DWC_INTPR_IPF	=	0x10000000,
+	SATA_DWC_INTMR_DMATM	=	0x00000001,
+	SATA_DWC_INTMR_NEWFPM	=	0x00000002,
+	SATA_DWC_INTMR_PMABRTM	=	0x00000004,
+	SATA_DWC_INTMR_ERRM	=	0x00000008,
+	SATA_DWC_INTMR_NEWBISTM	=	0x00000010,
+	SATA_DWC_LLCR_SCRAMEN	=	0x00000001,
+	SATA_DWC_LLCR_DESCRAMEN	=	0x00000002,
+	SATA_DWC_LLCR_RPDEN	=	0x00000004,
+/* These are all the error bits; zeros are reserved fields. */
+	SATA_DWC_SERROR_ERR_BITS =	0x0FFF0F03
+};
+
+#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
+#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
+						 SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
+						 SATA_DWC_DMACR_TMOD_TXCHEN)
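+/* DBTSR burst sizes are programmed in 32-bit words, hence the size/4 */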
+#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
+						 << 16)
+struct sata_dwc_device {
+	struct device		*dev;		/* generic device struct */
+	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
+	struct ata_host		*host;
+	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
+	u32			sactive_issued;
+	u32			sactive_queued;
+	struct phy		*phy;
+	phys_addr_t		dmadr;
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	struct dw_dma_chip	*dma;
+#endif
+};
+
+#define SATA_DWC_QCMD_MAX	32
+
+struct sata_dwc_device_port {
+	struct sata_dwc_device	*hsdev;
+	int			cmd_issued[SATA_DWC_QCMD_MAX];
+	int			dma_pending[SATA_DWC_QCMD_MAX];
+
+	/* DMA info */
+	struct dma_chan			*chan;
+	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
+	u32				dma_interrupt_count;
+};
+
+/*
+ * Commonly used DWC SATA driver macros
+ */
+#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
+#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
+#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
+
+enum {
+	SATA_DWC_CMD_ISSUED_NOT		= 0,
+	SATA_DWC_CMD_ISSUED_PEND	= 1,
+	SATA_DWC_CMD_ISSUED_EXEC	= 2,
+	SATA_DWC_CMD_ISSUED_NODATA	= 3,
+
+	SATA_DWC_DMA_PENDING_NONE	= 0,
+	SATA_DWC_DMA_PENDING_TX		= 1,
+	SATA_DWC_DMA_PENDING_RX		= 2,
+};
+
+/*
+ * Prototypes
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+				u32 check_status);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
+
+static struct dw_dma_slave sata_dwc_dma_dws = {
+	.src_id = 0,
+	.dst_id = 0,
+	.m_master = 1,
+	.p_master = 0,
+};
+
+static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+
+	if (dws->dma_dev != chan->device->dev)
+		return false;
+
+	chan->private = dws;
+	return true;
+}
+
+static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
+{
+	struct sata_dwc_device *hsdev = hsdevp->hsdev;
+	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+	dma_cap_mask_t mask;
+
+	dws->dma_dev = hsdev->dev;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Acquire DMA channel */
+	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
+	if (!hsdevp->chan) {
+		dev_err(hsdev->dev, "%s: dma channel unavailable\n",
+			 __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int sata_dwc_dma_init_old(struct platform_device *pdev,
+				 struct sata_dwc_device *hsdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+
+	hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
+	if (!hsdev->dma)
+		return -ENOMEM;
+
+	hsdev->dma->dev = &pdev->dev;
+	hsdev->dma->id = pdev->id;
+
+	/* Get SATA DMA interrupt number */
+	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
+	if (hsdev->dma->irq == NO_IRQ) {
+		dev_err(&pdev->dev, "no SATA DMA irq\n");
+		return -ENODEV;
+	}
+
+	/* Get physical SATA DMA register base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hsdev->dma->regs))
+		return PTR_ERR(hsdev->dma->regs);
+
+	/* Initialize AHB DMAC */
+	return dw_dma_probe(hsdev->dma);
+}
+
+static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
+{
+	if (!hsdev->dma)
+		return;
+
+	dw_dma_remove(hsdev->dma);
+}
+
+#endif
+
+static const char *get_prot_descript(u8 protocol)
+{
+	switch (protocol) {
+	case ATA_PROT_NODATA:
+		return "ATA no data";
+	case ATA_PROT_PIO:
+		return "ATA PIO";
+	case ATA_PROT_DMA:
+		return "ATA DMA";
+	case ATA_PROT_NCQ:
+		return "ATA NCQ";
+	case ATA_PROT_NCQ_NODATA:
+		return "ATA NCQ no data";
+	case ATAPI_PROT_NODATA:
+		return "ATAPI no data";
+	case ATAPI_PROT_PIO:
+		return "ATAPI PIO";
+	case ATAPI_PROT_DMA:
+		return "ATAPI DMA";
+	default:
+		return "unknown";
+	}
+}
+
+static const char *get_dma_dir_descript(int dma_dir)
+{
+	switch ((enum dma_data_direction)dma_dir) {
+	case DMA_BIDIRECTIONAL:
+		return "bidirectional";
+	case DMA_TO_DEVICE:
+		return "to device";
+	case DMA_FROM_DEVICE:
+		return "from device";
+	default:
+		return "none";
+	}
+}
+
+static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	dev_vdbg(ap->dev,
+		"taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
+		tf->command, get_prot_descript(tf->protocol), tf->flags,
+		tf->device);
+	dev_vdbg(ap->dev,
+		"feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
+		tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
+	dev_vdbg(ap->dev,
+		"hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
+		tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+		tf->hob_lbah);
+}
+
+static void dma_dwc_xfer_done(void *hsdev_instance)
+{
+	unsigned long flags;
+	struct sata_dwc_device *hsdev = hsdev_instance;
+	struct ata_host *host = hsdev->host;
+	struct ata_port *ap;
+	struct sata_dwc_device_port *hsdevp;
+	u8 tag = 0;
+	unsigned int port = 0;
+
+	spin_lock_irqsave(&host->lock, flags);
+	ap = host->ports[port];
+	hsdevp = HSDEVP_FROM_AP(ap);
+	tag = ap->link.active_tag;
+
+	/*
+	 * Each DMA command produces 2 interrupts.  Only
+	 * complete the command after both interrupts have been
+	 * seen. (See sata_dwc_isr())
+	 */
+	hsdevp->dma_interrupt_count++;
+	sata_dwc_clear_dmacr(hsdevp, tag);
+
+	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
+		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
+			tag, hsdevp->dma_pending[tag]);
+	}
+
+	if ((hsdevp->dma_interrupt_count % 2) == 0)
+		sata_dwc_dma_xfer_complete(ap, 1);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct dma_slave_config sconf;
+	struct dma_async_tx_descriptor *desc;
+
+	if (qc->dma_dir == DMA_DEV_TO_MEM) {
+		sconf.src_addr = hsdev->dmadr;
+		sconf.device_fc = false;
+	} else {	/* DMA_MEM_TO_DEV */
+		sconf.dst_addr = hsdev->dmadr;
+		sconf.device_fc = false;
+	}
+
+	sconf.direction = qc->dma_dir;
+	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
+	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
+	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	dmaengine_slave_config(hsdevp->chan, &sconf);
+
+	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
+	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
+				       qc->dma_dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (!desc)
+		return NULL;
+
+	desc->callback = dma_dwc_xfer_done;
+	desc->callback_param = hsdev;
+
+	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
+		qc->sg, qc->n_elem, &hsdev->dmadr);
+
+	return desc;
+}
+
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+	if (scr > SCR_NOTIFICATION) {
+		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			__func__, scr);
+		return -EINVAL;
+	}
+
+	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+		link->ap->print_id, scr, *val);
+
+	return 0;
+}
+
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+		link->ap->print_id, scr, val);
+	if (scr > SCR_NOTIFICATION) {
+		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			 __func__, scr);
+		return -EINVAL;
+	}
+	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
+
+	return 0;
+}
+
+static void clear_serror(struct ata_port *ap)
+{
+	u32 val;
+	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
+	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
+}
+
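+/*
+ * Note: the 'bit' argument is currently unused; this writes the full
+ * pending-interrupt register back to itself, which clears every asserted
+ * bit (intpr appears to be write-1-to-clear, as the usage here suggests),
+ * not just the requested one.
+ */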
+static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
+			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
+}
+
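+/* e.g. tag 5 -> 0x00000020; the "& 0x1f" keeps tags >= 32 from shifting out */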
+static u32 qcmd_tag_to_mask(u8 tag)
+{
+	return 0x00000001 << (tag & 0x1f);
+}
+
+/* See ahci.c */
+static void sata_dwc_error_intr(struct ata_port *ap,
+				struct sata_dwc_device *hsdev, uint intpr)
+{
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
+	u8 status, tag;
+
+	ata_ehi_clear_desc(ehi);
+
+	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
+	status = ap->ops->sff_check_status(ap);
+
+	tag = ap->link.active_tag;
+
+	dev_err(ap->dev,
+		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
+		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
+		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
+
+	/* Clear error register and interrupt bit */
+	clear_serror(ap);
+	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
+
+	/* This is the only error happening now.  TODO check for exact error */
+
+	err_mask |= AC_ERR_HOST_BUS;
+	action |= ATA_EH_RESET;
+
+	/* Pass this on to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
+
+	qc = ata_qc_from_tag(ap, tag);
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
+
+	ata_port_abort(ap);
+}
+
+/*
+ * Function : sata_dwc_isr
+ * arguments : int irq, void *dev_instance
+ * Return value : irqreturn_t - status of IRQ
+ * This interrupt handler is called via the registered port ops:
+ * .irq_handler = sata_dwc_isr
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+	struct ata_host *host = (struct ata_host *)dev_instance;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+	struct ata_port *ap;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	u8 status, tag;
+	int handled, num_processed, port = 0;
+	uint intpr, sactive, sactive2, tag_mask;
+	struct sata_dwc_device_port *hsdevp;
+
+	hsdev->sactive_issued = 0;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/* Read the interrupt register */
+	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
+
+	ap = host->ports[port];
+	hsdevp = HSDEVP_FROM_AP(ap);
+
+	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
+		ap->link.active_tag);
+
+	/* Check for error interrupt */
+	if (intpr & SATA_DWC_INTPR_ERR) {
+		sata_dwc_error_intr(ap, hsdev, intpr);
+		handled = 1;
+		goto DONE;
+	}
+
+	/* Check for DMA SETUP FIS (FP DMA) interrupt */
+	if (intpr & SATA_DWC_INTPR_NEWFP) {
+		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+
+		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
+		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
+		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
+			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
+		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
+
+		qc = ata_qc_from_tag(ap, tag);
+		/*
+		 * Start FP DMA for NCQ command.  At this point the tag is the
+		 * active tag.  It is the tag that matches the command about to
+		 * be completed.
+		 */
+		qc->ap->link.active_tag = tag;
+		sata_dwc_bmdma_start_by_tag(qc, tag);
+
+		handled = 1;
+		goto DONE;
+	}
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
+
+	/* If no sactive was issued and tag_mask is zero, then this is not NCQ */
+	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
+		if (ap->link.active_tag == ATA_TAG_POISON)
+			tag = 0;
+		else
+			tag = ap->link.active_tag;
+		qc = ata_qc_from_tag(ap, tag);
+
+		/* DEV interrupt w/ no active qc? */
+		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+			dev_err(ap->dev,
+				"%s interrupt with no active qc qc=%p\n",
+				__func__, qc);
+			ap->ops->sff_check_status(ap);
+			handled = 1;
+			goto DONE;
+		}
+		status = ap->ops->sff_check_status(ap);
+
+		qc->ap->link.active_tag = tag;
+		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+		if (status & ATA_ERR) {
+			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
+			sata_dwc_qc_complete(ap, qc, 1);
+			handled = 1;
+			goto DONE;
+		}
+
+		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
+			__func__, get_prot_descript(qc->tf.protocol));
+DRVSTILLBUSY:
+		if (ata_is_dma(qc->tf.protocol)) {
+			/*
+			 * Each DMA transaction produces 2 interrupts. The DMAC
+			 * transfer complete interrupt and the SATA controller
+			 * operation done interrupt. The command should be
+			 * completed only after both interrupts are seen.
+			 */
+			hsdevp->dma_interrupt_count++;
+			if (hsdevp->dma_pending[tag] ==
+					SATA_DWC_DMA_PENDING_NONE) {
+				dev_err(ap->dev,
+					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
+					__func__, intpr, status,
+					hsdevp->dma_pending[tag]);
+			}
+
+			if ((hsdevp->dma_interrupt_count % 2) == 0)
+				sata_dwc_dma_xfer_complete(ap, 1);
+		} else if (ata_is_pio(qc->tf.protocol)) {
+			ata_sff_hsm_move(ap, qc, status, 0);
+			handled = 1;
+			goto DONE;
+		} else {
+			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+				goto DRVSTILLBUSY;
+		}
+
+		handled = 1;
+		goto DONE;
+	}
+
+	/*
+	 * This is an NCQ command. At this point we need to figure out for which
+	 * tags we have gotten a completion interrupt.  One interrupt may serve
+	 * as completion for more than one operation when commands are queued
+	 * (NCQ).  We need to process each completed command.
+	 */
+
+	 /* process completed commands */
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
+
+	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
+		dev_dbg(ap->dev,
+			"%s NCQ:sactive=0x%08x  sactive_issued=0x%08x tag_mask=0x%08x\n",
+			__func__, sactive, hsdev->sactive_issued, tag_mask);
+	}
+
+	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
+		dev_warn(ap->dev,
+			 "Bad tag mask?  sactive=0x%08x sactive_issued=0x%08x  tag_mask=0x%08x\n",
+			 sactive, hsdev->sactive_issued, tag_mask);
+	}
+
+	/* read just to clear ... not bad if currently still busy */
+	status = ap->ops->sff_check_status(ap);
+	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
+
+	tag = 0;
+	num_processed = 0;
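+	/* Walk tag_mask from the LSB: skip to the next set bit, clear it,
+	 * then complete the command for that tag */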
+	while (tag_mask) {
+		num_processed++;
+		while (!(tag_mask & 0x00000001)) {
+			tag++;
+			tag_mask <<= 1;
+		}
+
+		tag_mask &= (~0x00000001);
+		qc = ata_qc_from_tag(ap, tag);
+
+		/* To be picked up by completion functions */
+		qc->ap->link.active_tag = tag;
+		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+		/* Let libata/scsi layers handle error */
+		if (status & ATA_ERR) {
+			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
+				status);
+			sata_dwc_qc_complete(ap, qc, 1);
+			handled = 1;
+			goto DONE;
+		}
+
+		/* Process completed command */
+		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
+			get_prot_descript(qc->tf.protocol));
+		if (ata_is_dma(qc->tf.protocol)) {
+			hsdevp->dma_interrupt_count++;
+			if (hsdevp->dma_pending[tag] ==
+					SATA_DWC_DMA_PENDING_NONE)
+				dev_warn(ap->dev, "%s: DMA not pending?\n",
+					__func__);
+			if ((hsdevp->dma_interrupt_count % 2) == 0)
+				sata_dwc_dma_xfer_complete(ap, 1);
+		} else {
+			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+				goto STILLBUSY;
+		}
+		continue;
+
+STILLBUSY:
+		ap->stats.idle_irq++;
+		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
+			ap->print_id);
+	} /* while tag_mask */
+
+	/*
+	 * Check whether any commands completed while we were processing our
+	 * initial set of completed commands (reading status clears
+	 * interrupts, so we might miss a completion interrupt that arrived
+	 * while we were processing, since we read status as part of
+	 * processing each completed command).
+	 */
+	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
+	if (sactive2 != sactive) {
+		dev_dbg(ap->dev,
+			"More completed - sactive=0x%x sactive2=0x%x\n",
+			sactive, sactive2);
+	}
+	handled = 1;
+
+DONE:
+	spin_unlock_irqrestore(&host->lock, flags);
+	return IRQ_RETVAL(handled);
+}
+
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
+
+	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
+		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
+	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
+		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
+	} else {
+		/*
+		 * This should not happen, it indicates the driver is out of
+		 * sync.  If it does happen, clear dmacr anyway.
+		 */
+		dev_err(hsdev->dev,
+			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
+			__func__, tag, hsdevp->dma_pending[tag], dmacr);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_TXRXCH_CLEAR);
+	}
+}
+
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+{
+	struct ata_queued_cmd *qc;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	u8 tag = 0;
+
+	tag = ap->link.active_tag;
+	qc = ata_qc_from_tag(ap, tag);
+	if (!qc) {
+		dev_err(ap->dev, "failed to get qc");
+		return;
+	}
+
+#ifdef DEBUG_NCQ
+	if (tag > 0) {
+		dev_info(ap->dev,
+			 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
+			 __func__, qc->hw_tag, qc->tf.command,
+			 get_dma_dir_descript(qc->dma_dir),
+			 get_prot_descript(qc->tf.protocol),
+			 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
+	}
+#endif
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
+			dev_err(ap->dev,
+				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
+				__func__,
+				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
+		}
+
+		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+		sata_dwc_qc_complete(ap, qc, check_status);
+		ap->link.active_tag = ATA_TAG_POISON;
+	} else {
+		sata_dwc_qc_complete(ap, qc, check_status);
+	}
+}
+
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+				u32 check_status)
+{
+	u8 status = 0;
+	u32 mask = 0x0;
+	u8 tag = qc->hw_tag;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	hsdev->sactive_queued = 0;
+	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
+
+	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
+		dev_err(ap->dev, "TX DMA PENDING\n");
+	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
+		dev_err(ap->dev, "RX DMA PENDING\n");
+	dev_dbg(ap->dev,
+		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
+		qc->tf.command, status, ap->print_id, qc->tf.protocol);
+
+	/* clear active bit */
+	mask = (~(qcmd_tag_to_mask(tag)));
+	hsdev->sactive_queued = hsdev->sactive_queued & mask;
+	hsdev->sactive_issued = hsdev->sactive_issued & mask;
+	ata_qc_complete(qc);
+	return 0;
+}
+
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+	/* Enable selected interrupts by setting the interrupt mask register */
+	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
+			SATA_DWC_INTMR_ERRM |
+			SATA_DWC_INTMR_NEWFPM |
+			SATA_DWC_INTMR_PMABRTM |
+			SATA_DWC_INTMR_DMATM);
+	/*
+	 * Unmask the error bits that should trigger an error interrupt by
+	 * setting the error mask register.
+	 */
+	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+
+	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
+		 __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
+		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
+}
+
+static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		= base + 0x00;
+	port->data_addr		= base + 0x00;
+
+	port->error_addr	= base + 0x04;
+	port->feature_addr	= base + 0x04;
+
+	port->nsect_addr	= base + 0x08;
+
+	port->lbal_addr		= base + 0x0c;
+	port->lbam_addr		= base + 0x10;
+	port->lbah_addr		= base + 0x14;
+
+	port->device_addr	= base + 0x18;
+	port->command_addr	= base + 0x1c;
+	port->status_addr	= base + 0x1c;
+
+	port->altstatus_addr	= base + 0x20;
+	port->ctl_addr		= base + 0x20;
+}
+
+static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
+{
+	struct sata_dwc_device *hsdev = hsdevp->hsdev;
+	struct device *dev = hsdev->dev;
+
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	if (!of_find_property(dev->of_node, "dmas", NULL))
+		return sata_dwc_dma_get_channel_old(hsdevp);
+#endif
+
+	hsdevp->chan = dma_request_chan(dev, "sata-dma");
+	if (IS_ERR(hsdevp->chan)) {
+		dev_err(dev, "failed to allocate dma channel: %ld\n",
+			PTR_ERR(hsdevp->chan));
+		return PTR_ERR(hsdevp->chan);
+	}
+
+	return 0;
+}
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_port *ap
+ * Return value : returns 0 on success, error code otherwise
+ * This function allocates the scatter/gather LLI table for AHB DMA
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+	int err = 0;
+	struct sata_dwc_device *hsdev;
+	struct sata_dwc_device_port *hsdevp = NULL;
+	struct device *pdev;
+	int i;
+
+	hsdev = HSDEV_FROM_AP(ap);
+
+	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+	hsdev->host = ap->host;
+	pdev = ap->host->dev;
+	if (!pdev) {
+		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+		err = -ENODEV;
+		goto CLEANUP;
+	}
+
+	/* Allocate Port Struct */
+	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+	if (!hsdevp) {
+		err = -ENOMEM;
+		goto CLEANUP;
+	}
+	hsdevp->hsdev = hsdev;
+
+	err = sata_dwc_dma_get_channel(hsdevp);
+	if (err)
+		goto CLEANUP_ALLOC;
+
+	err = phy_power_on(hsdev->phy);
+	if (err)
+		goto CLEANUP_ALLOC;
+
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
+	ap->bmdma_prd_dma = 0;
+
+	if (ap->port_no == 0)  {
+		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+			__func__);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
+			 __func__);
+		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+	}
+
+	/* Clear any error bits before libata starts issuing commands */
+	clear_serror(ap);
+	ap->private_data = hsdevp;
+	dev_dbg(ap->dev, "%s: done\n", __func__);
+	return 0;
+
+CLEANUP_ALLOC:
+	kfree(hsdevp);
+CLEANUP:
+	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
+	return err;
+}
+
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
+
+	dmaengine_terminate_sync(hsdevp->chan);
+	dma_release_channel(hsdevp->chan);
+	phy_power_off(hsdev->phy);
+
+	kfree(hsdevp);
+	ap->private_data = NULL;
+}
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ * This function keeps track of individual command tag ids and calls
+ * ata_sff_exec_command in libata
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+					 struct ata_taskfile *tf,
+					 u8 tag, u32 cmd_issued)
+{
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
+		ata_get_cmd_descript(tf->command), tag);
+
+	hsdevp->cmd_issued[tag] = cmd_issued;
+
+	/*
+	 * Clear SError before executing a new command.
+	 * sata_dwc_scr_write and read cannot be used here. Clearing the PM
+	 * managed SError register for the disk needs to be done before the
+	 * task file is loaded.
+	 */
+	clear_serror(ap);
+	ata_sff_exec_command(ap, tf);
+}
+
+static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
+				     SATA_DWC_CMD_ISSUED_PEND);
+}
+
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	u8 tag = qc->hw_tag;
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+	} else {
+		tag = 0;
+	}
+	sata_dwc_bmdma_setup_by_tag(qc, tag);
+}
+
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	int start_dma;
+	u32 reg;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
+	int dir = qc->dma_dir;
+
+	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+		start_dma = 1;
+		if (dir == DMA_TO_DEVICE)
+			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+		else
+			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+	} else {
+		dev_err(ap->dev,
+			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
+			__func__, hsdevp->cmd_issued[tag], tag);
+		start_dma = 0;
+	}
+
+	dev_dbg(ap->dev,
+		"%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
+		__func__, qc, tag, qc->tf.command,
+		get_dma_dir_descript(qc->dma_dir), start_dma);
+	sata_dwc_tf_dump(ap, &qc->tf);
+
+	if (start_dma) {
+		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
+		if (reg & SATA_DWC_SERROR_ERR_BITS) {
+			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+				__func__, reg);
+		}
+
+		if (dir == DMA_TO_DEVICE)
+			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+					SATA_DWC_DMACR_TXCHEN);
+		else
+			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+					SATA_DWC_DMACR_RXCHEN);
+
+		/* Enable AHB DMA transfer on the specified channel */
+		dmaengine_submit(desc);
+		dma_async_issue_pending(hsdevp->chan);
+	}
+}
+
+static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
+{
+	u8 tag = qc->hw_tag;
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+	} else {
+		tag = 0;
+	}
+	dev_dbg(qc->ap->dev, "%s\n", __func__);
+	sata_dwc_bmdma_start_by_tag(qc, tag);
+}
+
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+	u32 sactive;
+	u8 tag = qc->hw_tag;
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+#ifdef DEBUG_NCQ
+	if (qc->hw_tag > 0 || ap->link.sactive > 1)
+		dev_info(ap->dev,
+			 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
+			 __func__, ap->print_id, qc->tf.command,
+			 ata_get_cmd_descript(qc->tf.command),
+			 qc->hw_tag, get_prot_descript(qc->tf.protocol),
+			 ap->link.active_tag, ap->link.sactive);
+#endif
+
+	if (!ata_is_ncq(qc->tf.protocol))
+		tag = 0;
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
+		if (!hsdevp->desc[tag])
+			return AC_ERR_SYSTEM;
+	} else {
+		hsdevp->desc[tag] = NULL;
+	}
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+		sactive |= (0x00000001 << tag);
+		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
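+		/*
+		 * For NCQ the host sets the tag's SActive bit itself before
+		 * issuing the FPDMA command; the device later clears it with
+		 * a Set Device Bits FIS on completion.
+		 */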
+
+		dev_dbg(qc->ap->dev,
+			"%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
+			__func__, tag, qc->ap->link.sactive, sactive);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);
+		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
+					     SATA_DWC_CMD_ISSUED_PEND);
+	} else {
+		return ata_bmdma_qc_issue(qc);
+	}
+	return 0;
+}
+
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+	ata_sff_error_handler(ap);
+}
+
+static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
+	int ret;
+
+	ret = sata_sff_hardreset(link, class, deadline);
+
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Reconfigure the DMA control register */
+	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+			SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+	/* Reconfigure the DMA Burst Transaction Size register */
+	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
+
+	return ret;
+}
+
+static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+	/* SATA DWC is master only */
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	/*
+	 * test-only: Currently this driver doesn't handle NCQ
+	 * correctly. We enable NCQ but set the queue depth to a
+	 * max of 1. This will get fixed in a future release.
+	 */
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	/* .can_queue		= ATA_MAX_QUEUE, */
+	/*
+	 * Make sure a LLI block is not created that will span 8K max FIS
+	 * boundary. If the block spans such a FIS boundary, there is a chance
+	 * that a DMA burst will cross that boundary -- this results in an
+	 * error in the host controller.
+	 */
+	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
+};
+
+static struct ata_port_operations sata_dwc_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.error_handler		= sata_dwc_error_handler,
+	.hardreset		= sata_dwc_hardreset,
+
+	.qc_issue		= sata_dwc_qc_issue,
+
+	.scr_read		= sata_dwc_scr_read,
+	.scr_write		= sata_dwc_scr_write,
+
+	.port_start		= sata_dwc_port_start,
+	.port_stop		= sata_dwc_port_stop,
+
+	.sff_dev_select		= sata_dwc_dev_select,
+
+	.bmdma_setup		= sata_dwc_bmdma_setup,
+	.bmdma_start		= sata_dwc_bmdma_start,
+};
+
+static const struct ata_port_info sata_dwc_port_info[] = {
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &sata_dwc_ops,
+	},
+};
+
+static int sata_dwc_probe(struct platform_device *ofdev)
+{
+	struct sata_dwc_device *hsdev;
+	u32 idr, versionr;
+	char *ver = (char *)&versionr;
+	void __iomem *base;
+	int err = 0;
+	int irq;
+	struct ata_host *host;
+	struct ata_port_info pi = sata_dwc_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device_node *np = ofdev->dev.of_node;
+	struct resource *res;
+
+	/* Allocate DWC SATA device */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+	hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
+	if (!host || !hsdev)
+		return -ENOMEM;
+
+	host->private_data = hsdev;
+
+	/* Ioremap SATA registers */
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+	/* Synopsys DWC SATA specific Registers */
+	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
+	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
+
+	/* Setup port */
+	host->ports[0]->ioaddr.cmd_addr = base;
+	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
+
+	/* Read the ID and Version Registers */
+	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
+	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
+	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+		   idr, ver[0], ver[1], ver[2]);
+
+	/* Save dev for later use in dev_xxx() routines */
+	hsdev->dev = &ofdev->dev;
+
+	/* Enable SATA Interrupts */
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Get SATA interrupt number */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	if (!of_find_property(np, "dmas", NULL)) {
+		err = sata_dwc_dma_init_old(ofdev, hsdev);
+		if (err)
+			goto error_out;
+	}
+#endif
+
+	hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+	if (IS_ERR(hsdev->phy)) {
+		err = PTR_ERR(hsdev->phy);
+		hsdev->phy = NULL;
+		goto error_out;
+	}
+
+	err = phy_init(hsdev->phy);
+	if (err)
+		goto error_out;
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+	if (err) {
+		dev_err(&ofdev->dev, "failed to activate host\n");
+		goto error_out;
+	}
+
+	return 0;
+
+error_out:
+	phy_exit(hsdev->phy);
+	return err;
+}
+
+static int sata_dwc_remove(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_dwc_device *hsdev = host->private_data;
+
+	ata_host_detach(host);
+
+	phy_exit(hsdev->phy);
+
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+	/* Free SATA DMA resources */
+	sata_dwc_dma_exit_old(hsdev);
+#endif
+
+	dev_dbg(&ofdev->dev, "done\n");
+	return 0;
+}
+
+static const struct of_device_id sata_dwc_match[] = {
+	{ .compatible = "amcc,sata-460ex", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+static struct platform_driver sata_dwc_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = sata_dwc_match,
+	},
+	.probe = sata_dwc_probe,
+	.remove = sata_dwc_remove,
+};
+
+module_platform_driver(sata_dwc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
new file mode 100644
index 0000000..4dc528b
--- /dev/null
+++ b/drivers/ata/sata_fsl.c
@@ -0,0 +1,1636 @@
+/*
+ * drivers/ata/sata_fsl.c
+ *
+ * Freescale 3.0Gbps SATA device driver
+ *
+ * Author: Ashish Kalra <ashish.kalra@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Copyright (c) 2006-2007, 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+static unsigned int intr_coalescing_count;
+module_param(intr_coalescing_count, int, S_IRUGO);
+MODULE_PARM_DESC(intr_coalescing_count,
+				 "INT coalescing count threshold (1..31)");
+
+static unsigned int intr_coalescing_ticks;
+module_param(intr_coalescing_ticks, int, S_IRUGO);
+MODULE_PARM_DESC(intr_coalescing_ticks,
+				 "INT coalescing timer threshold in AHB ticks");
+/* Controller information */
+enum {
+	SATA_FSL_QUEUE_DEPTH	= 16,
+	SATA_FSL_MAX_PRD	= 63,
+	SATA_FSL_MAX_PRD_USABLE	= SATA_FSL_MAX_PRD - 1,
+	SATA_FSL_MAX_PRD_DIRECT	= 16,	/* Direct PRDT entries */
+
+	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+				   ATA_FLAG_PMP | ATA_FLAG_NCQ |
+				   ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
+
+	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
+	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
+	SATA_FSL_CMD_SLOT_SIZE  = (SATA_FSL_MAX_CMDS * SATA_FSL_CMD_HDR_SIZE),
+
+	/*
+	 * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and
+	 * chained indirect PRDEs up to a max count of 63.
+	 * We are allocating an array of 63 PRDEs contiguously, but PRDE #15
+	 * will be set up as an indirect descriptor, pointing to its next
+	 * (contiguous) PRDE. Though chained indirect PRDE arrays are
+	 * supported, it is more efficient to use a direct PRDT and
+	 * a single chain/link to the indirect PRDE array/PRDT.
+	 */
+
+	SATA_FSL_CMD_DESC_CFIS_SZ	= 32,
+	SATA_FSL_CMD_DESC_SFIS_SZ	= 32,
+	SATA_FSL_CMD_DESC_ACMD_SZ	= 16,
+	SATA_FSL_CMD_DESC_RSRVD		= 16,
+
+	SATA_FSL_CMD_DESC_SIZE	= (SATA_FSL_CMD_DESC_CFIS_SZ +
+				 SATA_FSL_CMD_DESC_SFIS_SZ +
+				 SATA_FSL_CMD_DESC_ACMD_SZ +
+				 SATA_FSL_CMD_DESC_RSRVD +
+				 SATA_FSL_MAX_PRD * 16),
+
+	SATA_FSL_CMD_DESC_OFFSET_TO_PRDT	=
+				(SATA_FSL_CMD_DESC_CFIS_SZ +
+				 SATA_FSL_CMD_DESC_SFIS_SZ +
+				 SATA_FSL_CMD_DESC_ACMD_SZ +
+				 SATA_FSL_CMD_DESC_RSRVD),
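+	/* i.e. the PRDT starts 96 bytes (32+32+16+16) into each descriptor;
+	 * with 63 PRDEs of 16 bytes each, SATA_FSL_CMD_DESC_SIZE works out
+	 * to 96 + 1008 = 1104 bytes */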
+
+	SATA_FSL_CMD_DESC_AR_SZ	= (SATA_FSL_CMD_DESC_SIZE * SATA_FSL_MAX_CMDS),
+	SATA_FSL_PORT_PRIV_DMA_SZ = (SATA_FSL_CMD_SLOT_SIZE +
+					SATA_FSL_CMD_DESC_AR_SZ),
+
+	/*
+	 * MPC8315 has two SATA controllers, SATA1 & SATA2
+	 * (one port per controller)
+	 * MPC837x has 2/4 controllers, one port per controller
+	 */
+
+	SATA_FSL_MAX_PORTS	= 1,
+
+	SATA_FSL_IRQ_FLAG	= IRQF_SHARED,
+};
+
+/*
+ * Interrupt Coalescing Control Register bitdefs
+ */
+enum {
+	ICC_MIN_INT_COUNT_THRESHOLD	= 1,
+	ICC_MAX_INT_COUNT_THRESHOLD	= ((1 << 5) - 1),
+	ICC_MIN_INT_TICKS_THRESHOLD	= 0,
+	ICC_MAX_INT_TICKS_THRESHOLD	= ((1 << 19) - 1),
+	ICC_SAFE_INT_TICKS		= 1,
+};
+
+/*
+ * Host Controller command register set - per port
+ */
+enum {
+	CQ = 0,
+	CA = 8,
+	CC = 0x10,
+	CE = 0x18,
+	DE = 0x20,
+	CHBA = 0x24,
+	HSTATUS = 0x28,
+	HCONTROL = 0x2C,
+	CQPMP = 0x30,
+	SIGNATURE = 0x34,
+	ICC = 0x38,
+
+	/*
+	 * Host Status Register (HStatus) bitdefs
+	 */
+	ONLINE = (1 << 31),
+	GOING_OFFLINE = (1 << 30),
+	BIST_ERR = (1 << 29),
+	CLEAR_ERROR = (1 << 27),
+
+	FATAL_ERR_HC_MASTER_ERR = (1 << 18),
+	FATAL_ERR_PARITY_ERR_TX = (1 << 17),
+	FATAL_ERR_PARITY_ERR_RX = (1 << 16),
+	FATAL_ERR_DATA_UNDERRUN = (1 << 13),
+	FATAL_ERR_DATA_OVERRUN = (1 << 12),
+	FATAL_ERR_CRC_ERR_TX = (1 << 11),
+	FATAL_ERR_CRC_ERR_RX = (1 << 10),
+	FATAL_ERR_FIFO_OVRFL_TX = (1 << 9),
+	FATAL_ERR_FIFO_OVRFL_RX = (1 << 8),
+
+	FATAL_ERROR_DECODE = FATAL_ERR_HC_MASTER_ERR |
+	    FATAL_ERR_PARITY_ERR_TX |
+	    FATAL_ERR_PARITY_ERR_RX |
+	    FATAL_ERR_DATA_UNDERRUN |
+	    FATAL_ERR_DATA_OVERRUN |
+	    FATAL_ERR_CRC_ERR_TX |
+	    FATAL_ERR_CRC_ERR_RX |
+	    FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX,
+
+	INT_ON_DATA_LENGTH_MISMATCH = (1 << 12),
+	INT_ON_FATAL_ERR = (1 << 5),
+	INT_ON_PHYRDY_CHG = (1 << 4),
+
+	INT_ON_SIGNATURE_UPDATE = (1 << 3),
+	INT_ON_SNOTIFY_UPDATE = (1 << 2),
+	INT_ON_SINGL_DEVICE_ERR = (1 << 1),
+	INT_ON_CMD_COMPLETE = 1,
+
+	INT_ON_ERROR = INT_ON_FATAL_ERR | INT_ON_SNOTIFY_UPDATE |
+	    INT_ON_PHYRDY_CHG | INT_ON_SINGL_DEVICE_ERR,
+
+	/*
+	 * Host Control Register (HControl) bitdefs
+	 */
+	HCONTROL_ONLINE_PHY_RST = (1 << 31),
+	HCONTROL_FORCE_OFFLINE = (1 << 30),
+	HCONTROL_LEGACY = (1 << 28),
+	HCONTROL_PARITY_PROT_MOD = (1 << 14),
+	HCONTROL_DPATH_PARITY = (1 << 12),
+	HCONTROL_SNOOP_ENABLE = (1 << 10),
+	HCONTROL_PMP_ATTACHED = (1 << 9),
+	HCONTROL_COPYOUT_STATFIS = (1 << 8),
+	IE_ON_FATAL_ERR = (1 << 5),
+	IE_ON_PHYRDY_CHG = (1 << 4),
+	IE_ON_SIGNATURE_UPDATE = (1 << 3),
+	IE_ON_SNOTIFY_UPDATE = (1 << 2),
+	IE_ON_SINGL_DEVICE_ERR = (1 << 1),
+	IE_ON_CMD_COMPLETE = 1,
+
+	DEFAULT_PORT_IRQ_ENABLE_MASK = IE_ON_FATAL_ERR | IE_ON_PHYRDY_CHG |
+	    IE_ON_SIGNATURE_UPDATE | IE_ON_SNOTIFY_UPDATE |
+	    IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
+
+	EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
+	DATA_SNOOP_ENABLE_V1 = (1 << 22),
+	DATA_SNOOP_ENABLE_V2 = (1 << 28),
+};
+
+/*
+ * SATA Superset Registers
+ */
+enum {
+	SSTATUS = 0,
+	SERROR = 4,
+	SCONTROL = 8,
+	SNOTIFY = 0xC,
+};
+
+/*
+ * Control Status Register Set
+ */
+enum {
+	TRANSCFG = 0,
+	TRANSSTATUS = 4,
+	LINKCFG = 8,
+	LINKCFG1 = 0xC,
+	LINKCFG2 = 0x10,
+	LINKSTATUS = 0x14,
+	LINKSTATUS1 = 0x18,
+	PHYCTRLCFG = 0x1C,
+	COMMANDSTAT = 0x20,
+};
+
+/* TRANSCFG (transport-layer) configuration control */
+enum {
+	TRANSCFG_RX_WATER_MARK = (1 << 4),
+};
+
+/* PHY (link-layer) configuration control */
+enum {
+	PHY_BIST_ENABLE = 0x01,
+};
+
+/*
+ * Command Header Table entry, i.e, command slot
+ * 4 Dwords per command slot, command header size ==  64 Dwords.
+ */
+struct cmdhdr_tbl_entry {
+	u32 cda;
+	u32 prde_fis_len;
+	u32 ttl;
+	u32 desc_info;
+};
+
+/*
+ * Description information bitdefs
+ */
+enum {
+	CMD_DESC_RES = (1 << 11),
+	VENDOR_SPECIFIC_BIST = (1 << 10),
+	CMD_DESC_SNOOP_ENABLE = (1 << 9),
+	FPDMA_QUEUED_CMD = (1 << 8),
+	SRST_CMD = (1 << 7),
+	BIST = (1 << 6),
+	ATAPI_CMD = (1 << 5),
+};
+
+/*
+ * Command Descriptor
+ */
+struct command_desc {
+	u8 cfis[8 * 4];
+	u8 sfis[8 * 4];
+	u8 acmd[4 * 4];
+	u8 fill[4 * 4];
+	u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
+	u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
+};
+
+/*
+ * Physical region table descriptor (PRD)
+ */
+
+struct prde {
+	u32 dba;
+	u8 fill[2 * 4];
+	u32 ddc_and_ext;
+};
+
+/*
+ * ata_port private data
+ * This is our per-port instance data.
+ */
+struct sata_fsl_port_priv {
+	struct cmdhdr_tbl_entry *cmdslot;
+	dma_addr_t cmdslot_paddr;
+	struct command_desc *cmdentry;
+	dma_addr_t cmdentry_paddr;
+};
+
+/*
+ * ata_port->host_set private data
+ */
+struct sata_fsl_host_priv {
+	void __iomem *hcr_base;
+	void __iomem *ssr_base;
+	void __iomem *csr_base;
+	int irq;
+	int data_snoop;
+	struct device_attribute intr_coalescing;
+	struct device_attribute rx_watermark;
+};
+
+static void fsl_sata_set_irq_coalescing(struct ata_host *host,
+		unsigned int count, unsigned int ticks)
+{
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
+
+	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
+		count = ICC_MAX_INT_COUNT_THRESHOLD;
+	else if (count < ICC_MIN_INT_COUNT_THRESHOLD)
+		count = ICC_MIN_INT_COUNT_THRESHOLD;
+
+	if (ticks > ICC_MAX_INT_TICKS_THRESHOLD)
+		ticks = ICC_MAX_INT_TICKS_THRESHOLD;
+	else if ((ICC_MIN_INT_TICKS_THRESHOLD == ticks) &&
+			(count > ICC_MIN_INT_COUNT_THRESHOLD))
+		ticks = ICC_SAFE_INT_TICKS;
+
+	spin_lock_irqsave(&host->lock, flags);
+	iowrite32((count << 24 | ticks), hcr_base + ICC);
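+	/*
+	 * ICC layout, as implied by the thresholds above: 5-bit count
+	 * threshold in bits 28:24, 19-bit tick threshold in bits 18:0.
+	 */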
+
+	intr_coalescing_count = count;
+	intr_coalescing_ticks = ticks;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
+			intr_coalescing_count, intr_coalescing_ticks);
+	DPRINTK("ICC register status: (hcr base: 0x%x) = 0x%x\n",
+			hcr_base, ioread32(hcr_base + ICC));
+}
+
+static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d	%d\n",
+			intr_coalescing_count, intr_coalescing_ticks);
+}
+
+static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned int coalescing_count,	coalescing_ticks;
+
+	if (sscanf(buf, "%d%d",
+				&coalescing_count,
+				&coalescing_ticks) != 2) {
+		printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+		return -EINVAL;
+	}
+
+	fsl_sata_set_irq_coalescing(dev_get_drvdata(dev),
+			coalescing_count, coalescing_ticks);
+
+	return strlen(buf);
+}
+
+static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int rx_watermark;
+	unsigned long flags;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *csr_base = host_priv->csr_base;
+
+	spin_lock_irqsave(&host->lock, flags);
+	rx_watermark = ioread32(csr_base + TRANSCFG);
+	rx_watermark &= 0x1f;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+	return sprintf(buf, "%d\n", rx_watermark);
+}
+
+static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned int rx_watermark;
+	unsigned long flags;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *csr_base = host_priv->csr_base;
+	u32 temp;
+
+	if (sscanf(buf, "%d", &rx_watermark) != 1) {
+		printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	temp = ioread32(csr_base + TRANSCFG);
+	temp &= 0xffffffe0;
+	iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+	return strlen(buf);
+}
+
+static inline unsigned int sata_fsl_tag(unsigned int tag,
+					void __iomem *hcr_base)
+{
+	/* We let libATA core do actual (queue) tag allocation */
+
+	if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
+		DPRINTK("tag %d invalid : out of range\n", tag);
+		return 0;
+	}
+
+	if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
+		DPRINTK("tag %d invalid : in use!!\n", tag);
+		return 0;
+	}
+
+	return tag;
+}
+
+static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
+					 unsigned int tag, u32 desc_info,
+					 u32 data_xfer_len, u8 num_prde,
+					 u8 fis_len)
+{
+	dma_addr_t cmd_descriptor_address;
+
+	cmd_descriptor_address = pp->cmdentry_paddr +
+	    tag * SATA_FSL_CMD_DESC_SIZE;
+
+	/* NOTE: both data_xfer_len & fis_len are Dword counts */
+
+	pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
+	pp->cmdslot[tag].prde_fis_len =
+	    cpu_to_le32((num_prde << 16) | (fis_len << 2));
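+	/* num_prde occupies the upper half-word; fis_len (a DWORD count per
+	 * the NOTE above) is presumably stored as a byte count via the << 2 */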
+	pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
+	pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
+
+	VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
+		pp->cmdslot[tag].cda,
+		pp->cmdslot[tag].prde_fis_len,
+		pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
+
+}
+
+static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+				     u32 *ttl, dma_addr_t cmd_desc_paddr,
+				     int data_snoop)
+{
+	struct scatterlist *sg;
+	unsigned int num_prde = 0;
+	u32 ttl_dwords = 0;
+
+	/*
+	 * NOTE: direct & indirect PRDTs are contiguously allocated
+	 */
+	struct prde *prd = (struct prde *)&((struct command_desc *)
+					    cmd_desc)->prdt;
+
+	struct prde *prd_ptr_to_indirect_ext = NULL;
+	unsigned indirect_ext_segment_sz = 0;
+	dma_addr_t indirect_ext_segment_paddr;
+	unsigned int si;
+
+	VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
+
+	indirect_ext_segment_paddr = cmd_desc_paddr +
+	    SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n",
+			(unsigned long long)sg_addr, sg_len);
+
+		/* warn if each s/g element is not dword aligned */
+		if (unlikely(sg_addr & 0x03))
+			ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
+				     (unsigned long long)sg_addr);
+		if (unlikely(sg_len & 0x03))
+			ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n",
+				     sg_len);
+
+		if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
+		    sg_next(sg) != NULL) {
+			VPRINTK("setting indirect prde\n");
+			prd_ptr_to_indirect_ext = prd;
+			prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
+			indirect_ext_segment_sz = 0;
+			++prd;
+			++num_prde;
+		}
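+		/*
+		 * Example (a sketch): with an 18-element S/G list, elements
+		 * 0..14 fill direct PRDEs 0..14, PRDE #15 becomes this
+		 * indirect-extension link, and elements 15..17 land in the
+		 * contiguous indirect PRDT area that follows the direct one.
+		 */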
+
+		ttl_dwords += sg_len;
+		prd->dba = cpu_to_le32(sg_addr);
+		prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
+
+		VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
+			ttl_dwords, prd->dba, prd->ddc_and_ext);
+
+		++num_prde;
+		++prd;
+		if (prd_ptr_to_indirect_ext)
+			indirect_ext_segment_sz += sg_len;
+	}
+
+	if (prd_ptr_to_indirect_ext) {
+		/* set indirect extension flag along with indirect ext. size */
+		prd_ptr_to_indirect_ext->ddc_and_ext =
+		    cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
+				 data_snoop |
+				 (indirect_ext_segment_sz & ~0x03)));
+	}
+
+	*ttl = ttl_dwords;
+	return num_prde;
+}
+
+static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+	struct command_desc *cd;
+	u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE;
+	u32 num_prde = 0;
+	u32 ttl_dwords = 0;
+	dma_addr_t cd_paddr;
+
+	cd = (struct command_desc *)pp->cmdentry + tag;
+	cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
+
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
+
+	VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
+		cd->cfis[0], cd->cfis[1], cd->cfis[2]);
+
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n",
+			cd->cfis[3], cd->cfis[11]);
+	}
+
+	/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
+	if (ata_is_atapi(qc->tf.protocol)) {
+		desc_info |= ATAPI_CMD;
+		memset((void *)&cd->acmd, 0, 32);
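+		/* the 32-byte clear covers acmd[16] plus the adjacent
+		 * fill[16] reserved area of the descriptor */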
+		memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+	}
+
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		num_prde = sata_fsl_fill_sg(qc, (void *)cd,
+					    &ttl_dwords, cd_paddr,
+					    host_priv->data_snoop);
+
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		desc_info |= FPDMA_QUEUED_CMD;
+
+	sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
+				     num_prde, 5);
+
+	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+		desc_info, ttl_dwords, num_prde);
+}
+
+static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+
+	VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base),
+		ioread32(CE + hcr_base), ioread32(CC + hcr_base));
+
+	iowrite32(qc->dev->link->pmp, CQPMP + hcr_base);
+
+	/* Simply queue command to the controller/device */
+	iowrite32(1 << tag, CQ + hcr_base);
+
+	VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n",
+		tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
+
+	VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
+		ioread32(CE + hcr_base),
+		ioread32(DE + hcr_base),
+		ioread32(CC + hcr_base),
+		ioread32(COMMANDSTAT + host_priv->csr_base));
+
+	return 0;
+}
+
+static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct sata_fsl_port_priv *pp = qc->ap->private_data;
+	struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+	struct command_desc *cd;
+
+	cd = pp->cmdentry + tag;
+
+	ata_tf_from_fis(cd->sfis, &qc->result_tf);
+	return true;
+}
+
+static int sata_fsl_scr_write(struct ata_link *link,
+			      unsigned int sc_reg_in, u32 val)
+{
+	struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
+	void __iomem *ssr_base = host_priv->ssr_base;
+	unsigned int sc_reg;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_ERROR:
+	case SCR_CONTROL:
+	case SCR_ACTIVE:
+		sc_reg = sc_reg_in;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg);
+
+	iowrite32(val, ssr_base + (sc_reg * 4));
+	return 0;
+}
+
+static int sata_fsl_scr_read(struct ata_link *link,
+			     unsigned int sc_reg_in, u32 *val)
+{
+	struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
+	void __iomem *ssr_base = host_priv->ssr_base;
+	unsigned int sc_reg;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_ERROR:
+	case SCR_CONTROL:
+	case SCR_ACTIVE:
+		sc_reg = sc_reg_in;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg);
+
+	*val = ioread32(ssr_base + (sc_reg * 4));
+	return 0;
+}
+
+static void sata_fsl_freeze(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base),
+		ioread32(CE + hcr_base), ioread32(DE + hcr_base));
+	VPRINTK("CmdStat = 0x%x\n",
+		ioread32(host_priv->csr_base + COMMANDSTAT));
+
+	/* disable interrupts on the controller/port */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
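+	/* 0x3F covers the six IE_ON_* interrupt-enable bits in HControl */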
+
+	VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n",
+		ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+static void sata_fsl_thaw(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/* ack. any pending IRQs for this controller/port */
+	temp = ioread32(hcr_base + HSTATUS);
+
+	VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F));
+
+	if (temp & 0x3F)
+		iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+	/* enable interrupts on the controller/port */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
+
+	VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n",
+		ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+static void sata_fsl_pmp_attach(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL);
+}
+
+static void sata_fsl_pmp_detach(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_PMP_ATTACHED;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* enable interrupts on the controller/port */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
+
+}
+
+static int sata_fsl_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sata_fsl_port_priv *pp;
+	void *mem;
+	dma_addr_t mem_dma;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+				  GFP_KERNEL);
+	if (!mem) {
+		kfree(pp);
+		return -ENOMEM;
+	}
+
+	pp->cmdslot = mem;
+	pp->cmdslot_paddr = mem_dma;
+
+	mem += SATA_FSL_CMD_SLOT_SIZE;
+	mem_dma += SATA_FSL_CMD_SLOT_SIZE;
+
+	pp->cmdentry = mem;
+	pp->cmdentry_paddr = mem_dma;
+
+	ap->private_data = pp;
+
+	VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n",
+		pp->cmdslot_paddr, pp->cmdentry_paddr);
+
+	/* Now, update the CHBA register in host controller cmd register set */
+	iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
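+	/* CHBA takes only the low 32 bits of the command-header base, so the
+	 * slot array presumably has to sit below 4 GiB */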
+
+	/*
+	 * Now we can bring the controller on-line and also initiate
+	 * the COMINIT sequence. We simply return here; the boot-probing
+	 * and device discovery process is re-initiated by libATA using a
+	 * Softreset EH (dummy) session. Hence, boot probing and device
+	 * discovery will be part of the sata_fsl_softreset() callback.
+	 */
+
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
+
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+	VPRINTK("CHBA  = 0x%x\n", ioread32(hcr_base + CHBA));
+
+	return 0;
+}
+
+static void sata_fsl_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	temp |= HCONTROL_FORCE_OFFLINE;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline - should happen immediately */
+	ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
+			  pp->cmdslot, pp->cmdslot_paddr);
+
+	kfree(pp);
+}
+
+static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	struct ata_taskfile tf;
+	u32 temp;
+
+	temp = ioread32(hcr_base + SIGNATURE);
+
+	VPRINTK("raw sig = 0x%x\n", temp);
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	tf.lbah = (temp >> 24) & 0xff;
+	tf.lbam = (temp >> 16) & 0xff;
+	tf.lbal = (temp >> 8) & 0xff;
+	tf.nsect = temp & 0xff;
+
+	return ata_dev_classify(&tf);
+}
+
+static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
+					unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+	int i = 0;
+	unsigned long start_jiffies;
+
+	DPRINTK("in xx_hardreset\n");
+
+try_offline_again:
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline */
+	temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE,
+				 1, 500);
+
+	if (temp & ONLINE) {
+		ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i);
+
+		/*
+		 * Try to offline the controller at least twice
+		 */
+		i++;
+		if (i == 2)
+			goto err;
+		else
+			goto try_offline_again;
+	}
+
+	DPRINTK("hardreset, controller off-lined\n");
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	/*
+	 * PHY reset should remain asserted for at least 1 ms
+	 */
+	ata_msleep(ap, 1);
+
+	sata_set_spd(link);
+
+	/*
+	 * Now bring the host controller online again. This can take time:
+	 * PHY reset, communication establishment, the 1st D2H FIS and the
+	 * device signature update all happen here, so on the safe side
+	 * assume 500ms.
+	 * NOTE : Host online status may be indicated immediately!!
+	 */
+
+	temp = ioread32(hcr_base + HCONTROL);
+	temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE);
+	temp |= HCONTROL_PMP_ATTACHED;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500);
+
+	if (!(temp & ONLINE)) {
+		ata_port_err(ap, "Hardreset failed, not on-lined\n");
+		goto err;
+	}
+
+	DPRINTK("hardreset, controller off-lined & on-lined\n");
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	/*
+	 * First, wait for the PHYRDY change to occur before waiting for
+	 * the signature, and also verify if SStatus indicates device
+	 * presence
+	 */
+
+	temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500);
+	if ((!(temp & 0x10)) || ata_link_offline(link)) {
+		ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n",
+			      ioread32(hcr_base + HSTATUS));
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
+	/*
+	 * Wait for the first D2H FIS from the device, i.e., the signature
+	 * update notification
+	 */
+	start_jiffies = jiffies;
+	temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10,
+			500, jiffies_to_msecs(deadline - start_jiffies));
+
+	if ((temp & 0xFF) != 0x18) {
+		ata_port_warn(ap, "No Signature Update\n");
+		*class = ATA_DEV_NONE;
+		goto do_followup_srst;
+	} else {
+		ata_port_info(ap, "Signature Update detected @ %d msecs\n",
+			      jiffies_to_msecs(jiffies - start_jiffies));
+		*class = sata_fsl_dev_classify(ap);
+		return 0;
+	}
+
+do_followup_srst:
+	/*
+	 * request libATA to perform follow-up softreset
+	 */
+	return -EAGAIN;
+
+err:
+	return -EIO;
+}
+
+static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+					unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	int pmp = sata_srst_pmp(link);
+	u32 temp;
+	struct ata_taskfile tf;
+	u8 *cfis;
+	u32 Serror;
+
+	DPRINTK("in xx_softreset\n");
+
+	if (ata_link_offline(link)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
+	/*
+	 * Send a device reset (SRST) explicitly on command slot #0.
+	 * Check: will the command queue (reg) be cleared during offlining?
+	 * Also, we will be online only if PHY communication has been
+	 * established and device presence has been detected, therefore if
+	 * we have reached here, we can send a command to the target device
+	 */
+
+	DPRINTK("Sending SRST/device reset\n");
+
+	ata_tf_init(link->device, &tf);
+	cfis = (u8 *) &pp->cmdentry->cfis;
+
+	/* device reset/SRST is a control register update FIS, uses tag0 */
+	sata_fsl_setup_cmd_hdr_entry(pp, 0,
+		SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
+
+	tf.ctl |= ATA_SRST;	/* setup SRST bit in taskfile control reg */
+	ata_tf_to_fis(&tf, pmp, 0, cfis);
+
+	DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
+		cfis[0], cfis[1], cfis[2], cfis[3]);
+
+	/*
+	 * Queue SRST command to the controller/device, ensure that no
+	 * other commands are active on the controller/device
+	 */
+
+	DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
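+	/* Clear all command-complete bits, then set bit 0 of the command
+	 * queue register to issue the FIS sitting in command slot #0.
+	 */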
+	iowrite32(0xFFFF, CC + hcr_base);
+	if (pmp != SATA_PMP_CTRL_PORT)
+		iowrite32(pmp, CQPMP + hcr_base);
+	iowrite32(1, CQ + hcr_base);
+
+	temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000);
+	if (temp & 0x1) {
+		ata_port_warn(ap, "ATA_SRST issue failed\n");
+
+		DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
+			ioread32(CQ + hcr_base),
+			ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
+		sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
+
+		DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+		DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+		DPRINTK("Serror = 0x%x\n", Serror);
+		goto err;
+	}
+
+	ata_msleep(ap, 1);
+
+	/*
+	 * A SATA device enters the reset state after receiving a Control
+	 * register FIS with the SRST bit asserted and awaits another H2D
+	 * Control reg. FIS with the SRST bit cleared; the device then runs
+	 * its internal diagnostics and initialization and reports its
+	 * status to the host controller using an ATA signature D2H
+	 * register FIS.
+	 */
+
+	sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
+				      0, 0, 5);
+
+	tf.ctl &= ~ATA_SRST;	/* 2nd H2D Ctl. register FIS */
+	ata_tf_to_fis(&tf, pmp, 0, cfis);
+
+	if (pmp != SATA_PMP_CTRL_PORT)
+		iowrite32(pmp, CQPMP + hcr_base);
+	iowrite32(1, CQ + hcr_base);
+	ata_msleep(ap, 150);		/* ?? */
+
+	/*
+	 * The above command will have signalled an interrupt on command
+	 * complete, which needs special handling: clear the Nth command
+	 * bit of the CC reg
+	 */
+	iowrite32(0x01, CC + hcr_base);	/* We know it will be cmd#0 always */
+
+	DPRINTK("SATA FSL : Now checking device signature\n");
+
+	*class = ATA_DEV_NONE;
+
+	/* Verify if SStatus indicates device presence */
+	if (ata_link_online(link)) {
+		/*
+		 * If we are here, device presence has been detected and the
+		 * 1st D2H FIS has been received; the sfis in the command
+		 * descriptor is not updated, but the signature register
+		 * has been
+		 */
+
+		*class = sata_fsl_dev_classify(ap);
+
+		DPRINTK("class = %d\n", *class);
+		VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC));
+		VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
+	}
+
+	return 0;
+
+err:
+	return -EIO;
+}
+
+static void sata_fsl_error_handler(struct ata_port *ap)
+{
+	DPRINTK("in xx_error_handler\n");
+	sata_pmp_error_handler(ap);
+}
+
+static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+	}
+}
+
+static void sata_fsl_error_intr(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 hstatus, dereg = 0, cereg = 0, SError = 0;
+	unsigned int err_mask = 0, action = 0;
+	int freeze = 0, abort = 0;
+	struct ata_link *link = NULL;
+	struct ata_queued_cmd *qc = NULL;
+	struct ata_eh_info *ehi;
+
+	hstatus = ioread32(hcr_base + HSTATUS);
+	cereg = ioread32(hcr_base + CE);
+
+	/* first, analyze and record host port events */
+	link = &ap->link;
+	ehi = &link->eh_info;
+	ata_ehi_clear_desc(ehi);
+
+	/*
+	 * Handle & Clear SError
+	 */
+
+	sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
+	if (unlikely(SError & 0xFFFF0000))
+		sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
+
+	DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE=0x%x,SErr=0x%x\n",
+		hstatus, cereg, ioread32(hcr_base + DE), SError);
+
+	/* handle fatal errors */
+	if (hstatus & FATAL_ERROR_DECODE) {
+		ehi->err_mask |= AC_ERR_ATA_BUS;
+		ehi->action |= ATA_EH_SOFTRESET;
+
+		freeze = 1;
+	}
+
+	/* Handle SDB FIS receive & notify update */
+	if (hstatus & INT_ON_SNOTIFY_UPDATE)
+		sata_async_notification(ap);
+
+	/* Handle PHYRDY change notification */
+	if (hstatus & INT_ON_PHYRDY_CHG) {
+		DPRINTK("SATA FSL: PHYRDY change indication\n");
+
+		/* Setup a soft-reset EH action */
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, "%s", "PHY RDY changed");
+		freeze = 1;
+	}
+
+	/* handle single device errors */
+	if (cereg) {
+		/*
+		 * clear the command error, also clears queue to the device
+		 * in error, and we can (re)issue commands to this device.
+		 * When a device is in error all commands queued into the
+		 * host controller and at the device are considered aborted
+		 * and the queue for that device is stopped. Now, after
+		 * clearing the device error, we can issue commands to the
+		 * device to interrogate it to find the source of the error.
+		 */
+		abort = 1;
+
+		DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
+			ioread32(hcr_base + CE), ioread32(hcr_base + DE));
+
+		/* find out the offending link and qc */
+		if (ap->nr_pmp_links) {
+			unsigned int dev_num;
+
+			dereg = ioread32(hcr_base + DE);
+			iowrite32(dereg, hcr_base + DE);
+			iowrite32(cereg, hcr_base + CE);
+
+			dev_num = ffs(dereg) - 1;
+			if (dev_num < ap->nr_pmp_links && dereg != 0) {
+				link = &ap->pmp_link[dev_num];
+				ehi = &link->eh_info;
+				qc = ata_qc_from_tag(ap, link->active_tag);
+				/*
+				 * We should consider this a non-fatal error,
+				 * and the TF must be updated as done below.
+				 */
+
+				err_mask |= AC_ERR_DEV;
+
+			} else {
+				err_mask |= AC_ERR_HSM;
+				action |= ATA_EH_HARDRESET;
+				freeze = 1;
+			}
+		} else {
+			dereg = ioread32(hcr_base + DE);
+			iowrite32(dereg, hcr_base + DE);
+			iowrite32(cereg, hcr_base + CE);
+
+			qc = ata_qc_from_tag(ap, link->active_tag);
+			/*
+			 * We should consider this a non-fatal error,
+			 * and the TF must be updated as done below.
+			 */
+			err_mask |= AC_ERR_DEV;
+		}
+	}
+
+	/* record error info */
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
+
+	ehi->action |= action;
+
+	/* freeze or abort */
+	if (freeze)
+		ata_port_freeze(ap);
+	else if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
+}
+
+static void sata_fsl_host_intr(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 hstatus, done_mask = 0;
+	struct ata_queued_cmd *qc;
+	u32 SError;
+	u32 tag;
+	u32 status_mask = INT_ON_ERROR;
+
+	hstatus = ioread32(hcr_base + HSTATUS);
+
+	sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
+
+	/* Read command completed register */
+	done_mask = ioread32(hcr_base + CC);
+
+	/* Workaround for data length mismatch errata */
+	if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
+		ata_qc_for_each_with_internal(ap, qc, tag) {
+			if (qc && ata_is_atapi(qc->tf.protocol)) {
+				u32 hcontrol;
+				/* Set HControl[27] to clear error registers */
+				hcontrol = ioread32(hcr_base + HCONTROL);
+				iowrite32(hcontrol | CLEAR_ERROR,
+						hcr_base + HCONTROL);
+
+				/* Clear HControl[27] */
+				iowrite32(hcontrol & ~CLEAR_ERROR,
+						hcr_base + HCONTROL);
+
+				/* Clear SError[E] bit */
+				sata_fsl_scr_write(&ap->link, SCR_ERROR,
+						SError);
+
+				/* Ignore fatal error and device error */
+				status_mask &= ~(INT_ON_SINGL_DEVICE_ERR
+						| INT_ON_FATAL_ERR);
+				break;
+			}
+		}
+	}
+
+	if (unlikely(SError & 0xFFFF0000)) {
+		DPRINTK("serror @host_intr : 0x%x\n", SError);
+		sata_fsl_error_intr(ap);
+	}
+
+	if (unlikely(hstatus & status_mask)) {
+		DPRINTK("error interrupt!!\n");
+		sata_fsl_error_intr(ap);
+		return;
+	}
+
+	VPRINTK("Status of all queues :\n");
+	VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
+		done_mask,
+		ioread32(hcr_base + CA),
+		ioread32(hcr_base + CE),
+		ioread32(hcr_base + CQ),
+		ap->qc_active);
+
+	if (done_mask & ap->qc_active) {
+		int i;
+		/* clear CC bit, this will also complete the interrupt */
+		iowrite32(done_mask, hcr_base + CC);
+
+		DPRINTK("Status of all queues :\n");
+		DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+			done_mask, ioread32(hcr_base + CA),
+			ioread32(hcr_base + CE));
+
+		for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
+			if (done_mask & (1 << i))
+				DPRINTK
+				    ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
+				     i, ioread32(hcr_base + CC),
+				     ioread32(hcr_base + CA));
+		}
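+		/*
+		 * ata_qc_complete_multiple() takes the new qc_active mask;
+		 * XOR-ing out done_mask completes exactly the finished tags.
+		 */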
+		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		return;
+
+	} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
+		iowrite32(1, hcr_base + CC);
+		qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+
+		DPRINTK("completing non-ncq cmd, CC=0x%x\n",
+			 ioread32(hcr_base + CC));
+
+		if (qc) {
+			ata_qc_complete(qc);
+		}
+	} else {
+		/* Spurious Interrupt!! */
+		DPRINTK("spurious interrupt!!, CC = 0x%x\n",
+			ioread32(hcr_base + CC));
+		iowrite32(done_mask, hcr_base + CC);
+		return;
+	}
+}
+
+static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 interrupt_enables;
+	unsigned handled = 0;
+	struct ata_port *ap;
+
+	/* ack. any pending IRQs for this controller/port */
+	interrupt_enables = ioread32(hcr_base + HSTATUS);
+	interrupt_enables &= 0x3F;
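+	/* The low 6 bits of HStatus latch the interrupt causes; writing
+	 * them back (below) acknowledges the interrupt, as in
+	 * sata_fsl_init_controller().
+	 */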
+
+	DPRINTK("interrupt status 0x%x\n", interrupt_enables);
+
+	if (!interrupt_enables)
+		return IRQ_NONE;
+
+	spin_lock(&host->lock);
+
+	/* Assuming one port per host controller */
+
+	ap = host->ports[0];
+	if (ap) {
+		sata_fsl_host_intr(ap);
+	} else {
+		dev_warn(host->dev, "interrupt on disabled port 0\n");
+	}
+
+	iowrite32(interrupt_enables, hcr_base + HSTATUS);
+	handled = 1;
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+/*
+ * Multiple ports are represented by multiple SATA controllers with
+ * one port per controller
+ */
+static int sata_fsl_init_controller(struct ata_host *host)
+{
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/*
+	 * NOTE : We cannot bring the controller online before setting
+	 * the CHBA, hence main controller initialization is done as
+	 * part of the port_start() callback
+	 */
+
+	/* set the SATA controller to operate in enterprise mode */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
+	/* ack. any pending IRQs for this controller/port */
+	temp = ioread32(hcr_base + HSTATUS);
+	if (temp & 0x3F)
+		iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+	/* Keep interrupts disabled on the controller */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
+
+	/* Disable interrupt coalescing control (ICC) for the moment */
+	DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC));
+	iowrite32(0x01000000, hcr_base + ICC);
+
+	/* clear error registers, SError is cleared by libATA */
+	iowrite32(0x0000FFFF, hcr_base + CE);
+	iowrite32(0x0000FFFF, hcr_base + DE);
+
+	/*
+	 * reset the number of command complete bits which will cause the
+	 * interrupt to be signaled
+	 */
+	fsl_sata_set_irq_coalescing(host, intr_coalescing_count,
+			intr_coalescing_ticks);
+
+	/*
+	 * host controller will be brought on-line, during xx_port_start()
+	 * callback, that should also initiate the OOB, COMINIT sequence
+	 */
+
+	DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	return 0;
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_fsl_sht = {
+	ATA_NCQ_SHT("sata_fsl"),
+	.can_queue = SATA_FSL_QUEUE_DEPTH,
+	.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
+	.dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations sata_fsl_ops = {
+	.inherits		= &sata_pmp_port_ops,
+
+	.qc_defer = ata_std_qc_defer,
+	.qc_prep = sata_fsl_qc_prep,
+	.qc_issue = sata_fsl_qc_issue,
+	.qc_fill_rtf = sata_fsl_qc_fill_rtf,
+
+	.scr_read = sata_fsl_scr_read,
+	.scr_write = sata_fsl_scr_write,
+
+	.freeze = sata_fsl_freeze,
+	.thaw = sata_fsl_thaw,
+	.softreset = sata_fsl_softreset,
+	.hardreset = sata_fsl_hardreset,
+	.pmp_softreset = sata_fsl_softreset,
+	.error_handler = sata_fsl_error_handler,
+	.post_internal_cmd = sata_fsl_post_internal_cmd,
+
+	.port_start = sata_fsl_port_start,
+	.port_stop = sata_fsl_port_stop,
+
+	.pmp_attach = sata_fsl_pmp_attach,
+	.pmp_detach = sata_fsl_pmp_detach,
+};
+
+static const struct ata_port_info sata_fsl_port_info[] = {
+	{
+	 .flags = SATA_FSL_HOST_FLAGS,
+	 .pio_mask = ATA_PIO4,
+	 .udma_mask = ATA_UDMA6,
+	 .port_ops = &sata_fsl_ops,
+	 },
+};
+
+static int sata_fsl_probe(struct platform_device *ofdev)
+{
+	int retval = -ENXIO;
+	void __iomem *hcr_base = NULL;
+	void __iomem *ssr_base = NULL;
+	void __iomem *csr_base = NULL;
+	struct sata_fsl_host_priv *host_priv = NULL;
+	int irq;
+	struct ata_host *host = NULL;
+	u32 temp;
+
+	struct ata_port_info pi = sata_fsl_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	dev_info(&ofdev->dev, "SATA FSL Platform/CSB Driver init\n");
+
+	hcr_base = of_iomap(ofdev->dev.of_node, 0);
+	if (!hcr_base)
+		goto error_exit_with_cleanup;
+
+	ssr_base = hcr_base + 0x100;
+	csr_base = hcr_base + 0x140;
+
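+	/*
+	 * Set the Rx water mark in TRANSCFG on all controllers except the
+	 * MPC8315, which keeps its reset default.
+	 */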
+	if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
+		temp = ioread32(csr_base + TRANSCFG);
+		temp = temp & 0xffffffe0;
+		iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
+	}
+
+	DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
+	DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
+	DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+
+	host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
+	if (!host_priv)
+		goto error_exit_with_cleanup;
+
+	host_priv->hcr_base = hcr_base;
+	host_priv->ssr_base = ssr_base;
+	host_priv->csr_base = csr_base;
+
+	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+	if (!irq) {
+		dev_err(&ofdev->dev, "invalid irq from platform\n");
+		goto error_exit_with_cleanup;
+	}
+	host_priv->irq = irq;
+
+	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2"))
+		host_priv->data_snoop = DATA_SNOOP_ENABLE_V2;
+	else
+		host_priv->data_snoop = DATA_SNOOP_ENABLE_V1;
+
+	/* allocate host structure */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
+	if (!host) {
+		retval = -ENOMEM;
+		goto error_exit_with_cleanup;
+	}
+
+	/* host->iomap is not used currently */
+	host->private_data = host_priv;
+
+	/* initialize host controller */
+	sata_fsl_init_controller(host);
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
+			  &sata_fsl_sht);
+
+	host_priv->intr_coalescing.show = fsl_sata_intr_coalescing_show;
+	host_priv->intr_coalescing.store = fsl_sata_intr_coalescing_store;
+	sysfs_attr_init(&host_priv->intr_coalescing.attr);
+	host_priv->intr_coalescing.attr.name = "intr_coalescing";
+	host_priv->intr_coalescing.attr.mode = S_IRUGO | S_IWUSR;
+	retval = device_create_file(host->dev, &host_priv->intr_coalescing);
+	if (retval)
+		goto error_exit_with_cleanup;
+
+	host_priv->rx_watermark.show = fsl_sata_rx_watermark_show;
+	host_priv->rx_watermark.store = fsl_sata_rx_watermark_store;
+	sysfs_attr_init(&host_priv->rx_watermark.attr);
+	host_priv->rx_watermark.attr.name = "rx_watermark";
+	host_priv->rx_watermark.attr.mode = S_IRUGO | S_IWUSR;
+	retval = device_create_file(host->dev, &host_priv->rx_watermark);
+	if (retval) {
+		device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
+		goto error_exit_with_cleanup;
+	}
+
+	return 0;
+
+error_exit_with_cleanup:
+
+	if (host)
+		ata_host_detach(host);
+
+	if (hcr_base)
+		iounmap(hcr_base);
+	kfree(host_priv);
+
+	return retval;
+}
+
+static int sata_fsl_remove(struct platform_device *ofdev)
+{
+	struct ata_host *host = platform_get_drvdata(ofdev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+
+	device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
+	device_remove_file(&ofdev->dev, &host_priv->rx_watermark);
+
+	ata_host_detach(host);
+
+	irq_dispose_mapping(host_priv->irq);
+	iounmap(host_priv->hcr_base);
+	kfree(host_priv);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
+{
+	struct ata_host *host = platform_get_drvdata(op);
+	return ata_host_suspend(host, state);
+}
+
+static int sata_fsl_resume(struct platform_device *op)
+{
+	struct ata_host *host = platform_get_drvdata(op);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	int ret;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	struct ata_port *ap = host->ports[0];
+	struct sata_fsl_port_priv *pp = ap->private_data;
+
+	ret = sata_fsl_init_controller(host);
+	if (ret) {
+		dev_err(&op->dev, "Error initializing hardware\n");
+		return ret;
+	}
+
+	/* Restore the CHBA register in the host controller cmd register set */
+	iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+	iowrite32((ioread32(hcr_base + HCONTROL)
+				| HCONTROL_ONLINE_PHY_RST
+				| HCONTROL_SNOOP_ENABLE
+				| HCONTROL_PMP_ATTACHED),
+			hcr_base + HCONTROL);
+
+	ata_host_resume(host);
+	return 0;
+}
+#endif
+
+static const struct of_device_id fsl_sata_match[] = {
+	{
+		.compatible = "fsl,pq-sata",
+	},
+	{
+		.compatible = "fsl,pq-sata-v2",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_sata_match);
+
+static struct platform_driver fsl_sata_driver = {
+	.driver = {
+		.name = "fsl-sata",
+		.of_match_table = fsl_sata_match,
+	},
+	.probe		= sata_fsl_probe,
+	.remove		= sata_fsl_remove,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= sata_fsl_suspend,
+	.resume		= sata_fsl_resume,
+#endif
+};
+
+module_platform_driver(fsl_sata_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
+MODULE_VERSION("1.10");
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
new file mode 100644
index 0000000..46950e0
--- /dev/null
+++ b/drivers/ata/sata_gemini.c
@@ -0,0 +1,439 @@
+/*
+ * Cortina Systems Gemini SATA bridge add-on to Faraday FTIDE010
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pinctrl/consumer.h>
+#include "sata_gemini.h"
+
+#define DRV_NAME "gemini_sata_bridge"
+
+/**
+ * struct sata_gemini - a state container for a Gemini SATA bridge
+ * @dev: the containing device
+ * @base: remapped I/O memory base
+ * @muxmode: the current muxing mode
+ * @ide_pins: if the device is using the plain IDE interface pins
+ * @sata_bridge: if the device enables the SATA bridge
+ * @sata0_reset: SATA0 reset handler
+ * @sata1_reset: SATA1 reset handler
+ * @sata0_pclk: SATA0 PCLK handler
+ * @sata1_pclk: SATA1 PCLK handler
+ */
+struct sata_gemini {
+	struct device *dev;
+	void __iomem *base;
+	enum gemini_muxmode muxmode;
+	bool ide_pins;
+	bool sata_bridge;
+	struct reset_control *sata0_reset;
+	struct reset_control *sata1_reset;
+	struct clk *sata0_pclk;
+	struct clk *sata1_pclk;
+};
+
+/* Miscellaneous Control Register */
+#define GEMINI_GLOBAL_MISC_CTRL		0x30
+/*
+ * Values of IDE IOMUX bits in the misc control register
+ *
+ * Bits 26:24 are "IDE IO Select", which decides what SATA
+ * adapters are connected to which of the two IDE/ATA
+ * controllers in the Gemini. We can connect the two IDE blocks
+ * to one SATA adapter each, both acting as master, or one IDE
+ * block to two SATA adapters so the IDE block can act in a
+ * master/slave configuration.
+ *
+ * We also bring out different blocks on the actual IDE
+ * pins (not SATA pins) if (and only if) these are muxed in.
+ *
+ * 111-100 - Reserved
+ * Mode 0: 000 - ata0 master <-> sata0
+ *               ata1 master <-> sata1
+ *               ata0 slave interface brought out on IDE pads
+ * Mode 1: 001 - ata0 master <-> sata0
+ *               ata1 master <-> sata1
+ *               ata1 slave interface brought out on IDE pads
+ * Mode 2: 010 - ata1 master <-> sata1
+ *               ata1 slave  <-> sata0
+ *               ata0 master and slave interfaces brought out
+ *                    on IDE pads
+ * Mode 3: 011 - ata0 master <-> sata0
+ *               ata1 slave  <-> sata1
+ *               ata1 master and slave interfaces brought out
+ *                    on IDE pads
+ */
+#define GEMINI_IDE_IOMUX_MASK			(7 << 24)
+#define GEMINI_IDE_IOMUX_MODE0			(0 << 24)
+#define GEMINI_IDE_IOMUX_MODE1			(1 << 24)
+#define GEMINI_IDE_IOMUX_MODE2			(2 << 24)
+#define GEMINI_IDE_IOMUX_MODE3			(3 << 24)
+#define GEMINI_IDE_IOMUX_SHIFT			(24)
+
+/*
+ * Registers directly controlling the PATA<->SATA adapters
+ */
+#define GEMINI_SATA_ID				0x00
+#define GEMINI_SATA_PHY_ID			0x04
+#define GEMINI_SATA0_STATUS			0x08
+#define GEMINI_SATA1_STATUS			0x0c
+#define GEMINI_SATA0_CTRL			0x18
+#define GEMINI_SATA1_CTRL			0x1c
+
+#define GEMINI_SATA_STATUS_BIST_DONE		BIT(5)
+#define GEMINI_SATA_STATUS_BIST_OK		BIT(4)
+#define GEMINI_SATA_STATUS_PHY_READY		BIT(0)
+
+#define GEMINI_SATA_CTRL_PHY_BIST_EN		BIT(14)
+#define GEMINI_SATA_CTRL_PHY_FORCE_IDLE		BIT(13)
+#define GEMINI_SATA_CTRL_PHY_FORCE_READY	BIT(12)
+#define GEMINI_SATA_CTRL_PHY_AFE_LOOP_EN	BIT(10)
+#define GEMINI_SATA_CTRL_PHY_DIG_LOOP_EN	BIT(9)
+#define GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN	BIT(4)
+#define GEMINI_SATA_CTRL_ATAPI_EN		BIT(3)
+#define GEMINI_SATA_CTRL_BUS_WITH_20		BIT(2)
+#define GEMINI_SATA_CTRL_SLAVE_EN		BIT(1)
+#define GEMINI_SATA_CTRL_EN			BIT(0)
+
+/*
+ * There is only ever one instance of this bridge on a system,
+ * so create a singleton so that the FTIDE010 instances can grab
+ * a reference to it.
+ */
+static struct sata_gemini *sg_singleton;
+
+struct sata_gemini *gemini_sata_bridge_get(void)
+{
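+	/*
+	 * Returning -EPROBE_DEFER lets the FTIDE010 driver retry until
+	 * this bridge has finished probing.
+	 */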
+	if (sg_singleton)
+		return sg_singleton;
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(gemini_sata_bridge_get);
+
+bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1)
+{
+	if (!sg->sata_bridge)
+		return false;
+	/*
+	 * In muxmode 2 and 3 one of the ATA controllers is
+	 * actually not connected to any SATA bridge.
+	 */
+	if ((sg->muxmode == GEMINI_MUXMODE_2) &&
+	    !is_ata1)
+		return false;
+	if ((sg->muxmode == GEMINI_MUXMODE_3) &&
+	    is_ata1)
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(gemini_sata_bridge_enabled);
+
+enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg)
+{
+	return sg->muxmode;
+}
+EXPORT_SYMBOL(gemini_sata_get_muxmode);
+
+static int gemini_sata_setup_bridge(struct sata_gemini *sg,
+				    unsigned int bridge)
+{
+	unsigned long timeout = jiffies + (HZ * 1);
+	bool bridge_online;
+	u32 val;
+
+	if (bridge == 0) {
+		val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
+		/* SATA0 slave mode is only used in muxmode 2 */
+		if (sg->muxmode == GEMINI_MUXMODE_2)
+			val |= GEMINI_SATA_CTRL_SLAVE_EN;
+		writel(val, sg->base + GEMINI_SATA0_CTRL);
+	} else {
+		val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
+		/* SATA1 slave mode is only used in muxmode 3 */
+		if (sg->muxmode == GEMINI_MUXMODE_3)
+			val |= GEMINI_SATA_CTRL_SLAVE_EN;
+		writel(val, sg->base + GEMINI_SATA1_CTRL);
+	}
+
+	/* Vendor code waits 10 ms here */
+	msleep(10);
+
+	/* Wait for PHY to become ready */
+	do {
+		msleep(100);
+
+		if (bridge == 0)
+			val = readl(sg->base + GEMINI_SATA0_STATUS);
+		else
+			val = readl(sg->base + GEMINI_SATA1_STATUS);
+		if (val & GEMINI_SATA_STATUS_PHY_READY)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	bridge_online = !!(val & GEMINI_SATA_STATUS_PHY_READY);
+
+	dev_info(sg->dev, "SATA%d PHY %s\n", bridge,
+		 bridge_online ? "ready" : "not ready");
+
+	return bridge_online ? 0 : -ENODEV;
+}
+
+int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
+{
+	struct clk *pclk;
+	int ret;
+
+	if (bridge == 0)
+		pclk = sg->sata0_pclk;
+	else
+		pclk = sg->sata1_pclk;
+	clk_enable(pclk);
+	msleep(10);
+
+	/* Do not keep clocking a bridge that is not online */
+	ret = gemini_sata_setup_bridge(sg, bridge);
+	if (ret)
+		clk_disable(pclk);
+
+	return ret;
+}
+EXPORT_SYMBOL(gemini_sata_start_bridge);
+
+void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
+{
+	if (bridge == 0)
+		clk_disable(sg->sata0_pclk);
+	else if (bridge == 1)
+		clk_disable(sg->sata1_pclk);
+}
+EXPORT_SYMBOL(gemini_sata_stop_bridge);
+
+int gemini_sata_reset_bridge(struct sata_gemini *sg,
+			     unsigned int bridge)
+{
+	if (bridge == 0)
+		reset_control_reset(sg->sata0_reset);
+	else
+		reset_control_reset(sg->sata1_reset);
+	msleep(10);
+	return gemini_sata_setup_bridge(sg, bridge);
+}
+EXPORT_SYMBOL(gemini_sata_reset_bridge);
+
+static int gemini_sata_bridge_init(struct sata_gemini *sg)
+{
+	struct device *dev = sg->dev;
+	u32 sata_id, sata_phy_id;
+	int ret;
+
+	sg->sata0_pclk = devm_clk_get(dev, "SATA0_PCLK");
+	if (IS_ERR(sg->sata0_pclk)) {
+		dev_err(dev, "no SATA0 PCLK");
+		return -ENODEV;
+	}
+	sg->sata1_pclk = devm_clk_get(dev, "SATA1_PCLK");
+	if (IS_ERR(sg->sata1_pclk)) {
+		dev_err(dev, "no SATA1 PCLK");
+		return -ENODEV;
+	}
+
+	ret = clk_prepare_enable(sg->sata0_pclk);
+	if (ret) {
+		pr_err("failed to enable SATA0 PCLK\n");
+		return ret;
+	}
+	ret = clk_prepare_enable(sg->sata1_pclk);
+	if (ret) {
+		pr_err("failed to enable SATA1 PCLK\n");
+		clk_disable_unprepare(sg->sata0_pclk);
+		return ret;
+	}
+
+	sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
+	if (IS_ERR(sg->sata0_reset)) {
+		dev_err(dev, "no SATA0 reset controller\n");
+		clk_disable_unprepare(sg->sata1_pclk);
+		clk_disable_unprepare(sg->sata0_pclk);
+		return PTR_ERR(sg->sata0_reset);
+	}
+	sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
+	if (IS_ERR(sg->sata1_reset)) {
+		dev_err(dev, "no SATA1 reset controller\n");
+		clk_disable_unprepare(sg->sata1_pclk);
+		clk_disable_unprepare(sg->sata0_pclk);
+		return PTR_ERR(sg->sata1_reset);
+	}
+
+	sata_id = readl(sg->base + GEMINI_SATA_ID);
+	sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
+	sg->sata_bridge = true;
+	clk_disable(sg->sata0_pclk);
+	clk_disable(sg->sata1_pclk);
+
+	dev_info(dev, "SATA ID %08x, PHY ID: %08x\n", sata_id, sata_phy_id);
+
+	return 0;
+}
+
+static int gemini_setup_ide_pins(struct device *dev)
+{
+	struct pinctrl *p;
+	struct pinctrl_state *ide_state;
+	int ret;
+
+	p = devm_pinctrl_get(dev);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	ide_state = pinctrl_lookup_state(p, "ide");
+	if (IS_ERR(ide_state))
+		return PTR_ERR(ide_state);
+
+	ret = pinctrl_select_state(p, ide_state);
+	if (ret) {
+		dev_err(dev, "could not select IDE state\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int gemini_sata_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct sata_gemini *sg;
+	struct regmap *map;
+	struct resource *res;
+	enum gemini_muxmode muxmode;
+	u32 gmode;
+	u32 gmask;
+	int ret;
+
+	sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+	if (!sg)
+		return -ENOMEM;
+	sg->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	sg->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sg->base))
+		return PTR_ERR(sg->base);
+
+	map = syscon_regmap_lookup_by_phandle(np, "syscon");
+	if (IS_ERR(map)) {
+		dev_err(dev, "no global syscon\n");
+		return PTR_ERR(map);
+	}
+
+	/* Set up the SATA bridge if need be */
+	if (of_property_read_bool(np, "cortina,gemini-enable-sata-bridge")) {
+		ret = gemini_sata_bridge_init(sg);
+		if (ret)
+			return ret;
+	}
+
+	if (of_property_read_bool(np, "cortina,gemini-enable-ide-pins"))
+		sg->ide_pins = true;
+
+	if (!sg->sata_bridge && !sg->ide_pins) {
+		dev_err(dev, "neither SATA bridge nor IDE output enabled\n");
+		ret = -EINVAL;
+		goto out_unprep_clk;
+	}
+
+	ret = of_property_read_u32(np, "cortina,gemini-ata-muxmode", &muxmode);
+	if (ret) {
+		dev_err(dev, "could not parse ATA muxmode\n");
+		goto out_unprep_clk;
+	}
+	if (muxmode > GEMINI_MUXMODE_3) {
+		dev_err(dev, "illegal muxmode %d\n", muxmode);
+		ret = -EINVAL;
+		goto out_unprep_clk;
+	}
+	sg->muxmode = muxmode;
+	gmask = GEMINI_IDE_IOMUX_MASK;
+	gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
+
+	ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
+	if (ret) {
+		dev_err(dev, "unable to set up IDE muxing\n");
+		ret = -ENODEV;
+		goto out_unprep_clk;
+	}
+
+	/*
+	 * Route out the IDE pins if desired.
+	 * This is done by looking up a special pin control state called
+	 * "ide" that will route out the IDE pins.
+	 */
+	if (sg->ide_pins) {
+		ret = gemini_setup_ide_pins(dev);
+		if (ret)
+			return ret;
+	}
+
+	dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
+	platform_set_drvdata(pdev, sg);
+	sg_singleton = sg;
+
+	return 0;
+
+out_unprep_clk:
+	if (sg->sata_bridge) {
+		clk_unprepare(sg->sata1_pclk);
+		clk_unprepare(sg->sata0_pclk);
+	}
+	return ret;
+}
+
+static int gemini_sata_remove(struct platform_device *pdev)
+{
+	struct sata_gemini *sg = platform_get_drvdata(pdev);
+
+	if (sg->sata_bridge) {
+		clk_unprepare(sg->sata1_pclk);
+		clk_unprepare(sg->sata0_pclk);
+	}
+	sg_singleton = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id gemini_sata_of_match[] = {
+	{
+		.compatible = "cortina,gemini-sata-bridge",
+	},
+	{},
+};
+
+static struct platform_driver gemini_sata_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(gemini_sata_of_match),
+	},
+	.probe = gemini_sata_probe,
+	.remove = gemini_sata_remove,
+};
+module_platform_driver(gemini_sata_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
new file mode 100644
index 0000000..6f6e691
--- /dev/null
+++ b/drivers/ata/sata_gemini.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Header for the Gemini SATA bridge */
+#ifndef SATA_GEMINI_H
+#define SATA_GEMINI_H
+
+struct sata_gemini;
+
+enum gemini_muxmode {
+	GEMINI_MUXMODE_0 = 0,
+	GEMINI_MUXMODE_1,
+	GEMINI_MUXMODE_2,
+	GEMINI_MUXMODE_3,
+};
+
+struct sata_gemini *gemini_sata_bridge_get(void);
+bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
+enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
+int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
+void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
+int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
+
+#endif
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
new file mode 100644
index 0000000..e67815b
--- /dev/null
+++ b/drivers/ata/sata_highbank.c
@@ -0,0 +1,649 @@
+/*
+ * Calxeda Highbank AHCI SATA platform driver
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include "ahci.h"
+
+#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
+#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
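+/*
+ * The combo PHY registers are windowed: CPHY_MAP yields the value for the
+ * map register at offset 0x800 (device select plus upper address bits) and
+ * CPHY_ADDR the in-window offset, see __combo_phy_reg_read/write() below.
+ */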
+#define SERDES_CR_CTL			0x80a0
+#define SERDES_CR_ADDR			0x80a1
+#define SERDES_CR_DATA			0x80a2
+#define CR_BUSY				0x0001
+#define CR_START			0x0001
+#define CR_WR_RDN			0x0002
+#define CPHY_TX_INPUT_STS		0x2001
+#define CPHY_RX_INPUT_STS		0x2002
+#define CPHY_SATA_TX_OVERRIDE		0x8000
+#define CPHY_SATA_RX_OVERRIDE		0x4000
+#define CPHY_TX_OVERRIDE		0x2004
+#define CPHY_RX_OVERRIDE		0x2005
+#define SPHY_LANE			0x100
+#define SPHY_HALF_RATE			0x0001
+#define CPHY_SATA_DPLL_MODE		0x0700
+#define CPHY_SATA_DPLL_SHIFT		8
+#define CPHY_SATA_DPLL_RESET		(1 << 11)
+#define CPHY_SATA_TX_ATTEN		0x1c00
+#define CPHY_SATA_TX_ATTEN_SHIFT	10
+#define CPHY_PHY_COUNT			6
+#define CPHY_LANE_COUNT			4
+#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)
+
+static DEFINE_SPINLOCK(cphy_lock);
+/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
+ * sata ports to their phys and then to their lanes within the phys
+ */
+struct phy_lane_info {
+	void __iomem *phy_base;
+	u8 lane_mapping;
+	u8 phy_devs;
+	u8 tx_atten;
+};
+static struct phy_lane_info port_data[CPHY_PORT_COUNT];
+
+static DEFINE_SPINLOCK(sgpio_lock);
+#define SCLOCK				0
+#define SLOAD				1
+#define SDATA				2
+#define SGPIO_PINS			3
+#define SGPIO_PORTS			8
+
+struct ecx_plat_data {
+	u32		n_ports;
+	/* number of extra clocks that the SGPIO PIC controller expects */
+	u32		pre_clocks;
+	u32		post_clocks;
+	unsigned	sgpio_gpio[SGPIO_PINS];
+	u32		sgpio_pattern;
+	u32		port_to_sgpio[SGPIO_PORTS];
+};
+
+#define SGPIO_SIGNALS			3
+#define ECX_ACTIVITY_BITS		0x300000
+#define ECX_ACTIVITY_SHIFT		0
+#define ECX_LOCATE_BITS			0x80000
+#define ECX_LOCATE_SHIFT		1
+#define ECX_FAULT_BITS			0x400000
+#define ECX_FAULT_SHIFT			2
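+/*
+ * Each port drives three consecutive SGPIO bits (activity, locate, fault)
+ * starting at 3 * its SGPIO index, hence the shift computed below.
+ */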
+static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
+				u32 shift)
+{
+	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
+}
+
+static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
+{
+	if (state & ECX_ACTIVITY_BITS)
+		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
+						ECX_ACTIVITY_SHIFT);
+	else
+		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
+						ECX_ACTIVITY_SHIFT);
+	if (state & ECX_LOCATE_BITS)
+		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
+						ECX_LOCATE_SHIFT);
+	else
+		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
+						ECX_LOCATE_SHIFT);
+	if (state & ECX_FAULT_BITS)
+		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
+						ECX_FAULT_SHIFT);
+	else
+		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
+						ECX_FAULT_SHIFT);
+}
+
+/*
+ * Tell the LED controller that the signal has changed by raising the clock
+ * line for 50 us and then lowering it for 50 us.
+ */
+static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
+{
+	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
+	udelay(50);
+	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
+	udelay(50);
+}
+
+static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
+					ssize_t size)
+{
+	struct ahci_host_priv *hpriv =  ap->host->private_data;
+	struct ecx_plat_data *pdata = hpriv->plat_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	unsigned long flags;
+	int pmp, i;
+	struct ahci_em_priv *emp;
+	u32 sgpio_out;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp < EM_MAX_SLOTS)
+		emp = &pp->em_priv[pmp];
+	else
+		return -EINVAL;
+
+	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
+		return size;
+
+	spin_lock_irqsave(&sgpio_lock, flags);
+	ecx_parse_sgpio(pdata, ap->port_no, state);
+	sgpio_out = pdata->sgpio_pattern;
+	for (i = 0; i < pdata->pre_clocks; i++)
+		ecx_led_cycle_clock(pdata);
+
+	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
+	ecx_led_cycle_clock(pdata);
+	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
+	/*
+	 * bit-bang out the SGPIO pattern by consuming a bit and then
+	 * clocking it out.
+	 */
+	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
+		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
+		sgpio_out >>= 1;
+		ecx_led_cycle_clock(pdata);
+	}
+	for (i = 0; i < pdata->post_clocks; i++)
+		ecx_led_cycle_clock(pdata);
+
+	/* save off new led state for port/slot */
+	emp->led_state = state;
+
+	spin_unlock_irqrestore(&sgpio_lock, flags);
+	return size;
+}
+
+static void highbank_set_em_messages(struct device *dev,
+					struct ahci_host_priv *hpriv,
+					struct ata_port_info *pi)
+{
+	struct device_node *np = dev->of_node;
+	struct ecx_plat_data *pdata = hpriv->plat_data;
+	int i;
+	int err;
+
+	for (i = 0; i < SGPIO_PINS; i++) {
+		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
+		if (err < 0)
+			return;
+
+		pdata->sgpio_gpio[i] = err;
+		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
+		if (err) {
+			pr_err("sata_highbank gpio_request %d failed: %d\n",
+					i, err);
+			return;
+		}
+		gpio_direction_output(pdata->sgpio_gpio[i], 1);
+	}
+	of_property_read_u32_array(np, "calxeda,led-order",
+						pdata->port_to_sgpio,
+						pdata->n_ports);
+	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
+		pdata->pre_clocks = 0;
+	if (of_property_read_u32(np, "calxeda,post-clocks",
+				&pdata->post_clocks))
+		pdata->post_clocks = 0;
+
+	/* store em_loc */
+	hpriv->em_loc = 0;
+	hpriv->em_buf_sz = 4;
+	hpriv->em_msg_type = EM_MSG_TYPE_LED;
+	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
+}
+
+static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
+{
+	u32 data;
+	u8 dev = port_data[sata_port].phy_devs;
+	spin_lock(&cphy_lock);
+	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
+	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
+	spin_unlock(&cphy_lock);
+	return data;
+}
+
+static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
+{
+	u8 dev = port_data[sata_port].phy_devs;
+	spin_lock(&cphy_lock);
+	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
+	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
+	spin_unlock(&cphy_lock);
+}
+
+static void combo_phy_wait_for_ready(u8 sata_port)
+{
+	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
+		udelay(5);
+}
+
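+/*
+ * Indirect SERDES CR access: write the target address, start the
+ * transaction via CR_START (OR-ed with CR_WR_RDN for writes), and wait
+ * for CR_BUSY to clear before touching the data register.
+ */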
+static u32 combo_phy_read(u8 sata_port, u32 addr)
+{
+	combo_phy_wait_for_ready(sata_port);
+	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
+	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
+	combo_phy_wait_for_ready(sata_port);
+	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
+}
+
+static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
+{
+	combo_phy_wait_for_ready(sata_port);
+	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
+	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
+	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
+}
+
+static void highbank_cphy_disable_overrides(u8 sata_port)
+{
+	u8 lane = port_data[sata_port].lane_mapping;
+	u32 tmp;
+	if (unlikely(port_data[sata_port].phy_base == NULL))
+		return;
+	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
+	tmp &= ~CPHY_SATA_RX_OVERRIDE;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+}
+
+static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
+{
+	u8 lane = port_data[sata_port].lane_mapping;
+	u32 tmp;
+
+	if (val & 0x8)
+		return;
+
+	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
+	tmp &= ~CPHY_SATA_TX_OVERRIDE;
+	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp |= CPHY_SATA_TX_OVERRIDE;
+	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
+	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+}
+
+static void cphy_override_rx_mode(u8 sata_port, u32 val)
+{
+	u8 lane = port_data[sata_port].lane_mapping;
+	u32 tmp;
+	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
+	tmp &= ~CPHY_SATA_RX_OVERRIDE;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp |= CPHY_SATA_RX_OVERRIDE;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp &= ~CPHY_SATA_DPLL_MODE;
+	tmp |= val << CPHY_SATA_DPLL_SHIFT;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp |= CPHY_SATA_DPLL_RESET;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	tmp &= ~CPHY_SATA_DPLL_RESET;
+	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+	msleep(15);
+}
+
+static void highbank_cphy_override_lane(u8 sata_port)
+{
+	u8 lane = port_data[sata_port].lane_mapping;
+	u32 tmp, k = 0;
+
+	if (unlikely(port_data[sata_port].phy_base == NULL))
+		return;
+	do {
+		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
+						lane * SPHY_LANE);
+	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
+	cphy_override_rx_mode(sata_port, 3);
+	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
+}
+
+static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
+{
+	struct device_node *sata_node = dev->of_node;
+	int phy_count = 0, phy, port = 0, i;
+	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
+	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
+	u32 tx_atten[CPHY_PORT_COUNT] = {};
+
+	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
+
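+	/*
+	 * Walk the "calxeda,port-phys" phandle list: each entry names a
+	 * combo PHY node and the lane within it used by that SATA port.
+	 */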
+	do {
+		u32 tmp;
+		struct of_phandle_args phy_data;
+		if (of_parse_phandle_with_args(sata_node,
+				"calxeda,port-phys", "#phy-cells",
+				port, &phy_data))
+			break;
+		for (phy = 0; phy < phy_count; phy++) {
+			if (phy_nodes[phy] == phy_data.np)
+				break;
+		}
+		if (phy_nodes[phy] == NULL) {
+			phy_nodes[phy] = phy_data.np;
+			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
+			if (cphy_base[phy] == NULL) {
+				return 0;
+			}
+			phy_count += 1;
+		}
+		port_data[port].lane_mapping = phy_data.args[0];
+		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
+		port_data[port].phy_devs = tmp;
+		port_data[port].phy_base = cphy_base[phy];
+		of_node_put(phy_data.np);
+		port += 1;
+	} while (port < CPHY_PORT_COUNT);
+	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
+				tx_atten, port);
+	for (i = 0; i < port; i++)
+		port_data[i].tx_atten = (u8) tx_atten[i];
+	return 0;
+}
+
+/*
+ * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
+ * Retrying the phy hard reset can work around the issue, but the drive
+ * may fail again. In less than 150 out of 15000 test runs, it took more
+ * than 10 tries for the link to be established (but never more than 35).
+ * Triple the maximum observed retry count to provide plenty of margin for
+ * rare events and to guarantee that the link is established.
+ *
+ * Also, the default 2 second time-out on a failed drive is too long in
+ * this situation. The U-Boot implementation of the same driver function
+ * uses a much shorter time-out period and never experiences a time-out
+ * issue. Reducing the time-out to 500ms improves the responsiveness.
+ * The other timing constants were kept the same as the stock AHCI driver.
+ * This change was also tested 15000 times on 24 drives and none of them
+ * experienced a time out.
+ */
+static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	static const unsigned long timing[] = { 5, 100, 500};
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	bool online;
+	u32 sstatus;
+	int rc;
+	int retry = 100;
+
+	hpriv->stop_engine(ap);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = ATA_BUSY;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	do {
+		highbank_cphy_disable_overrides(link->ap->port_no);
+		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+		highbank_cphy_override_lane(link->ap->port_no);
+
+		/* If the status is 1, we are connected, but the link did not
+		 * come up. So retry resetting the link again.
+		 */
+		if (sata_scr_read(link, SCR_STATUS, &sstatus))
+			break;
+		if (!(sstatus & 0x3))
+			break;
+	} while (!online && retry--);
+
+	hpriv->start_engine(ap);
+
+	if (online)
+		*class = ahci_dev_classify(ap);
+
+	return rc;
+}
+
+static struct ata_port_operations ahci_highbank_ops = {
+	.inherits		= &ahci_ops,
+	.hardreset		= ahci_highbank_hardreset,
+	.transmit_led_message   = ecx_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_highbank_port_info = {
+	.flags          = AHCI_FLAG_COMMON,
+	.pio_mask       = ATA_PIO4,
+	.udma_mask      = ATA_UDMA6,
+	.port_ops       = &ahci_highbank_ops,
+};
+
+static struct scsi_host_template ahci_highbank_platform_sht = {
+	AHCI_SHT("sata_highbank"),
+};
+
+static const struct of_device_id ahci_of_match[] = {
+	{ .compatible = "calxeda,hb-ahci" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static int ahci_highbank_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ecx_plat_data *pdata;
+	struct ata_host *host;
+	struct resource *mem;
+	int irq;
+	int i;
+	int rc;
+	u32 n_ports;
+	struct ata_port_info pi = ahci_highbank_port_info;
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(dev, "no mmio space\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "no irq\n");
+		return -EINVAL;
+	}
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv) {
+		dev_err(dev, "can't alloc ahci_host_priv\n");
+		return -ENOMEM;
+	}
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "can't alloc ecx_plat_data\n");
+		return -ENOMEM;
+	}
+
+	hpriv->irq = irq;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+	if (!hpriv->mmio) {
+		dev_err(dev, "can't map %pR\n", mem);
+		return -ENOMEM;
+	}
+
+	rc = highbank_initialize_phys(dev, hpriv->mmio);
+	if (rc)
+		return rc;
+
+	ahci_save_initial_config(dev, hpriv);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	if (hpriv->cap & HOST_CAP_64)
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	pdata->n_ports = n_ports;
+	hpriv->plat_data = pdata;
+	highbank_set_em_messages(dev, hpriv, &pi);
+
+	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+	if (!host) {
+		rc = -ENOMEM;
+		goto err0;
+	}
+
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_desc(ap, "mmio %pR", mem);
+		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+		/* set enclosure management message type */
+		if (ap->flags & ATA_FLAG_EM)
+			ap->em_message_type = hpriv->em_msg_type;
+
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		goto err0;
+
+	ahci_init_controller(host);
+	ahci_print_info(host, "platform");
+
+	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
+	if (rc)
+		goto err0;
+
+	return 0;
+err0:
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_highbank_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+	int rc;
+
+	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(dev, "firmware update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	/*
+	 * AHCI spec rev1.1 section 8.3.3:
+	 * Software must disable interrupts prior to requesting a
+	 * transition of the HBA to D3 state.
+	 */
+	ctl = readl(mmio + HOST_CTL);
+	ctl &= ~HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+
+	rc = ata_host_suspend(host, PMSG_SUSPEND);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int ahci_highbank_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	int rc;
+
+	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
+		  ahci_highbank_suspend, ahci_highbank_resume);
+
+static struct platform_driver ahci_highbank_driver = {
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = "highbank-ahci",
+		.of_match_table = ahci_of_match,
+		.pm = &ahci_highbank_pm_ops,
+	},
+	.probe = ahci_highbank_probe,
+};
+
+module_platform_driver(ahci_highbank_driver);
+
+MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("sata:highbank");
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
new file mode 100644
index 0000000..9b6d793
--- /dev/null
+++ b/drivers/ata/sata_inic162x.c
@@ -0,0 +1,915 @@
+/*
+ * sata_inic162x.c - Driver for Initio 162x SATA controllers
+ *
+ * Copyright 2006  SUSE Linux Products GmbH
+ * Copyright 2006  Tejun Heo <teheo@novell.com>
+ *
+ * This file is released under GPL v2.
+ *
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common.  There isn't anyone working on the driver and there's
+ * no support from the vendor.  Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
+ * This controller is eccentric and easily locks up if something isn't
+ * right.  Documentation is available at initio's website but it only
+ * documents registers (not programming model).
+ *
+ * This driver has an interesting history.  The first version was written
+ * from the documentation and a 2.4 IDE driver posted by a Taiwanese
+ * company, which didn't use any IDMA features and couldn't handle
+ * LBA48.  The resulting driver couldn't handle LBA48 devices either,
+ * making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
+ * attaching both devices and issuing IDMA and !IDMA commands
+ * simultaneously broke it due to PIRQ masking interaction but it did
+ * show how to use the IDMA (ADMA + some initio specific twists)
+ * engine.
+ *
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything.  Everything works now including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
+ * issues though.  The result TF is not reported properly, NCQ isn't
+ * supported yet and CD/DVD writing works with DMA assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyways, so, here's finally a working driver for inic162x.  Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me.  I'll be
+ * happy to assist.
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_device.h>
+
+#define DRV_NAME	"sata_inic162x"
+#define DRV_VERSION	"0.4"
+
+enum {
+	MMIO_BAR_PCI		= 5,
+	MMIO_BAR_CARDBUS	= 1,
+
+	NR_PORTS		= 2,
+
+	IDMA_CPB_TBL_SIZE	= 4 * 32,
+
+	INIC_DMA_BOUNDARY	= 0xffffff,
+
+	HOST_ACTRL		= 0x08,
+	HOST_CTL		= 0x7c,
+	HOST_STAT		= 0x7e,
+	HOST_IRQ_STAT		= 0xbc,
+	HOST_IRQ_MASK		= 0xbe,
+
+	PORT_SIZE		= 0x40,
+
+	/* registers for ATA TF operation */
+	PORT_TF_DATA		= 0x00,
+	PORT_TF_FEATURE		= 0x01,
+	PORT_TF_NSECT		= 0x02,
+	PORT_TF_LBAL		= 0x03,
+	PORT_TF_LBAM		= 0x04,
+	PORT_TF_LBAH		= 0x05,
+	PORT_TF_DEVICE		= 0x06,
+	PORT_TF_COMMAND		= 0x07,
+	PORT_TF_ALT_STAT	= 0x08,
+	PORT_IRQ_STAT		= 0x09,
+	PORT_IRQ_MASK		= 0x0a,
+	PORT_PRD_CTL		= 0x0b,
+	PORT_PRD_ADDR		= 0x0c,
+	PORT_PRD_XFERLEN	= 0x10,
+	PORT_CPB_CPBLAR		= 0x18,
+	PORT_CPB_PTQFIFO	= 0x1c,
+
+	/* IDMA register */
+	PORT_IDMA_CTL		= 0x14,
+	PORT_IDMA_STAT		= 0x16,
+
+	PORT_RPQ_FIFO		= 0x1e,
+	PORT_RPQ_CNT		= 0x1f,
+
+	PORT_SCR		= 0x20,
+
+	/* HOST_CTL bits */
+	HCTL_LEDEN		= (1 << 3),  /* enable LED operation */
+	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
+	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
+	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
+	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
+	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
+	HCTL_RPGSEL		= (1 << 15), /* register page select */
+
+	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
+				  HCTL_RPGSEL,
+
+	/* HOST_IRQ_(STAT|MASK) bits */
+	HIRQ_PORT0		= (1 << 0),
+	HIRQ_PORT1		= (1 << 1),
+	HIRQ_SOFT		= (1 << 14),
+	HIRQ_GLOBAL		= (1 << 15), /* STAT only */
+
+	/* PORT_IRQ_(STAT|MASK) bits */
+	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
+	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
+	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
+	PIRQ_FATAL		= (1 << 3),  /* fatal error */
+	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
+	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
+	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */
+
+	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
+	PIRQ_MASK_FREEZE	= 0xff,
+
+	/* PORT_PRD_CTL bits */
+	PRD_CTL_START		= (1 << 0),
+	PRD_CTL_WR		= (1 << 3),
+	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */
+
+	/* PORT_IDMA_CTL bits */
+	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
+	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
+	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
+	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
+
+	/* PORT_IDMA_STAT bits */
+	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
+	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
+	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
+	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
+	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
+	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
+	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */
+
+	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
+	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
+	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
+	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
+	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */
+
+	/* CPB Response Flags */
+	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
+	CPB_RESP_REL		= (1 << 1),  /* ATA release */
+	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
+	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
+	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
+	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
+	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */
+
+	/* PRD Control Flags */
+	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
+	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
+	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
+	PRD_DMA			= (1 << 4),  /* data transfer method */
+	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
+	PRD_IOM			= (1 << 6),  /* io/memory transfer */
+	PRD_END			= (1 << 7),  /* APRD chain end */
+};
+
+/* Command Parameter Block */
+struct inic_cpb {
+	u8		resp_flags;	/* Response Flags */
+	u8		error;		/* ATA Error */
+	u8		status;		/* ATA Status */
+	u8		ctl_flags;	/* Control Flags */
+	__le32		len;		/* Total Transfer Length */
+	__le32		prd;		/* First PRD pointer */
+	u8		rsvd[4];
+	/* 16 bytes */
+	u8		feature;	/* ATA Feature */
+	u8		hob_feature;	/* ATA Ex. Feature */
+	u8		device;		/* ATA Device/Head */
+	u8		mirctl;		/* Mirror Control */
+	u8		nsect;		/* ATA Sector Count */
+	u8		hob_nsect;	/* ATA Ex. Sector Count */
+	u8		lbal;		/* ATA Sector Number */
+	u8		hob_lbal;	/* ATA Ex. Sector Number */
+	u8		lbam;		/* ATA Cylinder Low */
+	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
+	u8		lbah;		/* ATA Cylinder High */
+	u8		hob_lbah;	/* ATA Ex. Cylinder High */
+	u8		command;	/* ATA Command */
+	u8		ctl;		/* ATA Control */
+	u8		slave_error;	/* Slave ATA Error */
+	u8		slave_status;	/* Slave ATA Status */
+	/* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+	__le32		mad;		/* Physical Memory Address */
+	__le16		len;		/* Transfer Length */
+	u8		rsvd;
+	u8		flags;		/* Control Flags */
+} __packed;
+
+struct inic_pkt {
+	struct inic_cpb	cpb;
+	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
+	u8		cdb[ATAPI_CDB_LEN];
+} __packed;
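+
+/*
+ * The whole inic_pkt occupies one coherent DMA buffer: the CPB at
+ * offset 0, the APRD table right after it and the ATAPI CDB last.
+ * inic_qc_prep() points cpb->prd (and, for ATAPI, the CDB APRD)
+ * back into this buffer via offsetof().
+ */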
+
+struct inic_host_priv {
+	void __iomem	*mmio_base;
+	u16		cached_hctl;
+};
+
+struct inic_port_priv {
+	struct inic_pkt	*pkt;
+	dma_addr_t	pkt_dma;
+	u32		*cpb_tbl;
+	dma_addr_t	cpb_tbl_dma;
+};
+
+static struct scsi_host_template inic_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
+};
+
+static const int scr_map[] = {
+	[SCR_STATUS]	= 0,
+	[SCR_ERROR]	= 1,
+	[SCR_CONTROL]	= 2,
+};
+
+static void __iomem *inic_port_base(struct ata_port *ap)
+{
+	struct inic_host_priv *hpriv = ap->host->private_data;
+
+	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
+}
+
+static void inic_reset_port(void __iomem *port_base)
+{
+	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
+
+	/* mask IRQ and assert reset */
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
+	readw(idma_ctl); /* flush */
+	msleep(1);
+
+	/* release reset */
+	writew(0, idma_ctl);
+
+	/* clear irq */
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+}
+
+static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
+{
+	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
+
+	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+		return -EINVAL;
+
+	*val = readl(scr_addr + scr_map[sc_reg] * 4);
+
+	/* this controller has stuck DIAG.N, ignore it */
+	if (sc_reg == SCR_ERROR)
+		*val &= ~SERR_PHYRDY_CHG;
+	return 0;
+}
+
+static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
+{
+	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
+
+	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+		return -EINVAL;
+
+	writel(val, scr_addr + scr_map[sc_reg] * 4);
+	return 0;
+}
+
+static void inic_stop_idma(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	readb(port_base + PORT_RPQ_FIFO);
+	readb(port_base + PORT_RPQ_CNT);
+	writew(0, port_base + PORT_IDMA_CTL);
+}
+
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct inic_port_priv *pp = ap->private_data;
+	struct inic_cpb *cpb = &pp->pkt->cpb;
+	bool freeze = false;
+
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+			  irq_stat, idma_stat);
+
+	inic_stop_idma(ap);
+
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_push_desc(ehi, "hotplug");
+		ata_ehi_hotplugged(ehi);
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_PERR) {
+		ata_ehi_push_desc(ehi, "PCI error");
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_CPBERR) {
+		ata_ehi_push_desc(ehi, "CPB error");
+
+		if (cpb->resp_flags & CPB_RESP_IGNORED) {
+			__ata_ehi_push_desc(ehi, " ignored");
+			ehi->err_mask |= AC_ERR_INVALID;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+			ehi->err_mask |= AC_ERR_DEV;
+
+		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+			__ata_ehi_push_desc(ehi, " spurious-intr");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags &
+		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+			__ata_ehi_push_desc(ehi, " data-over/underflow");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+	}
+
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void inic_host_intr(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	u8 irq_stat;
+	u16 idma_stat;
+
+	/* read and clear IRQ status */
+	irq_stat = readb(port_base + PORT_IRQ_STAT);
+	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+	idma_stat = readw(port_base + PORT_IDMA_STAT);
+
+	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+		inic_host_err_intr(ap, irq_stat, idma_stat);
+
+	if (unlikely(!qc))
+		goto spurious;
+
+	if (likely(idma_stat & IDMA_STAT_DONE)) {
+		inic_stop_idma(ap);
+
+		/* Depending on circumstances, device error
+		 * isn't reported by IDMA, check it explicitly.
+		 */
+		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+			     (ATA_DF | ATA_ERR)))
+			qc->err_mask |= AC_ERR_DEV;
+
+		ata_qc_complete(qc);
+		return;
+	}
+
+ spurious:
+	ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+		      qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
+}
+
+static irqreturn_t inic_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct inic_host_priv *hpriv = host->private_data;
+	u16 host_irq_stat;
+	int i, handled = 0;
+
+	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
+
+	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
+		goto out;
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < NR_PORTS; i++)
+		if (host_irq_stat & (HIRQ_PORT0 << i)) {
+			inic_host_intr(host->ports[i]);
+			handled++;
+		}
+
+	spin_unlock(&host->lock);
+
+ out:
+	return IRQ_RETVAL(handled);
+}
+
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* For some reason ATAPI_PROT_DMA doesn't work for some
+	 * commands including writes and other misc ops.  Use PIO
+	 * protocol instead, which BTW is driven by the DMA engine
+	 * anyway, so it shouldn't make much difference for native
+	 * SATA devices.
+	 */
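+	/* a nonzero return tells libata to avoid DMA for this command */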
+	if (atapi_cmd_type(qc->cdb[0]) == READ)
+		return 0;
+	return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	unsigned int si;
+	u8 flags = 0;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= PRD_WRITE;
+
+	if (ata_is_dma(qc->tf.protocol))
+		flags |= PRD_DMA;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		prd->mad = cpu_to_le32(sg_dma_address(sg));
+		prd->len = cpu_to_le16(sg_dma_len(sg));
+		prd->flags = flags;
+		prd++;
+	}
+
+	WARN_ON(!si);
+	prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct inic_port_priv *pp = qc->ap->private_data;
+	struct inic_pkt *pkt = pp->pkt;
+	struct inic_cpb *cpb = &pkt->cpb;
+	struct inic_prd *prd = pkt->prd;
+	bool is_atapi = ata_is_atapi(qc->tf.protocol);
+	bool is_data = ata_is_data(qc->tf.protocol);
+	unsigned int cdb_len = 0;
+
+	VPRINTK("ENTER\n");
+
+	if (is_atapi)
+		cdb_len = qc->dev->cdb_len;
+
+	/* prepare packet, based on initio driver */
+	memset(pkt, 0, sizeof(struct inic_pkt));
+
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+	if (is_atapi || is_data)
+		cpb->ctl_flags |= CPB_CTL_DATA;
+
+	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+	cpb->device = qc->tf.device;
+	cpb->feature = qc->tf.feature;
+	cpb->nsect = qc->tf.nsect;
+	cpb->lbal = qc->tf.lbal;
+	cpb->lbam = qc->tf.lbam;
+	cpb->lbah = qc->tf.lbah;
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48) {
+		cpb->hob_feature = qc->tf.hob_feature;
+		cpb->hob_nsect = qc->tf.hob_nsect;
+		cpb->hob_lbal = qc->tf.hob_lbal;
+		cpb->hob_lbam = qc->tf.hob_lbam;
+		cpb->hob_lbah = qc->tf.hob_lbah;
+	}
+
+	cpb->command = qc->tf.command;
+	/* don't load ctl - reason unknown; the initio driver does the same */
+
+	/* setup PRD for CDB */
+	if (is_atapi) {
+		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+		prd->mad = cpu_to_le32(pp->pkt_dma +
+				       offsetof(struct inic_pkt, cdb));
+		prd->len = cpu_to_le16(cdb_len);
+		prd->flags = PRD_CDB | PRD_WRITE;
+		if (!is_data)
+			prd->flags |= PRD_END;
+		prd++;
+	}
+
+	/* setup sg table */
+	if (is_data)
+		inic_fill_sg(prd, qc);
+
+	pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
+static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_base = inic_port_base(ap);
+
+	/* fire up the ADMA engine */
+	writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
+	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
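+	/* queue CPB slot 0 - the only CPB slot this driver ever uses */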
+	writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+	return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	tf->feature	= readb(port_base + PORT_TF_FEATURE);
+	tf->nsect	= readb(port_base + PORT_TF_NSECT);
+	tf->lbal	= readb(port_base + PORT_TF_LBAL);
+	tf->lbam	= readb(port_base + PORT_TF_LBAM);
+	tf->lbah	= readb(port_base + PORT_TF_LBAH);
+	tf->device	= readb(port_base + PORT_TF_DEVICE);
+	tf->command	= readb(port_base + PORT_TF_COMMAND);
+}
+
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *rtf = &qc->result_tf;
+	struct ata_taskfile tf;
+
+	/* FIXME: Except for status and error, result TF access
+	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
+	 * None works regardless of which command interface is used.
+	 * For now return true iff status indicates device error.
+	 * This means that we're reporting a bogus sector address
+	 * for RW failures.  Eeekk....
+	 */
+	inic_tf_read(qc->ap, &tf);
+
+	if (!(tf.command & ATA_ERR))
+		return false;
+
+	rtf->command = tf.command;
+	rtf->feature = tf.feature;
+	return true;
+}
+
+static void inic_freeze(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+}
+
+static void inic_thaw(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
+
+static int inic_check_ready(struct ata_link *link)
+{
+	void __iomem *port_base = inic_port_base(link->ap);
+
+	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
+}
+
+/*
+ * SRST and SControl hardreset don't give valid signature on this
+ * controller.  Only controller specific hardreset mechanism works.
+ */
+static int inic_hardreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *port_base = inic_port_base(ap);
+	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	int rc;
+
+	/* hammer it into sane state */
+	inic_reset_port(port_base);
+
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
+	readw(idma_ctl);	/* flush */
+	ata_msleep(ap, 1);
+	writew(0, idma_ctl);
+
+	rc = sata_link_resume(link, timing, deadline);
+	if (rc) {
+		ata_link_warn(link,
+			      "failed to resume link after reset (errno=%d)\n",
+			      rc);
+		return rc;
+	}
+
+	*class = ATA_DEV_NONE;
+	if (ata_link_online(link)) {
+		struct ata_taskfile tf;
+
+		/* wait for link to become ready */
+		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
+		/* link occupied, -ENODEV too is an error */
+		if (rc) {
+			ata_link_warn(link,
+				      "device not ready after hardreset (errno=%d)\n",
+				      rc);
+			return rc;
+		}
+
+		inic_tf_read(ap, &tf);
+		*class = ata_dev_classify(&tf);
+	}
+
+	return 0;
+}
+
+static void inic_error_handler(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	inic_reset_port(port_base);
+	ata_std_error_handler(ap);
+}
+
+static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		inic_reset_port(inic_port_base(qc->ap));
+}
+
+static void init_port(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
+
+	/* clear packet and CPB table */
+	memset(pp->pkt, 0, sizeof(struct inic_pkt));
+	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+	/* setup CPB lookup table addresses */
+	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
+}
+
+static int inic_port_resume(struct ata_port *ap)
+{
+	init_port(ap);
+	return 0;
+}
+
+static int inic_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct inic_port_priv *pp;
+
+	/* alloc and initialize private data */
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	ap->private_data = pp;
+
+	/* Alloc resources */
+	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+				      &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+					  &pp->cpb_tbl_dma, GFP_KERNEL);
+	if (!pp->cpb_tbl)
+		return -ENOMEM;
+
+	init_port(ap);
+
+	return 0;
+}
+
+static struct ata_port_operations inic_port_ops = {
+	.inherits		= &sata_port_ops,
+
+	.check_atapi_dma	= inic_check_atapi_dma,
+	.qc_prep		= inic_qc_prep,
+	.qc_issue		= inic_qc_issue,
+	.qc_fill_rtf		= inic_qc_fill_rtf,
+
+	.freeze			= inic_freeze,
+	.thaw			= inic_thaw,
+	.hardreset		= inic_hardreset,
+	.error_handler		= inic_error_handler,
+	.post_internal_cmd	= inic_post_internal_cmd,
+
+	.scr_read		= inic_scr_read,
+	.scr_write		= inic_scr_write,
+
+	.port_resume		= inic_port_resume,
+	.port_start		= inic_port_start,
+};
+
+static const struct ata_port_info inic_port_info = {
+	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+	.pio_mask		= ATA_PIO4,
+	.mwdma_mask		= ATA_MWDMA2,
+	.udma_mask		= ATA_UDMA6,
+	.port_ops		= &inic_port_ops
+};
+
+static int init_controller(void __iomem *mmio_base, u16 hctl)
+{
+	int i;
+	u16 val;
+
+	hctl &= ~HCTL_KNOWN_BITS;
+
+	/* Soft reset whole controller.  Spec says reset duration is 3
+	 * PCI clocks, be generous and give it 10ms.
+	 */
+	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
+	readw(mmio_base + HOST_CTL); /* flush */
+
+	for (i = 0; i < 10; i++) {
+		msleep(1);
+		val = readw(mmio_base + HOST_CTL);
+		if (!(val & HCTL_SOFTRST))
+			break;
+	}
+
+	if (val & HCTL_SOFTRST)
+		return -EIO;
+
+	/* mask all interrupts and reset ports */
+	for (i = 0; i < NR_PORTS; i++) {
+		void __iomem *port_base = mmio_base + i * PORT_SIZE;
+
+		writeb(0xff, port_base + PORT_IRQ_MASK);
+		inic_reset_port(port_base);
+	}
+
+	/* port IRQ is masked now, unmask global IRQ */
+	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
+	val = readw(mmio_base + HOST_IRQ_MASK);
+	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
+	writew(val, mmio_base + HOST_IRQ_MASK);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int inic_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct inic_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
+		if (rc)
+			return rc;
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
+	struct ata_host *host;
+	struct inic_host_priv *hpriv;
+	void __iomem * const *iomap;
+	int mmio_bar;
+	int i, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+
+	host->private_data = hpriv;
+
+	/* Acquire resources and fill host.  Note that PCI and cardbus
+	 * use different BARs.
+	 */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
+		mmio_bar = MMIO_BAR_PCI;
+	else
+		mmio_bar = MMIO_BAR_CARDBUS;
+
+	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = iomap = pcim_iomap_table(pdev);
+	hpriv->mmio_base = iomap[mmio_bar];
+	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
+
+	for (i = 0; i < NR_PORTS; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
+		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
+	}
+
+	/* Set dma_mask.  This device doesn't support 64-bit addressing. */
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+		return rc;
+	}
+
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
+		return rc;
+	}
+
+	/*
+	 * This controller is braindamaged.  dma_boundary is 0xffff
+	 * like others but it will lock up the whole machine HARD if
+	 * a 65536-byte PRD entry is fed.  Reduce the maximum segment size.
+	 */
+	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set the maximum segment size\n");
+		return rc;
+	}
+
+	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to initialize controller\n");
+		return rc;
+	}
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
+				 &inic_sht);
+}
+
+static const struct pci_device_id inic_pci_tbl[] = {
+	{ PCI_VDEVICE(INIT, 0x1622), },
+	{ },
+};
+
+static struct pci_driver inic_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= inic_pci_tbl,
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= ata_pci_device_suspend,
+	.resume		= inic_pci_device_resume,
+#endif
+	.probe 		= inic_init_one,
+	.remove		= ata_pci_remove_one,
+};
+
+module_pci_driver(inic_pci_driver);
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
new file mode 100644
index 0000000..73ba8e1
--- /dev/null
+++ b/drivers/ata/sata_mv.c
@@ -0,0 +1,4536 @@
+/*
+ * sata_mv.c - Marvell SATA support
+ *
+ * Copyright 2008-2009: Marvell Corporation, all rights reserved.
+ * Copyright 2005: EMC Corporation, all rights reserved.
+ * Copyright 2005 Red Hat, Inc.  All rights reserved.
+ *
+ * Originally written by Brett Russ.
+ * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
+ *
+ * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+/*
+ * sata_mv TODO list:
+ *
+ * --> Develop a low-power-consumption strategy, and implement it.
+ *
+ * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
+ *
+ * --> [Experiment, Marvell value added] Is it possible to use target
+ *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
+ *       creating LibATA target mode support would be very interesting.
+ *
+ *       Target mode, for those without docs, is the ability to directly
+ *       connect two SATA ports.
+ */
+
+/*
+ * 80x1-B2 errata PCI#11:
+ *
+ * Users of the 6041/6081 Rev.B2 chips (current is C0)
+ * should be careful to insert those cards only onto PCI-X bus #0,
+ * and only in device slots 0..7, not higher.  The chips may not
+ * work correctly otherwise (note: this is a pretty rare condition).
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/mbus.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_mv"
+#define DRV_VERSION	"1.28"
+
+/*
+ * module options
+ */
+
+#ifdef CONFIG_PCI
+static int msi;
+module_param(msi, int, S_IRUGO);
+MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
+#endif
+
+static int irq_coalescing_io_count;
+module_param(irq_coalescing_io_count, int, S_IRUGO);
+MODULE_PARM_DESC(irq_coalescing_io_count,
+		 "IRQ coalescing I/O count threshold (0..255)");
+
+static int irq_coalescing_usecs;
+module_param(irq_coalescing_usecs, int, S_IRUGO);
+MODULE_PARM_DESC(irq_coalescing_usecs,
+		 "IRQ coalescing time threshold in usecs");
+
+enum {
+	/* BAR's are enumerated in terms of pci_resource_start() terms */
+	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
+	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
+	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
+
+	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
+	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
+
+	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
+	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
+	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
+	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
+
+	MV_PCI_REG_BASE		= 0,
+
+	/*
+	 * Per-chip ("all ports") interrupt coalescing feature.
+	 * This is only for GEN_II / GEN_IIE hardware.
+	 *
+	 * Coalescing defers the interrupt until either the IO_THRESHOLD
+	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
+	 */
+	COAL_REG_BASE		= 0x18000,
+	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
+	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
+
+	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
+	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
+
+	/*
+	 * Registers for the (unused here) transaction coalescing feature:
+	 */
+	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
+	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
+
+	SATAHC0_REG_BASE	= 0x20000,
+	FLASH_CTL		= 0x1046c,
+	GPIO_PORT_CTL		= 0x104f0,
+	RESET_CFG		= 0x180d8,
+
+	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
+	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
+	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
+	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
+
+	MV_MAX_Q_DEPTH		= 32,
+	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
+
+	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
+	 * CRPB needs alignment on a 256B boundary. Size == 256B
+	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
+	 */
+	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
+	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
+	MV_MAX_SG_CT		= 256,
+	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
+
+	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
+	MV_PORT_HC_SHIFT	= 2,
+	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
+	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
+	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
+
+	/* Host Flags */
+	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
+
+	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
+
+	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
+
+	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
+
+	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
+
+	CRQB_FLAG_READ		= (1 << 0),
+	CRQB_TAG_SHIFT		= 1,
+	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
+	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
+	CRQB_CMD_ADDR_SHIFT	= 8,
+	CRQB_CMD_CS		= (0x2 << 11),
+	CRQB_CMD_LAST		= (1 << 15),
+
+	CRPB_FLAG_STATUS_SHIFT	= 8,
+	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
+	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
+
+	EPRD_FLAG_END_OF_TBL	= (1 << 31),
+
+	/* PCI interface registers */
+
+	MV_PCI_COMMAND		= 0xc00,
+	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
+	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
+
+	PCI_MAIN_CMD_STS	= 0xd30,
+	STOP_PCI_MASTER		= (1 << 2),
+	PCI_MASTER_EMPTY	= (1 << 3),
+	GLOB_SFT_RST		= (1 << 4),
+
+	MV_PCI_MODE		= 0xd00,
+	MV_PCI_MODE_MASK	= 0x30,
+
+	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
+	MV_PCI_DISC_TIMER	= 0xd04,
+	MV_PCI_MSI_TRIGGER	= 0xc38,
+	MV_PCI_SERR_MASK	= 0xc28,
+	MV_PCI_XBAR_TMOUT	= 0x1d04,
+	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
+	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
+	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
+	MV_PCI_ERR_COMMAND	= 0x1d50,
+
+	PCI_IRQ_CAUSE		= 0x1d58,
+	PCI_IRQ_MASK		= 0x1d5c,
+	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
+
+	PCIE_IRQ_CAUSE		= 0x1900,
+	PCIE_IRQ_MASK		= 0x1910,
+	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
+
+	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
+	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
+	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
+	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
+	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
+	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
+	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
+	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
+	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
+	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
+	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
+	PCI_ERR			= (1 << 18),
+	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
+	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
+	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
+	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
+	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
+	GPIO_INT		= (1 << 22),
+	SELF_INT		= (1 << 23),
+	TWSI_INT		= (1 << 24),
+	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
+	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
+	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
+
+	/* SATAHC registers */
+	HC_CFG			= 0x00,
+
+	HC_IRQ_CAUSE		= 0x14,
+	DMA_IRQ			= (1 << 0),	/* shift by port # */
+	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
+	DEV_IRQ			= (1 << 8),	/* shift by port # */
+
+	/*
+	 * Per-HC (Host-Controller) interrupt coalescing feature.
+	 * This is present on all chip generations.
+	 *
+	 * Coalescing defers the interrupt until either the IO_THRESHOLD
+	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
+	 */
+	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
+	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
+
+	SOC_LED_CTRL		= 0x2c,
+	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
+	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
+						/*  with dev activity LED */
+
+	/* Shadow block registers */
+	SHD_BLK			= 0x100,
+	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
+
+	/* SATA registers */
+	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
+	SATA_ACTIVE		= 0x350,
+	FIS_IRQ_CAUSE		= 0x364,
+	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
+
+	LTMODE			= 0x30c,	/* requires read-after-write */
+	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
+
+	PHY_MODE2		= 0x330,
+	PHY_MODE3		= 0x310,
+
+	PHY_MODE4		= 0x314,	/* requires read-after-write */
+	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
+	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
+	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
+	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
+
+	SATA_IFCTL		= 0x344,
+	SATA_TESTCTL		= 0x348,
+	SATA_IFSTAT		= 0x34c,
+	VENDOR_UNIQUE_FIS	= 0x35c,
+
+	FISCFG			= 0x360,
+	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
+	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
+
+	PHY_MODE9_GEN2		= 0x398,
+	PHY_MODE9_GEN1		= 0x39c,
+	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */
+
+	MV5_PHY_MODE		= 0x74,
+	MV5_LTMODE		= 0x30,
+	MV5_PHY_CTL		= 0x0C,
+	SATA_IFCFG		= 0x050,
+	LP_PHY_CTL		= 0x058,
+	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
+	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
+	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
+	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
+	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
+
+	MV_M2_PREAMP_MASK	= 0x7e0,
+
+	/* Port registers */
+	EDMA_CFG		= 0,
+	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
+	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
+	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
+	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
+	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
+	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
+	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
+
+	EDMA_ERR_IRQ_CAUSE	= 0x8,
+	EDMA_ERR_IRQ_MASK	= 0xc,
+	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
+	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
+	EDMA_ERR_DEV		= (1 << 2),	/* device error */
+	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
+	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
+	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
+	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
+	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
+	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
+	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
+	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
+	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
+	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
+	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
+
+	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
+	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
+	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
+	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
+	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
+
+	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
+
+	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
+	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
+	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
+	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
+	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
+	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
+
+	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
+
+	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
+	EDMA_ERR_OVERRUN_5	= (1 << 5),
+	EDMA_ERR_UNDERRUN_5	= (1 << 6),
+
+	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
+				  EDMA_ERR_LNK_CTRL_RX_1 |
+				  EDMA_ERR_LNK_CTRL_RX_3 |
+				  EDMA_ERR_LNK_CTRL_TX,
+
+	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_SERR |
+				  EDMA_ERR_SELF_DIS |
+				  EDMA_ERR_CRQB_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY |
+				  EDMA_ERR_LNK_CTRL_RX_2 |
+				  EDMA_ERR_LNK_DATA_RX |
+				  EDMA_ERR_LNK_DATA_TX |
+				  EDMA_ERR_TRANS_PROTO,
+
+	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_OVERRUN_5 |
+				  EDMA_ERR_UNDERRUN_5 |
+				  EDMA_ERR_SELF_DIS_5 |
+				  EDMA_ERR_CRQB_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY,
+
+	EDMA_REQ_Q_BASE_HI	= 0x10,
+	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
+
+	EDMA_REQ_Q_OUT_PTR	= 0x18,
+	EDMA_REQ_Q_PTR_SHIFT	= 5,
+
+	EDMA_RSP_Q_BASE_HI	= 0x1c,
+	EDMA_RSP_Q_IN_PTR	= 0x20,
+	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
+	EDMA_RSP_Q_PTR_SHIFT	= 3,
+
+	EDMA_CMD		= 0x28,		/* EDMA command register */
+	EDMA_EN			= (1 << 0),	/* enable EDMA */
+	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
+	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
+
+	EDMA_STATUS		= 0x30,		/* EDMA engine status */
+	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
+	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
+
+	EDMA_IORDY_TMOUT	= 0x34,
+	EDMA_ARB_CFG		= 0x38,
+
+	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
+	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
+
+	BMDMA_CMD		= 0x224,	/* bmdma command register */
+	BMDMA_STATUS		= 0x228,	/* bmdma status register */
+	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
+	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
+
+	/* Host private flags (hp_flags) */
+	MV_HP_FLAG_MSI		= (1 << 0),
+	MV_HP_ERRATA_50XXB0	= (1 << 1),
+	MV_HP_ERRATA_50XXB2	= (1 << 2),
+	MV_HP_ERRATA_60X1B2	= (1 << 3),
+	MV_HP_ERRATA_60X1C0	= (1 << 4),
+	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
+	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
+	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
+	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
+	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
+	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
+	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is LED blinking enabled? */
+	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL? */
+
+	/* Port private flags (pp_flags) */
+	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
+	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
+	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
+	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
+	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
+};
+
+#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
+#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
+#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
+#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
+
+#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
+#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
+
+enum {
+	/* DMA boundary 0xffff is required by the s/g splitting
+	 * we need on /length/ in mv_fill_sg().
+	 */
+	MV_DMA_BOUNDARY		= 0xffffU,
+
+	/* mask of register bits containing lower 32 bits
+	 * of EDMA request queue DMA address
+	 */
+	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
+
+	/* ditto, for response queue */
+	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
+};
+
+enum chip_type {
+	chip_504x,
+	chip_508x,
+	chip_5080,
+	chip_604x,
+	chip_608x,
+	chip_6042,
+	chip_7042,
+	chip_soc,
+};
+
+/* Command ReQuest Block: 32B */
+struct mv_crqb {
+	__le32			sg_addr;
+	__le32			sg_addr_hi;
+	__le16			ctrl_flags;
+	__le16			ata_cmd[11];
+};
+
+struct mv_crqb_iie {
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			flags;
+	__le32			len;
+	__le32			ata_cmd[4];
+};
+
+/* Command ResPonse Block: 8B */
+struct mv_crpb {
+	__le16			id;
+	__le16			flags;
+	__le32			tmstmp;
+};
+
+/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
+struct mv_sg {
+	__le32			addr;
+	__le32			flags_size;
+	__le32			addr_hi;
+	__le32			reserved;
+};
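+
+/*
+ * flags_size carries the transfer byte count in its low 16 bits;
+ * EPRD_FLAG_END_OF_TBL (bit 31) marks the last entry of the table.
+ */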
+
+/*
+ * We keep a local cache of a few frequently accessed port
+ * registers here, to avoid having to read them (very slow)
+ * when switching between EDMA and non-EDMA modes.
+ */
+struct mv_cached_regs {
+	u32			fiscfg;
+	u32			ltmode;
+	u32			haltcond;
+	u32			unknown_rsvd;
+};
+
+struct mv_port_priv {
+	struct mv_crqb		*crqb;
+	dma_addr_t		crqb_dma;
+	struct mv_crpb		*crpb;
+	dma_addr_t		crpb_dma;
+	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
+	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
+
+	unsigned int		req_idx;
+	unsigned int		resp_idx;
+
+	u32			pp_flags;
+	struct mv_cached_regs	cached;
+	unsigned int		delayed_eh_pmp_map;
+};
+
+struct mv_port_signal {
+	u32			amps;
+	u32			pre;
+};
+
+struct mv_host_priv {
+	u32			hp_flags;
+	unsigned int 		board_idx;
+	u32			main_irq_mask;
+	struct mv_port_signal	signal[8];
+	const struct mv_hw_ops	*ops;
+	int			n_ports;
+	void __iomem		*base;
+	void __iomem		*main_irq_cause_addr;
+	void __iomem		*main_irq_mask_addr;
+	u32			irq_cause_offset;
+	u32			irq_mask_offset;
+	u32			unmask_all_irqs;
+
+	/*
+	 * Needed on some devices that require their clocks to be enabled.
+	 * These are optional: if the platform device does not have any
+	 * clocks, they won't be used.  Also, if the underlying hardware
+	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
+	 * all the clock operations become no-ops (see clk.h).
+	 */
+	struct clk		*clk;
+	struct clk              **port_clks;
+	/*
+	 * Some devices have a SATA PHY which can be enabled/disabled
+	 * in order to save power. These are optional: if the platform
+	 * devices does not have any phy, they won't be used.
+	 */
+	struct phy		**port_phys;
+	/*
+	 * These consistent DMA memory pools give us guaranteed
+	 * alignment for hardware-accessed data structures,
+	 * and less memory waste in accomplishing the alignment.
+	 */
+	struct dma_pool		*crqb_pool;
+	struct dma_pool		*crpb_pool;
+	struct dma_pool		*sg_tbl_pool;
+};
+
+struct mv_hw_ops {
+	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
+			   unsigned int port);
+	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
+	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio);
+	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int n_hc);
+	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
+	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
+};
+
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
+static int mv_port_start(struct ata_port *ap);
+static void mv_port_stop(struct ata_port *ap);
+static int mv_qc_defer(struct ata_queued_cmd *qc);
+static void mv_qc_prep(struct ata_queued_cmd *qc);
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
+static void mv_eh_freeze(struct ata_port *ap);
+static void mv_eh_thaw(struct ata_port *ap);
+static void mv6_dev_config(struct ata_device *dev);
+
+static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+			   unsigned int port);
+static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio);
+static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int n_hc);
+static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
+
+static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+			   unsigned int port);
+static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio);
+static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int n_hc);
+static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+				      void __iomem *mmio);
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+				      void __iomem *mmio);
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+				  void __iomem *mmio, unsigned int n_hc);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+				      void __iomem *mmio);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
+				  void __iomem *mmio, unsigned int port);
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
+			     unsigned int port_no);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+static int  mv_softreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline);
+static void mv_pmp_error_handler(struct ata_port *ap);
+static void mv_process_crpb_entries(struct ata_port *ap,
+					struct mv_port_priv *pp);
+
+static void mv_sff_irq_clear(struct ata_port *ap);
+static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
+static void mv_bmdma_setup(struct ata_queued_cmd *qc);
+static void mv_bmdma_start(struct ata_queued_cmd *qc);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc);
+static u8   mv_bmdma_status(struct ata_port *ap);
+static u8 mv_sff_check_status(struct ata_port *ap);
+
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
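+ * (Worst case: each scatterlist segment splits into two ePRDs, as
+ * the ePRD length field is only 16 bits wide.)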
+ */
+#ifdef CONFIG_PCI
+static struct scsi_host_template mv5_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.dma_boundary		= MV_DMA_BOUNDARY,
+};
+#endif
+static struct scsi_host_template mv6_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	.can_queue		= MV_MAX_Q_DEPTH - 1,
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.dma_boundary		= MV_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations mv5_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.lost_interrupt		= ATA_OP_NULL,
+
+	.qc_defer		= mv_qc_defer,
+	.qc_prep		= mv_qc_prep,
+	.qc_issue		= mv_qc_issue,
+
+	.freeze			= mv_eh_freeze,
+	.thaw			= mv_eh_thaw,
+	.hardreset		= mv_hardreset,
+
+	.scr_read		= mv5_scr_read,
+	.scr_write		= mv5_scr_write,
+
+	.port_start		= mv_port_start,
+	.port_stop		= mv_port_stop,
+};
+
+static struct ata_port_operations mv6_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.lost_interrupt		= ATA_OP_NULL,
+
+	.qc_defer		= mv_qc_defer,
+	.qc_prep		= mv_qc_prep,
+	.qc_issue		= mv_qc_issue,
+
+	.dev_config             = mv6_dev_config,
+
+	.freeze			= mv_eh_freeze,
+	.thaw			= mv_eh_thaw,
+	.hardreset		= mv_hardreset,
+	.softreset		= mv_softreset,
+	.pmp_hardreset		= mv_pmp_hardreset,
+	.pmp_softreset		= mv_softreset,
+	.error_handler		= mv_pmp_error_handler,
+
+	.scr_read		= mv_scr_read,
+	.scr_write		= mv_scr_write,
+
+	.sff_check_status	= mv_sff_check_status,
+	.sff_irq_clear		= mv_sff_irq_clear,
+	.check_atapi_dma	= mv_check_atapi_dma,
+	.bmdma_setup		= mv_bmdma_setup,
+	.bmdma_start		= mv_bmdma_start,
+	.bmdma_stop		= mv_bmdma_stop,
+	.bmdma_status		= mv_bmdma_status,
+
+	.port_start		= mv_port_start,
+	.port_stop		= mv_port_stop,
+};
+
+static struct ata_port_operations mv_iie_ops = {
+	.inherits		= &mv6_ops,
+	.dev_config		= ATA_OP_NULL,
+	.qc_prep		= mv_qc_prep_iie,
+};
+
+static const struct ata_port_info mv_port_info[] = {
+	{  /* chip_504x */
+		.flags		= MV_GEN_I_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv5_ops,
+	},
+	{  /* chip_508x */
+		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv5_ops,
+	},
+	{  /* chip_5080 */
+		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv5_ops,
+	},
+	{  /* chip_604x */
+		.flags		= MV_GEN_II_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv6_ops,
+	},
+	{  /* chip_608x */
+		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv6_ops,
+	},
+	{  /* chip_6042 */
+		.flags		= MV_GEN_IIE_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
+	},
+	{  /* chip_7042 */
+		.flags		= MV_GEN_IIE_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
+	},
+	{  /* chip_soc */
+		.flags		= MV_GEN_IIE_FLAGS,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
+	},
+};
+
+static const struct pci_device_id mv_pci_tbl[] = {
+	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+	/* RocketRAID 1720/174x have different identifiers */
+	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+	/* Adaptec 1430SA */
+	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+	/* Marvell 7042 support */
+	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+	/* Highpoint RocketRAID PCIe series */
+	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+	{ }			/* terminate list */
+};
+
+static const struct mv_hw_ops mv5xxx_ops = {
+	.phy_errata		= mv5_phy_errata,
+	.enable_leds		= mv5_enable_leds,
+	.read_preamp		= mv5_read_preamp,
+	.reset_hc		= mv5_reset_hc,
+	.reset_flash		= mv5_reset_flash,
+	.reset_bus		= mv5_reset_bus,
+};
+
+static const struct mv_hw_ops mv6xxx_ops = {
+	.phy_errata		= mv6_phy_errata,
+	.enable_leds		= mv6_enable_leds,
+	.read_preamp		= mv6_read_preamp,
+	.reset_hc		= mv6_reset_hc,
+	.reset_flash		= mv6_reset_flash,
+	.reset_bus		= mv_reset_pci_bus,
+};
+
+static const struct mv_hw_ops mv_soc_ops = {
+	.phy_errata		= mv6_phy_errata,
+	.enable_leds		= mv_soc_enable_leds,
+	.read_preamp		= mv_soc_read_preamp,
+	.reset_hc		= mv_soc_reset_hc,
+	.reset_flash		= mv_soc_reset_flash,
+	.reset_bus		= mv_soc_reset_bus,
+};
+
+static const struct mv_hw_ops mv_soc_65n_ops = {
+	.phy_errata		= mv_soc_65n_phy_errata,
+	.enable_leds		= mv_soc_enable_leds,
+	.reset_hc		= mv_soc_reset_hc,
+	.reset_flash		= mv_soc_reset_flash,
+	.reset_bus		= mv_soc_reset_bus,
+};
+
+/*
+ * Functions
+ */
+
+static inline void writelfl(unsigned long data, void __iomem *addr)
+{
+	writel(data, addr);
+	(void) readl(addr);	/* flush to avoid PCI posted write */
+}
+
+static inline unsigned int mv_hc_from_port(unsigned int port)
+{
+	return port >> MV_PORT_HC_SHIFT;
+}
+
+static inline unsigned int mv_hardport_from_port(unsigned int port)
+{
+	return port & MV_PORT_MASK;
+}
+
+/*
+ * Consolidate some rather tricky bit shift calculations.
+ * This is hot-path stuff, so not a function.
+ * Simple code, with two return values, so macro rather than inline.
+ *
+ * port is the sole input, in range 0..7.
+ * shift is one output, for use with main_irq_cause / main_irq_mask registers.
+ * hardport is the other output, in range 0..3.
+ *
+ * Note that port and hardport may be the same variable in some cases.
+ */
+#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
+{								\
+	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
+	hardport = mv_hardport_from_port(port);			\
+	shift   += hardport * 2;				\
+}
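+
+/*
+ * Worked example: for port 6, mv_hc_from_port() yields hc 1 and
+ * mv_hardport_from_port() yields hardport 2, so shift becomes
+ * 1 * HC_SHIFT + 2 * 2 = 13: port 6's ERR_IRQ/DONE_IRQ pair sits at
+ * bits 13 and 14 of the main irq cause/mask registers.
+ */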
+
+static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
+{
+	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
+}
+
+static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
+						 unsigned int port)
+{
+	return mv_hc_base(base, mv_hc_from_port(port));
+}
+
+static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
+{
+	return  mv_hc_base_from_port(base, port) +
+		MV_SATAHC_ARBTR_REG_SZ +
+		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
+}
+
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+	return hc_mmio + ofs;
+}
+
+static inline void __iomem *mv_host_base(struct ata_host *host)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	return hpriv->base;
+}
+
+static inline void __iomem *mv_ap_base(struct ata_port *ap)
+{
+	return mv_port_base(mv_host_base(ap->host), ap->port_no);
+}
+
+static inline int mv_get_hc_count(unsigned long port_flags)
+{
+	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+}
+
+/**
+ *      mv_save_cached_regs - (re-)initialize cached port registers
+ *      @ap: the port whose registers we are caching
+ *
+ *	Initialize the local cache of port registers,
+ *	so that reading them over and over again can
+ *	be avoided on the hotter paths of this driver.
+ *	This saves a few microseconds each time we switch
+ *	to/from EDMA mode to perform (eg.) a drive cache flush.
+ */
+static void mv_save_cached_regs(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+
+	pp->cached.fiscfg = readl(port_mmio + FISCFG);
+	pp->cached.ltmode = readl(port_mmio + LTMODE);
+	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
+	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
+}
+
+/**
+ *      mv_write_cached_reg - write to a cached port register
+ *      @addr: hardware address of the register
+ *      @old: pointer to cached value of the register
+ *      @new: new value for the register
+ *
+ *	Write a new value to a cached register,
+ *	but only if the value is different from before.
+ */
+static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
+{
+	if (new != *old) {
+		unsigned long laddr;
+		*old = new;
+		/*
+		 * Workaround for 88SX60x1-B2 FEr SATA#13:
+		 * Read-after-write is needed to prevent generating 64-bit
+		 * write cycles on the PCI bus for SATA interface registers
+		 * at offsets ending in 0x4 or 0xc.
+		 *
+		 * Looks like a lot of fuss, but it avoids an unnecessary
+		 * +1 usec read-after-write delay for unaffected registers.
+		 */
+		laddr = (unsigned long)addr & 0xffff;
+		if (laddr >= 0x300 && laddr <= 0x33c) {
+			laddr &= 0x000f;
+			if (laddr == 0x4 || laddr == 0xc) {
+				writelfl(new, addr); /* read after write */
+				return;
+			}
+		}
+		writel(new, addr); /* unaffected by the errata */
+	}
+}
+
+static void mv_set_edma_ptrs(void __iomem *port_mmio,
+			     struct mv_host_priv *hpriv,
+			     struct mv_port_priv *pp)
+{
+	u32 index;
+
+	/*
+	 * initialize request queue
+	 */
+	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
+	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
+
+	WARN_ON(pp->crqb_dma & 0x3ff);
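+	/*
+	 * Write the upper 32 bits of the queue base.  The double
+	 * 16-bit shift avoids an undefined shift-by-32 when
+	 * dma_addr_t is only 32 bits wide.
+	 */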
+	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
+	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
+		 port_mmio + EDMA_REQ_Q_IN_PTR);
+	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
+
+	/*
+	 * initialize response queue
+	 */
+	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
+	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
+
+	WARN_ON(pp->crpb_dma & 0xff);
+	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
+	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
+	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
+		 port_mmio + EDMA_RSP_Q_OUT_PTR);
+}
+
+static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
+{
+	/*
+	 * When writing to the main_irq_mask in hardware,
+	 * we must ensure exclusivity between the interrupt coalescing bits
+	 * and the corresponding individual port DONE_IRQ bits.
+	 *
+	 * Note that this register is really an "IRQ enable" register,
+	 * not an "IRQ mask" register as Marvell's naming might suggest.
+	 */
+	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
+		mask &= ~DONE_IRQ_0_3;
+	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
+		mask &= ~DONE_IRQ_4_7;
+	writelfl(mask, hpriv->main_irq_mask_addr);
+}
+
+static void mv_set_main_irq_mask(struct ata_host *host,
+				 u32 disable_bits, u32 enable_bits)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	u32 old_mask, new_mask;
+
+	old_mask = hpriv->main_irq_mask;
+	new_mask = (old_mask & ~disable_bits) | enable_bits;
+	if (new_mask != old_mask) {
+		hpriv->main_irq_mask = new_mask;
+		mv_write_main_irq_mask(new_mask, hpriv);
+	}
+}
+
+static void mv_enable_port_irqs(struct ata_port *ap,
+				     unsigned int port_bits)
+{
+	unsigned int shift, hardport, port = ap->port_no;
+	u32 disable_bits, enable_bits;
+
+	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+
+	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
+	enable_bits  = port_bits << shift;
+	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
+}
+
+static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
+					  void __iomem *port_mmio,
+					  unsigned int port_irqs)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	int hardport = mv_hardport_from_port(ap->port_no);
+	void __iomem *hc_mmio = mv_hc_base_from_port(
+				mv_host_base(ap->host), ap->port_no);
+	u32 hc_irq_cause;
+
+	/* clear EDMA event indicators, if any */
+	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
+
+	/* clear pending irq events */
+	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
+	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
+
+	/* clear FIS IRQ Cause */
+	if (IS_GEN_IIE(hpriv))
+		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
+
+	mv_enable_port_irqs(ap, port_irqs);
+}
+
+static void mv_set_irq_coalescing(struct ata_host *host,
+				  unsigned int count, unsigned int usecs)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base, *hc_mmio;
+	u32 coal_enable = 0;
+	unsigned long flags;
+	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
+	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
+							ALL_PORTS_COAL_DONE;
+
+	/* Disable IRQ coalescing if either threshold is zero */
+	if (!usecs || !count) {
+		clks = count = 0;
+	} else {
+		/* Respect maximum limits of the hardware */
+		clks = usecs * COAL_CLOCKS_PER_USEC;
+		if (clks > MAX_COAL_TIME_THRESHOLD)
+			clks = MAX_COAL_TIME_THRESHOLD;
+		if (count > MAX_COAL_IO_COUNT)
+			count = MAX_COAL_IO_COUNT;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	mv_set_main_irq_mask(host, coal_disable, 0);
+
+	if (is_dual_hc && !IS_GEN_I(hpriv)) {
+		/*
+		 * GEN_II/GEN_IIE with dual host controllers:
+		 * one set of global thresholds for the entire chip.
+		 */
+		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
+		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
+		/* clear leftover coal IRQ bit */
+		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
+		if (count)
+			coal_enable = ALL_PORTS_COAL_DONE;
+		clks = count = 0; /* force clearing of regular regs below */
+	}
+
+	/*
+	 * All chips: independent thresholds for each HC on the chip.
+	 */
+	hc_mmio = mv_hc_base_from_port(mmio, 0);
+	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
+	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
+	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
+	if (count)
+		coal_enable |= PORTS_0_3_COAL_DONE;
+	if (is_dual_hc) {
+		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
+		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
+		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
+		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
+		if (count)
+			coal_enable |= PORTS_4_7_COAL_DONE;
+	}
+
+	mv_set_main_irq_mask(host, 0, coal_enable);
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/**
+ *      mv_start_edma - Enable eDMA engine
+ *      @ap: ATA channel to manipulate
+ *      @port_mmio: port base address
+ *      @pp: port private data
+ *      @protocol: taskfile protocol of the command being issued
+ *
+ *      If the desired NCQ mode differs from the current one, stop eDMA
+ *      first; then configure and enable eDMA if it is not already running.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
+			 struct mv_port_priv *pp, u8 protocol)
+{
+	int want_ncq = (protocol == ATA_PROT_NCQ);
+
+	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+		if (want_ncq != using_ncq)
+			mv_stop_edma(ap);
+	}
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+		struct mv_host_priv *hpriv = ap->host->private_data;
+
+		mv_edma_cfg(ap, want_ncq, 1);
+
+		mv_set_edma_ptrs(port_mmio, hpriv, pp);
+		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
+
+		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
+		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
+	}
+}
+
+static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
+	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
+	int i;
+
+	/*
+	 * Wait for the EDMA engine to finish transactions in progress.
+	 * No idea what a good "timeout" value might be, but measurements
+	 * indicate that it often requires hundreds of microseconds
+	 * with two drives in-use.  So we use the 15msec value above
+	 * as a rough guess at what even more drives might require.
+	 */
+	for (i = 0; i < timeout; ++i) {
+		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
+		if ((edma_stat & empty_idle) == empty_idle)
+			break;
+		udelay(per_loop);
+	}
+	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
+}
+
+/**
+ *      mv_stop_edma_engine - Disable eDMA engine
+ *      @port_mmio: io base address
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_stop_edma_engine(void __iomem *port_mmio)
+{
+	int i;
+
+	/* Disable eDMA.  The disable bit auto clears. */
+	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
+
+	/* Wait (up to 10000 * 10us ~= 100ms) for the chip to confirm eDMA is off. */
+	for (i = 10000; i > 0; i--) {
+		u32 reg = readl(port_mmio + EDMA_CMD);
+		if (!(reg & EDMA_EN))
+			return 0;
+		udelay(10);
+	}
+	return -EIO;
+}
+
+static int mv_stop_edma(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+	int err = 0;
+
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	mv_wait_for_edma_empty_idle(ap);
+	if (mv_stop_edma_engine(port_mmio)) {
+		ata_port_err(ap, "Unable to stop eDMA\n");
+		err = -EIO;
+	}
+	mv_edma_cfg(ap, 0, 0);
+	return err;
+}
+
+#ifdef ATA_DEBUG
+static void mv_dump_mem(void __iomem *start, unsigned bytes)
+{
+	int b, w;
+	for (b = 0; b < bytes; ) {
+		DPRINTK("%p: ", start + b);
+		for (w = 0; b < bytes && w < 4; w++) {
+			printk("%08x ", readl(start + b));
+			b += sizeof(u32);
+		}
+		printk("\n");
+	}
+}
+#endif
+#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
+static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
+{
+#ifdef ATA_DEBUG
+	int b, w;
+	u32 dw;
+	for (b = 0; b < bytes; ) {
+		DPRINTK("%02x: ", b);
+		for (w = 0; b < bytes && w < 4; w++) {
+			(void) pci_read_config_dword(pdev, b, &dw);
+			printk("%08x ", dw);
+			b += sizeof(u32);
+		}
+		printk("\n");
+	}
+#endif
+}
+#endif
+static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+			     struct pci_dev *pdev)
+{
+#ifdef ATA_DEBUG
+	void __iomem *hc_base = mv_hc_base(mmio_base,
+					   port >> MV_PORT_HC_SHIFT);
+	void __iomem *port_base;
+	int start_port, num_ports, p, start_hc, num_hcs, hc;
+
+	if (port < 0) {
+		start_hc = start_port = 0;
+		num_ports = 8;		/* should be benign for 4-port devs */
+		num_hcs = 2;
+	} else {
+		start_hc = port >> MV_PORT_HC_SHIFT;
+		start_port = port;
+		num_ports = num_hcs = 1;
+	}
+	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
+		num_ports > 1 ? num_ports - 1 : start_port);
+
+	if (pdev) {
+		DPRINTK("PCI config space regs:\n");
+		mv_dump_pci_cfg(pdev, 0x68);
+	}
+	DPRINTK("PCI regs:\n");
+	mv_dump_mem(mmio_base+0xc00, 0x3c);
+	mv_dump_mem(mmio_base+0xd00, 0x34);
+	mv_dump_mem(mmio_base+0xf00, 0x4);
+	mv_dump_mem(mmio_base+0x1d00, 0x6c);
+	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
+		hc_base = mv_hc_base(mmio_base, hc);
+		DPRINTK("HC regs (HC %i):\n", hc);
+		mv_dump_mem(hc_base, 0x1c);
+	}
+	for (p = start_port; p < start_port + num_ports; p++) {
+		port_base = mv_port_base(mmio_base, p);
+		DPRINTK("EDMA regs (port %i):\n", p);
+		mv_dump_mem(port_base, 0x54);
+		DPRINTK("SATA regs (port %i):\n", p);
+		mv_dump_mem(port_base+0x300, 0x60);
+	}
+#endif
+}
+
+static unsigned int mv_scr_offset(unsigned int sc_reg_in)
+{
+	unsigned int ofs;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_CONTROL:
+	case SCR_ERROR:
+		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
+		break;
+	case SCR_ACTIVE:
+		ofs = SATA_ACTIVE;   /* active is not with the others */
+		break;
+	default:
+		ofs = 0xffffffffU;
+		break;
+	}
+	return ofs;
+}
+
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
+{
+	unsigned int ofs = mv_scr_offset(sc_reg_in);
+
+	if (ofs != 0xffffffffU) {
+		*val = readl(mv_ap_base(link->ap) + ofs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+{
+	unsigned int ofs = mv_scr_offset(sc_reg_in);
+
+	if (ofs != 0xffffffffU) {
+		void __iomem *addr = mv_ap_base(link->ap) + ofs;
+		struct mv_host_priv *hpriv = link->ap->host->private_data;
+		if (sc_reg_in == SCR_CONTROL) {
+			/*
+			 * Workaround for 88SX60x1 FEr SATA#26:
+			 *
+			 * COMRESETs have to take care not to accidentally
+			 * put the drive to sleep when writing SCR_CONTROL.
+			 * Setting bits 12..15 prevents this problem.
+			 *
+			 * So if we see an outbound COMRESET, set those bits.
+			 * Ditto for the followup write that clears the reset.
+			 *
+			 * The proprietary driver does this for
+			 * all chip versions, and so do we.
+			 */
+			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
+				val |= 0xf000;
+
+			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
+				void __iomem *lp_phy_addr =
+					mv_ap_base(link->ap) + LP_PHY_CTL;
+				/*
+				 * Set PHY speed according to SControl speed.
+				 */
+				u32 lp_phy_val =
+					LP_PHY_CTL_PIN_PU_PLL |
+					LP_PHY_CTL_PIN_PU_RX  |
+					LP_PHY_CTL_PIN_PU_TX;
+
+				if ((val & 0xf0) != 0x10)
+					lp_phy_val |=
+						LP_PHY_CTL_GEN_TX_3G |
+						LP_PHY_CTL_GEN_RX_3G;
+
+				writelfl(lp_phy_val, lp_phy_addr);
+			}
+		}
+		writelfl(val, addr);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+static void mv6_dev_config(struct ata_device *adev)
+{
+	/*
+	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+	 *
+	 * Gen-II does not support NCQ over a port multiplier
+	 *  (no FIS-based switching).
+	 */
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		if (sata_pmp_attached(adev->link->ap)) {
+			adev->flags &= ~ATA_DFLAG_NCQ;
+			ata_dev_info(adev,
+				"NCQ disabled for command-based switching\n");
+		}
+	}
+}
+
+static int mv_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+	struct mv_port_priv *pp = ap->private_data;
+
+	/*
+	 * Don't allow new commands if we're in a delayed EH state
+	 * for NCQ and/or FIS-based switching.
+	 */
+	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+		return ATA_DEFER_PORT;
+
+	/* PIO commands need an exclusive link: no other commands [DMA or PIO]
+	 * can run concurrently.
+	 * We set excl_link when we want to send a PIO command in DMA mode,
+	 * or a non-NCQ command in NCQ mode.
+	 * When we later receive a command from that link, and there are no
+	 * outstanding commands, we mark a flag to clear excl_link and let
+	 * the command go through.
+	 */
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+			return 0;
+		} else
+			return ATA_DEFER_PORT;
+	}
+
+	/*
+	 * If the port is completely idle, then allow the new qc.
+	 */
+	if (ap->nr_active_links == 0)
+		return 0;
+
+	/*
+	 * The port is operating in host queuing mode (EDMA) with NCQ
+	 * enabled, allow multiple NCQ commands.  EDMA also allows
+	 * queueing multiple DMA commands but libata core currently
+	 * doesn't allow it.
+	 */
+	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
+	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
+		if (ata_is_ncq(qc->tf.protocol))
+			return 0;
+		else {
+			ap->excl_link = link;
+			return ATA_DEFER_PORT;
+		}
+	}
+
+	return ATA_DEFER_PORT;
+}
+
+static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
+{
+	struct mv_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio;
+
+	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
+	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
+	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
+
+	ltmode   = *old_ltmode & ~LTMODE_BIT8;
+	haltcond = *old_haltcond | EDMA_ERR_DEV;
+
+	if (want_fbs) {
+		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
+		ltmode = *old_ltmode | LTMODE_BIT8;
+		if (want_ncq)
+			haltcond &= ~EDMA_ERR_DEV;
+		else
+			fiscfg |=  FISCFG_WAIT_DEV_ERR;
+	} else {
+		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
+	}
+
+	port_mmio = mv_ap_base(ap);
+	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
+	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
+	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
+}
+
+static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	u32 old, new;
+
+	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
+	old = readl(hpriv->base + GPIO_PORT_CTL);
+	if (want_ncq)
+		new = old | (1 << 22);
+	else
+		new = old & ~(1 << 22);
+	if (new != old)
+		writel(new, hpriv->base + GPIO_PORT_CTL);
+}
+
+/**
+ *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
+ *	@ap: Port being initialized
+ *	@enable_bmdma: non-zero to enable basic DMA; zero to disable it
+ *
+ *	There are two DMA modes on these chips:  basic DMA, and EDMA.
+ *
+ *	Bit-0 of the "EDMA RESERVED" register enables/disables use
+ *	of basic DMA on the GEN_IIE versions of the chips.
+ *
+ *	This bit survives EDMA resets, and must be set for basic DMA
+ *	to function, and should be cleared when EDMA is active.
+ */
+static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
+{
+	struct mv_port_priv *pp = ap->private_data;
+	u32 new, *old = &pp->cached.unknown_rsvd;
+
+	if (enable_bmdma)
+		new = *old | 1;
+	else
+		new = *old & ~1;
+	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
+}
+
+/*
+ * SOC chips have an issue whereby the HDD LEDs don't always blink
+ * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
+ * of the SOC takes care of it, generating a steady blink rate when
+ * any drive on the chip is active.
+ *
+ * Unfortunately, the blink mode is a global hardware setting for the SOC,
+ * so we must use it whenever at least one port on the SOC has NCQ enabled.
+ *
+ * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
+ * LED operation works then, and provides better (more accurate) feedback.
+ *
+ * Note that this code assumes that an SOC never has more than one HC onboard.
+ */
+static void mv_soc_led_blink_enable(struct ata_port *ap)
+{
+	struct ata_host *host = ap->host;
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *hc_mmio;
+	u32 led_ctrl;
+
+	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
+		return;
+	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
+	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
+	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
+	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
+}
+
+static void mv_soc_led_blink_disable(struct ata_port *ap)
+{
+	struct ata_host *host = ap->host;
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *hc_mmio;
+	u32 led_ctrl;
+	unsigned int port;
+
+	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
+		return;
+
+	/* disable led-blink only if no ports are using NCQ */
+	for (port = 0; port < hpriv->n_ports; port++) {
+		struct ata_port *this_ap = host->ports[port];
+		struct mv_port_priv *pp = this_ap->private_data;
+
+		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
+			return;
+	}
+
+	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
+	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
+	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
+	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
+{
+	u32 cfg;
+	struct mv_port_priv *pp    = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio    = mv_ap_base(ap);
+
+	/* set up non-NCQ EDMA configuration */
+	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
+	pp->pp_flags &=
+	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
+
+	if (IS_GEN_I(hpriv))
+		cfg |= (1 << 8);	/* enab config burst size mask */
+
+	else if (IS_GEN_II(hpriv)) {
+		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+		mv_60x1_errata_sata25(ap, want_ncq);
+
+	} else if (IS_GEN_IIE(hpriv)) {
+		int want_fbs = sata_pmp_attached(ap);
+		/*
+		 * Possible future enhancement:
+		 *
+		 * The chip can use FBS with non-NCQ, if we allow it,
+		 * But first we need to have the error handling in place
+		 * for this mode (datasheet section 7.3.15.4.2.3).
+		 * So disallow non-NCQ FBS for now.
+		 */
+		want_fbs &= want_ncq;
+
+		mv_config_fbs(ap, want_ncq, want_fbs);
+
+		if (want_fbs) {
+			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
+			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+		}
+
+		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
+		if (want_edma) {
+			cfg |= (1 << 22); /* enab 4-entry host queue cache */
+			if (!IS_SOC(hpriv))
+				cfg |= (1 << 18); /* enab early completion */
+		}
+		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
+			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
+		mv_bmdma_enable_iie(ap, !want_edma);
+
+		if (IS_SOC(hpriv)) {
+			if (want_ncq)
+				mv_soc_led_blink_enable(ap);
+			else
+				mv_soc_led_blink_disable(ap);
+		}
+	}
+
+	if (want_ncq) {
+		cfg |= EDMA_CFG_NCQ;
+		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
+	}
+
+	writelfl(cfg, port_mmio + EDMA_CFG);
+}
+
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
+	int tag;
+
+	if (pp->crqb) {
+		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+		pp->crqb = NULL;
+	}
+	if (pp->crpb) {
+		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+		pp->crpb = NULL;
+	}
+	/*
+	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+	 * For later hardware, we have one unique sg_tbl per NCQ tag.
+	 */
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (pp->sg_tbl[tag]) {
+			if (tag == 0 || !IS_GEN_I(hpriv))
+				dma_pool_free(hpriv->sg_tbl_pool,
+					      pp->sg_tbl[tag],
+					      pp->sg_tbl_dma[tag]);
+			pp->sg_tbl[tag] = NULL;
+		}
+	}
+}
+
+/**
+ *      mv_port_start - Port specific init/start routine.
+ *      @ap: ATA channel to manipulate
+ *
+ *      Allocate and point to DMA memory, init port private memory,
+ *      zero indices.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp;
+	unsigned long flags;
+	int tag;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	ap->private_data = pp;
+
+	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+	if (!pp->crqb)
+		return -ENOMEM;
+
+	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+	if (!pp->crpb)
+		goto out_port_free_dma_mem;
+
+	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
+	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
+		ap->flags |= ATA_FLAG_AN;
+	/*
+	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+	 * For later hardware, we need one unique sg_tbl per NCQ tag.
+	 */
+	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+		if (tag == 0 || !IS_GEN_I(hpriv)) {
+			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+			if (!pp->sg_tbl[tag])
+				goto out_port_free_dma_mem;
+		} else {
+			pp->sg_tbl[tag]     = pp->sg_tbl[0];
+			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+		}
+	}
+
+	spin_lock_irqsave(ap->lock, flags);
+	mv_save_cached_regs(ap);
+	mv_edma_cfg(ap, 0, 0);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return 0;
+
+out_port_free_dma_mem:
+	mv_port_free_dma_mem(ap);
+	return -ENOMEM;
+}
+
+/**
+ *      mv_port_stop - Port specific cleanup/stop routine.
+ *      @ap: ATA channel to manipulate
+ *
+ *      Stop DMA, cleanup port memory.
+ *
+ *      LOCKING:
+ *      This routine uses the host lock to protect the DMA stop.
+ */
+static void mv_port_stop(struct ata_port *ap)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+	mv_stop_edma(ap);
+	mv_enable_port_irqs(ap, 0);
+	spin_unlock_irqrestore(ap->lock, flags);
+	mv_port_free_dma_mem(ap);
+}
+
+/**
+ *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
+ *      @qc: queued command whose SG list to source from
+ *
+ *      Populate the SG list and mark the last entry.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct mv_port_priv *pp = qc->ap->private_data;
+	struct scatterlist *sg;
+	struct mv_sg *mv_sg, *last_sg = NULL;
+	unsigned int si;
+
+	mv_sg = pp->sg_tbl[qc->hw_tag];
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
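+		/*
+		 * Split any segment that would cross a 64KB boundary: the
+		 * ePRD size field is 16 bits, so a full 64KB entry ends up
+		 * encoded as zero by the "len & 0xffff" below.
+		 */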
+		while (sg_len) {
+			u32 offset = addr & 0xffff;
+			u32 len = sg_len;
+
+			if (offset + len > 0x10000)
+				len = 0x10000 - offset;
+
+			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
+			mv_sg->reserved = 0;
+
+			sg_len -= len;
+			addr += len;
+
+			last_sg = mv_sg;
+			mv_sg++;
+		}
+	}
+
+	if (likely(last_sg))
+		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+	mb(); /* ensure data structure is visible to the chipset */
+}
+
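+/*
+ * Pack one 16-bit CRQB command word: the register value in the low byte,
+ * the shadow-register address above it, plus the control/status select bit,
+ * with CRQB_CMD_LAST marking the final word of the sequence.
+ */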
+static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
+{
+	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+		(last ? CRQB_CMD_LAST : 0);
+	*cmdw = cpu_to_le16(tmp);
+}
+
+/**
+ *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	We need this only for ATAPI bmdma transactions,
+ *	as otherwise we experience spurious interrupts
+ *	after libata-sff handles the bmdma interrupts.
+ */
+static void mv_sff_irq_clear(struct ata_port *ap)
+{
+	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
+}
+
+/**
+ *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
+ *	@qc: queued command to check for chipset/DMA compatibility.
+ *
+ *	The bmdma engines cannot handle speculative data sizes
+ *	(bytecount under/over flow).  So only allow DMA for
+ *	data transfer commands with known data sizes.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+
+	if (scmd) {
+		switch (scmd->cmnd[0]) {
+		case READ_6:
+		case READ_10:
+		case READ_12:
+		case WRITE_6:
+		case WRITE_10:
+		case WRITE_12:
+		case GPCMD_READ_CD:
+		case GPCMD_SEND_DVD_STRUCTURE:
+		case GPCMD_SEND_CUE_SHEET:
+			return 0; /* DMA is safe */
+		}
+	}
+	return -EOPNOTSUPP; /* use PIO instead */
+}
+
+/**
+ *	mv_bmdma_setup - Set up BMDMA transaction
+ *	@qc: queued command to prepare DMA for.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void mv_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+
+	mv_fill_sg(qc);
+
+	/* clear all DMA cmd bits */
+	writel(0, port_mmio + BMDMA_CMD);
+
+	/* load PRD table addr. */
+	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
+		port_mmio + BMDMA_PRD_HIGH);
+	writelfl(pp->sg_tbl_dma[qc->hw_tag],
+		port_mmio + BMDMA_PRD_LOW);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ *	mv_bmdma_start - Start a BMDMA transaction
+ *	@qc: queued command to start DMA on.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void mv_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
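+	/* ATA_DMA_WR makes the DMA engine write to memory, i.e. device reads */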
+	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
+
+	/* start host DMA transaction */
+	writelfl(cmd, port_mmio + BMDMA_CMD);
+}
+
+/**
+ *	mv_bmdma_stop_ap - Stop BMDMA transfer
+ *	@ap: port on which to stop DMA
+ *
+ *	Clears the ATA_DMA_START flag in the bmdma control register
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static void mv_bmdma_stop_ap(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 cmd;
+
+	/* clear start/stop bit */
+	cmd = readl(port_mmio + BMDMA_CMD);
+	if (cmd & ATA_DMA_START) {
+		cmd &= ~ATA_DMA_START;
+		writelfl(cmd, port_mmio + BMDMA_CMD);
+
+		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+		ata_sff_dma_pause(ap);
+	}
+}
+
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	mv_bmdma_stop_ap(qc->ap);
+}
+
+/**
+ *	mv_bmdma_status - Read BMDMA status
+ *	@ap: port for which to retrieve DMA status.
+ *
+ *	Read and return equivalent of the sff BMDMA status register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 mv_bmdma_status(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 reg, status;
+
+	/*
+	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
+	 * and the ATA_DMA_INTR bit doesn't exist.
+	 */
+	reg = readl(port_mmio + BMDMA_STATUS);
+	if (reg & ATA_DMA_ACTIVE)
+		status = ATA_DMA_ACTIVE;
+	else if (reg & ATA_DMA_ERR)
+		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+	else {
+		/*
+		 * Just because DMA_ACTIVE is 0 (DMA completed),
+		 * this does _not_ mean the device is "done".
+		 * So we should not yet be signalling ATA_DMA_INTR
+		 * in some cases, e.g. DSM/TRIM, and perhaps others.
+		 */
+		mv_bmdma_stop_ap(ap);
+		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+			status = 0;
+		else
+			status = ATA_DMA_INTR;
+	}
+	return status;
+}
+
+static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	/*
+	 * Workaround for 88SX60x1 FEr SATA#24.
+	 *
+	 * Chip may corrupt WRITEs if multi_count >= 4kB.
+	 * Note that READs are unaffected.
+	 *
+	 * It's not clear if this errata really means "4K bytes",
+	 * or if it always happens for multi_count > 7
+	 * regardless of device sector_size.
+	 *
+	 * So, for safety, any write with multi_count > 7
+	 * gets converted here into a regular PIO write instead:
+	 */
+	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
+		if (qc->dev->multi_count > 7) {
+			switch (tf->command) {
+			case ATA_CMD_WRITE_MULTI:
+				tf->command = ATA_CMD_PIO_WRITE;
+				break;
+			case ATA_CMD_WRITE_MULTI_FUA_EXT:
+				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
+				/* fall through */
+			case ATA_CMD_WRITE_MULTI_EXT:
+				tf->command = ATA_CMD_PIO_WRITE_EXT;
+				break;
+			}
+		}
+	}
+}
+
+/**
+ *      mv_qc_prep - Host specific command preparation.
+ *      @qc: queued command to prepare
+ *
+ *      This routine simply redirects to the general purpose routine
+ *      if command is not DMA.  Else, it handles prep of the CRQB
+ *      (command request block), does some sanity checking, and calls
+ *      the SG load routine.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	__le16 *cw;
+	struct ata_taskfile *tf = &qc->tf;
+	u16 flags = 0;
+	unsigned in_index;
+
+	switch (tf->protocol) {
+	case ATA_PROT_DMA:
+		if (tf->command == ATA_CMD_DSM)
+			return;
+		/* fall through */
+	case ATA_PROT_NCQ:
+		break;	/* continue below */
+	case ATA_PROT_PIO:
+		mv_rw_multi_errata_sata24(qc);
+		return;
+	default:
+		return;
+	}
+
+	/* Fill in command request block */
+	if (!(tf->flags & ATA_TFLAG_WRITE))
+		flags |= CRQB_FLAG_READ;
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
+	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
+
+	/* get current queue index from software */
+	in_index = pp->req_idx;
+
+	pp->crqb[in_index].sg_addr =
+		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
+	pp->crqb[in_index].sg_addr_hi =
+		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
+	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
+
+	cw = &pp->crqb[in_index].ata_cmd[0];
+
+	/* Sadly, the CRQB cannot accommodate all registers--there are
+	 * only 11 bytes...so we must pick and choose required
+	 * registers based on the command.  So, we drop feature and
+	 * hob_feature for [RW] DMA commands, but they are needed for
+	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
+	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
+	 */
+	switch (tf->command) {
+	case ATA_CMD_READ:
+	case ATA_CMD_READ_EXT:
+	case ATA_CMD_WRITE:
+	case ATA_CMD_WRITE_EXT:
+	case ATA_CMD_WRITE_FUA_EXT:
+		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
+		break;
+	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_FPDMA_WRITE:
+		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
+		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
+		break;
+	default:
+		/* The only other commands EDMA supports in non-queued and
+		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+		 * of which are defined/used by Linux.  If we get here, this
+		 * driver needs work.
+		 *
+		 * FIXME: modify libata to give qc_prep a return value and
+		 * return error here.
+		 */
+		BUG_ON(tf->command);
+		break;
+	}
+	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
+	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
+	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
+	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
+	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+	mv_fill_sg(qc);
+}
+
+/**
+ *      mv_qc_prep_iie - Host specific command preparation.
+ *      @qc: queued command to prepare
+ *
+ *      This routine simply redirects to the general purpose routine
+ *      if command is not DMA.  Else, it handles prep of the CRQB
+ *      (command request block), does some sanity checking, and calls
+ *      the SG load routine.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_crqb_iie *crqb;
+	struct ata_taskfile *tf = &qc->tf;
+	unsigned in_index;
+	u32 flags = 0;
+
+	if ((tf->protocol != ATA_PROT_DMA) &&
+	    (tf->protocol != ATA_PROT_NCQ))
+		return;
+	if (tf->command == ATA_CMD_DSM)
+		return;  /* use bmdma for this */
+
+	/* Fill in Gen IIE command request block */
+	if (!(tf->flags & ATA_TFLAG_WRITE))
+		flags |= CRQB_FLAG_READ;
+
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
+	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
+	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
+
+	/* get current queue index from software */
+	in_index = pp->req_idx;
+
+	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
+	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
+	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
+	crqb->flags = cpu_to_le32(flags);
+
+	crqb->ata_cmd[0] = cpu_to_le32(
+			(tf->command << 16) |
+			(tf->feature << 24)
+		);
+	crqb->ata_cmd[1] = cpu_to_le32(
+			(tf->lbal << 0) |
+			(tf->lbam << 8) |
+			(tf->lbah << 16) |
+			(tf->device << 24)
+		);
+	crqb->ata_cmd[2] = cpu_to_le32(
+			(tf->hob_lbal << 0) |
+			(tf->hob_lbam << 8) |
+			(tf->hob_lbah << 16) |
+			(tf->hob_feature << 24)
+		);
+	crqb->ata_cmd[3] = cpu_to_le32(
+			(tf->nsect << 0) |
+			(tf->hob_nsect << 8)
+		);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+	mv_fill_sg(qc);
+}
+
+/**
+ *	mv_sff_check_status - fetch device status, if valid
+ *	@ap: ATA port to fetch status from
+ *
+ *	When using command issue via mv_qc_issue_fis(),
+ *	the initial ATA_BUSY state does not show up in the
+ *	ATA status (shadow) register.  This can confuse libata!
+ *
+ *	So we have a hook here to fake ATA_BUSY for that situation,
+ *	until the first time a BUSY, DRQ, or ERR bit is seen.
+ *
+ *	The rest of the time, it simply returns the ATA status register.
+ */
+static u8 mv_sff_check_status(struct ata_port *ap)
+{
+	u8 stat = ioread8(ap->ioaddr.status_addr);
+	struct mv_port_priv *pp = ap->private_data;
+
+	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
+		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
+			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
+		else
+			stat = ATA_BUSY;
+	}
+	return stat;
+}
+
+/**
+ *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
+ *	@ap: ATA port on which to send the FIS
+ *	@fis: fis to be sent
+ *	@nwords: number of 32-bit words in the fis
+ */
+static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 ifctl, old_ifctl, ifstat;
+	int i, timeout = 200, final_word = nwords - 1;
+
+	/* Initiate FIS transmission mode */
+	old_ifctl = readl(port_mmio + SATA_IFCTL);
+	ifctl = 0x100 | (old_ifctl & 0xf);
+	writelfl(ifctl, port_mmio + SATA_IFCTL);
+
+	/* Send all words of the FIS except for the final word */
+	for (i = 0; i < final_word; ++i)
+		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
+
+	/* Flag end-of-transmission, and then send the final word */
+	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
+	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
+
+	/*
+	 * Wait for FIS transmission to complete.
+	 * This typically takes just a single iteration.
+	 */
+	do {
+		ifstat = readl(port_mmio + SATA_IFSTAT);
+	} while (!(ifstat & 0x1000) && --timeout);
+
+	/* Restore original port configuration */
+	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
+
+	/* See if it worked */
+	if ((ifstat & 0x3000) != 0x1000) {
+		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
+			      __func__, ifstat);
+		return AC_ERR_OTHER;
+	}
+	return 0;
+}
+
+/**
+ *	mv_qc_issue_fis - Issue a command directly as a FIS
+ *	@qc: queued command to start
+ *
+ *	Note that the ATA shadow registers are not updated
+ *	after command issue, so the device will appear "READY"
+ *	if polled, even while it is BUSY processing the command.
+ *
+ *	So we use a status hook to fake ATA_BUSY until the drive changes state.
+ *
+ *	Note: we don't get updated shadow regs on *completion*
+ *	of non-data commands. So avoid sending them via this function,
+ *	as they will appear to have completed immediately.
+ *
+ *	GEN_IIE has special registers that we could get the result tf from,
+ *	but earlier chipsets do not.  For now, we ignore those registers.
+ */
+static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	struct ata_link *link = qc->dev->link;
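+	/* A host-to-device Register FIS is five 32-bit words */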
+	u32 fis[5];
+	int err = 0;
+
+	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
+	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
+	if (err)
+		return err;
+
+	switch (qc->tf.protocol) {
+	case ATAPI_PROT_PIO:
+		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
+		/* fall through */
+	case ATAPI_PROT_NODATA:
+		ap->hsm_task_state = HSM_ST_FIRST;
+		break;
+	case ATA_PROT_PIO:
+		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			ap->hsm_task_state = HSM_ST_FIRST;
+		else
+			ap->hsm_task_state = HSM_ST;
+		break;
+	default:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	}
+
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		ata_sff_queue_pio_task(link, 0);
+	return 0;
+}
+
+/**
+ *      mv_qc_issue - Initiate a command to the host
+ *      @qc: queued command to start
+ *
+ *      This routine simply redirects to the general purpose routine
+ *      if command is not DMA.  Else, it sanity checks our local
+ *      caches of the request producer/consumer indices then enables
+ *      DMA and bumps the request producer index.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
+{
+	static int limit_warnings = 10;
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+	u32 in_index;
+	unsigned int port_irqs;
+
+	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		if (qc->tf.command == ATA_CMD_DSM) {
+			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
+				return AC_ERR_OTHER;
+			break;  /* use bmdma for this */
+		}
+		/* fall through */
+	case ATA_PROT_NCQ:
+		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
+		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
+
+		/* Write the request in pointer to kick the EDMA to life */
+		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
+					port_mmio + EDMA_REQ_Q_IN_PTR);
+		return 0;
+
+	case ATA_PROT_PIO:
+		/*
+		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
+		 *
+		 * Someday, we might implement special polling workarounds
+		 * for these, but it all seems rather unnecessary since we
+		 * normally use only DMA for commands which transfer more
+		 * than a single block of data.
+		 *
+		 * Much of the time, this could just work regardless.
+		 * So for now, just log the incident, and allow the attempt.
+		 */
+		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
+			--limit_warnings;
+			ata_link_warn(qc->dev->link, DRV_NAME
+				      ": attempting PIO w/multiple DRQ: "
+				      "this may fail due to h/w errata\n");
+		}
+		/* fall through */
+	case ATA_PROT_NODATA:
+	case ATAPI_PROT_PIO:
+	case ATAPI_PROT_NODATA:
+		if (ap->flags & ATA_FLAG_PIO_POLLING)
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+		break;
+	}
+
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
+	else
+		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
+
+	/*
+	 * We're about to send a non-EDMA capable command to the
+	 * port.  Turn off EDMA so there won't be problems accessing
+	 * the shadow block registers, etc.
+	 */
+	mv_stop_edma(ap);
+	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
+	mv_pmp_select(ap, qc->dev->link->pmp);
+
+	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
+		struct mv_host_priv *hpriv = ap->host->private_data;
+		/*
+		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
+		 *
+		 * After any NCQ error, the READ_LOG_EXT command
+		 * from libata-eh *must* use mv_qc_issue_fis().
+		 * Otherwise it might fail, due to chip errata.
+		 *
+		 * Rather than special-case it, we'll just *always*
+		 * use this method here for READ_LOG_EXT, making for
+		 * easier testing.
+		 */
+		if (IS_GEN_II(hpriv))
+			return mv_qc_issue_fis(qc);
+	}
+	return ata_bmdma_qc_issue(qc);
+}
+
+static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
+{
+	struct mv_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc;
+
+	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
+		return NULL;
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+		return qc;
+	return NULL;
+}
+
+static void mv_pmp_error_handler(struct ata_port *ap)
+{
+	unsigned int pmp, pmp_map;
+	struct mv_port_priv *pp = ap->private_data;
+
+	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
+		/*
+		 * Perform NCQ error analysis on failed PMPs
+		 * before we freeze the port entirely.
+		 *
+		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
+		 */
+		pmp_map = pp->delayed_eh_pmp_map;
+		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
+		for (pmp = 0; pmp_map != 0; pmp++) {
+			unsigned int this_pmp = (1 << pmp);
+			if (pmp_map & this_pmp) {
+				struct ata_link *link = &ap->pmp_link[pmp];
+				pmp_map &= ~this_pmp;
+				ata_eh_analyze_ncq_error(link);
+			}
+		}
+		ata_port_freeze(ap);
+	}
+	sata_pmp_error_handler(ap);
+}
+
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+
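+	/* Per-PMP device-error flags live in the upper 16 bits of SATA_TESTCTL */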
+	return readl(port_mmio + SATA_TESTCTL) >> 16;
+}
+
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+	unsigned int pmp;
+
+	/*
+	 * Initialize EH info for PMPs which saw device errors
+	 */
+	for (pmp = 0; pmp_map != 0; pmp++) {
+		unsigned int this_pmp = (1 << pmp);
+		if (pmp_map & this_pmp) {
+			struct ata_link *link = &ap->pmp_link[pmp];
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			pmp_map &= ~this_pmp;
+			ata_ehi_clear_desc(ehi);
+			ata_ehi_push_desc(ehi, "dev err");
+			ehi->err_mask |= AC_ERR_DEV;
+			ehi->action |= ATA_EH_RESET;
+			ata_link_abort(link);
+		}
+	}
+}
+
+static int mv_req_q_empty(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 in_ptr, out_ptr;
+
+	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
+}
+
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+	struct mv_port_priv *pp = ap->private_data;
+	int failed_links;
+	unsigned int old_map, new_map;
+
+	/*
+	 * Device error during FBS+NCQ operation:
+	 *
+	 * Set a port flag to prevent further I/O being enqueued.
+	 * Leave the EDMA running to drain outstanding commands from this port.
+	 * Perform the post-mortem/EH only when all responses are complete.
+	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+	 */
+	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+		pp->delayed_eh_pmp_map = 0;
+	}
+	old_map = pp->delayed_eh_pmp_map;
+	new_map = old_map | mv_get_err_pmp_map(ap);
+
+	if (old_map != new_map) {
+		pp->delayed_eh_pmp_map = new_map;
+		mv_pmp_eh_prep(ap, new_map & ~old_map);
+	}
+	failed_links = hweight16(new_map);
+
+	ata_port_info(ap,
+		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
+		      __func__, pp->delayed_eh_pmp_map,
+		      ap->qc_active, failed_links,
+		      ap->nr_active_links);
+
+	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
+		mv_process_crpb_entries(ap, pp);
+		mv_stop_edma(ap);
+		mv_eh_freeze(ap);
+		ata_port_info(ap, "%s: done\n", __func__);
+		return 1;	/* handled */
+	}
+	ata_port_info(ap, "%s: waiting\n", __func__);
+	return 1;	/* handled */
+}
+
+static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
+{
+	/*
+	 * Possible future enhancement:
+	 *
+	 * FBS+non-NCQ operation is not yet implemented.
+	 * See related notes in mv_edma_cfg().
+	 *
+	 * Device error during FBS+non-NCQ operation:
+	 *
+	 * We need to snapshot the shadow registers for each failed command.
+	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
+	 */
+	return 0;	/* not handled */
+}
+
+static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
+{
+	struct mv_port_priv *pp = ap->private_data;
+
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;	/* EDMA was not active: not handled */
+	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
+		return 0;	/* FBS was not active: not handled */
+
+	if (!(edma_err_cause & EDMA_ERR_DEV))
+		return 0;	/* non DEV error: not handled */
+	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
+	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
+		return 0;	/* other problems: not handled */
+
+	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+		/*
+		 * EDMA should NOT have self-disabled for this case.
+		 * If it did, then something is wrong elsewhere,
+		 * and we cannot handle it here.
+		 */
+		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+				      __func__, edma_err_cause, pp->pp_flags);
+			return 0; /* not handled */
+		}
+		return mv_handle_fbs_ncq_dev_err(ap);
+	} else {
+		/*
+		 * EDMA should have self-disabled for this case.
+		 * If it did not, then something is wrong elsewhere,
+		 * and we cannot handle it here.
+		 */
+		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
+			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+				      __func__, edma_err_cause, pp->pp_flags);
+			return 0; /* not handled */
+		}
+		return mv_handle_fbs_non_ncq_dev_err(ap);
+	}
+	return 0;	/* not handled */
+}
+
+static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	char *when = "idle";
+
+	ata_ehi_clear_desc(ehi);
+	if (edma_was_enabled) {
+		when = "EDMA enabled";
+	} else {
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
+			when = "polling";
+	}
+	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
+	ehi->err_mask |= AC_ERR_OTHER;
+	ehi->action   |= ATA_EH_RESET;
+	ata_port_freeze(ap);
+}
+
+/**
+ *      mv_err_intr - Handle error interrupts on the port
+ *      @ap: ATA channel to manipulate
+ *
+ *      Most cases require a full reset of the chip's state machine,
+ *      which also performs a COMRESET.
+ *      Also, if the port disabled DMA, update our cached copy to match.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_err_intr(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 edma_err_cause, eh_freeze_mask, serr = 0;
+	u32 fis_cause = 0;
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	unsigned int action = 0, err_mask = 0;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc;
+	int abort = 0;
+
+	/*
+	 * Read and clear the SError and err_cause bits.
+	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
+	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
+	 */
+	sata_scr_read(&ap->link, SCR_ERROR, &serr);
+	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+
+	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
+		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
+	}
+	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
+
+	if (edma_err_cause & EDMA_ERR_DEV) {
+		/*
+		 * Device errors during FIS-based switching operation
+		 * require special handling.
+		 */
+		if (mv_handle_dev_err(ap, edma_err_cause))
+			return;
+	}
+
+	qc = mv_get_active_qc(ap);
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
+			  edma_err_cause, pp->pp_flags);
+
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
+		if (fis_cause & FIS_IRQ_CAUSE_AN) {
+			u32 ec = edma_err_cause &
+			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
+			sata_async_notification(ap);
+			if (!ec)
+				return; /* Just an AN; no need for the nukes */
+			ata_ehi_push_desc(ehi, "SDB notify");
+		}
+	}
+	/*
+	 * All generations share these EDMA error cause bits:
+	 */
+	if (edma_err_cause & EDMA_ERR_DEV) {
+		err_mask |= AC_ERR_DEV;
+		action |= ATA_EH_RESET;
+		ata_ehi_push_desc(ehi, "dev error");
+	}
+	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
+			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
+			EDMA_ERR_INTRL_PAR)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_RESET;
+		ata_ehi_push_desc(ehi, "parity error");
+	}
+	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
+			"dev disconnect" : "dev connect");
+		action |= ATA_EH_RESET;
+	}
+
+	/*
+	 * Gen-I has a different SELF_DIS bit,
+	 * different FREEZE bits, and no SERR bit:
+	 */
+	if (IS_GEN_I(hpriv)) {
+		eh_freeze_mask = EDMA_EH_FREEZE_5;
+		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
+			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+			ata_ehi_push_desc(ehi, "EDMA self-disable");
+		}
+	} else {
+		eh_freeze_mask = EDMA_EH_FREEZE;
+		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+			ata_ehi_push_desc(ehi, "EDMA self-disable");
+		}
+		if (edma_err_cause & EDMA_ERR_SERR) {
+			ata_ehi_push_desc(ehi, "SError=%08x", serr);
+			err_mask |= AC_ERR_ATA_BUS;
+			action |= ATA_EH_RESET;
+		}
+	}
+
+	if (!err_mask) {
+		err_mask = AC_ERR_OTHER;
+		action |= ATA_EH_RESET;
+	}
+
+	ehi->serror |= serr;
+	ehi->action |= action;
+
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
+
+	if (err_mask == AC_ERR_DEV) {
+		/*
+		 * Cannot do ata_port_freeze() here,
+		 * because it would kill PIO access,
+		 * which is needed for further diagnosis.
+		 */
+		mv_eh_freeze(ap);
+		abort = 1;
+	} else if (edma_err_cause & eh_freeze_mask) {
+		/*
+		 * Note to self: ata_port_freeze() calls ata_port_abort()
+		 */
+		ata_port_freeze(ap);
+	} else {
+		abort = 1;
+	}
+
+	if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
+}
+
+static bool mv_process_crpb_response(struct ata_port *ap,
+		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
+{
+	u8 ata_status;
+	u16 edma_status = le16_to_cpu(response->flags);
+
+	/*
+	 * edma_status from a response queue entry:
+	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
+	 *   MSB is saved ATA status from command completion.
+	 */
+	if (!ncq_enabled) {
+		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
+		if (err_cause) {
+			/*
+			 * Error will be seen/handled by
+			 * mv_err_intr().  So do nothing at all here.
+			 */
+			return false;
+		}
+	}
+	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
+	if (!ac_err_mask(ata_status))
+		return true;
+	/* else: leave it for mv_err_intr() */
+	return false;
+}
+
+static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	u32 in_index;
+	bool work_done = false;
+	u32 done_mask = 0;
+	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
+
+	/* Get the hardware queue position index */
+	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
+			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	/* Process new responses received since the last time we looked */
+	while (in_index != pp->resp_idx) {
+		unsigned int tag;
+		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
+
+		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+
+		if (IS_GEN_I(hpriv)) {
+			/* 50xx: no NCQ, only one command active at a time */
+			tag = ap->link.active_tag;
+		} else {
+			/* Gen II/IIE: get command tag from CRPB entry */
+			tag = le16_to_cpu(response->id) & 0x1f;
+		}
+		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
+			done_mask |= 1 << tag;
+		work_done = true;
+	}
+
+	if (work_done) {
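+		/* ap->qc_active ^ done_mask == the tags still outstanding */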
+		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+
+		/* Update the software queue position index in hardware */
+		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
+			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
+			 port_mmio + EDMA_RSP_Q_OUT_PTR);
+	}
+}
+
+static void mv_port_intr(struct ata_port *ap, u32 port_cause)
+{
+	struct mv_port_priv *pp;
+	int edma_was_enabled;
+
+	/*
+	 * Grab a snapshot of the EDMA_EN flag setting,
+	 * so that we have a consistent view for this port,
+	 * even if one of the routines we call changes it.
+	 */
+	pp = ap->private_data;
+	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+	/*
+	 * Process completed CRPB response(s) before other events.
+	 */
+	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
+		mv_process_crpb_entries(ap, pp);
+		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+			mv_handle_fbs_ncq_dev_err(ap);
+	}
+	/*
+	 * Handle chip-reported errors, or continue on to handle PIO.
+	 */
+	if (unlikely(port_cause & ERR_IRQ)) {
+		mv_err_intr(ap);
+	} else if (!edma_was_enabled) {
+		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
+		if (qc)
+			ata_bmdma_port_intr(ap, qc);
+		else
+			mv_unexpected_intr(ap, edma_was_enabled);
+	}
+}
+
+/**
+ *      mv_host_intr - Handle all interrupts on the given host controller
+ *      @host: host specific structure
+ *      @main_irq_cause: Main interrupt cause register for the chip.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base, *hc_mmio;
+	unsigned int handled = 0, port;
+
+	/* If asserted, clear the "all ports" IRQ coalescing bit */
+	if (main_irq_cause & ALL_PORTS_COAL_DONE)
+		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
+
+	for (port = 0; port < hpriv->n_ports; port++) {
+		struct ata_port *ap = host->ports[port];
+		unsigned int p, shift, hardport, port_cause;
+
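+		/*
+		 * hardport = this port's index (0..3) within its host
+		 * controller; shift = bit position of this port's DONE/ERR
+		 * pair within main_irq_cause.
+		 */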
+		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+		/*
+		 * Each hc within the host has its own hc_irq_cause register,
+		 * where the interrupting ports bits get ack'd.
+		 */
+		if (hardport == 0) {	/* first port on this hc ? */
+			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
+			u32 port_mask, ack_irqs;
+			/*
+			 * Skip this entire hc if nothing pending for any ports
+			 */
+			if (!hc_cause) {
+				port += MV_PORTS_PER_HC - 1;
+				continue;
+			}
+			/*
+			 * We don't need/want to read the hc_irq_cause register,
+			 * because doing so hurts performance, and
+			 * main_irq_cause already gives us everything we need.
+			 *
+			 * But we do have to *write* to the hc_irq_cause to ack
+			 * the ports that we are handling this time through.
+			 *
+			 * This requires that we create a bitmap for those
+			 * ports which interrupted us, and use that bitmap
+			 * to ack (only) those ports via hc_irq_cause.
+			 */
+			ack_irqs = 0;
+			if (hc_cause & PORTS_0_3_COAL_DONE)
+				ack_irqs = HC_COAL_IRQ;
+			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
+				if ((port + p) >= hpriv->n_ports)
+					break;
+				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
+				if (hc_cause & port_mask)
+					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
+			}
+			hc_mmio = mv_hc_base_from_port(mmio, port);
+			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
+			handled = 1;
+		}
+		/*
+		 * Handle interrupts signalled for this port:
+		 */
+		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+		if (port_cause)
+			mv_port_intr(ap, port_cause);
+	}
+	return handled;
+}
+
+static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	struct ata_port *ap;
+	struct ata_queued_cmd *qc;
+	struct ata_eh_info *ehi;
+	unsigned int i, err_mask, printed = 0;
+	u32 err_cause;
+
+	err_cause = readl(mmio + hpriv->irq_cause_offset);
+
+	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
+
+	DPRINTK("All regs @ PCI error\n");
+	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
+
+	writelfl(0, mmio + hpriv->irq_cause_offset);
+
+	for (i = 0; i < host->n_ports; i++) {
+		ap = host->ports[i];
+		if (!ata_link_offline(&ap->link)) {
+			ehi = &ap->link.eh_info;
+			ata_ehi_clear_desc(ehi);
+			if (!printed++)
+				ata_ehi_push_desc(ehi,
+					"PCI err cause 0x%08x", err_cause);
+			err_mask = AC_ERR_HOST_BUS;
+			ehi->action = ATA_EH_RESET;
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc)
+				qc->err_mask |= err_mask;
+			else
+				ehi->err_mask |= err_mask;
+
+			ata_port_freeze(ap);
+		}
+	}
+	return 1;	/* handled */
+}
+
+/**
+ *      mv_interrupt - Main interrupt event handler
+ *      @irq: unused
+ *      @dev_instance: private data; in this case the host structure
+ *
+ *      Read the read-only register to determine if any host
+ *      controllers have pending interrupts.  If so, call lower level
+ *      routine to handle.  Also check for PCI errors which are only
+ *      reported here.
+ *
+ *      LOCKING:
+ *      This routine holds the host lock while processing pending
+ *      interrupts.
+ */
+static irqreturn_t mv_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct mv_host_priv *hpriv = host->private_data;
+	unsigned int handled = 0;
+	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
+	u32 main_irq_cause, pending_irqs;
+
+	spin_lock(&host->lock);
+
+	/* for MSI:  block new interrupts while in here */
+	if (using_msi)
+		mv_write_main_irq_mask(0, hpriv);
+
+	main_irq_cause = readl(hpriv->main_irq_cause_addr);
+	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
+	/*
+	 * Deal with cases where we either have nothing pending, or have read
+	 * a bogus register value which can indicate HW removal or PCI fault.
+	 */
+	if (pending_irqs && main_irq_cause != 0xffffffffU) {
+		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
+			handled = mv_pci_error(host, hpriv->base);
+		else
+			handled = mv_host_intr(host, pending_irqs);
+	}
+
+	/* for MSI: unmask; interrupt cause bits will retrigger now */
+	if (using_msi)
+		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
+{
+	unsigned int ofs;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_ERROR:
+	case SCR_CONTROL:
+		ofs = sc_reg_in * sizeof(u32);
+		break;
+	default:
+		ofs = 0xffffffffU;
+		break;
+	}
+	return ofs;
+}
+
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
+{
+	struct mv_host_priv *hpriv = link->ap->host->private_data;
+	void __iomem *mmio = hpriv->base;
+	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
+	unsigned int ofs = mv5_scr_offset(sc_reg_in);
+
+	if (ofs != 0xffffffffU) {
+		*val = readl(addr + ofs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+{
+	struct mv_host_priv *hpriv = link->ap->host->private_data;
+	void __iomem *mmio = hpriv->base;
+	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
+	unsigned int ofs = mv5_scr_offset(sc_reg_in);
+
+	if (ofs != 0xffffffffU) {
+		writelfl(val, addr + ofs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int early_5080;
+
+	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
+
+	if (!early_5080) {
+		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
+		tmp |= (1 << 0);
+		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
+	}
+
+	mv_reset_pci_bus(host, mmio);
+}
+
+static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+	writel(0x0fcfffff, mmio + FLASH_CTL);
+}
+
+static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio)
+{
+	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
+	u32 tmp;
+
+	tmp = readl(phy_mmio + MV5_PHY_MODE);
+
+	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
+	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
+}
+
+static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+	u32 tmp;
+
+	writel(0, mmio + GPIO_PORT_CTL);
+
+	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
+
+	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
+	tmp |= ~(1 << 0);
+	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
+}
+
+static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+			   unsigned int port)
+{
+	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
+	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
+	u32 tmp;
+	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
+
+	if (fix_apm_sq) {
+		tmp = readl(phy_mmio + MV5_LTMODE);
+		tmp |= (1 << 19);
+		writel(tmp, phy_mmio + MV5_LTMODE);
+
+		tmp = readl(phy_mmio + MV5_PHY_CTL);
+		tmp &= ~0x3;
+		tmp |= 0x1;
+		writel(tmp, phy_mmio + MV5_PHY_CTL);
+	}
+
+	tmp = readl(phy_mmio + MV5_PHY_MODE);
+	tmp &= ~mask;
+	tmp |= hpriv->signal[port].pre;
+	tmp |= hpriv->signal[port].amps;
+	writel(tmp, phy_mmio + MV5_PHY_MODE);
+}
+
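+/*
+ * ZERO() is redefined before each group of users below so the same
+ * shorthand can clear registers relative to whichever mmio base
+ * (port_mmio, hc_mmio or mmio) the surrounding function works with.
+ */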
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
+			     unsigned int port)
+{
+	void __iomem *port_mmio = mv_port_base(mmio, port);
+
+	mv_reset_channel(hpriv, mmio, port);
+
+	ZERO(0x028);	/* command */
+	writel(0x11f, port_mmio + EDMA_CFG);
+	ZERO(0x004);	/* timer */
+	ZERO(0x008);	/* irq err cause */
+	ZERO(0x00c);	/* irq err mask */
+	ZERO(0x010);	/* rq bah */
+	ZERO(0x014);	/* rq inp */
+	ZERO(0x018);	/* rq outp */
+	ZERO(0x01c);	/* respq bah */
+	ZERO(0x024);	/* respq outp */
+	ZERO(0x020);	/* respq inp */
+	ZERO(0x02c);	/* test control */
+	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+}
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int hc)
+{
+	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+	u32 tmp;
+
+	ZERO(0x00c);
+	ZERO(0x010);
+	ZERO(0x014);
+	ZERO(0x018);
+
+	tmp = readl(hc_mmio + 0x20);
+	tmp &= 0x1c1c1c1c;
+	tmp |= 0x03030303;
+	writel(tmp, hc_mmio + 0x20);
+}
+#undef ZERO
+
+static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int n_hc)
+{
+	unsigned int hc, port;
+
+	for (hc = 0; hc < n_hc; hc++) {
+		for (port = 0; port < MV_PORTS_PER_HC; port++)
+			mv5_reset_hc_port(hpriv, mmio,
+					  (hc * MV_PORTS_PER_HC) + port);
+
+		mv5_reset_one_hc(hpriv, mmio, hc);
+	}
+
+	return 0;
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, mmio + (reg))
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	u32 tmp;
+
+	tmp = readl(mmio + MV_PCI_MODE);
+	tmp &= 0xff00ffff;
+	writel(tmp, mmio + MV_PCI_MODE);
+
+	ZERO(MV_PCI_DISC_TIMER);
+	ZERO(MV_PCI_MSI_TRIGGER);
+	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
+	ZERO(MV_PCI_SERR_MASK);
+	ZERO(hpriv->irq_cause_offset);
+	ZERO(hpriv->irq_mask_offset);
+	ZERO(MV_PCI_ERR_LOW_ADDRESS);
+	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
+	ZERO(MV_PCI_ERR_ATTRIBUTE);
+	ZERO(MV_PCI_ERR_COMMAND);
+}
+#undef ZERO
+
+static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+	u32 tmp;
+
+	mv5_reset_flash(hpriv, mmio);
+
+	tmp = readl(mmio + GPIO_PORT_CTL);
+	tmp &= 0x3;
+	tmp |= (1 << 5) | (1 << 6);
+	writel(tmp, mmio + GPIO_PORT_CTL);
+}
+
+/**
+ *      mv6_reset_hc - Perform the 6xxx global soft reset
+ *      @mmio: base address of the HBA
+ *
+ *      This routine only applies to 6xxx parts.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+			unsigned int n_hc)
+{
+	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
+	int i, rc = 0;
+	u32 t;
+
+	/* Follow the procedure defined in the PCI "main command and
+	 * status register" table.
+	 */
+	t = readl(reg);
+	writel(t | STOP_PCI_MASTER, reg);
+
+	for (i = 0; i < 1000; i++) {
+		udelay(1);
+		t = readl(reg);
+		if (PCI_MASTER_EMPTY & t)
+			break;
+	}
+	if (!(PCI_MASTER_EMPTY & t)) {
+		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
+		rc = 1;
+		goto done;
+	}
+
+	/* set reset */
+	i = 5;
+	do {
+		writel(t | GLOB_SFT_RST, reg);
+		t = readl(reg);
+		udelay(1);
+	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
+
+	if (!(GLOB_SFT_RST & t)) {
+		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
+		rc = 1;
+		goto done;
+	}
+
+	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
+	i = 5;
+	do {
+		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
+		t = readl(reg);
+		udelay(1);
+	} while ((GLOB_SFT_RST & t) && (i-- > 0));
+
+	if (GLOB_SFT_RST & t) {
+		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
+		rc = 1;
+	}
+done:
+	return rc;
+}
+
+static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio)
+{
+	void __iomem *port_mmio;
+	u32 tmp;
+
+	tmp = readl(mmio + RESET_CFG);
+	if ((tmp & (1 << 0)) == 0) {
+		hpriv->signal[idx].amps = 0x7 << 8;
+		hpriv->signal[idx].pre = 0x1 << 5;
+		return;
+	}
+
+	port_mmio = mv_port_base(mmio, idx);
+	tmp = readl(port_mmio + PHY_MODE2);
+
+	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
+	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
+}
+
+static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+	writel(0x00000060, mmio + GPIO_PORT_CTL);
+}
+
+static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+			   unsigned int port)
+{
+	void __iomem *port_mmio = mv_port_base(mmio, port);
+
+	u32 hp_flags = hpriv->hp_flags;
+	int fix_phy_mode2 =
+		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
+	int fix_phy_mode4 =
+		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
+	u32 m2, m3;
+
+	if (fix_phy_mode2) {
+		m2 = readl(port_mmio + PHY_MODE2);
+		m2 &= ~(1 << 16);
+		m2 |= (1 << 31);
+		writel(m2, port_mmio + PHY_MODE2);
+
+		udelay(200);
+
+		m2 = readl(port_mmio + PHY_MODE2);
+		m2 &= ~((1 << 16) | (1 << 31));
+		writel(m2, port_mmio + PHY_MODE2);
+
+		udelay(200);
+	}
+
+	/*
+	 * Gen-II/IIe PHY_MODE3 errata RM#2:
+	 * Achieves better receiver noise performance than the h/w default:
+	 */
+	m3 = readl(port_mmio + PHY_MODE3);
+	m3 = (m3 & 0x1f) | (0x5555601 << 5);
+
+	/* Guideline 88F5182 (GL# SATA-S11) */
+	if (IS_SOC(hpriv))
+		m3 &= ~0x1c;
+
+	if (fix_phy_mode4) {
+		u32 m4 = readl(port_mmio + PHY_MODE4);
+		/*
+		 * Enforce reserved-bit restrictions on GenIIe devices only.
+		 * For earlier chipsets, force only the internal config field
+		 *  (workaround for errata FEr SATA#10 part 1).
+		 */
+		if (IS_GEN_IIE(hpriv))
+			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
+		else
+			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
+		writel(m4, port_mmio + PHY_MODE4);
+	}
+	/*
+	 * Workaround for 60x1-B2 errata SATA#13:
+	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
+	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
+	 * Or ensure we use writelfl() when writing PHY_MODE4.
+	 */
+	writel(m3, port_mmio + PHY_MODE3);
+
+	/* Revert values of pre-emphasis and signal amps to the saved ones */
+	m2 = readl(port_mmio + PHY_MODE2);
+
+	m2 &= ~MV_M2_PREAMP_MASK;
+	m2 |= hpriv->signal[port].amps;
+	m2 |= hpriv->signal[port].pre;
+	m2 &= ~(1 << 16);
+
+	/* according to mvSata 3.6.1, some IIE values are fixed */
+	if (IS_GEN_IIE(hpriv)) {
+		m2 &= ~0xC30FF01F;
+		m2 |= 0x0000900F;
+	}
+
+	writel(m2, port_mmio + PHY_MODE2);
+}
+
+/*
+ * TODO: use the generic LED interface to configure the SATA Presence
+ * & Activity LEDs on the board.
+ */
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+				      void __iomem *mmio)
+{
+	return;
+}
+
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+			   void __iomem *mmio)
+{
+	void __iomem *port_mmio;
+	u32 tmp;
+
+	port_mmio = mv_port_base(mmio, idx);
+	tmp = readl(port_mmio + PHY_MODE2);
+
+	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
+	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
+					void __iomem *mmio, unsigned int port)
+{
+	void __iomem *port_mmio = mv_port_base(mmio, port);
+
+	mv_reset_channel(hpriv, mmio, port);
+
+	ZERO(0x028);		/* command */
+	writel(0x101f, port_mmio + EDMA_CFG);
+	ZERO(0x004);		/* timer */
+	ZERO(0x008);		/* irq err cause */
+	ZERO(0x00c);		/* irq err mask */
+	ZERO(0x010);		/* rq bah */
+	ZERO(0x014);		/* rq inp */
+	ZERO(0x018);		/* rq outp */
+	ZERO(0x01c);		/* respq bah */
+	ZERO(0x024);		/* respq outp */
+	ZERO(0x020);		/* respq inp */
+	ZERO(0x02c);		/* test control */
+	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
+}
+
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
+				       void __iomem *mmio)
+{
+	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
+
+	ZERO(0x00c);
+	ZERO(0x010);
+	ZERO(0x014);
+}
+
+#undef ZERO
+
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+				  void __iomem *mmio, unsigned int n_hc)
+{
+	unsigned int port;
+
+	for (port = 0; port < hpriv->n_ports; port++)
+		mv_soc_reset_hc_port(hpriv, mmio, port);
+
+	mv_soc_reset_one_hc(hpriv, mmio);
+
+	return 0;
+}
+
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+				      void __iomem *mmio)
+{
+	return;
+}
+
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+	return;
+}
+
+static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
+				  void __iomem *mmio, unsigned int port)
+{
+	void __iomem *port_mmio = mv_port_base(mmio, port);
+	u32	reg;
+
+	reg = readl(port_mmio + PHY_MODE3);
+	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
+	reg |= (0x1 << 27);
+	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
+	reg |= (0x1 << 29);
+	writel(reg, port_mmio + PHY_MODE3);
+
+	reg = readl(port_mmio + PHY_MODE4);
+	reg &= ~0x1;		/* SATU_OD8 (bit 0) to 0 */
+	reg |= (0x1 << 16);	/* reserved bit 16 must be set */
+	writel(reg, port_mmio + PHY_MODE4);
+
+	reg = readl(port_mmio + PHY_MODE9_GEN2);
+	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
+	reg |= 0x8;
+	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
+	writel(reg, port_mmio + PHY_MODE9_GEN2);
+
+	reg = readl(port_mmio + PHY_MODE9_GEN1);
+	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
+	reg |= 0x8;
+	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
+	writel(reg, port_mmio + PHY_MODE9_GEN1);
+}
+
+/**
+ *	soc_is_65n - check whether the SoC is a 65 nm device
+ *	@hpriv: host private data
+ *
+ *	Detect the SoC type by reading the PHYCFG_OFS register, which
+ *	exists only on the 65 nm devices and contains a non-zero value
+ *	there; reading it on older devices returns 0.
+ */
+static bool soc_is_65n(struct mv_host_priv *hpriv)
+{
+	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
+
+	if (readl(port0_mmio + PHYCFG_OFS))
+		return true;
+	return false;
+}
+
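+/*
+ * Rewrite the SATA interface configuration register with the fixed
+ * value required by the chip spec; bit 7 additionally selects Gen2i
+ * (3 Gb/s) signalling when @want_gen2i is set.
+ */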
+static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
+{
+	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
+
+	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
+	if (want_gen2i)
+		ifcfg |= (1 << 7);		/* enable gen2i speed */
+	writelfl(ifcfg, port_mmio + SATA_IFCFG);
+}
+
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
+			     unsigned int port_no)
+{
+	void __iomem *port_mmio = mv_port_base(mmio, port_no);
+
+	/*
+	 * The datasheet warns against setting EDMA_RESET when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the EDMA_RESET operation.
+	 */
+	mv_stop_edma_engine(port_mmio);
+	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
+
+	if (!IS_GEN_I(hpriv)) {
+		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
+		mv_setup_ifcfg(port_mmio, 1);
+	}
+	/*
+	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
+	 * link, and physical layers.  It resets all SATA interface registers
+	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
+	 */
+	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
+	udelay(25);	/* allow reset propagation */
+	writelfl(0, port_mmio + EDMA_CMD);
+
+	hpriv->ops->phy_errata(hpriv, mmio, port_no);
+
+	if (IS_GEN_I(hpriv))
+		usleep_range(500, 1000);
+}
+
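+/*
+ * Route subsequent commands to the given port-multiplier device by
+ * reprogramming the PMP field (bits 3:0) of SATA_IFCTL; this is
+ * skipped when the field is already current or PMP is unsupported.
+ */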
+static void mv_pmp_select(struct ata_port *ap, int pmp)
+{
+	if (sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = mv_ap_base(ap);
+		u32 reg = readl(port_mmio + SATA_IFCTL);
+		int old = reg & 0xf;
+
+		if (old != pmp) {
+			reg = (reg & ~0xf) | pmp;
+			writelfl(reg, port_mmio + SATA_IFCTL);
+		}
+	}
+}
+
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return sata_std_hardreset(link, class, deadline);
+}
+
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return ata_sff_softreset(link, class, deadline);
+}
+
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
+	void __iomem *mmio = hpriv->base;
+	int rc, attempts = 0, extra = 0;
+	u32 sstatus;
+	bool online;
+
+	mv_reset_channel(hpriv, mmio, ap->port_no);
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	pp->pp_flags &=
+	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
+
+	/* Workaround for errata FEr SATA#10 (part 2) */
+	do {
+		const unsigned long *timing =
+				sata_ehc_deb_timing(&link->eh_context);
+
+		rc = sata_link_hardreset(link, timing, deadline + extra,
+					 &online, NULL);
+		rc = online ? -EAGAIN : rc;
+		if (rc)
+			return rc;
+		sata_scr_read(link, SCR_STATUS, &sstatus);
+		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+			/* Force 1.5gb/s link speed and try again */
+			mv_setup_ifcfg(mv_ap_base(ap), 0);
+			if (time_after(jiffies + HZ, deadline))
+				extra = HZ; /* only extend it once, max */
+		}
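+		/*
+		 * SStatus 0x113/0x123: PHY online at Gen1/Gen2 with a
+		 * device present; 0x0: no device.  Any other value means
+		 * the link has not settled yet, so run another hardreset.
+		 */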
+	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
+	mv_save_cached_regs(ap);
+	mv_edma_cfg(ap, 0, 0);
+
+	return rc;
+}
+
+static void mv_eh_freeze(struct ata_port *ap)
+{
+	mv_stop_edma(ap);
+	mv_enable_port_irqs(ap, 0);
+}
+
+static void mv_eh_thaw(struct ata_port *ap)
+{
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	unsigned int port = ap->port_no;
+	unsigned int hardport = mv_hardport_from_port(port);
+	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 hc_irq_cause;
+
+	/* clear EDMA errors on this port */
+	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
+
+	/* clear pending irq events */
+	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
+	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
+
+	mv_enable_port_irqs(ap, ERR_IRQ);
+}
+
+/**
+ *      mv_port_init - Perform some early initialization on a single port.
+ *      @port: libata data structure storing shadow register addresses
+ *      @port_mmio: base address of the port
+ *
+ *      Initialize shadow register mmio addresses, clear outstanding
+ *      interrupts on the port, and unmask interrupts for the future
+ *      start of the port.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
+{
+	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
+
+	/* PIO related setup */
+	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
+	port->error_addr =
+		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
+	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
+	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
+	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
+	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
+	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
+	port->status_addr =
+		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
+	/* special case: control/altstatus doesn't have ATA_REG_ address */
+	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
+
+	/* Clear any currently outstanding port interrupt conditions */
+	serr = port_mmio + mv_scr_offset(SCR_ERROR);
+	writelfl(readl(serr), serr);
+	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
+
+	/* unmask all non-transient EDMA error interrupts */
+	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
+
+	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
+		readl(port_mmio + EDMA_CFG),
+		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
+		readl(port_mmio + EDMA_ERR_IRQ_MASK));
+}
+
+static unsigned int mv_in_pcix_mode(struct ata_host *host)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base;
+	u32 reg;
+
+	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
+		return 0;	/* not PCI-X capable */
+	reg = readl(mmio + MV_PCI_MODE);
+	if ((reg & MV_PCI_MODE_MASK) == 0)
+		return 0;	/* conventional PCI mode */
+	return 1;	/* chip is in PCI-X mode */
+}
+
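+/*
+ * PCI cut-through is only safe to use when either the chip is in
+ * PCI-X mode or the Master Read Trigger bit is clear in MV_PCI_COMMAND.
+ */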
+static int mv_pci_cut_through_okay(struct ata_host *host)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base;
+	u32 reg;
+
+	if (!mv_in_pcix_mode(host)) {
+		reg = readl(mmio + MV_PCI_COMMAND);
+		if (reg & MV_PCI_COMMAND_MRDTRIG)
+			return 0; /* not okay */
+	}
+	return 1; /* okay */
+}
+
+static void mv_60x1b2_errata_pci7(struct ata_host *host)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base;
+
+	/* workaround for 60x1-B2 errata PCI#7 */
+	if (mv_in_pcix_mode(host)) {
+		u32 reg = readl(mmio + MV_PCI_COMMAND);
+		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
+	}
+}
+
+static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
+	u32 hp_flags = hpriv->hp_flags;
+
+	switch (board_idx) {
+	case chip_5080:
+		hpriv->ops = &mv5xxx_ops;
+		hp_flags |= MV_HP_GEN_I;
+
+		switch (pdev->revision) {
+		case 0x1:
+			hp_flags |= MV_HP_ERRATA_50XXB0;
+			break;
+		case 0x3:
+			hp_flags |= MV_HP_ERRATA_50XXB2;
+			break;
+		default:
+			dev_warn(&pdev->dev,
+				 "Applying 50XXB2 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_50XXB2;
+			break;
+		}
+		break;
+
+	case chip_504x:
+	case chip_508x:
+		hpriv->ops = &mv5xxx_ops;
+		hp_flags |= MV_HP_GEN_I;
+
+		switch (pdev->revision) {
+		case 0x0:
+			hp_flags |= MV_HP_ERRATA_50XXB0;
+			break;
+		case 0x3:
+			hp_flags |= MV_HP_ERRATA_50XXB2;
+			break;
+		default:
+			dev_warn(&pdev->dev,
+				 "Applying B2 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_50XXB2;
+			break;
+		}
+		break;
+
+	case chip_604x:
+	case chip_608x:
+		hpriv->ops = &mv6xxx_ops;
+		hp_flags |= MV_HP_GEN_II;
+
+		switch (pdev->revision) {
+		case 0x7:
+			mv_60x1b2_errata_pci7(host);
+			hp_flags |= MV_HP_ERRATA_60X1B2;
+			break;
+		case 0x9:
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		default:
+			dev_warn(&pdev->dev,
+				 "Applying B2 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_60X1B2;
+			break;
+		}
+		break;
+
+	case chip_7042:
+		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
+		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
+		    (pdev->device == 0x2300 || pdev->device == 0x2310))
+		{
+			/*
+			 * Highpoint RocketRAID PCIe 23xx series cards:
+			 *
+			 * Unconfigured drives are treated as "Legacy"
+			 * by the BIOS, and it overwrites sector 8 with
+			 * a "Lgcy" metadata block prior to Linux boot.
+			 *
+			 * Configured drives (RAID or JBOD) leave sector 8
+			 * alone, but instead overwrite a high-numbered
+			 * sector for the RAID metadata.  This sector can
+			 * be determined exactly by truncating the physical
+			 * drive capacity to a nice even GB value.
+			 *
+			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
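+			 *   (e.g. a drive of 0x22345678 sectors keeps its
+			 *   RAID metadata at sector 0x22300000)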
+			 *
+			 * Warn the user, lest they think we're just buggy.
+			 */
+			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
+				" BIOS CORRUPTS DATA on all attached drives,"
+				" regardless of if/how they are configured."
+				" BEWARE!\n");
+			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
+				" use sectors 8-9 on \"Legacy\" drives,"
+				" and avoid the final two gigabytes on"
+				" all RocketRAID BIOS initialized drives.\n");
+		}
+		/* fall through */
+	case chip_6042:
+		hpriv->ops = &mv6xxx_ops;
+		hp_flags |= MV_HP_GEN_IIE;
+		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
+			hp_flags |= MV_HP_CUT_THROUGH;
+
+		switch (pdev->revision) {
+		case 0x2: /* Rev.B0: the first/only public release */
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		default:
+			dev_warn(&pdev->dev,
+				 "Applying 60X1C0 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		}
+		break;
+	case chip_soc:
+		if (soc_is_65n(hpriv))
+			hpriv->ops = &mv_soc_65n_ops;
+		else
+			hpriv->ops = &mv_soc_ops;
+		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
+			MV_HP_ERRATA_60X1C0;
+		break;
+
+	default:
+		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
+		return 1;
+	}
+
+	hpriv->hp_flags = hp_flags;
+	if (hp_flags & MV_HP_PCIE) {
+		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
+		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
+		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
+	} else {
+		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
+		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
+		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
+	}
+
+	return 0;
+}
+
+/**
+ *      mv_init_host - Perform some early initialization of the host.
+ *	@host: ATA host to initialize
+ *
+ *      If possible, do an early global reset of the host.  Then do
+ *      our port init and clear/unmask all/relevant host interrupts.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_init_host(struct ata_host *host)
+{
+	int rc = 0, n_hc, port, hc;
+	struct mv_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->base;
+
+	rc = mv_chip_id(host, hpriv->board_idx);
+	if (rc)
+		goto done;
+
+	if (IS_SOC(hpriv)) {
+		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
+		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
+	} else {
+		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
+		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
+	}
+
+	/* initialize shadow irq mask with register's value */
+	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
+
+	/* global interrupt mask: 0 == mask everything */
+	mv_set_main_irq_mask(host, ~0, 0);
+
+	n_hc = mv_get_hc_count(host->ports[0]->flags);
+
+	for (port = 0; port < host->n_ports; port++)
+		if (hpriv->ops->read_preamp)
+			hpriv->ops->read_preamp(hpriv, port, mmio);
+
+	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
+	if (rc)
+		goto done;
+
+	hpriv->ops->reset_flash(hpriv, mmio);
+	hpriv->ops->reset_bus(host, mmio);
+	hpriv->ops->enable_leds(hpriv, mmio);
+
+	for (port = 0; port < host->n_ports; port++) {
+		struct ata_port *ap = host->ports[port];
+		void __iomem *port_mmio = mv_port_base(mmio, port);
+
+		mv_port_init(&ap->ioaddr, port_mmio);
+	}
+
+	for (hc = 0; hc < n_hc; hc++) {
+		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+
+		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+			"(before clear)=0x%08x\n", hc,
+			readl(hc_mmio + HC_CFG),
+			readl(hc_mmio + HC_IRQ_CAUSE));
+
+		/* Clear any currently outstanding hc interrupt conditions */
+		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
+	}
+
+	if (!IS_SOC(hpriv)) {
+		/* Clear any currently outstanding host interrupt conditions */
+		writelfl(0, mmio + hpriv->irq_cause_offset);
+
+		/* and unmask interrupt generation for host regs */
+		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
+	}
+
+	/*
+	 * enable only global host interrupts for now.
+	 * The per-port interrupts get done later as ports are set up.
+	 */
+	mv_set_main_irq_mask(host, 0, PCI_ERR);
+	mv_set_irq_coalescing(host, irq_coalescing_io_count,
+				    irq_coalescing_usecs);
+done:
+	return rc;
+}
+
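+/*
+ * Create managed DMA pools for the command request queue (CRQB),
+ * command response queue (CRPB) and scatter/gather tables.  Each pool
+ * aligns entries to their full size, so a queue never straddles its
+ * own alignment boundary; devres frees the pools on detach.
+ */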
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+							     MV_CRQB_Q_SZ, 0);
+	if (!hpriv->crqb_pool)
+		return -ENOMEM;
+
+	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+							     MV_CRPB_Q_SZ, 0);
+	if (!hpriv->crpb_pool)
+		return -ENOMEM;
+
+	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+							     MV_SG_TBL_SZ, 0);
+	if (!hpriv->sg_tbl_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
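+/*
+ * Program the controller's MBUS address decoding windows to match the
+ * SoC DRAM layout: clear all four windows, then open one window per
+ * DRAM chip select, encoding its base, size, attribute and target id
+ * (bit 0 of the control word is the window enable).
+ */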
+static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
+				 const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		writel(0, hpriv->base + WINDOW_CTRL(i));
+		writel(0, hpriv->base + WINDOW_BASE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel(((cs->size - 1) & 0xffff0000) |
+			(cs->mbus_attr << 8) |
+			(dram->mbus_dram_target_id << 4) | 1,
+			hpriv->base + WINDOW_CTRL(i));
+		writel(cs->base, hpriv->base + WINDOW_BASE(i));
+	}
+}
+
+/**
+ *      mv_platform_probe - handle a positive probe of an SoC Marvell host
+ *      @pdev: platform device found
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_platform_probe(struct platform_device *pdev)
+{
+	const struct mv_sata_platform_data *mv_platform_data;
+	const struct mbus_dram_target_info *dram;
+	const struct ata_port_info *ppi[] =
+	    { &mv_port_info[chip_soc], NULL };
+	struct ata_host *host;
+	struct mv_host_priv *hpriv;
+	struct resource *res;
+	int n_ports = 0, irq = 0;
+	int rc;
+	int port;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/*
+	 * Simple resource validation.
+	 */
+	if (unlikely(pdev->num_resources != 2)) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the register base first
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+
+	/* allocate host */
+	if (pdev->dev.of_node) {
+		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
+					   &n_ports);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"error parsing nr-ports property: %d\n", rc);
+			return rc;
+		}
+
+		if (n_ports <= 0) {
+			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
+				n_ports);
+			return -EINVAL;
+		}
+
+		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	} else {
+		mv_platform_data = dev_get_platdata(&pdev->dev);
+		n_ports = mv_platform_data->n_ports;
+		irq = platform_get_irq(pdev, 0);
+	}
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+
+	if (!host || !hpriv)
+		return -ENOMEM;
+	hpriv->port_clks = devm_kcalloc(&pdev->dev,
+					n_ports, sizeof(struct clk *),
+					GFP_KERNEL);
+	if (!hpriv->port_clks)
+		return -ENOMEM;
+	hpriv->port_phys = devm_kcalloc(&pdev->dev,
+					n_ports, sizeof(struct phy *),
+					GFP_KERNEL);
+	if (!hpriv->port_phys)
+		return -ENOMEM;
+	host->private_data = hpriv;
+	hpriv->board_idx = chip_soc;
+
+	host->iomap = NULL;
+	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
+	hpriv->base -= SATAHC0_REG_BASE;
+
+	hpriv->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hpriv->clk))
+		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
+	else
+		clk_prepare_enable(hpriv->clk);
+
+	for (port = 0; port < n_ports; port++) {
+		char port_number[16];
+		sprintf(port_number, "%d", port);
+		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+		if (!IS_ERR(hpriv->port_clks[port]))
+			clk_prepare_enable(hpriv->port_clks[port]);
+
+		sprintf(port_number, "port%d", port);
+		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+							       port_number);
+		if (IS_ERR(hpriv->port_phys[port])) {
+			rc = PTR_ERR(hpriv->port_phys[port]);
+			hpriv->port_phys[port] = NULL;
+			if (rc != -EPROBE_DEFER)
+				dev_warn(&pdev->dev, "error getting phy %d\n", rc);
+
+			/* Cleanup only the initialized ports */
+			hpriv->n_ports = port;
+			goto err;
+		} else
+			phy_power_on(hpriv->port_phys[port]);
+	}
+
+	/* All the ports have been initialized */
+	hpriv->n_ports = n_ports;
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	dram = mv_mbus_dram_info();
+	if (dram)
+		mv_conf_mbus_windows(hpriv, dram);
+
+	rc = mv_create_dma_pools(hpriv, &pdev->dev);
+	if (rc)
+		goto err;
+
+	/*
+	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
+	 * updated in the LP_PHY_CTL register.
+	 */
+	if (pdev->dev.of_node &&
+		of_device_is_compatible(pdev->dev.of_node,
+					"marvell,armada-370-sata"))
+		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
+
+	/* initialize adapter */
+	rc = mv_init_host(host);
+	if (rc)
+		goto err;
+
+	dev_info(&pdev->dev, "slots %u ports %d\n",
+		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
+
+	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
+	if (!rc)
+		return 0;
+
+err:
+	if (!IS_ERR(hpriv->clk)) {
+		clk_disable_unprepare(hpriv->clk);
+		clk_put(hpriv->clk);
+	}
+	for (port = 0; port < hpriv->n_ports; port++) {
+		if (!IS_ERR(hpriv->port_clks[port])) {
+			clk_disable_unprepare(hpriv->port_clks[port]);
+			clk_put(hpriv->port_clks[port]);
+		}
+		phy_power_off(hpriv->port_phys[port]);
+	}
+
+	return rc;
+}
+
+/**
+ *      mv_platform_remove - unplug a platform interface
+ *      @pdev: platform device
+ *
+ *      A platform bus SATA device has been unplugged. Perform the needed
+ *      cleanup. Also called on module unload for any active devices.
+ */
+static int mv_platform_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct mv_host_priv *hpriv = host->private_data;
+	int port;
+	ata_host_detach(host);
+
+	if (!IS_ERR(hpriv->clk)) {
+		clk_disable_unprepare(hpriv->clk);
+		clk_put(hpriv->clk);
+	}
+	for (port = 0; port < host->n_ports; port++) {
+		if (!IS_ERR(hpriv->port_clks[port])) {
+			clk_disable_unprepare(hpriv->port_clks[port]);
+			clk_put(hpriv->port_clks[port]);
+		}
+		phy_power_off(hpriv->port_phys[port]);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	if (host)
+		return ata_host_suspend(host, state);
+	else
+		return 0;
+}
+
+static int mv_platform_resume(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	const struct mbus_dram_target_info *dram;
+	int ret;
+
+	if (host) {
+		struct mv_host_priv *hpriv = host->private_data;
+
+		/*
+		 * (Re-)program MBUS remapping windows if we are asked to.
+		 */
+		dram = mv_mbus_dram_info();
+		if (dram)
+			mv_conf_mbus_windows(hpriv, dram);
+
+		/* initialize adapter */
+		ret = mv_init_host(host);
+		if (ret) {
+			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+			return ret;
+		}
+		ata_host_resume(host);
+	}
+
+	return 0;
+}
+#else
+#define mv_platform_suspend NULL
+#define mv_platform_resume NULL
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id mv_sata_dt_ids[] = {
+	{ .compatible = "marvell,armada-370-sata", },
+	{ .compatible = "marvell,orion-sata", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
+#endif
+
+static struct platform_driver mv_platform_driver = {
+	.probe		= mv_platform_probe,
+	.remove		= mv_platform_remove,
+	.suspend	= mv_platform_suspend,
+	.resume		= mv_platform_resume,
+	.driver		= {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(mv_sata_dt_ids),
+	},
+};
+
+#ifdef CONFIG_PCI
+static int mv_pci_init_one(struct pci_dev *pdev,
+			   const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int mv_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static struct pci_driver mv_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= mv_pci_tbl,
+	.probe			= mv_pci_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= mv_pci_device_resume,
+#endif
+};
+
+/* move to PCI layer or libata core? */
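+/*
+ * Prefer full 64-bit DMA masks; if the 64-bit coherent mask is not
+ * accepted, fall back to a 32-bit coherent mask, and if 64-bit
+ * streaming DMA is unavailable fall back to 32-bit for both.
+ */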
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ *      mv_print_info - Dump key info to kernel log for perusal.
+ *      @host: ATA host to print info about
+ *
+ *      FIXME: complete this.
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static void mv_print_info(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
+	u8 scc;
+	const char *scc_s, *gen;
+
+	/* The PCI class code distinguishes the SCSI and RAID flavors
+	 * of the chip.
+	 */
+	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
+	if (scc == 0)
+		scc_s = "SCSI";
+	else if (scc == 0x01)
+		scc_s = "RAID";
+	else
+		scc_s = "?";
+
+	if (IS_GEN_I(hpriv))
+		gen = "I";
+	else if (IS_GEN_II(hpriv))
+		gen = "II";
+	else if (IS_GEN_IIE(hpriv))
+		gen = "IIE";
+	else
+		gen = "?";
+
+	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
+		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
+		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+}
+
+/**
+ *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
+ *      @pdev: PCI device found
+ *      @ent: PCI device ID entry for the matched host
+ *
+ *      LOCKING:
+ *      Inherited from caller.
+ */
+static int mv_pci_init_one(struct pci_dev *pdev,
+			   const struct pci_device_id *ent)
+{
+	unsigned int board_idx = (unsigned int)ent->driver_data;
+	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
+	struct ata_host *host;
+	struct mv_host_priv *hpriv;
+	int n_ports, port, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* allocate host */
+	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
+	hpriv->n_ports = n_ports;
+	hpriv->board_idx = board_idx;
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	hpriv->base = host->iomap[MV_PRIMARY_BAR];
+
+	rc = pci_go_64(pdev);
+	if (rc)
+		return rc;
+
+	rc = mv_create_dma_pools(hpriv, &pdev->dev);
+	if (rc)
+		return rc;
+
+	for (port = 0; port < host->n_ports; port++) {
+		struct ata_port *ap = host->ports[port];
+		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
+		unsigned int offset = port_mmio - hpriv->base;
+
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+	}
+
+	/* initialize adapter */
+	rc = mv_init_host(host);
+	if (rc)
+		return rc;
+
+	/* Enable message-signaled interrupts (MSI), if requested */
+	if (msi && pci_enable_msi(pdev) == 0)
+		hpriv->hp_flags |= MV_HP_FLAG_MSI;
+
+	mv_dump_pci_cfg(pdev, 0x68);
+	mv_print_info(host);
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
+				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mv_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	/* initialize adapter */
+	rc = mv_init_host(host);
+	if (rc)
+		return rc;
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+#endif
+
+static int __init mv_init(void)
+{
+	int rc = -ENODEV;
+#ifdef CONFIG_PCI
+	rc = pci_register_driver(&mv_pci_driver);
+	if (rc < 0)
+		return rc;
+#endif
+	rc = platform_driver_register(&mv_platform_driver);
+
+#ifdef CONFIG_PCI
+	if (rc < 0)
+		pci_unregister_driver(&mv_pci_driver);
+#endif
+	return rc;
+}
+
+static void __exit mv_exit(void)
+{
+#ifdef CONFIG_PCI
+	pci_unregister_driver(&mv_pci_driver);
+#endif
+	platform_driver_unregister(&mv_platform_driver);
+}
+
+MODULE_AUTHOR("Brett Russ");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(mv_init);
+module_exit(mv_exit);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
new file mode 100644
index 0000000..72c9b92
--- /dev/null
+++ b/drivers/ata/sata_nv.c
@@ -0,0 +1,2502 @@
+/*
+ *  sata_nv.c - NVIDIA nForce SATA
+ *
+ *  Copyright 2004 NVIDIA Corp.  All rights reserved.
+ *  Copyright 2004 Andrew Chew
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  No hardware documentation available outside of NVIDIA.
+ *  This driver programs the NVIDIA SATA controller in a fashion
+ *  similar to other PCI IDE BMDMA controllers, with a few
+ *  NV-specific details such as register offsets, SATA phy location,
+ *  hotplug info, etc.
+ *
+ *  CK804/MCP04 controllers support an alternate programming interface
+ *  similar to the ADMA specification (with some modifications).
+ *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
+ *  sent through the legacy interface.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#define DRV_NAME			"sata_nv"
+#define DRV_VERSION			"3.5"
+
+#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
+
+enum {
+	NV_MMIO_BAR			= 5,
+
+	NV_PORTS			= 2,
+	NV_PIO_MASK			= ATA_PIO4,
+	NV_MWDMA_MASK			= ATA_MWDMA2,
+	NV_UDMA_MASK			= ATA_UDMA6,
+	NV_PORT0_SCR_REG_OFFSET		= 0x00,
+	NV_PORT1_SCR_REG_OFFSET		= 0x40,
+
+	/* INT_STATUS/ENABLE */
+	NV_INT_STATUS			= 0x10,
+	NV_INT_ENABLE			= 0x11,
+	NV_INT_STATUS_CK804		= 0x440,
+	NV_INT_ENABLE_CK804		= 0x441,
+
+	/* INT_STATUS/ENABLE bits */
+	NV_INT_DEV			= 0x01,
+	NV_INT_PM			= 0x02,
+	NV_INT_ADDED			= 0x04,
+	NV_INT_REMOVED			= 0x08,
+
+	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
+
+	NV_INT_ALL			= 0x0f,
+	NV_INT_MASK			= NV_INT_DEV |
+					  NV_INT_ADDED | NV_INT_REMOVED,
+
+	/* INT_CONFIG */
+	NV_INT_CONFIG			= 0x12,
+	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */
+
+	/* For PCI config register 20 */
+	NV_MCP_SATA_CFG_20		= 0x50,
+	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
+	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
+	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
+	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
+
+	NV_ADMA_MAX_CPBS		= 32,
+	NV_ADMA_CPB_SZ			= 128,
+	NV_ADMA_APRD_SZ			= 16,
+	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
+					   NV_ADMA_APRD_SZ,
+	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
+	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
+	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
+					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
+
+	/* BAR5 offset to ADMA general registers */
+	NV_ADMA_GEN			= 0x400,
+	NV_ADMA_GEN_CTL			= 0x00,
+	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
+
+	/* BAR5 offset to ADMA ports */
+	NV_ADMA_PORT			= 0x480,
+
+	/* size of ADMA port register space  */
+	NV_ADMA_PORT_SIZE		= 0x100,
+
+	/* ADMA port registers */
+	NV_ADMA_CTL			= 0x40,
+	NV_ADMA_CPB_COUNT		= 0x42,
+	NV_ADMA_NEXT_CPB_IDX		= 0x43,
+	NV_ADMA_STAT			= 0x44,
+	NV_ADMA_CPB_BASE_LOW		= 0x48,
+	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
+	NV_ADMA_APPEND			= 0x50,
+	NV_ADMA_NOTIFIER		= 0x68,
+	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
+
+	/* NV_ADMA_CTL register bits */
+	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
+	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
+	NV_ADMA_CTL_GO			= (1 << 7),
+	NV_ADMA_CTL_AIEN		= (1 << 8),
+	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
+	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
+
+	/* CPB response flag bits */
+	NV_CPB_RESP_DONE		= (1 << 0),
+	NV_CPB_RESP_ATA_ERR		= (1 << 3),
+	NV_CPB_RESP_CMD_ERR		= (1 << 4),
+	NV_CPB_RESP_CPB_ERR		= (1 << 7),
+
+	/* CPB control flag bits */
+	NV_CPB_CTL_CPB_VALID		= (1 << 0),
+	NV_CPB_CTL_QUEUE		= (1 << 1),
+	NV_CPB_CTL_APRD_VALID		= (1 << 2),
+	NV_CPB_CTL_IEN			= (1 << 3),
+	NV_CPB_CTL_FPDMA		= (1 << 4),
+
+	/* APRD flags */
+	NV_APRD_WRITE			= (1 << 1),
+	NV_APRD_END			= (1 << 2),
+	NV_APRD_CONT			= (1 << 3),
+
+	/* NV_ADMA_STAT flags */
+	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
+	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
+	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
+	NV_ADMA_STAT_CPBERR		= (1 << 4),
+	NV_ADMA_STAT_SERROR		= (1 << 5),
+	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
+	NV_ADMA_STAT_IDLE		= (1 << 8),
+	NV_ADMA_STAT_LEGACY		= (1 << 9),
+	NV_ADMA_STAT_STOPPED		= (1 << 10),
+	NV_ADMA_STAT_DONE		= (1 << 12),
+	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
+					  NV_ADMA_STAT_TIMEOUT,
+
+	/* port flags */
+	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
+	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
+
+	/* MCP55 reg offset */
+	NV_CTL_MCP55			= 0x400,
+	NV_INT_STATUS_MCP55		= 0x440,
+	NV_INT_ENABLE_MCP55		= 0x444,
+	NV_NCQ_REG_MCP55		= 0x448,
+
+	/* MCP55 */
+	NV_INT_ALL_MCP55		= 0xffff,
+	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
+	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
+
+	/* SWNCQ enable bits */
+	NV_CTL_PRI_SWNCQ		= 0x02,
+	NV_CTL_SEC_SWNCQ		= 0x04,
+
+	/* SW NCQ status bits */
+	NV_SWNCQ_IRQ_DEV		= (1 << 0),
+	NV_SWNCQ_IRQ_PM			= (1 << 1),
+	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
+	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
+
+	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
+	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
+	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
+	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
+
+	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
+					  NV_SWNCQ_IRQ_REMOVED,
+
+};
+
+/* ADMA Physical Region Descriptor - one SG segment */
+struct nv_adma_prd {
+	__le64			addr;
+	__le32			len;
+	u8			flags;
+	u8			packet_len;
+	__le16			reserved;
+};
+
+enum nv_adma_regbits {
+	CMDEND	= (1 << 15),		/* end of command list */
+	WNB	= (1 << 14),		/* wait-not-BSY */
+	IGN	= (1 << 13),		/* ignore this entry */
+	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
+	DA2	= (1 << (2 + 8)),
+	DA1	= (1 << (1 + 8)),
+	DA0	= (1 << (0 + 8)),
+};
+
+/*
+ * ADMA Command Parameter Block:
+ * The first 5 SG segments are stored inside the Command Parameter
+ * Block itself.  If there are more than 5 segments, the remainder
+ * are stored in a separate memory area indicated by next_aprd.
+ */
+struct nv_adma_cpb {
+	u8			resp_flags;    /* 0 */
+	u8			reserved1;     /* 1 */
+	u8			ctl_flags;     /* 2 */
+	/* len is length of taskfile in 64 bit words */
+	u8			len;		/* 3  */
+	u8			tag;           /* 4 */
+	u8			next_cpb_idx;  /* 5 */
+	__le16			reserved2;     /* 6-7 */
+	__le16			tf[12];        /* 8-31 */
+	struct nv_adma_prd	aprd[5];       /* 32-111 */
+	__le64			next_aprd;     /* 112-119 */
+	__le64			reserved3;     /* 120-127 */
+};
+
+struct nv_adma_port_priv {
+	struct nv_adma_cpb	*cpb;
+	dma_addr_t		cpb_dma;
+	struct nv_adma_prd	*aprd;
+	dma_addr_t		aprd_dma;
+	void __iomem		*ctl_block;
+	void __iomem		*gen_block;
+	void __iomem		*notifier_clear_block;
+	u64			adma_dma_mask;
+	u8			flags;
+	int			last_issue_ncq;
+};
+
+struct nv_host_priv {
+	unsigned long		type;
+};
+
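+/*
+ * FIFO ring of commands deferred by software NCQ: head/tail index
+ * into tag[], while defer_bits keeps the same set of tags as a
+ * bitmask for cheap membership tests.
+ */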
+struct defer_queue {
+	u32		defer_bits;
+	unsigned int	head;
+	unsigned int	tail;
+	unsigned int	tag[ATA_MAX_QUEUE];
+};
+
+enum ncq_saw_flag_list {
+	ncq_saw_d2h	= (1U << 0),
+	ncq_saw_dmas	= (1U << 1),
+	ncq_saw_sdb	= (1U << 2),
+	ncq_saw_backout	= (1U << 3),
+};
+
+struct nv_swncq_port_priv {
+	struct ata_bmdma_prd *prd;	 /* our SG list */
+	dma_addr_t	prd_dma; /* and its DMA mapping */
+	void __iomem	*sactive_block;
+	void __iomem	*irq_block;
+	void __iomem	*tag_block;
+	u32		qc_active;
+
+	unsigned int	last_issue_tag;
+
+	/* FIFO circular queue storing deferred commands */
+	struct defer_queue defer_queue;
+
+	/* for NCQ interrupt analysis */
+	u32		dhfis_bits;
+	u32		dmafis_bits;
+	u32		sdbfis_bits;
+
+	unsigned int	ncq_flags;
+};
+
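+/* ADMA interrupt pending for @PORT in the GEN_CTL status word:
+ * bit 19 for port 0, bit 31 for port 1.
+ */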
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int nv_pci_device_resume(struct pci_dev *pdev);
+#endif
+static void nv_ck804_host_stop(struct ata_host *host);
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
+static void nv_nf2_freeze(struct ata_port *ap);
+static void nv_nf2_thaw(struct ata_port *ap);
+static void nv_ck804_freeze(struct ata_port *ap);
+static void nv_ck804_thaw(struct ata_port *ap);
+static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+static void nv_adma_irq_clear(struct ata_port *ap);
+static int nv_adma_port_start(struct ata_port *ap);
+static void nv_adma_port_stop(struct ata_port *ap);
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_adma_port_resume(struct ata_port *ap);
+#endif
+static void nv_adma_freeze(struct ata_port *ap);
+static void nv_adma_thaw(struct ata_port *ap);
+static void nv_adma_error_handler(struct ata_port *ap);
+static void nv_adma_host_stop(struct ata_host *host);
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+
+static void nv_mcp55_thaw(struct ata_port *ap);
+static void nv_mcp55_freeze(struct ata_port *ap);
+static void nv_swncq_error_handler(struct ata_port *ap);
+static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_port_start(struct ata_port *ap);
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_swncq_port_resume(struct ata_port *ap);
+#endif
+
+enum nv_host_type {
+	GENERIC,
+	NFORCE2,
+	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
+	CK804,
+	ADMA,
+	MCP5x,
+	SWNCQ,
+};
+
+static const struct pci_device_id nv_pci_tbl[] = {
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
+
+	{ } /* terminate list */
+};
+
+static struct pci_driver nv_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= nv_pci_tbl,
+	.probe			= nv_init_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= nv_pci_device_resume,
+#endif
+	.remove			= ata_pci_remove_one,
+};
+
+static struct scsi_host_template nv_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct scsi_host_template nv_adma_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	.can_queue		= NV_ADMA_MAX_CPBS,
+	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
+	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
+	.slave_configure	= nv_adma_slave_config,
+};
+
+static struct scsi_host_template nv_swncq_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	.can_queue		= ATA_MAX_QUEUE - 1,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= nv_swncq_slave_config,
+};
+
+/*
+ * NV SATA controllers have various problems with the hardreset
+ * protocol, depending on the specific controller and device.
+ *
+ * GENERIC:
+ *
+ *  bko11195 reports that link doesn't come online after hardreset on
+ *  generic nv's and there have been several other similar reports on
+ *  linux-ide.
+ *
+ *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
+ *  softreset.
+ *
+ * NF2/3:
+ *
+ *  bko3352 reports nf2/3 controllers can't determine device signature
+ *  reliably after hardreset.  The following thread reports detection
+ *  failure on cold boot with the standard debouncing timing.
+ *
+ *  http://thread.gmane.org/gmane.linux.ide/34098
+ *
+ *  bko12176 reports that hardreset fails to bring up the link during
+ *  boot on nf2.
+ *
+ * CK804:
+ *
+ *  For initial probing after boot and hot plugging, hardreset mostly
+ *  works fine on CK804 but curiously, reprobing on the initial port
+ *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
+ *  FIS in a somewhat nondeterministic way.
+ *
+ * SWNCQ:
+ *
+ *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
+ *  hardreset should be used and hardreset can't report proper
+ *  signature, which suggests that mcp5x is closer to nf2 as far as
+ *  reset quirkiness is concerned.
+ *
+ *  bko12703 reports that boot probing fails for intel SSD with
+ *  hardreset.  Link fails to come online.  Softreset works fine.
+ *
+ * The failures are varied but the following patterns seem true for
+ * all flavors.
+ *
+ * - Softreset during boot always works.
+ *
+ * - Hardreset during boot sometimes fails to bring up the link on
+ *   certain combinations and device signature acquisition is
+ *   unreliable.
+ *
+ * - Hardreset is often necessary after hotplug.
+ *
+ * So, preferring softreset for boot probing and error handling (as
+ * hardreset might bring down the link) but using hardreset for
+ * post-boot probing should work around the above issues in most
+ * cases.  Define nv_hardreset() which only kicks in for post-boot
+ * probing and use it for all variants.
+ */
+static struct ata_port_operations nv_generic_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.lost_interrupt		= ATA_OP_NULL,
+	.scr_read		= nv_scr_read,
+	.scr_write		= nv_scr_write,
+	.hardreset		= nv_hardreset,
+};
+
+static struct ata_port_operations nv_nf2_ops = {
+	.inherits		= &nv_generic_ops,
+	.freeze			= nv_nf2_freeze,
+	.thaw			= nv_nf2_thaw,
+};
+
+static struct ata_port_operations nv_ck804_ops = {
+	.inherits		= &nv_generic_ops,
+	.freeze			= nv_ck804_freeze,
+	.thaw			= nv_ck804_thaw,
+	.host_stop		= nv_ck804_host_stop,
+};
+
+static struct ata_port_operations nv_adma_ops = {
+	.inherits		= &nv_ck804_ops,
+
+	.check_atapi_dma	= nv_adma_check_atapi_dma,
+	.sff_tf_read		= nv_adma_tf_read,
+	.qc_defer		= ata_std_qc_defer,
+	.qc_prep		= nv_adma_qc_prep,
+	.qc_issue		= nv_adma_qc_issue,
+	.sff_irq_clear		= nv_adma_irq_clear,
+
+	.freeze			= nv_adma_freeze,
+	.thaw			= nv_adma_thaw,
+	.error_handler		= nv_adma_error_handler,
+	.post_internal_cmd	= nv_adma_post_internal_cmd,
+
+	.port_start		= nv_adma_port_start,
+	.port_stop		= nv_adma_port_stop,
+#ifdef CONFIG_PM
+	.port_suspend		= nv_adma_port_suspend,
+	.port_resume		= nv_adma_port_resume,
+#endif
+	.host_stop		= nv_adma_host_stop,
+};
+
+static struct ata_port_operations nv_swncq_ops = {
+	.inherits		= &nv_generic_ops,
+
+	.qc_defer		= ata_std_qc_defer,
+	.qc_prep		= nv_swncq_qc_prep,
+	.qc_issue		= nv_swncq_qc_issue,
+
+	.freeze			= nv_mcp55_freeze,
+	.thaw			= nv_mcp55_thaw,
+	.error_handler		= nv_swncq_error_handler,
+
+#ifdef CONFIG_PM
+	.port_suspend		= nv_swncq_port_suspend,
+	.port_resume		= nv_swncq_port_resume,
+#endif
+	.port_start		= nv_swncq_port_start,
+};
+
+struct nv_pi_priv {
+	irq_handler_t			irq_handler;
+	struct scsi_host_template	*sht;
+};
+
+#define NV_PI_PRIV(_irq_handler, _sht) \
+	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
+
+static const struct ata_port_info nv_port_info[] = {
+	/* generic */
+	{
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_generic_ops,
+		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
+	},
+	/* nforce2/3 */
+	{
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_nf2_ops,
+		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
+	},
+	/* ck804 */
+	{
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_ck804_ops,
+		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
+	},
+	/* ADMA */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_adma_ops,
+		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
+	},
+	/* MCP5x */
+	{
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_generic_ops,
+		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
+	},
+	/* SWNCQ */
+	{
+		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_swncq_ops,
+		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
+	},
+};
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static bool adma_enabled;
+static bool swncq_enabled = true;
+static bool msi_enabled;
+
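+/*
+ * Switch the channel from ADMA back to legacy register mode: wait up
+ * to ~1us (20 polls of 50ns) for the engine to go idle, clear the GO
+ * bit, then wait for the LEGACY status bit to assert.
+ */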
+static void nv_adma_register_mode(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp, status;
+	int count = 0;
+
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		return;
+
+	status = readw(mmio + NV_ADMA_STAT);
+	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
+	}
+	if (count == 20)
+		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
+			      status);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	count = 0;
+	status = readw(mmio + NV_ADMA_STAT);
+	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
+	}
+	if (count == 20)
+		ata_port_warn(ap,
+			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
+			      status);
+
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static void nv_adma_mode(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp, status;
+	int count = 0;
+
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+		return;
+
+	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	status = readw(mmio + NV_ADMA_STAT);
+	while (((status & NV_ADMA_STAT_LEGACY) ||
+	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
+	}
+	if (count == 20)
+		ata_port_warn(ap,
+			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
+			status);
+
+	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static int nv_adma_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	struct nv_adma_port_priv *port0, *port1;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long segment_boundary, flags;
+	unsigned short sg_tablesize;
+	int rc;
+	int adma_enable;
+	u32 current_reg, new_reg, config_mask;
+
+	rc = ata_scsi_slave_config(sdev);
+
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
+		/*
+		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
+		 * Therefore ATAPI commands are sent through the legacy interface.
+		 * However, the legacy interface only supports 32-bit DMA.
+		 * Restrict DMA parameters as required by the legacy interface
+		 * when an ATAPI device is connected.
+		 */
+		segment_boundary = ATA_DMA_BOUNDARY;
+		/* Subtract 1 since an extra entry may be needed for padding, see
+		   libata-scsi.c */
+		sg_tablesize = LIBATA_MAX_PRD - 1;
+
+		/* Since the legacy DMA engine is in use, we need to disable ADMA
+		   on the port. */
+		adma_enable = 0;
+		nv_adma_register_mode(ap);
+	} else {
+		segment_boundary = NV_ADMA_DMA_BOUNDARY;
+		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
+		adma_enable = 1;
+	}
+
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
+
+	if (ap->port_no == 1)
+		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
+			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+	else
+		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
+			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
+
+	if (adma_enable) {
+		new_reg = current_reg | config_mask;
+		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
+	} else {
+		new_reg = current_reg & ~config_mask;
+		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
+	}
+
+	if (current_reg != new_reg)
+		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
+
+	port0 = ap->host->ports[0]->private_data;
+	port1 = ap->host->ports[1]->private_data;
+	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		/*
+		 * We have to set the DMA mask to 32-bit if either port is in
+		 * ATAPI mode, since they are on the same PCI device which is
+		 * used for DMA mapping.  If either SCSI device is not allocated
+		 * yet, it's OK since that port will discover its correct
+		 * setting when it does get allocated.
+		 */
+		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	} else {
+		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
+	}
+
+	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
+	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
+	ata_port_info(ap,
+		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		      (unsigned long long)*ap->host->dev->dma_mask,
+		      segment_boundary, sg_tablesize);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return rc;
+}
+
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+}
+
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	/* Other than when internal or pass-through commands are executed,
+	   the only time this function will be called in ADMA mode will be
+	   if a command fails. In the failure case we don't care about going
+	   into register mode with ADMA commands pending, as the commands will
+	   all shortly be aborted anyway. We assume that NCQ commands are not
+	   issued via passthrough, which is the only way that switching into
+	   ADMA mode could abort outstanding commands. */
+	nv_adma_register_mode(ap);
+
+	ata_sff_tf_read(ap, tf);
+}
+
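+/*
+ * Encode a taskfile into the CPB's taskfile area: each 16-bit entry holds
+ * an ATA register index in the high byte and the value to write in the
+ * low byte, with the WNB/CMDEND control bits OR'd in.  Unused slots are
+ * padded with IGN entries up to the fixed 12-entry area.
+ */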
+static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
+{
+	unsigned int idx = 0;
+
+	if (tf->flags & ATA_TFLAG_ISADDR) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
+			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
+		} else
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
+
+		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
+
+	while (idx < 12)
+		cpb[idx++] = cpu_to_le16(IGN);
+
+	return idx;
+}
+
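+/*
+ * Check a single CPB for completion.  Returns 1 if the command completed
+ * successfully, 0 if it is still in flight, and -1 if an error was found,
+ * in which case the port has already been aborted or frozen for EH.
+ */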
+static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u8 flags = pp->cpb[cpb_num].resp_flags;
+
+	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+
+	if (unlikely((force_err ||
+		     flags & (NV_CPB_RESP_ATA_ERR |
+			      NV_CPB_RESP_CMD_ERR |
+			      NV_CPB_RESP_CPB_ERR)))) {
+		struct ata_eh_info *ehi = &ap->link.eh_info;
+		int freeze = 0;
+
+		ata_ehi_clear_desc(ehi);
+		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
+		if (flags & NV_CPB_RESP_ATA_ERR) {
+			ata_ehi_push_desc(ehi, "ATA error");
+			ehi->err_mask |= AC_ERR_DEV;
+		} else if (flags & NV_CPB_RESP_CMD_ERR) {
+			ata_ehi_push_desc(ehi, "CMD error");
+			ehi->err_mask |= AC_ERR_DEV;
+		} else if (flags & NV_CPB_RESP_CPB_ERR) {
+			ata_ehi_push_desc(ehi, "CPB error");
+			ehi->err_mask |= AC_ERR_SYSTEM;
+			freeze = 1;
+		} else {
+			/* notifier error, but no error in CPB flags? */
+			ata_ehi_push_desc(ehi, "unknown");
+			ehi->err_mask |= AC_ERR_OTHER;
+			freeze = 1;
+		}
+		/* Kill all commands. EH will determine what actually failed. */
+		if (freeze)
+			ata_port_freeze(ap);
+		else
+			ata_port_abort(ap);
+		return -1;
+	}
+
+	if (likely(flags & NV_CPB_RESP_DONE))
+		return 1;
+	return 0;
+}
+
+static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
+{
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+	/* freeze if hotplugged */
+	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
+		ata_port_freeze(ap);
+		return 1;
+	}
+
+	/* bail out if not our interrupt */
+	if (!(irq_stat & NV_INT_DEV))
+		return 0;
+
+	/* DEV interrupt w/ no active qc? */
+	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+		ata_sff_check_status(ap);
+		return 1;
+	}
+
+	/* handle interrupt */
+	return ata_bmdma_port_intr(ap, qc);
+}
+
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	int i, handled = 0;
+	u32 notifier_clears[2];
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct nv_adma_port_priv *pp = ap->private_data;
+		void __iomem *mmio = pp->ctl_block;
+		u16 status;
+		u32 gen_ctl;
+		u32 notifier, notifier_error;
+
+		notifier_clears[i] = 0;
+
+		/* if ADMA is disabled, use standard ata interrupt handler */
+		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+				>> (NV_INT_PORT_SHIFT * i);
+			handled += nv_host_intr(ap, irq_stat);
+			continue;
+		}
+
+		/* if in ATA register mode, check for standard interrupts */
+		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+				>> (NV_INT_PORT_SHIFT * i);
+			if (ata_tag_valid(ap->link.active_tag))
+				/* NV_INT_DEV indication seems unreliable
+				   at times, at least in ADMA mode.  Force
+				   it on whenever a command is active to
+				   avoid losing interrupts. */
+				irq_stat |= NV_INT_DEV;
+			handled += nv_host_intr(ap, irq_stat);
+		}
+
+		notifier = readl(mmio + NV_ADMA_NOTIFIER);
+		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+		notifier_clears[i] = notifier | notifier_error;
+
+		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+		    !notifier_error)
+			/* Nothing to do */
+			continue;
+
+		status = readw(mmio + NV_ADMA_STAT);
+
+		/*
+		 * Clear status. Ensure the controller sees the
+		 * clearing before we start looking at any of the CPB
+		 * statuses, so that any CPB completions after this
+		 * point in the handler will raise another interrupt.
+		 */
+		writew(status, mmio + NV_ADMA_STAT);
+		readw(mmio + NV_ADMA_STAT); /* flush posted write */
+		rmb();
+
+		handled++; /* irq handled if we got here */
+
+		/* freeze if hotplugged or controller error */
+		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+				       NV_ADMA_STAT_HOTUNPLUG |
+				       NV_ADMA_STAT_TIMEOUT |
+				       NV_ADMA_STAT_SERROR))) {
+			struct ata_eh_info *ehi = &ap->link.eh_info;
+
+			ata_ehi_clear_desc(ehi);
+			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
+			if (status & NV_ADMA_STAT_TIMEOUT) {
+				ehi->err_mask |= AC_ERR_SYSTEM;
+				ata_ehi_push_desc(ehi, "timeout");
+			} else if (status & NV_ADMA_STAT_HOTPLUG) {
+				ata_ehi_hotplugged(ehi);
+				ata_ehi_push_desc(ehi, "hotplug");
+			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+				ata_ehi_hotplugged(ehi);
+				ata_ehi_push_desc(ehi, "hot unplug");
+			} else if (status & NV_ADMA_STAT_SERROR) {
+				/* let EH analyze SError and figure out cause */
+				ata_ehi_push_desc(ehi, "SError");
+			} else
+				ata_ehi_push_desc(ehi, "unknown");
+			ata_port_freeze(ap);
+			continue;
+		}
+
+		if (status & (NV_ADMA_STAT_DONE |
+			      NV_ADMA_STAT_CPBERR |
+			      NV_ADMA_STAT_CMD_COMPLETE)) {
+			u32 check_commands = notifier_clears[i];
+			u32 done_mask = 0;
+			int pos, rc;
+
+			if (status & NV_ADMA_STAT_CPBERR) {
+				/* check all active commands */
+				if (ata_tag_valid(ap->link.active_tag))
+					check_commands = 1 <<
+						ap->link.active_tag;
+				else
+					check_commands = ap->link.sactive;
+			}
+
+			/* check CPBs for completed commands */
+			while ((pos = ffs(check_commands))) {
+				pos--;
+				rc = nv_adma_check_cpb(ap, pos,
+						notifier_error & (1 << pos));
+				if (rc > 0)
+					done_mask |= 1 << pos;
+				else if (unlikely(rc < 0))
+					check_commands = 0;
+				check_commands &= ~(1 << pos);
+			}
+			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		}
+	}
+
+	if (notifier_clears[0] || notifier_clears[1]) {
+		/* Note: Both notifier clear registers must be written
+		   if either is set, even if one is zero, according to NVIDIA. */
+		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
+		writel(notifier_clears[0], pp->notifier_clear_block);
+		pp = host->ports[1]->private_data;
+		writel(notifier_clears[1], pp->notifier_clear_block);
+	}
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void nv_adma_freeze(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	nv_ck804_freeze(ap);
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+		return;
+
+	/* clear any outstanding CK804 notifications */
+	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+	/* Disable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+		mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+}
+
+static void nv_adma_thaw(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	nv_ck804_thaw(ap);
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+		return;
+
+	/* Enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+		mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+}
+
+static void nv_adma_irq_clear(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u32 notifier_clears[2];
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+		ata_bmdma_irq_clear(ap);
+		return;
+	}
+
+	/* clear any outstanding CK804 notifications */
+	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+	/* clear ADMA status */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* clear notifiers - note both ports need to be written with
+	   something even though we are only clearing on one */
+	if (ap->port_no == 0) {
+		notifier_clears[0] = 0xFFFFFFFF;
+		notifier_clears[1] = 0;
+	} else {
+		notifier_clears[0] = 0;
+		notifier_clears[1] = 0xFFFFFFFF;
+	}
+	pp = ap->host->ports[0]->private_data;
+	writel(notifier_clears[0], pp->notifier_clear_block);
+	pp = ap->host->ports[1]->private_data;
+	writel(notifier_clears[1], pp->notifier_clear_block);
+}
+
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		ata_bmdma_post_internal_cmd(qc);
+}
+
+static int nv_adma_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp;
+	int rc;
+	void *mem;
+	dma_addr_t mem_dma;
+	void __iomem *mmio;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 tmp;
+
+	VPRINTK("ENTER\n");
+
+	/*
+	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+	 * pad buffers.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
+	/* we might fallback to bmdma, allocate bmdma resources */
+	rc = ata_bmdma_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
+	       ap->port_no * NV_ADMA_PORT_SIZE;
+	pp->ctl_block = mmio;
+	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
+	pp->notifier_clear_block = pp->gen_block +
+	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
+
+	/*
+	 * Now that the legacy PRD and padding buffer are allocated we can
+	 * try to raise the DMA mask to allocate the CPB/APRD table.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
+			return rc;
+	}
+	pp->adma_dma_mask = *dev->dma_mask;
+
+	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+				  &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
+
+	/*
+	 * First item in chunk of DMA memory:
+	 * 128-byte command parameter block (CPB)
+	 * one for each command tag
+	 */
+	pp->cpb     = mem;
+	pp->cpb_dma = mem_dma;
+
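+	/* Program the CPB base address.  The high half is extracted with
+	 * two 16-bit shifts so the expression stays well-defined even
+	 * when dma_addr_t is only 32 bits wide. */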
+	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+
+	/*
+	 * Second item: a block of NV_ADMA_SGTBL_LEN s/g entries per command tag
+	 */
+	pp->aprd = mem;
+	pp->aprd_dma = mem_dma;
+
+	ap->private_data = pp;
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode, enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+
+	return 0;
+}
+
+static void nv_adma_port_stop(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+
+	VPRINTK("ENTER\n");
+	writew(0, mmio + NV_ADMA_CTL);
+}
+
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+
+	/* Go to register mode - clears GO */
+	nv_adma_register_mode(ap);
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* disable interrupt, shut down port */
+	writew(0, mmio + NV_ADMA_CTL);
+
+	return 0;
+}
+
+static int nv_adma_port_resume(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	/* set CPB block location */
+	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode, enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+
+	return 0;
+}
+#endif
+
+static void nv_adma_setup_port(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	struct ata_ioports *ioport = &ap->ioaddr;
+
+	VPRINTK("ENTER\n");
+
+	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
+
+	ioport->cmd_addr	= mmio;
+	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
+	ioport->error_addr	=
+	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
+	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
+	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
+	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
+	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
+	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
+	ioport->status_addr	=
+	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
+	ioport->altstatus_addr	=
+	ioport->ctl_addr	= mmio + 0x20;
+}
+
+static int nv_adma_host_init(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	unsigned int i;
+	u32 tmp32;
+
+	VPRINTK("ENTER\n");
+
+	/* enable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
+		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	for (i = 0; i < host->n_ports; i++)
+		nv_adma_setup_port(host->ports[i]);
+
+	return 0;
+}
+
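+/*
+ * Fill in one ADMA PRD.  The first five APRDs are stored inline in the
+ * CPB, so entry 4 never sets NV_APRD_CONT: any further entries continue
+ * in the external per-tag table referenced by next_aprd.
+ */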
+static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+			      struct scatterlist *sg,
+			      int idx,
+			      struct nv_adma_prd *aprd)
+{
+	u8 flags = 0;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= NV_APRD_WRITE;
+	if (idx == qc->n_elem - 1)
+		flags |= NV_APRD_END;
+	else if (idx != 4)
+		flags |= NV_APRD_CONT;
+
+	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
+	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
+	aprd->flags = flags;
+	aprd->packet_len = 0;
+}
+
+static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	struct nv_adma_prd *aprd;
+	struct scatterlist *sg;
+	unsigned int si;
+
+	VPRINTK("ENTER\n");
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		aprd = (si < 5) ? &cpb->aprd[si] :
+			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
+		nv_adma_fill_aprd(qc, sg, si, aprd);
+	}
+	if (si > 5)
+		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
+	else
+		cpb->next_aprd = cpu_to_le64(0);
+}
+
+static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	/* ADMA engine can only be used for non-ATAPI DMA commands,
+	   or interrupt-driven no-data commands. */
+	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	   (qc->tf.flags & ATA_TFLAG_POLLING))
+		return 1;
+
+	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
+	   (qc->tf.protocol == ATA_PROT_NODATA))
+		return 0;
+
+	return 1;
+}
+
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
+	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
+		       NV_CPB_CTL_IEN;
+
+	if (nv_adma_use_reg_mode(qc)) {
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
+		nv_adma_register_mode(qc->ap);
+		ata_bmdma_qc_prep(qc);
+		return;
+	}
+
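+	/* Invalidate the CPB before rewriting it so the controller never
+	   observes partially updated contents. */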
+	cpb->resp_flags = NV_CPB_RESP_DONE;
+	wmb();
+	cpb->ctl_flags = 0;
+	wmb();
+
+	cpb->len		= 3;
+	cpb->tag		= qc->hw_tag;
+	cpb->next_cpb_idx	= 0;
+
+	/* turn on NCQ flags for NCQ commands */
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
+
+	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
+
+	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
+
+	if (qc->flags & ATA_QCFLAG_DMAMAP) {
+		nv_adma_fill_sg(qc, cpb);
+		ctl_flags |= NV_CPB_CTL_APRD_VALID;
+	} else
+		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
+
+	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
+	   until we are finished filling in all of the contents */
+	wmb();
+	cpb->ctl_flags = ctl_flags;
+	wmb();
+	cpb->resp_flags = 0;
+}
+
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
+
+	VPRINTK("ENTER\n");
+
+	/* We can't handle result taskfile with NCQ commands, since
+	   retrieving the taskfile switches us out of ADMA mode and would abort
+	   existing commands. */
+	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
+		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
+		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
+		return AC_ERR_SYSTEM;
+	}
+
+	if (nv_adma_use_reg_mode(qc)) {
+		/* use ATA register mode */
+		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
+		nv_adma_register_mode(qc->ap);
+		return ata_bmdma_qc_issue(qc);
+	} else
+		nv_adma_mode(qc->ap);
+
+	/* write append register, command tag in lower 8 bits
+	   and (number of cpbs to append -1) in top 8 bits */
+	wmb();
+
+	if (curr_ncq != pp->last_issue_ncq) {
+		/* Seems to need some delay before switching between NCQ and
+		   non-NCQ commands, else we get command timeouts and such. */
+		udelay(20);
+		pp->last_issue_ncq = curr_ncq;
+	}
+
+	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
+
+	DPRINTK("Issued tag %u\n", qc->hw_tag);
+
+	return 0;
+}
+
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_queued_cmd *qc;
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+			handled += ata_bmdma_port_intr(ap, qc);
+		} else {
+			/*
+			 * No request pending?  Clear interrupt status
+			 * anyway, in case there's one pending.
+			 */
+			ap->ops->sff_check_status(ap);
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
+{
+	int i, handled = 0;
+
+	for (i = 0; i < host->n_ports; i++) {
+		handled += nv_host_intr(host->ports[i], irq_stat);
+		irq_stat >>= NV_INT_PORT_SHIFT;
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	u8 irq_stat;
+	irqreturn_t ret;
+
+	spin_lock(&host->lock);
+	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
+	ret = nv_do_interrupt(host, irq_stat);
+	spin_unlock(&host->lock);
+
+	return ret;
+}
+
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	u8 irq_stat;
+	irqreturn_t ret;
+
+	spin_lock(&host->lock);
+	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+	ret = nv_do_interrupt(host, irq_stat);
+	spin_unlock(&host->lock);
+
+	return ret;
+}
+
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+
+	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+
+	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+
+	/* Do hardreset iff it's post-boot probing; please read the
+	 * comment above port ops for details.
+	 */
+	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
+	    !ata_dev_enabled(link->device))
+		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+				    NULL, NULL);
+	else {
+		const unsigned long *timing = sata_ehc_deb_timing(ehc);
+		int rc;
+
+		if (!(ehc->i.flags & ATA_EHI_QUIET))
+			ata_link_info(link,
+				      "nv: skipping hardreset on occupied port\n");
+
+		/* make sure the link is online */
+		rc = sata_link_resume(link, timing, deadline);
+		/* whine about phy resume failure but proceed */
+		if (rc && rc != -EOPNOTSUPP)
+			ata_link_warn(link, "failed to resume link (errno=%d)\n",
+				      rc);
+	}
+
+	/* device signature acquisition is unreliable */
+	return -EAGAIN;
+}
+
+static void nv_nf2_freeze(struct ata_port *ap)
+{
+	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
+	int shift = ap->port_no * NV_INT_PORT_SHIFT;
+	u8 mask;
+
+	mask = ioread8(scr_addr + NV_INT_ENABLE);
+	mask &= ~(NV_INT_ALL << shift);
+	iowrite8(mask, scr_addr + NV_INT_ENABLE);
+}
+
+static void nv_nf2_thaw(struct ata_port *ap)
+{
+	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
+	int shift = ap->port_no * NV_INT_PORT_SHIFT;
+	u8 mask;
+
+	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
+
+	mask = ioread8(scr_addr + NV_INT_ENABLE);
+	mask |= (NV_INT_MASK << shift);
+	iowrite8(mask, scr_addr + NV_INT_ENABLE);
+}
+
+static void nv_ck804_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT;
+	u8 mask;
+
+	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
+	mask &= ~(NV_INT_ALL << shift);
+	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
+}
+
+static void nv_ck804_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT;
+	u8 mask;
+
+	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
+
+	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
+	mask |= (NV_INT_MASK << shift);
+	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
+}
+
+static void nv_mcp55_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+	u32 mask;
+
+	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+	mask &= ~(NV_INT_ALL_MCP55 << shift);
+	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+}
+
+static void nv_mcp55_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+	u32 mask;
+
+	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+	mask |= (NV_INT_MASK_MCP55 << shift);
+	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+}
+
+static void nv_adma_error_handler(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		void __iomem *mmio = pp->ctl_block;
+		int i;
+		u16 tmp;
+
+		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
+			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+			u32 status = readw(mmio + NV_ADMA_STAT);
+			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
+			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
+
+			ata_port_err(ap,
+				"EH in ADMA mode, notifier 0x%X "
+				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
+				"next cpb count 0x%X next cpb idx 0x%x\n",
+				notifier, notifier_error, gen_ctl, status,
+				cpb_count, next_cpb_idx);
+
+			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
+				struct nv_adma_cpb *cpb = &pp->cpb[i];
+				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+				    ap->link.sactive & (1 << i))
+					ata_port_err(ap,
+						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+						i, cpb->ctl_flags, cpb->resp_flags);
+			}
+		}
+
+		/* Push us back into port register mode for error handling. */
+		nv_adma_register_mode(ap);
+
+		/* Mark all of the CPBs as invalid to prevent them from
+		   being executed */
+		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
+			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
+
+		/* clear CPB fetch count */
+		writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+		/* Reset channel */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+		udelay(1);
+		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
+	}
+
+	ata_bmdma_error_handler(ap);
+}
+
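+/*
+ * The SWNCQ defer queue is a small ring of tags: head and tail are
+ * free-running indices masked with ATA_MAX_QUEUE - 1, and defer_bits
+ * mirrors queue membership for quick bitmask tests.
+ */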
+static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+
+	/* queue is full */
+	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
+	dq->defer_bits |= (1 << qc->hw_tag);
+	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
+}
+
+static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+	unsigned int tag;
+
+	if (dq->head == dq->tail)	/* null queue */
+		return NULL;
+
+	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
+	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
+	WARN_ON(!(dq->defer_bits & (1 << tag)));
+	dq->defer_bits &= ~(1 << tag);
+
+	return ata_qc_from_tag(ap, tag);
+}
+
+static void nv_swncq_fis_reinit(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	pp->dhfis_bits = 0;
+	pp->dmafis_bits = 0;
+	pp->sdbfis_bits = 0;
+	pp->ncq_flags = 0;
+}
+
+static void nv_swncq_pp_reinit(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+
+	dq->head = 0;
+	dq->tail = 0;
+	dq->defer_bits = 0;
+	pp->qc_active = 0;
+	pp->last_issue_tag = ATA_TAG_POISON;
+	nv_swncq_fis_reinit(ap);
+}
+
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	writew(fis, pp->irq_block);
+}
+
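+/* Stop the BMDMA engine via a dummy qc; ata_bmdma_stop() only uses the
+ * port pointer from the qc it is given. */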
+static void __ata_bmdma_stop(struct ata_port *ap)
+{
+	struct ata_queued_cmd qc;
+
+	qc.ap = ap;
+	ata_bmdma_stop(&qc);
+}
+
+static void nv_swncq_ncq_stop(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	unsigned int i;
+	u32 sactive;
+	u32 done_mask;
+
+	ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
+		     ap->qc_active, ap->link.sactive);
+	ata_port_err(ap,
+		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
+		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
+		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
+		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
+
+	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
+		     ap->ops->sff_check_status(ap),
+		     ioread8(ap->ioaddr.error_addr));
+
+	sactive = readl(pp->sactive_block);
+	done_mask = pp->qc_active ^ sactive;
+
+	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
+	for (i = 0; i < ATA_MAX_QUEUE; i++) {
+		u8 err = 0;
+		if (pp->qc_active & (1 << i))
+			err = 0;
+		else if (done_mask & (1 << i))
+			err = 1;
+		else
+			continue;
+
+		ata_port_err(ap,
+			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
+			     (pp->dhfis_bits >> i) & 0x1,
+			     (pp->dmafis_bits >> i) & 0x1,
+			     (pp->sdbfis_bits >> i) & 0x1,
+			     (sactive >> i) & 0x1,
+			     (err ? "error! tag doesn't exit" : " "));
+	}
+
+	nv_swncq_pp_reinit(ap);
+	ap->ops->sff_irq_clear(ap);
+	__ata_bmdma_stop(ap);
+	nv_swncq_irq_clear(ap, 0xffff);
+}
+
+static void nv_swncq_error_handler(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->link.eh_context;
+
+	if (ap->link.sactive) {
+		nv_swncq_ncq_stop(ap);
+		ehc->i.action |= ATA_EH_RESET;
+	}
+
+	ata_bmdma_error_handler(ap);
+}
+
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	u32 tmp;
+
+	/* clear irq */
+	writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+	/* disable irq */
+	writel(0, mmio + NV_INT_ENABLE_MCP55);
+
+	/* disable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
+	writel(tmp, mmio + NV_CTL_MCP55);
+
+	return 0;
+}
+
+static int nv_swncq_port_resume(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	u32 tmp;
+
+	/* clear irq */
+	writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+	/* enable irq */
+	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+	/* enable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+	return 0;
+}
+#endif
+
+static void nv_swncq_host_init(struct ata_host *host)
+{
+	u32 tmp;
+	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u8 regval;
+
+	/* disable ECO 398 */
+	pci_read_config_byte(pdev, 0x7f, &regval);
+	regval &= ~(1 << 7);
+	pci_write_config_byte(pdev, 0x7f, regval);
+
+	/* enable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	VPRINTK("HOST_CTL:0x%X\n", tmp);
+	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+	/* enable irq intr */
+	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
+	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+	/*  clear port irq */
+	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
+}
+
+static int nv_swncq_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *dev;
+	int rc;
+	u8 rev;
+	u8 check_maxtor = 0;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	rc = ata_scsi_slave_config(sdev);
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	dev = &ap->link.device[sdev->id];
+	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
+		return rc;
+
+	/* if MCP51 and Maxtor, then disable ncq */
+	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
+		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
+		check_maxtor = 1;
+
+	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
+	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
+		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
+		pci_read_config_byte(pdev, 0x8, &rev);
+		if (rev <= 0xa2)
+			check_maxtor = 1;
+	}
+
+	if (!check_maxtor)
+		return rc;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	if (strncmp(model_num, "Maxtor", 6) == 0) {
+		ata_scsi_change_queue_depth(sdev, 1);
+		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
+			       sdev->queue_depth);
+	}
+
+	return rc;
+}
+
+static int nv_swncq_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	struct nv_swncq_port_priv *pp;
+	int rc;
+
+	/* we might fallback to bmdma, allocate bmdma resources */
+	rc = ata_bmdma_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
+				      &pp->prd_dma, GFP_KERNEL);
+	if (!pp->prd)
+		return -ENOMEM;
+	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
+
+	ap->private_data = pp;
+	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
+	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
+	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
+
+	return 0;
+}
+
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (qc->tf.protocol != ATA_PROT_NCQ) {
+		ata_bmdma_qc_prep(qc);
+		return;
+	}
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	nv_swncq_fill_sg(qc);
+}
+
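+/*
+ * Build the per-tag BMDMA PRD table for an NCQ command.  Scatterlist
+ * entries are split so that no PRD crosses a 64KB boundary, and the
+ * final PRD is marked with ATA_PRD_EOT.
+ */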
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_bmdma_prd *prd;
+	unsigned int si, idx;
+
+	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
+
+	idx = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		addr = (u32)sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			prd[idx].addr = cpu_to_le32(addr);
+			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
+					  struct ata_queued_cmd *qc)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	if (qc == NULL)
+		return 0;
+
+	DPRINTK("Enter\n");
+
+	writel((1 << qc->hw_tag), pp->sactive_block);
+	pp->last_issue_tag = qc->hw_tag;
+	pp->dhfis_bits &= ~(1 << qc->hw_tag);
+	pp->dmafis_bits &= ~(1 << qc->hw_tag);
+	pp->qc_active |= (0x1 << qc->hw_tag);
+
+	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+
+	DPRINTK("Issued tag %u\n", qc->hw_tag);
+
+	return 0;
+}
+
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	if (qc->tf.protocol != ATA_PROT_NCQ)
+		return ata_bmdma_qc_issue(qc);
+
+	DPRINTK("Enter\n");
+
+	if (!pp->qc_active)
+		nv_swncq_issue_atacmd(ap, qc);
+	else
+		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
+
+	return 0;
+}
+
+static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
+{
+	u32 serror;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+
+	ata_ehi_clear_desc(ehi);
+
+	/* SError needs to be cleared; otherwise the port might lock up */
+	sata_scr_read(&ap->link, SCR_ERROR, &serror);
+	sata_scr_write(&ap->link, SCR_ERROR, serror);
+
+	/* analyze @fis */
+	if (fis & NV_SWNCQ_IRQ_ADDED)
+		ata_ehi_push_desc(ehi, "hot plug");
+	else if (fis & NV_SWNCQ_IRQ_REMOVED)
+		ata_ehi_push_desc(ehi, "hot unplug");
+
+	ata_ehi_hotplugged(ehi);
+
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+
+	ata_port_freeze(ap);
+}
+
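+/*
+ * Handle a Set Device Bits FIS: any tag set in pp->qc_active but no
+ * longer set in SActive has completed.  Returns 0 on success and
+ * -EINVAL on a BMDMA error, in which case EH takes over.
+ */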
+static int nv_swncq_sdbfis(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u32 sactive;
+	u32 done_mask;
+	u8 host_stat;
+	u8 lack_dhfis = 0;
+
+	host_stat = ap->ops->bmdma_status(ap);
+	if (unlikely(host_stat & ATA_DMA_ERR)) {
+		/* error when transferring data to/from memory */
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+		ehi->err_mask |= AC_ERR_HOST_BUS;
+		ehi->action |= ATA_EH_RESET;
+		return -EINVAL;
+	}
+
+	ap->ops->sff_irq_clear(ap);
+	__ata_bmdma_stop(ap);
+
+	sactive = readl(pp->sactive_block);
+	done_mask = pp->qc_active ^ sactive;
+
+	pp->qc_active &= ~done_mask;
+	pp->dhfis_bits &= ~done_mask;
+	pp->dmafis_bits &= ~done_mask;
+	pp->sdbfis_bits |= done_mask;
+	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+
+	if (!ap->qc_active) {
+		DPRINTK("over\n");
+		nv_swncq_pp_reinit(ap);
+		return 0;
+	}
+
+	if (pp->qc_active & pp->dhfis_bits)
+		return 0;
+
+	if ((pp->ncq_flags & ncq_saw_backout) ||
+	    (pp->qc_active ^ pp->dhfis_bits))
+		/* If the controller did not receive a Device-to-Host
+		 * Register FIS for a command, the driver needs to reissue
+		 * that command.
+		 */
+		lack_dhfis = 1;
+
+	DPRINTK("id 0x%x QC: qc_active 0x%x,"
+		"SWNCQ:qc_active 0x%X defer_bits %X "
+		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+		ap->print_id, ap->qc_active, pp->qc_active,
+		pp->defer_queue.defer_bits, pp->dhfis_bits,
+		pp->dmafis_bits, pp->last_issue_tag);
+
+	nv_swncq_fis_reinit(ap);
+
+	if (lack_dhfis) {
+		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
+		nv_swncq_issue_atacmd(ap, qc);
+		return 0;
+	}
+
+	if (pp->defer_queue.defer_bits) {
+		/* send deferral queue command */
+		qc = nv_swncq_qc_from_dq(ap);
+		WARN_ON(qc == NULL);
+		nv_swncq_issue_atacmd(ap, qc);
+	}
+
+	return 0;
+}
+
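+/* The hardware reports the active tag in bits 2..6 of the tag register. */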
+static inline u32 nv_swncq_tag(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	u32 tag;
+
+	tag = readb(pp->tag_block) >> 2;
+	return (tag & 0x1f);
+}
+
+static void nv_swncq_dmafis(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned int rw;
+	u8 dmactl;
+	u32 tag;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	__ata_bmdma_stop(ap);
+	tag = nv_swncq_tag(ap);
+
+	DPRINTK("dma setup tag 0x%x\n", tag);
+	qc = ata_qc_from_tag(ap, tag);
+
+	if (unlikely(!qc))
+		return;
+
+	rw = qc->tf.flags & ATA_TFLAG_WRITE;
+
+	/* load PRD table addr. */
+	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
+		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
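+	/* Note that ATA_DMA_WR directs the controller to write to memory,
+	 * i.e. it is set for device-to-host (read) transfers. */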
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~ATA_DMA_WR;
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u32 serror;
+	u8 ata_stat;
+
+	ata_stat = ap->ops->sff_check_status(ap);
+	nv_swncq_irq_clear(ap, fis);
+	if (!fis)
+		return;
+
+	if (ap->pflags & ATA_PFLAG_FROZEN)
+		return;
+
+	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
+		nv_swncq_hotplug(ap, fis);
+		return;
+	}
+
+	if (!pp->qc_active)
+		return;
+
+	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
+		return;
+	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
+
+	if (ata_stat & ATA_ERR) {
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
+		ehi->err_mask |= AC_ERR_DEV;
+		ehi->serror |= serror;
+		ehi->action |= ATA_EH_RESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
+		/* On a backout interrupt the driver must reissue
+		 * the command some time later.
+		 */
+		pp->ncq_flags |= ncq_saw_backout;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
+		pp->ncq_flags |= ncq_saw_sdb;
+		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
+			ap->print_id, pp->qc_active, pp->dhfis_bits,
+			pp->dmafis_bits, readl(pp->sactive_block));
+		if (nv_swncq_sdbfis(ap) < 0)
+			goto irq_error;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
+		/* The interrupt indicates the new command
+		 * was transmitted correctly to the drive.
+		 */
+		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
+		pp->ncq_flags |= ncq_saw_d2h;
+		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
+			ata_ehi_push_desc(ehi, "illegal fis transaction");
+			ehi->err_mask |= AC_ERR_HSM;
+			ehi->action |= ATA_EH_RESET;
+			goto irq_error;
+		}
+
+		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
+		    !(pp->ncq_flags & ncq_saw_dmas)) {
+			ata_stat = ap->ops->sff_check_status(ap);
+			if (ata_stat & ATA_BUSY)
+				goto irq_exit;
+
+			if (pp->defer_queue.defer_bits) {
+				DPRINTK("send next command\n");
+				qc = nv_swncq_qc_from_dq(ap);
+				nv_swncq_issue_atacmd(ap, qc);
+			}
+		}
+	}
+
+	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
+		/* program the DMA controller with the appropriate PRD
+		 * buffer and start the DMA transfer for the requested
+		 * command.
+		 */
+		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
+		pp->ncq_flags |= ncq_saw_dmas;
+		nv_swncq_dmafis(ap);
+	}
+
+irq_exit:
+	return;
+irq_error:
+	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
+	ata_port_freeze(ap);
+	return;
+}
+
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+	u32 irq_stat;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->link.sactive) {
+			nv_swncq_host_interrupt(ap, (u16)irq_stat);
+			handled = 1;
+		} else {
+			if (irq_stat)	/* clear irqs, preserving hotplug bits */
+				nv_swncq_irq_clear(ap, 0xfff0);
+
+			handled += nv_host_intr(ap, (u8)irq_stat);
+		}
+		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	struct nv_pi_priv *ipriv;
+	struct ata_host *host;
+	struct nv_host_priv *hpriv;
+	int rc;
+	u32 bar;
+	void __iomem *base;
+	unsigned long type = ent->driver_data;
+
+	/* Make sure this is a SATA controller by counting the number of
+	 * BARs (NVIDIA SATA controllers will always have six).  Otherwise
+	 * it's an IDE controller and we ignore it.
+	 */
+	for (bar = 0; bar < 6; bar++)
+		if (pci_resource_start(pdev, bar) == 0)
+			return -ENODEV;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* determine type and allocate host */
+	if (type == CK804 && adma_enabled) {
+		dev_notice(&pdev->dev, "Using ADMA mode\n");
+		type = ADMA;
+	} else if (type == MCP5x && swncq_enabled) {
+		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
+		type = SWNCQ;
+	}
+
+	ppi[0] = &nv_port_info[type];
+	ipriv = ppi[0]->private_data;
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->type = type;
+	host->private_data = hpriv;
+
+	/* request and iomap NV_MMIO_BAR */
+	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+
+	/* configure SCR access */
+	base = host->iomap[NV_MMIO_BAR];
+	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
+	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
+
+	/* enable SATA space for CK804 */
+	if (type >= CK804) {
+		u8 regval;
+
+		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+	}
+
+	/* init ADMA */
+	if (type == ADMA) {
+		rc = nv_adma_host_init(host);
+		if (rc)
+			return rc;
+	} else if (type == SWNCQ)
+		nv_swncq_host_init(host);
+
+	if (msi_enabled) {
+		dev_notice(&pdev->dev, "Using MSI\n");
+		pci_enable_msi(pdev);
+	}
+
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int nv_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct nv_host_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		if (hpriv->type >= CK804) {
+			u8 regval;
+
+			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+		}
+		if (hpriv->type == ADMA) {
+			u32 tmp32;
+			struct nv_adma_port_priv *pp;
+			/* enable/disable ADMA on the ports appropriately */
+			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+
+			pp = host->ports[0]->private_data;
+			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+			else
+				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
+					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+			pp = host->ports[1]->private_data;
+			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
+					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+			else
+				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
+					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+		}
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static void nv_ck804_host_stop(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u8 regval;
+
+	/* disable SATA space for CK804 */
+	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+}
+
+static void nv_adma_host_stop(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u32 tmp32;
+
+	/* disable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	nv_ck804_host_stop(host);
+}
+
+module_pci_driver(nv_pci_driver);
+
+module_param_named(adma, adma_enabled, bool, 0444);
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
+module_param_named(swncq, swncq_enabled, bool, 0444);
+MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
+module_param_named(msi, msi_enabled, bool, 0444);
+MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
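+
+/*
+ * These parameters are read-only after load (permission 0444); they are
+ * set at module load time, e.g.
+ *
+ *	modprobe sata_nv adma=1 msi=1
+ *
+ * or, with the driver built in, as sata_nv.adma=1 on the kernel command
+ * line.
+ */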
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
new file mode 100644
index 0000000..d032bf6
--- /dev/null
+++ b/drivers/ata/sata_promise.c
@@ -0,0 +1,1268 @@
+/*
+ *  sata_promise.c - Promise SATA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *		    Mikael Pettersson
+ *  		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware information only available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "sata_promise.h"
+
+#define DRV_NAME	"sata_promise"
+#define DRV_VERSION	"2.12"
+
+enum {
+	PDC_MAX_PORTS		= 4,
+	PDC_MMIO_BAR		= 3,
+	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
+
+	/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
+	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
+	PDC_FLASH_CTL		= 0x44, /* Flash control register */
+	PDC_PCI_CTL		= 0x48, /* PCI control/status reg */
+	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
+	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
+	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
+	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */
+
+	/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
+	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
+	PDC_SECTOR_COUNT	= 0x08, /* Sector count reg (per port) */
+	PDC_SECTOR_NUMBER	= 0x0C, /* Sector number reg (per port) */
+	PDC_CYLINDER_LOW	= 0x10, /* Cylinder low reg (per port) */
+	PDC_CYLINDER_HIGH	= 0x14, /* Cylinder high reg (per port) */
+	PDC_DEVICE		= 0x18, /* Device/Head reg (per port) */
+	PDC_COMMAND		= 0x1C, /* Command/status reg (per port) */
+	PDC_ALTSTATUS		= 0x38, /* Alternate-status/device-control reg (per port) */
+	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
+	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
+	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */
+
+	/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
+	PDC_SATA_ERROR		= 0x04,
+	PDC_PHYMODE4		= 0x14,
+	PDC_LINK_LAYER_ERRORS	= 0x6C,
+	PDC_FPDMA_CTLSTAT	= 0xD8,
+	PDC_INTERNAL_DEBUG_1	= 0xF8,	/* also used for PATA */
+	PDC_INTERNAL_DEBUG_2	= 0xFC,	/* also used for PATA */
+
+	/* PDC_FPDMA_CTLSTAT bit definitions */
+	PDC_FPDMA_CTLSTAT_RESET			= 1 << 3,
+	PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG	= 1 << 10,
+	PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG	= 1 << 11,
+
+	/* PDC_GLOBAL_CTL bit definitions */
+	PDC_PH_ERR		= (1 <<  8), /* PCI error while loading packet */
+	PDC_SH_ERR		= (1 <<  9), /* PCI error while loading S/G table */
+	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
+	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
+	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
+	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
+	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
+	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
+	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
+	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
+	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
+	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
+	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
+				  PDC2_ATA_DMA_CNT_ERR,
+	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
+				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
+				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
+				  PDC1_ERR_MASK | PDC2_ERR_MASK,
+
+	board_2037x		= 0,	/* FastTrak S150 TX2plus */
+	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
+	board_20319		= 2,	/* FastTrak S150 TX4 */
+	board_20619		= 3,	/* FastTrak TX4000 */
+	board_2057x		= 4,	/* SATAII150 TX2plus */
+	board_2057x_pata	= 5,	/* SATAII150 TX2plus PATA port */
+	board_40518		= 6,	/* SATAII150 Tx4 */
+
+	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */
+
+	/* Sequence counter control registers bit definitions */
+	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */
+
+	/* Feature register values */
+	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
+	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */
+
+	/* Device/Head register values */
+	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */
+
+	/* PDC_CTLSTAT bit definitions */
+	PDC_DMA_ENABLE		= (1 << 7),
+	PDC_IRQ_DISABLE		= (1 << 10),
+	PDC_RESET		= (1 << 11), /* HDMA reset */
+
+	PDC_COMMON_FLAGS	= ATA_FLAG_PIO_POLLING,
+
+	/* ap->flags bits */
+	PDC_FLAG_GEN_II		= (1 << 24),
+	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
+	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
+};
+
+struct pdc_port_priv {
+	u8			*pkt;
+	dma_addr_t		pkt_dma;
+};
+
+struct pdc_host_priv {
+	spinlock_t hard_reset_lock;
+};
+
+static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc_common_port_start(struct ata_port *ap);
+static int pdc_sata_port_start(struct ata_port *ap);
+static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
+static void pdc_irq_clear(struct ata_port *ap);
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_sata_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static void pdc_sata_thaw(struct ata_port *ap);
+static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline);
+static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline);
+static void pdc_error_handler(struct ata_port *ap);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
+static int pdc_pata_cable_detect(struct ata_port *ap);
+static int pdc_sata_cable_detect(struct ata_port *ap);
+
+static struct scsi_host_template pdc_ata_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= PDC_MAX_PRD,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+};
+
+static const struct ata_port_operations pdc_common_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.sff_tf_load		= pdc_tf_load_mmio,
+	.sff_exec_command	= pdc_exec_command_mmio,
+	.check_atapi_dma	= pdc_check_atapi_dma,
+	.qc_prep		= pdc_qc_prep,
+	.qc_issue		= pdc_qc_issue,
+
+	.sff_irq_clear		= pdc_irq_clear,
+	.lost_interrupt		= ATA_OP_NULL,
+
+	.post_internal_cmd	= pdc_post_internal_cmd,
+	.error_handler		= pdc_error_handler,
+};
+
+static struct ata_port_operations pdc_sata_ops = {
+	.inherits		= &pdc_common_ops,
+	.cable_detect		= pdc_sata_cable_detect,
+	.freeze			= pdc_sata_freeze,
+	.thaw			= pdc_sata_thaw,
+	.scr_read		= pdc_sata_scr_read,
+	.scr_write		= pdc_sata_scr_write,
+	.port_start		= pdc_sata_port_start,
+	.hardreset		= pdc_sata_hardreset,
+};
+
+/*
+ * First-generation chips need a more restrictive ->check_atapi_dma op,
+ * and ->freeze/thaw that ignore the hotplug controls.
+ */
+static struct ata_port_operations pdc_old_sata_ops = {
+	.inherits		= &pdc_sata_ops,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
+};
+
+static struct ata_port_operations pdc_pata_ops = {
+	.inherits		= &pdc_common_ops,
+	.cable_detect		= pdc_pata_cable_detect,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.port_start		= pdc_common_port_start,
+	.softreset		= pdc_pata_softreset,
+};
+
+static const struct ata_port_info pdc_port_info[] = {
+	[board_2037x] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_SATA_PATA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_old_sata_ops,
+	},
+
+	[board_2037x_pata] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_pata_ops,
+	},
+
+	[board_20319] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_4_PORTS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_old_sata_ops,
+	},
+
+	[board_20619] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+				  PDC_FLAG_4_PORTS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_pata_ops,
+	},
+
+	[board_2057x] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_sata_ops,
+	},
+
+	[board_2057x_pata] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+				  PDC_FLAG_GEN_II,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_pata_ops,
+	},
+
+	[board_40518] =
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_sata_ops,
+	},
+};
+
+static const struct pci_device_id pdc_ata_pci_tbl[] = {
+	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
+
+	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
+	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
+	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
+	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
+
+	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pdc_ata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pdc_ata_pci_tbl,
+	.probe			= pdc_ata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static int pdc_common_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct pdc_port_priv *pp;
+	int rc;
+
+	/* we use the same prd table as bmdma, allocate it */
+	rc = ata_bmdma_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	ap->private_data = pp;
+
+	return 0;
+}
+
+static int pdc_sata_port_start(struct ata_port *ap)
+{
+	int rc;
+
+	rc = pdc_common_port_start(ap);
+	if (rc)
+		return rc;
+
+	/* fix up PHYMODE4 align timing */
+	if (ap->flags & PDC_FLAG_GEN_II) {
+		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+		unsigned int tmp;
+
+		tmp = readl(sata_mmio + PDC_PHYMODE4);
+		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
+		writel(tmp, sata_mmio + PDC_PHYMODE4);
+	}
+
+	return 0;
+}
+
+static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
+{
+	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+	u32 tmp;
+
+	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
+	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
+	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;
+
+	/*
+	 * Writing the entire FPDMA_CTLSTAT register is not allowed while
+	 * NCQ is running, so do a byte-sized write that covers only bits
+	 * 10 and 11.
+	 */
+	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
+	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
+}
+
+static void pdc_fpdma_reset(struct ata_port *ap)
+{
+	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+	u8 tmp;
+
+	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
+	tmp &= 0x7F;
+	tmp |= PDC_FPDMA_CTLSTAT_RESET;
+	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
+	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
+	udelay(100);
+	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
+	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
+	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
+
+	pdc_fpdma_clear_interrupt_flag(ap);
+}
+
+static void pdc_not_at_command_packet_phase(struct ata_port *ap)
+{
+	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+	unsigned int i;
+	u32 tmp;
+
+	/* check not at ASIC packet command phase */
+	for (i = 0; i < 100; ++i) {
+		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
+		tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
+		if ((tmp & 0xF) != 1)
+			break;
+		udelay(100);
+	}
+}
+
+static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
+{
+	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+
+	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
+	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
+}
+
+static void pdc_reset_port(struct ata_port *ap)
+{
+	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+	unsigned int i;
+	u32 tmp;
+
+	if (ap->flags & PDC_FLAG_GEN_II)
+		pdc_not_at_command_packet_phase(ap);
+
+	tmp = readl(ata_ctlstat_mmio);
+	tmp |= PDC_RESET;
+	writel(tmp, ata_ctlstat_mmio);
+
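+	/* wait up to ~1 ms for the reset bit to latch, re-asserting as needed */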
+	for (i = 11; i > 0; i--) {
+		tmp = readl(ata_ctlstat_mmio);
+		if (tmp & PDC_RESET)
+			break;
+
+		udelay(100);
+
+		tmp |= PDC_RESET;
+		writel(tmp, ata_ctlstat_mmio);
+	}
+
+	tmp &= ~PDC_RESET;
+	writel(tmp, ata_ctlstat_mmio);
+	readl(ata_ctlstat_mmio);	/* flush */
+
+	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
+		pdc_fpdma_reset(ap);
+		pdc_clear_internal_debug_record_error_register(ap);
+	}
+}
+
+static int pdc_pata_cable_detect(struct ata_port *ap)
+{
+	u8 tmp;
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+
+	tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
+	if (tmp & 0x01)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static int pdc_sata_cable_detect(struct ata_port *ap)
+{
+	return ATA_CBL_SATA;
+}
+
+static int pdc_sata_scr_read(struct ata_link *link,
+			     unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+static int pdc_sata_scr_write(struct ata_link *link,
+			      unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	dma_addr_t sg_table = ap->bmdma_prd_dma;
+	unsigned int cdb_len = qc->dev->cdb_len;
+	u8 *cdb = qc->cdb;
+	struct pdc_port_priv *pp = ap->private_data;
+	u8 *buf = pp->pkt;
+	__le32 *buf32 = (__le32 *) buf;
+	unsigned int dev_sel, feature;
+
+	/* set control bits (byte 0), zero delay seq id (byte 3),
+	 * and seq id (byte 2)
+	 */
+	switch (qc->tf.protocol) {
+	case ATAPI_PROT_DMA:
+		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+			buf32[0] = cpu_to_le32(PDC_PKT_READ);
+		else
+			buf32[0] = 0;
+		break;
+	case ATAPI_PROT_NODATA:
+		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
+	buf32[2] = 0;				/* no next-packet */
+
+	/* select drive */
+	if (sata_scr_valid(&ap->link))
+		dev_sel = PDC_DEVICE_SATA;
+	else
+		dev_sel = qc->tf.device;
+
+	buf[12] = (1 << 5) | ATA_REG_DEVICE;
+	buf[13] = dev_sel;
+	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
+	buf[15] = dev_sel; /* once more, waiting for BSY to clear */
+
+	buf[16] = (1 << 5) | ATA_REG_NSECT;
+	buf[17] = qc->tf.nsect;
+	buf[18] = (1 << 5) | ATA_REG_LBAL;
+	buf[19] = qc->tf.lbal;
+
+	/* set feature and byte counter registers */
+	if (qc->tf.protocol != ATAPI_PROT_DMA)
+		feature = PDC_FEATURE_ATAPI_PIO;
+	else
+		feature = PDC_FEATURE_ATAPI_DMA;
+
+	buf[20] = (1 << 5) | ATA_REG_FEATURE;
+	buf[21] = feature;
+	buf[22] = (1 << 5) | ATA_REG_BYTEL;
+	buf[23] = qc->tf.lbam;
+	buf[24] = (1 << 5) | ATA_REG_BYTEH;
+	buf[25] = qc->tf.lbah;
+
+	/* send ATAPI packet command 0xA0 */
+	buf[26] = (1 << 5) | ATA_REG_CMD;
+	buf[27] = qc->tf.command;
+
+	/* select drive and check DRQ */
+	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
+	buf[29] = dev_sel;
+
+	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
+	BUG_ON(cdb_len & ~0x1E);
+
+	/* append the CDB as the final part */
+	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
+	memcpy(buf+31, cdb, cdb_len);
+}
+
+/**
+ *	pdc_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *	Make sure hardware does not choke on it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void pdc_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd = ap->bmdma_prd;
+	struct scatterlist *sg;
+	const u32 SG_COUNT_ASIC_BUG = 41*4;
+	unsigned int si, idx;
+	u32 len;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	idx = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			prd[idx].addr = cpu_to_le32(addr);
+			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	len = le32_to_cpu(prd[idx - 1].flags_len);
+
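+	/*
+	 * ASIC PRD bug workaround (see PDC_MAX_PRD): if the final S/G entry
+	 * is larger than SG_COUNT_ASIC_BUG bytes, split it so that the very
+	 * last PRD is exactly SG_COUNT_ASIC_BUG bytes long.
+	 */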
+	if (len > SG_COUNT_ASIC_BUG) {
+		u32 addr;
+
+		VPRINTK("Splitting last PRD.\n");
+
+		addr = le32_to_cpu(prd[idx - 1].addr);
+		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+
+		addr = addr + len - SG_COUNT_ASIC_BUG;
+		len = SG_COUNT_ASIC_BUG;
+		prd[idx].addr = cpu_to_le32(addr);
+		prd[idx].flags_len = cpu_to_le32(len);
+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+		idx++;
+	}
+
+	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static void pdc_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct pdc_port_priv *pp = qc->ap->private_data;
+	unsigned int i;
+
+	VPRINTK("ENTER\n");
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		pdc_fill_sg(qc);
+		/*FALLTHROUGH*/
+	case ATA_PROT_NODATA:
+		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
+				   qc->dev->devno, pp->pkt);
+		if (qc->tf.flags & ATA_TFLAG_LBA48)
+			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
+		else
+			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
+		pdc_pkt_footer(&qc->tf, pp->pkt, i);
+		break;
+	case ATAPI_PROT_PIO:
+		pdc_fill_sg(qc);
+		break;
+	case ATAPI_PROT_DMA:
+		pdc_fill_sg(qc);
+		/*FALLTHROUGH*/
+	case ATAPI_PROT_NODATA:
+		pdc_atapi_pkt(qc);
+		break;
+	default:
+		break;
+	}
+}
+
+static int pdc_is_sataii_tx4(unsigned long flags)
+{
+	const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
+	return (flags & mask) == mask;
+}
+
+static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
+					  int is_sataii_tx4)
+{
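+	/* the SATAII TX4 wires host ports to ATA engines in a shuffled order */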
+	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
+	return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
+}
+
+static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
+{
+	return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
+}
+
+static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
+{
+	const struct ata_host *host = ap->host;
+	unsigned int nr_ports = pdc_sata_nr_ports(ap);
+	unsigned int i;
+
+	for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
+		;
+	BUG_ON(i >= nr_ports);
+	return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
+}
+
+static void pdc_freeze(struct ata_port *ap)
+{
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	tmp = readl(ata_mmio + PDC_CTLSTAT);
+	tmp |= PDC_IRQ_DISABLE;
+	tmp &= ~PDC_DMA_ENABLE;
+	writel(tmp, ata_mmio + PDC_CTLSTAT);
+	readl(ata_mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_sata_freeze(struct ata_port *ap)
+{
+	struct ata_host *host = ap->host;
+	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
+	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
+	u32 hotplug_status;
+
+	/* Disable hotplug events on this port.
+	 *
+	 * Locking:
+	 * 1) hotplug register accesses must be serialised via host->lock
+	 * 2) ap->lock == &ap->host->lock
+	 * 3) ->freeze() and ->thaw() are called with ap->lock held
+	 */
+	hotplug_status = readl(host_mmio + hotplug_offset);
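+	/* this port's plug/unplug mask bits sit 16 bits above its status bits */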
+	hotplug_status |= 0x11 << (ata_no + 16);
+	writel(hotplug_status, host_mmio + hotplug_offset);
+	readl(host_mmio + hotplug_offset); /* flush */
+
+	pdc_freeze(ap);
+}
+
+static void pdc_thaw(struct ata_port *ap)
+{
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	/* clear IRQ */
+	readl(ata_mmio + PDC_COMMAND);
+
+	/* turn IRQ back on */
+	tmp = readl(ata_mmio + PDC_CTLSTAT);
+	tmp &= ~PDC_IRQ_DISABLE;
+	writel(tmp, ata_mmio + PDC_CTLSTAT);
+	readl(ata_mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_sata_thaw(struct ata_port *ap)
+{
+	struct ata_host *host = ap->host;
+	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
+	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
+	u32 hotplug_status;
+
+	pdc_thaw(ap);
+
+	/* Enable hotplug events on this port.
+	 * Locking: see pdc_sata_freeze().
+	 */
+	hotplug_status = readl(host_mmio + hotplug_offset);
+	hotplug_status |= 0x11 << ata_no;
+	hotplug_status &= ~(0x11 << (ata_no + 16));
+	writel(hotplug_status, host_mmio + hotplug_offset);
+	readl(host_mmio + hotplug_offset); /* flush */
+}
+
+static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	pdc_reset_port(link->ap);
+	return ata_sff_softreset(link, class, deadline);
+}
+
+static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
+{
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+
+	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
+	return (ata_mmio - host_mmio - 0x200) / 0x80;
+}
+
+static void pdc_hard_reset_port(struct ata_port *ap)
+{
+	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
+	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
+	struct pdc_host_priv *hpriv = ap->host->private_data;
+	u8 tmp;
+
+	spin_lock(&hpriv->hard_reset_lock);
+
+	tmp = readb(pcictl_b1_mmio);
+	tmp &= ~(0x10 << ata_no);
+	writeb(tmp, pcictl_b1_mmio);
+	readb(pcictl_b1_mmio); /* flush */
+	udelay(100);
+	tmp |= (0x10 << ata_no);
+	writeb(tmp, pcictl_b1_mmio);
+	readb(pcictl_b1_mmio); /* flush */
+
+	spin_unlock(&hpriv->hard_reset_lock);
+}
+
+static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	if (link->ap->flags & PDC_FLAG_GEN_II)
+		pdc_not_at_command_packet_phase(link->ap);
+	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
+	pdc_hard_reset_port(link->ap);
+	pdc_reset_port(link->ap);
+
+	/* sata_promise can't reliably acquire the first D2H Reg FIS
+	 * after hardreset.  Do non-waiting hardreset and request
+	 * follow-up SRST.
+	 */
+	return sata_std_hardreset(link, class, deadline);
+}
+
+static void pdc_error_handler(struct ata_port *ap)
+{
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		pdc_reset_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		pdc_reset_port(ap);
+}
+
+static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
+			   u32 port_status, u32 err_mask)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	unsigned int ac_err_mask = 0;
+
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
+	port_status &= err_mask;
+
+	if (port_status & PDC_DRIVE_ERR)
+		ac_err_mask |= AC_ERR_DEV;
+	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
+		ac_err_mask |= AC_ERR_OTHER;
+	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
+		ac_err_mask |= AC_ERR_ATA_BUS;
+	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
+			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
+		ac_err_mask |= AC_ERR_HOST_BUS;
+
+	if (sata_scr_valid(&ap->link)) {
+		u32 serror;
+
+		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
+		ehi->serror |= serror;
+	}
+
+	qc->err_mask |= ac_err_mask;
+
+	pdc_reset_port(ap);
+
+	ata_port_abort(ap);
+}
+
+static unsigned int pdc_host_intr(struct ata_port *ap,
+				  struct ata_queued_cmd *qc)
+{
+	unsigned int handled = 0;
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+	u32 port_status, err_mask;
+
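+	/* drop the error bits that do not apply to this chip generation */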
+	err_mask = PDC_ERR_MASK;
+	if (ap->flags & PDC_FLAG_GEN_II)
+		err_mask &= ~PDC1_ERR_MASK;
+	else
+		err_mask &= ~PDC2_ERR_MASK;
+	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
+	if (unlikely(port_status & err_mask)) {
+		pdc_error_intr(ap, qc, port_status, err_mask);
+		return 1;
+	}
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+	case ATA_PROT_NODATA:
+	case ATAPI_PROT_DMA:
+	case ATAPI_PROT_NODATA:
+		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+		ata_qc_complete(qc);
+		handled = 1;
+		break;
+	default:
+		ap->stats.idle_irq++;
+		break;
+	}
+
+	return handled;
+}
+
+static void pdc_irq_clear(struct ata_port *ap)
+{
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+
+	readl(ata_mmio + PDC_COMMAND);
+}
+
+static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ata_port *ap;
+	u32 mask = 0;
+	unsigned int i, tmp;
+	unsigned int handled = 0;
+	void __iomem *host_mmio;
+	unsigned int hotplug_offset, ata_no;
+	u32 hotplug_status;
+	int is_sataii_tx4;
+
+	VPRINTK("ENTER\n");
+
+	if (!host || !host->iomap[PDC_MMIO_BAR]) {
+		VPRINTK("QUICK EXIT\n");
+		return IRQ_NONE;
+	}
+
+	host_mmio = host->iomap[PDC_MMIO_BAR];
+
+	spin_lock(&host->lock);
+
+	/* read and clear hotplug flags for all ports */
+	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+		hotplug_status = readl(host_mmio + hotplug_offset);
+		if (hotplug_status & 0xff)
+			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
+		hotplug_status &= 0xff;	/* clear uninteresting bits */
+	} else
+		hotplug_status = 0;
+
+	/* reading should also clear interrupts */
+	mask = readl(host_mmio + PDC_INT_SEQMASK);
+
+	if (mask == 0xffffffff && hotplug_status == 0) {
+		VPRINTK("QUICK EXIT 2\n");
+		goto done_irq;
+	}
+
+	mask &= 0xffff;		/* only 16 SEQIDs possible */
+	if (mask == 0 && hotplug_status == 0) {
+		VPRINTK("QUICK EXIT 3\n");
+		goto done_irq;
+	}
+
+	writel(mask, host_mmio + PDC_INT_SEQMASK);
+
+	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
+
+	for (i = 0; i < host->n_ports; i++) {
+		VPRINTK("port %u\n", i);
+		ap = host->ports[i];
+
+		/* check for a plug or unplug event */
+		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+		tmp = hotplug_status & (0x11 << ata_no);
+		if (tmp) {
+			struct ata_eh_info *ehi = &ap->link.eh_info;
+			ata_ehi_clear_desc(ehi);
+			ata_ehi_hotplugged(ehi);
+			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
+			ata_port_freeze(ap);
+			++handled;
+			continue;
+		}
+
+		/* check for a packet interrupt */
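+		/* SEQ ids are assigned as port_no + 1, see pdc_packet_start() */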
+		tmp = mask & (1 << (i + 1));
+		if (tmp) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+				handled += pdc_host_intr(ap, qc);
+		}
+	}
+
+	VPRINTK("EXIT\n");
+
+done_irq:
+	spin_unlock(&host->lock);
+	return IRQ_RETVAL(handled);
+}
+
+static void pdc_packet_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pdc_port_priv *pp = ap->private_data;
+	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+	unsigned int port_no = ap->port_no;
+	u8 seq = (u8) (port_no + 1);
+
+	VPRINTK("ENTER, ap %p\n", ap);
+
+	writel(0x00000001, host_mmio + (seq * 4));
+	readl(host_mmio + (seq * 4));	/* flush */
+
+	pp->pkt[2] = seq;
+	wmb();			/* flush PRD, pkt writes */
+	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
+	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
+}
+
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
+{
+	switch (qc->tf.protocol) {
+	case ATAPI_PROT_NODATA:
+		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+			break;
+		/*FALLTHROUGH*/
+	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			break;
+		/*FALLTHROUGH*/
+	case ATAPI_PROT_DMA:
+	case ATA_PROT_DMA:
+		pdc_packet_start(qc);
+		return 0;
+	default:
+		break;
+	}
+	return ata_sff_qc_issue(qc);
+}
+
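+/*
+ * DMA-protocol taskfiles are carried inside the ASIC's command packet
+ * (see pdc_qc_prep()), so they must never be sent through the SFF
+ * taskfile registers.
+ */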
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+	ata_sff_tf_load(ap, tf);
+}
+
+static void pdc_exec_command_mmio(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+	ata_sff_exec_command(ap, tf);
+}
+
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	u8 *scsicmd = qc->scsicmd->cmnd;
+	int pio = 1; /* atapi dma off by default */
+
+	/* Whitelist commands that may use DMA. */
+	switch (scsicmd[0]) {
+	case WRITE_12:
+	case WRITE_10:
+	case WRITE_6:
+	case READ_12:
+	case READ_10:
+	case READ_6:
+	case 0xad: /* READ_DVD_STRUCTURE */
+	case 0xbe: /* READ_CD */
+		pio = 0;
+	}
+	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
+	if (scsicmd[0] == WRITE_10) {
+		unsigned int lba =
+			(scsicmd[2] << 24) |
+			(scsicmd[3] << 16) |
+			(scsicmd[4] << 8) |
+			scsicmd[5];
+		if (lba >= 0xFFFF4FA2)
+			pio = 1;
+	}
+	return pio;
+}
+
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* First generation chips cannot use ATAPI DMA on SATA ports */
+	return 1;
+}
+
+static void pdc_ata_setup_port(struct ata_port *ap,
+			       void __iomem *base, void __iomem *scr_addr)
+{
+	ap->ioaddr.cmd_addr		= base;
+	ap->ioaddr.data_addr		= base;
+	ap->ioaddr.feature_addr		=
+	ap->ioaddr.error_addr		= base + 0x4;
+	ap->ioaddr.nsect_addr		= base + 0x8;
+	ap->ioaddr.lbal_addr		= base + 0xc;
+	ap->ioaddr.lbam_addr		= base + 0x10;
+	ap->ioaddr.lbah_addr		= base + 0x14;
+	ap->ioaddr.device_addr		= base + 0x18;
+	ap->ioaddr.command_addr		=
+	ap->ioaddr.status_addr		= base + 0x1c;
+	ap->ioaddr.altstatus_addr	=
+	ap->ioaddr.ctl_addr		= base + 0x38;
+	ap->ioaddr.scr_addr		= scr_addr;
+}
+
+static void pdc_host_init(struct ata_host *host)
+{
+	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
+	int hotplug_offset;
+	u32 tmp;
+
+	if (is_gen2)
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+	else
+		hotplug_offset = PDC_SATA_PLUG_CSR;
+
+	/*
+	 * Except for the hotplug stuff, this is voodoo from the
+	 * Promise driver.  Label this entire section
+	 * "TODO: figure out why we do this"
+	 */
+
+	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
+	tmp = readl(host_mmio + PDC_FLASH_CTL);
+	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
+	if (!is_gen2)
+		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
+	writel(tmp, host_mmio + PDC_FLASH_CTL);
+
+	/* clear plug/unplug flags for all ports */
+	tmp = readl(host_mmio + hotplug_offset);
+	writel(tmp | 0xff, host_mmio + hotplug_offset);
+
+	tmp = readl(host_mmio + hotplug_offset);
+	if (is_gen2)	/* unmask plug/unplug ints */
+		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
+	else		/* mask plug/unplug ints */
+		writel(tmp | 0xff0000, host_mmio + hotplug_offset);
+
+	/* don't initialise TBG or SLEW on 2nd generation chips */
+	if (is_gen2)
+		return;
+
+	/* reduce TBG clock to 133 MHz */
+	tmp = readl(host_mmio + PDC_TBG_MODE);
+	tmp &= ~0x30000; /* clear bits 17:16 */
+	tmp |= 0x10000;  /* set bits 17:16 = 0:1 */
+	writel(tmp, host_mmio + PDC_TBG_MODE);
+
+	readl(host_mmio + PDC_TBG_MODE);	/* flush */
+	msleep(10);
+
+	/* adjust slew rate control register */
+	tmp = readl(host_mmio + PDC_SLEW_CTL);
+	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
+	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
+	writel(tmp, host_mmio + PDC_SLEW_CTL);
+}
+
+static int pdc_ata_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+	const struct ata_port_info *ppi[PDC_MAX_PORTS];
+	struct ata_host *host;
+	struct pdc_host_priv *hpriv;
+	void __iomem *host_mmio;
+	int n_ports, i, rc;
+	int is_sataii_tx4;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* enable and acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
+
+	/* determine port configuration and setup host */
+	n_ports = 2;
+	if (pi->flags & PDC_FLAG_4_PORTS)
+		n_ports = 4;
+	for (i = 0; i < n_ports; i++)
+		ppi[i] = pi;
+
+	if (pi->flags & PDC_FLAG_SATA_PATA) {
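+		/*
+		 * Bit 15 of PDC_FLASH_CTL clear apparently indicates that the
+		 * chip's extra PATA port is wired up; its port_info entry
+		 * immediately follows the SATA one in pdc_port_info[].
+		 */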
+		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
+		if (!(tmp & 0x80))
+			ppi[n_ports++] = pi + 1;
+	}
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate host\n");
+		return -ENOMEM;
+	}
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	spin_lock_init(&hpriv->hard_reset_lock);
+	host->private_data = hpriv;
+	host->iomap = pcim_iomap_table(pdev);
+
+	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+		unsigned int ata_offset = 0x200 + ata_no * 0x80;
+		unsigned int scr_offset = 0x400 + ata_no * 0x100;
+
+		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
+
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
+	}
+
+	/* initialize adapter */
+	pdc_host_init(host);
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	/* start host, request IRQ and attach */
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
+				 &pdc_ata_sht);
+}
+
+module_pci_driver(pdc_ata_pci_driver);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
new file mode 100644
index 0000000..61633ef
--- /dev/null
+++ b/drivers/ata/sata_promise.h
@@ -0,0 +1,157 @@
+/*
+ *  sata_promise.h - Promise SATA common definitions and inline funcs
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ */
+
+#ifndef __SATA_PROMISE_H__
+#define __SATA_PROMISE_H__
+
+#include <linux/ata.h>
+
+enum pdc_packet_bits {
+	PDC_PKT_READ		= (1 << 2),
+	PDC_PKT_NODATA		= (1 << 3),
+
+	PDC_PKT_SIZEMASK	= (1 << 7) | (1 << 6) | (1 << 5),
+	PDC_PKT_CLEAR_BSY	= (1 << 4),
+	PDC_PKT_WAIT_DRDY	= (1 << 3) | (1 << 4),
+	PDC_LAST_REG		= (1 << 3),
+
+	PDC_REG_DEVCTL		= (1 << 3) | (1 << 2) | (1 << 1),
+};
+
+static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
+					  dma_addr_t sg_table,
+					  unsigned int devno, u8 *buf)
+{
+	u8 dev_reg;
+	__le32 *buf32 = (__le32 *) buf;
+
+	/* set control bits (byte 0), zero delay seq id (byte 3),
+	 * and seq id (byte 2)
+	 */
+	switch (tf->protocol) {
+	case ATA_PROT_DMA:
+		if (!(tf->flags & ATA_TFLAG_WRITE))
+			buf32[0] = cpu_to_le32(PDC_PKT_READ);
+		else
+			buf32[0] = 0;
+		break;
+
+	case ATA_PROT_NODATA:
+		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
+	buf32[2] = 0;				/* no next-packet */
+
+	if (devno == 0)
+		dev_reg = ATA_DEVICE_OBS;
+	else
+		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+	/* select device */
+	buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+	buf[13] = dev_reg;
+
+	/* device control register */
+	buf[14] = (1 << 5) | PDC_REG_DEVCTL;
+	buf[15] = tf->ctl;
+
+	return 16; 	/* offset of next byte */
+}
+
+static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
+				  unsigned int i)
+{
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		buf[i++] = (1 << 5) | ATA_REG_DEVICE;
+		buf[i++] = tf->device;
+	}
+
+	/* and finally the command itself; also includes end-of-pkt marker */
+	buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
+	buf[i++] = tf->command;
+
+	return i;
+}
+
+static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+	/* the "(1 << 5)" should be read "(count << 5)" */
+
+	/* ATA command block registers */
+	buf[i++] = (1 << 5) | ATA_REG_FEATURE;
+	buf[i++] = tf->feature;
+
+	buf[i++] = (1 << 5) | ATA_REG_NSECT;
+	buf[i++] = tf->nsect;
+
+	buf[i++] = (1 << 5) | ATA_REG_LBAL;
+	buf[i++] = tf->lbal;
+
+	buf[i++] = (1 << 5) | ATA_REG_LBAM;
+	buf[i++] = tf->lbam;
+
+	buf[i++] = (1 << 5) | ATA_REG_LBAH;
+	buf[i++] = tf->lbah;
+
+	return i;
+}
+
+static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+	/* the "(2 << 5)" should be read "(count << 5)" */
+
+	/* ATA command block registers */
+	buf[i++] = (2 << 5) | ATA_REG_FEATURE;
+	buf[i++] = tf->hob_feature;
+	buf[i++] = tf->feature;
+
+	buf[i++] = (2 << 5) | ATA_REG_NSECT;
+	buf[i++] = tf->hob_nsect;
+	buf[i++] = tf->nsect;
+
+	buf[i++] = (2 << 5) | ATA_REG_LBAL;
+	buf[i++] = tf->hob_lbal;
+	buf[i++] = tf->lbal;
+
+	buf[i++] = (2 << 5) | ATA_REG_LBAM;
+	buf[i++] = tf->hob_lbam;
+	buf[i++] = tf->lbam;
+
+	buf[i++] = (2 << 5) | ATA_REG_LBAH;
+	buf[i++] = tf->hob_lbah;
+	buf[i++] = tf->lbah;
+
+	return i;
+}
+
+#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
new file mode 100644
index 0000000..1fe9416
--- /dev/null
+++ b/drivers/ata/sata_qstor.c
@@ -0,0 +1,643 @@
+/*
+ *  sata_qstor.c - Pacific Digital Corporation QStor SATA
+ *
+ *  Maintained by:  Mark Lord <mlord@pobox.com>
+ *
+ *  Copyright 2005 Pacific Digital Corporation.
+ *  (OSL/GPL code release authorized by Jalil Fadavi).
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_qstor"
+#define DRV_VERSION	"0.09"
+
+enum {
+	QS_MMIO_BAR		= 4,
+
+	QS_PORTS		= 4,
+	QS_MAX_PRD		= LIBATA_MAX_PRD,
+	QS_CPB_ORDER		= 6,
+	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
+	QS_PRD_BYTES		= QS_MAX_PRD * 16,
+	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,
+
+	/* global register offsets */
+	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
+	QS_HID_HPHY		= 0x0004, /* host physical interface info */
+	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
+	QS_HST_SFF		= 0x0100, /* host status fifo offset */
+	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */
+
+	/* global control bits */
+	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
+	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
+	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble */
+
+	/* per-channel register offsets */
+	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
+	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
+	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
+	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
+	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
+	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
+	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
+	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
+	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */
+
+	/* channel control bits */
+	QS_CTR0_REG		= (1 << 1),   /* register mode (vs. pkt mode) */
+	QS_CTR0_CLER		= (1 << 2),   /* clear channel errors */
+	QS_CTR1_RDEV		= (1 << 1),   /* sata phy/comms reset */
+	QS_CTR1_RCHN		= (1 << 4),   /* reset channel logic */
+	QS_CCF_RUN_PKT		= 0x107,      /* RUN a new dma PKT */
+
+	/* pkt sub-field headers */
+	QS_HCB_HDR		= 0x01,   /* Host Control Block header */
+	QS_DCB_HDR		= 0x02,   /* Device Control Block header */
+
+	/* pkt HCB flag bits */
+	QS_HF_DIRO		= (1 << 0),   /* data DIRection Out */
+	QS_HF_DAT		= (1 << 3),   /* DATa pkt */
+	QS_HF_IEN		= (1 << 4),   /* Interrupt ENable */
+	QS_HF_VLD		= (1 << 5),   /* VaLiD pkt */
+
+	/* pkt DCB flag bits */
+	QS_DF_PORD		= (1 << 2),   /* Pio OR Dma */
+	QS_DF_ELBA		= (1 << 3),   /* Extended LBA (lba48) */
+
+	/* PCI device IDs */
+	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
+};
+
+enum {
+	QS_DMA_BOUNDARY		= ~0UL
+};
+
+typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
+
+struct qs_port_priv {
+	u8			*pkt;
+	dma_addr_t		pkt_dma;
+	qs_state_t		state;
+};
+
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int qs_port_start(struct ata_port *ap);
+static void qs_host_stop(struct ata_host *host);
+static void qs_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
+static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
+static void qs_freeze(struct ata_port *ap);
+static void qs_thaw(struct ata_port *ap);
+static int qs_prereset(struct ata_link *link, unsigned long deadline);
+static void qs_error_handler(struct ata_port *ap);
+
+static struct scsi_host_template qs_ata_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= QS_MAX_PRD,
+	.dma_boundary		= QS_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations qs_ata_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.check_atapi_dma	= qs_check_atapi_dma,
+	.qc_prep		= qs_qc_prep,
+	.qc_issue		= qs_qc_issue,
+
+	.freeze			= qs_freeze,
+	.thaw			= qs_thaw,
+	.prereset		= qs_prereset,
+	.softreset		= ATA_OP_NULL,
+	.error_handler		= qs_error_handler,
+	.lost_interrupt		= ATA_OP_NULL,
+
+	.scr_read		= qs_scr_read,
+	.scr_write		= qs_scr_write,
+
+	.port_start		= qs_port_start,
+	.host_stop		= qs_host_stop,
+};
+
+static const struct ata_port_info qs_port_info[] = {
+	/* board_2068_idx */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
+		.pio_mask	= ATA_PIO4_ONLY,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &qs_ata_ops,
+	},
+};
+
+static const struct pci_device_id qs_ata_pci_tbl[] = {
+	{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver qs_ata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= qs_ata_pci_tbl,
+	.probe			= qs_ata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static void __iomem *qs_mmio_base(struct ata_host *host)
+{
+	return host->iomap[QS_MMIO_BAR];
+}
+
+static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;	/* ATAPI DMA not supported */
+}
+
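+/* switch a channel out of packet (DMA) mode and back to register (MMIO) mode */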
+static inline void qs_enter_reg_mode(struct ata_port *ap)
+{
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+	struct qs_port_priv *pp = ap->private_data;
+
+	pp->state = qs_state_mmio;
+	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
+	readb(chan + QS_CCT_CTR0);        /* flush */
+}
+
+static inline void qs_reset_channel_logic(struct ata_port *ap)
+{
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+
+	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
+	readb(chan + QS_CCT_CTR0);        /* flush */
+	qs_enter_reg_mode(ap);
+}
+
+static void qs_freeze(struct ata_port *ap)
+{
+	u8 __iomem *mmio_base = qs_mmio_base(ap->host);
+
+	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+	qs_enter_reg_mode(ap);
+}
+
+static void qs_thaw(struct ata_port *ap)
+{
+	u8 __iomem *mmio_base = qs_mmio_base(ap->host);
+
+	qs_enter_reg_mode(ap);
+	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
+}
+
+static int qs_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+
+	qs_reset_channel_logic(ap);
+	return ata_sff_prereset(link, deadline);
+}
+
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
+	return 0;
+}
+
+static void qs_error_handler(struct ata_port *ap)
+{
+	qs_enter_reg_mode(ap);
+	ata_sff_error_handler(ap);
+}
+
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
+	return 0;
+}
+
+static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	struct ata_port *ap = qc->ap;
+	struct qs_port_priv *pp = ap->private_data;
+	u8 *prd = pp->pkt + QS_CPB_BYTES;
+	unsigned int si;
+
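+	/*
+	 * Each hardware PRD entry is 16 bytes: a little-endian 64-bit
+	 * address, a 32-bit length, and 4 reserved bytes, hence both
+	 * pointer bumps below are sizeof(u64).
+	 */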
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u64 addr;
+		u32 len;
+
+		addr = sg_dma_address(sg);
+		*(__le64 *)prd = cpu_to_le64(addr);
+		prd += sizeof(u64);
+
+		len = sg_dma_len(sg);
+		*(__le32 *)prd = cpu_to_le32(len);
+		prd += sizeof(u64);
+
+		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
+					(unsigned long long)addr, len);
+	}
+
+	return si;
+}
+
+static void qs_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct qs_port_priv *pp = qc->ap->private_data;
+	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
+	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
+	u64 addr;
+	unsigned int nelem;
+
+	VPRINTK("ENTER\n");
+
+	qs_enter_reg_mode(qc->ap);
+	if (qc->tf.protocol != ATA_PROT_DMA)
+		return;
+
+	nelem = qs_fill_sg(qc);
+
+	if ((qc->tf.flags & ATA_TFLAG_WRITE))
+		hflags |= QS_HF_DIRO;
+	if ((qc->tf.flags & ATA_TFLAG_LBA48))
+		dflags |= QS_DF_ELBA;
+
+	/* host control block (HCB) */
+	buf[ 0] = QS_HCB_HDR;
+	buf[ 1] = hflags;
+	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
+	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
+	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
+	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);
+
+	/* device control block (DCB) */
+	buf[24] = QS_DCB_HDR;
+	buf[28] = dflags;
+
+	/* frame information structure (FIS) */
+	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+}
+
+static inline void qs_packet_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+
+	VPRINTK("ENTER, ap %p\n", ap);
+
+	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
+	wmb();                             /* flush PRDs and pkt to memory */
+	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
+	readl(chan + QS_CCT_CFF);          /* flush */
+}
+
+static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct qs_port_priv *pp = qc->ap->private_data;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		pp->state = qs_state_pkt;
+		qs_packet_start(qc);
+		return 0;
+
+	case ATAPI_PROT_DMA:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+
+	pp->state = qs_state_mmio;
+	return ata_sff_qc_issue(qc);
+}
+
+static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
+{
+	qc->err_mask |= ac_err_mask(status);
+
+	if (!qc->err_mask) {
+		ata_qc_complete(qc);
+	} else {
+		struct ata_port    *ap  = qc->ap;
+		struct ata_eh_info *ehi = &ap->link.eh_info;
+
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+		if (qc->err_mask == AC_ERR_DEV)
+			ata_port_abort(ap);
+		else
+			ata_port_freeze(ap);
+	}
+}
+
+static inline unsigned int qs_intr_pkt(struct ata_host *host)
+{
+	unsigned int handled = 0;
+	u8 sFFE;
+	u8 __iomem *mmio_base = qs_mmio_base(host);
+
+	do {
+		u32 sff0 = readl(mmio_base + QS_HST_SFF);
+		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
+		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
+		sFFE  = sff1 >> 31;		/* empty flag */
+
+		if (sEVLD) {
+			u8 sDST = sff0 >> 16;	/* dev status */
+			u8 sHST = sff1 & 0x3f;	/* host status */
+			unsigned int port_no = (sff1 >> 8) & 0x03;
+			struct ata_port *ap = host->ports[port_no];
+			struct qs_port_priv *pp = ap->private_data;
+			struct ata_queued_cmd *qc;
+
+			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
+					sff1, sff0, port_no, sHST, sDST);
+			handled = 1;
+			if (!pp || pp->state != qs_state_pkt)
+				continue;
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+				switch (sHST) {
+				case 0: /* successful CPB */
+				case 3: /* device error */
+					qs_enter_reg_mode(qc->ap);
+					qs_do_or_die(qc, sDST);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	} while (!sFFE);
+	return handled;
+}
+
+static inline unsigned int qs_intr_mmio(struct ata_host *host)
+{
+	unsigned int handled = 0, port_no;
+
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		struct qs_port_priv *pp = ap->private_data;
+		struct ata_queued_cmd *qc;
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (!qc) {
+			/*
+			 * The qstor hardware generates spurious
+			 * interrupts from time to time when switching
+			 * in and out of packet mode.  There's no
+			 * obvious way to know if we're here now due
+			 * to that, so just ack the irq and pretend we
+			 * knew it was ours.. (ugh).  This does not
+			 * affect packet mode.
+			 */
+			ata_sff_check_status(ap);
+			handled = 1;
+			continue;
+		}
+
+		if (!pp || pp->state != qs_state_mmio)
+			continue;
+		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+			handled |= ata_sff_port_intr(ap, qc);
+	}
+	return handled;
+}
+
+static irqreturn_t qs_intr(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	VPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&host->lock, flags);
+	handled  = qs_intr_pkt(host) | qs_intr_mmio(host);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(handled);
+}
+
+static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		=
+	port->data_addr		= base + 0x400;
+	port->error_addr	=
+	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
+	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
+	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
+	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
+	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
+	port->device_addr	= base + 0x430;
+	port->status_addr	=
+	port->command_addr	= base + 0x438;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + 0x440;
+	port->scr_addr		= base + 0xc00;
+}
+
+static int qs_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct qs_port_priv *pp;
+	void __iomem *mmio_base = qs_mmio_base(ap->host);
+	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
+	u64 addr;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
+				      GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+	memset(pp->pkt, 0, QS_PKT_BYTES);
+	ap->private_data = pp;
+
+	qs_enter_reg_mode(ap);
+	addr = (u64)pp->pkt_dma;
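+	/* program the channel's CPB base address, low dword then high dword */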
+	writel((u32) addr,        chan + QS_CCF_CPBA);
+	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
+	return 0;
+}
+
+static void qs_host_stop(struct ata_host *host)
+{
+	void __iomem *mmio_base = qs_mmio_base(host);
+
+	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
+}
+
+static void qs_host_init(struct ata_host *host, unsigned int chip_id)
+{
+	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
+	unsigned int port_no;
+
+	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
+
+	/* reset each channel in turn */
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
+		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
+		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
+		readb(chan + QS_CCT_CTR0);        /* flush */
+	}
+	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
+
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
+		/* set FIFO depths to same settings as Windows driver */
+		writew(32, chan + QS_CFC_HUFT);
+		writew(32, chan + QS_CFC_HDFT);
+		writew(10, chan + QS_CFC_DUFT);
+		writew( 8, chan + QS_CFC_DDFT);
+		/* set CPB size in bytes, as a power of two */
+		writeb(QS_CPB_ORDER,    chan + QS_CCF_CSEP);
+	}
+	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
+}
+
+/*
+ * The QStor understands 64-bit buses, and uses 64-bit fields
+ * for DMA pointers regardless of bus width.  We just have to
+ * make sure our DMA masks are set appropriately for whatever
+ * bridge lies between us and the QStor, and then the DMA mapping
+ * code will ensure we only ever "see" appropriate buffer addresses.
+ * If we're 32-bit limited somewhere, then our 64-bit fields will
+ * just end up with zeros in the upper 32-bits, without any special
+ * logic required outside of this routine (below).
+ */
+static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
+{
+	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
+	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
+
+	if (have_64bit_bus &&
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int qs_ata_init_one(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
+	struct ata_host *host;
+	int rc, port_no;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
+		return -ENODEV;
+
+	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
+	if (rc)
+		return rc;
+
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		unsigned int offset = port_no * 0x4000;
+		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
+
+		qs_ata_setup_port(&ap->ioaddr, chan);
+
+		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
+	}
+
+	/* initialize adapter */
+	qs_host_init(host, board_idx);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
+				 &qs_ata_sht);
+}
+
+module_pci_driver(qs_ata_pci_driver);
+
+MODULE_AUTHOR("Mark Lord");
+MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
new file mode 100644
index 0000000..10ecb23
--- /dev/null
+++ b/drivers/ata/sata_rcar.c
@@ -0,0 +1,1060 @@
+/*
+ * Renesas R-Car SATA driver
+ *
+ * Author: Vladimir Barinov <source@cogentembedded.com>
+ * Copyright (C) 2013-2015 Cogent Embedded, Inc.
+ * Copyright (C) 2013-2015 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+
+#define DRV_NAME "sata_rcar"
+
+/* SH-Navi2G/ATAPI-ATA compatible task registers */
+#define DATA_REG			0x100
+#define SDEVCON_REG			0x138
+
+/* SH-Navi2G/ATAPI module compatible control registers */
+#define ATAPI_CONTROL1_REG		0x180
+#define ATAPI_STATUS_REG		0x184
+#define ATAPI_INT_ENABLE_REG		0x188
+#define ATAPI_DTB_ADR_REG		0x198
+#define ATAPI_DMA_START_ADR_REG		0x19C
+#define ATAPI_DMA_TRANS_CNT_REG		0x1A0
+#define ATAPI_CONTROL2_REG		0x1A4
+#define ATAPI_SIG_ST_REG		0x1B0
+#define ATAPI_BYTE_SWAP_REG		0x1BC
+
+/* ATAPI control 1 register (ATAPI_CONTROL1) bits */
+#define ATAPI_CONTROL1_ISM		BIT(16)
+#define ATAPI_CONTROL1_DTA32M		BIT(11)
+#define ATAPI_CONTROL1_RESET		BIT(7)
+#define ATAPI_CONTROL1_DESE		BIT(3)
+#define ATAPI_CONTROL1_RW		BIT(2)
+#define ATAPI_CONTROL1_STOP		BIT(1)
+#define ATAPI_CONTROL1_START		BIT(0)
+
+/* ATAPI status register (ATAPI_STATUS) bits */
+#define ATAPI_STATUS_SATAINT		BIT(11)
+#define ATAPI_STATUS_DNEND		BIT(6)
+#define ATAPI_STATUS_DEVTRM		BIT(5)
+#define ATAPI_STATUS_DEVINT		BIT(4)
+#define ATAPI_STATUS_ERR		BIT(2)
+#define ATAPI_STATUS_NEND		BIT(1)
+#define ATAPI_STATUS_ACT		BIT(0)
+
+/* Interrupt enable register (ATAPI_INT_ENABLE) bits */
+#define ATAPI_INT_ENABLE_SATAINT	BIT(11)
+#define ATAPI_INT_ENABLE_DNEND		BIT(6)
+#define ATAPI_INT_ENABLE_DEVTRM		BIT(5)
+#define ATAPI_INT_ENABLE_DEVINT		BIT(4)
+#define ATAPI_INT_ENABLE_ERR		BIT(2)
+#define ATAPI_INT_ENABLE_NEND		BIT(1)
+#define ATAPI_INT_ENABLE_ACT		BIT(0)
+
+/* Access control registers for physical layer control register */
+#define SATAPHYADDR_REG			0x200
+#define SATAPHYWDATA_REG		0x204
+#define SATAPHYACCEN_REG		0x208
+#define SATAPHYRESET_REG		0x20C
+#define SATAPHYRDATA_REG		0x210
+#define SATAPHYACK_REG			0x214
+
+/* Physical layer control address command register (SATAPHYADDR) bits */
+#define SATAPHYADDR_PHYRATEMODE		BIT(10)
+#define SATAPHYADDR_PHYCMD_READ		BIT(9)
+#define SATAPHYADDR_PHYCMD_WRITE	BIT(8)
+
+/* Physical layer control enable register (SATAPHYACCEN) bits */
+#define SATAPHYACCEN_PHYLANE		BIT(0)
+
+/* Physical layer control reset register (SATAPHYRESET) bits */
+#define SATAPHYRESET_PHYRST		BIT(1)
+#define SATAPHYRESET_PHYSRES		BIT(0)
+
+/* Physical layer control acknowledge register (SATAPHYACK) bits */
+#define SATAPHYACK_PHYACK		BIT(0)
+
+/* Serial-ATA HOST control registers */
+#define BISTCONF_REG			0x102C
+#define SDATA_REG			0x1100
+#define SSDEVCON_REG			0x1204
+
+#define SCRSSTS_REG			0x1400
+#define SCRSERR_REG			0x1404
+#define SCRSCON_REG			0x1408
+#define SCRSACT_REG			0x140C
+
+#define SATAINTSTAT_REG			0x1508
+#define SATAINTMASK_REG			0x150C
+
+/* SATA INT status register (SATAINTSTAT) bits */
+#define SATAINTSTAT_SERR		BIT(3)
+#define SATAINTSTAT_ATA			BIT(0)
+
+/* SATA INT mask register (SATAINTMASK) bits */
+#define SATAINTMASK_SERRMSK		BIT(3)
+#define SATAINTMASK_ERRMSK		BIT(2)
+#define SATAINTMASK_ERRCRTMSK		BIT(1)
+#define SATAINTMASK_ATAMSK		BIT(0)
+#define SATAINTMASK_ALL_GEN1		0x7ff
+#define SATAINTMASK_ALL_GEN2		0xfff
+
+#define SATA_RCAR_INT_MASK		(SATAINTMASK_SERRMSK | \
+					 SATAINTMASK_ATAMSK)
+
+/* Physical Layer Control Registers */
+#define SATAPCTLR1_REG			0x43
+#define SATAPCTLR2_REG			0x52
+#define SATAPCTLR3_REG			0x5A
+#define SATAPCTLR4_REG			0x60
+
+/* Descriptor table word 0 bit (when DTA32M = 1) */
+#define SATA_RCAR_DTEND			BIT(0)
+
+#define SATA_RCAR_DMA_BOUNDARY		0x1FFFFFFEUL
+
+/* Gen2 Physical Layer Control Registers */
+#define RCAR_GEN2_PHY_CTL1_REG		0x1704
+#define RCAR_GEN2_PHY_CTL1		0x34180002
+#define RCAR_GEN2_PHY_CTL1_SS		0xC180	/* Spread Spectrum */
+
+#define RCAR_GEN2_PHY_CTL2_REG		0x170C
+#define RCAR_GEN2_PHY_CTL2		0x00002303
+
+#define RCAR_GEN2_PHY_CTL3_REG		0x171C
+#define RCAR_GEN2_PHY_CTL3		0x000B0194
+
+#define RCAR_GEN2_PHY_CTL4_REG		0x1724
+#define RCAR_GEN2_PHY_CTL4		0x00030994
+
+#define RCAR_GEN2_PHY_CTL5_REG		0x1740
+#define RCAR_GEN2_PHY_CTL5		0x03004001
+#define RCAR_GEN2_PHY_CTL5_DC		BIT(1)	/* DC connection */
+#define RCAR_GEN2_PHY_CTL5_TR		BIT(2)	/* Termination Resistor */
+
+enum sata_rcar_type {
+	RCAR_GEN1_SATA,
+	RCAR_GEN2_SATA,
+	RCAR_GEN3_SATA,
+	RCAR_R8A7790_ES1_SATA,
+};
+
+struct sata_rcar_priv {
+	void __iomem *base;
+	u32 sataint_mask;
+	enum sata_rcar_type type;
+};
+
+static void sata_rcar_gen1_phy_preinit(struct sata_rcar_priv *priv)
+{
+	void __iomem *base = priv->base;
+
+	/* idle state */
+	iowrite32(0, base + SATAPHYADDR_REG);
+	/* reset */
+	iowrite32(SATAPHYRESET_PHYRST, base + SATAPHYRESET_REG);
+	udelay(10);
+	/* deassert reset */
+	iowrite32(0, base + SATAPHYRESET_REG);
+}
+
+static void sata_rcar_gen1_phy_write(struct sata_rcar_priv *priv, u16 reg,
+				     u32 val, int group)
+{
+	void __iomem *base = priv->base;
+	int timeout;
+
+	/* deassert reset */
+	iowrite32(0, base + SATAPHYRESET_REG);
+	/* lane 1 */
+	iowrite32(SATAPHYACCEN_PHYLANE, base + SATAPHYACCEN_REG);
+	/* write phy register value */
+	iowrite32(val, base + SATAPHYWDATA_REG);
+	/* set register group */
+	if (group)
+		reg |= SATAPHYADDR_PHYRATEMODE;
+	/* write command */
+	iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, base + SATAPHYADDR_REG);
+	/* wait for ack */
+	for (timeout = 0; timeout < 100; timeout++) {
+		val = ioread32(base + SATAPHYACK_REG);
+		if (val & SATAPHYACK_PHYACK)
+			break;
+	}
+	if (timeout >= 100)
+		pr_err("%s timeout\n", __func__);
+	/* idle state */
+	iowrite32(0, base + SATAPHYADDR_REG);
+}
+
+static void sata_rcar_gen1_phy_init(struct sata_rcar_priv *priv)
+{
+	sata_rcar_gen1_phy_preinit(priv);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
+	sata_rcar_gen1_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+}
+
+static void sata_rcar_gen2_phy_init(struct sata_rcar_priv *priv)
+{
+	void __iomem *base = priv->base;
+
+	iowrite32(RCAR_GEN2_PHY_CTL1, base + RCAR_GEN2_PHY_CTL1_REG);
+	iowrite32(RCAR_GEN2_PHY_CTL2, base + RCAR_GEN2_PHY_CTL2_REG);
+	iowrite32(RCAR_GEN2_PHY_CTL3, base + RCAR_GEN2_PHY_CTL3_REG);
+	iowrite32(RCAR_GEN2_PHY_CTL4, base + RCAR_GEN2_PHY_CTL4_REG);
+	iowrite32(RCAR_GEN2_PHY_CTL5 | RCAR_GEN2_PHY_CTL5_DC |
+		  RCAR_GEN2_PHY_CTL5_TR, base + RCAR_GEN2_PHY_CTL5_REG);
+}
+
+static void sata_rcar_freeze(struct ata_port *ap)
+{
+	struct sata_rcar_priv *priv = ap->host->private_data;
+
+	/* mask */
+	iowrite32(priv->sataint_mask, priv->base + SATAINTMASK_REG);
+
+	ata_sff_freeze(ap);
+}
+
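+/*
+ * SATAINTSTAT appears to be write-zero-to-clear: the ack writes below leave
+ * every bit set except the ones being acknowledged.
+ */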
+static void sata_rcar_thaw(struct ata_port *ap)
+{
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	void __iomem *base = priv->base;
+
+	/* ack */
+	iowrite32(~(u32)SATA_RCAR_INT_MASK, base + SATAINTSTAT_REG);
+
+	ata_sff_thaw(ap);
+
+	/* unmask */
+	iowrite32(priv->sataint_mask & ~SATA_RCAR_INT_MASK, base + SATAINTMASK_REG);
+}
+
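+/*
+ * The data port is a 32-bit register, so 16-bit PIO data is moved with
+ * 32-bit accessors and truncated to the low halfword.
+ */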
+static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
+{
+	u16 *ptr = buffer;
+
+	while (count--) {
+		u16 data = ioread32(reg);
+
+		*ptr++ = data;
+	}
+}
+
+static void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count)
+{
+	const u16 *ptr = buffer;
+
+	while (count--)
+		iowrite32(*ptr++, reg);
+}
+
+static u8 sata_rcar_check_status(struct ata_port *ap)
+{
+	return ioread32(ap->ioaddr.status_addr);
+}
+
+static u8 sata_rcar_check_altstatus(struct ata_port *ap)
+{
+	return ioread32(ap->ioaddr.altstatus_addr);
+}
+
+static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	iowrite32(ctl, ap->ioaddr.ctl_addr);
+}
+
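+/*
+ * A SATA link carries a single device, so the device argument is ignored
+ * and only the obsolete device-select bits are written.
+ */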
+static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
+{
+	iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
+}
+
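+/*
+ * Classic 0x55/0xAA scratch-register test: a present device latches the
+ * values written to NSECT/LBAL, so reading them back detects it.
+ */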
+static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
+					 unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	sata_rcar_dev_select(ap, device);
+
+	iowrite32(0x55, ioaddr->nsect_addr);
+	iowrite32(0xaa, ioaddr->lbal_addr);
+
+	iowrite32(0xaa, ioaddr->nsect_addr);
+	iowrite32(0x55, ioaddr->lbal_addr);
+
+	iowrite32(0x55, ioaddr->nsect_addr);
+	iowrite32(0xaa, ioaddr->lbal_addr);
+
+	nsect = ioread32(ioaddr->nsect_addr);
+	lbal  = ioread32(ioaddr->lbal_addr);
+
+	if (nsect == 0x55 && lbal == 0xaa)
+		return 1;	/* found a device */
+
+	return 0;		/* nothing found */
+}
+
+static int sata_rcar_wait_after_reset(struct ata_link *link,
+				      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+
+	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+	return ata_sff_wait_ready(link, deadline);
+}
+
+static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+	/* software reset.  causes dev0 to be selected */
+	iowrite32(ap->ctl, ioaddr->ctl_addr);
+	udelay(20);
+	iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+	udelay(20);
+	iowrite32(ap->ctl, ioaddr->ctl_addr);
+	ap->last_ctl = ap->ctl;
+
+	/* wait for the port to become ready */
+	return sata_rcar_wait_after_reset(&ap->link, deadline);
+}
+
+static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
+			       unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned int devmask = 0;
+	int rc;
+	u8 err;
+
+	/* determine if device 0 is present */
+	if (sata_rcar_ata_devchk(ap, 0))
+		devmask |= 1 << 0;
+
+	/* issue bus reset */
+	DPRINTK("about to softreset, devmask=%x\n", devmask);
+	rc = sata_rcar_bus_softreset(ap, deadline);
+	/* if link is occupied, -ENODEV too is an error */
+	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
+
+	DPRINTK("classes[0]=%u\n", classes[0]);
+	return 0;
+}
+
+static void sata_rcar_tf_load(struct ata_port *ap,
+			      const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		iowrite32(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		iowrite32(tf->hob_feature, ioaddr->feature_addr);
+		iowrite32(tf->hob_nsect, ioaddr->nsect_addr);
+		iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
+		iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
+		iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		iowrite32(tf->feature, ioaddr->feature_addr);
+		iowrite32(tf->nsect, ioaddr->nsect_addr);
+		iowrite32(tf->lbal, ioaddr->lbal_addr);
+		iowrite32(tf->lbam, ioaddr->lbam_addr);
+		iowrite32(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		iowrite32(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = sata_rcar_check_status(ap);
+	tf->feature = ioread32(ioaddr->error_addr);
+	tf->nsect = ioread32(ioaddr->nsect_addr);
+	tf->lbal = ioread32(ioaddr->lbal_addr);
+	tf->lbam = ioread32(ioaddr->lbam_addr);
+	tf->lbah = ioread32(ioaddr->lbah_addr);
+	tf->device = ioread32(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ioread32(ioaddr->error_addr);
+		tf->hob_nsect = ioread32(ioaddr->nsect_addr);
+		tf->hob_lbal = ioread32(ioaddr->lbal_addr);
+		tf->hob_lbam = ioread32(ioaddr->lbam_addr);
+		tf->hob_lbah = ioread32(ioaddr->lbah_addr);
+		iowrite32(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+static void sata_rcar_exec_command(struct ata_port *ap,
+				   const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	iowrite32(tf->command, ap->ioaddr.command_addr);
+	ata_sff_pause(ap);
+}
+
+static unsigned int sata_rcar_data_xfer(struct ata_queued_cmd *qc,
+					      unsigned char *buf,
+					      unsigned int buflen, int rw)
+{
+	struct ata_port *ap = qc->dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (rw == READ)
+		sata_rcar_ioread16_rep(data_addr, buf, words);
+	else
+		sata_rcar_iowrite16_rep(data_addr, buf, words);
+
+	/* Transfer trailing byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned char pad[2] = { };
+
+		/* Point buf to the tail of buffer */
+		buf += buflen - 1;
+
+		/*
+		 * Use the io*16_rep() accessors here as well, to avoid
+		 * pointlessly swapping bytes back and forth on big endian
+		 * machines...
+		 */
+		if (rw == READ) {
+			sata_rcar_ioread16_rep(data_addr, pad, 1);
+			*buf = pad[0];
+		} else {
+			pad[0] = *buf;
+			sata_rcar_iowrite16_rep(data_addr, pad, 1);
+		}
+		words++;
+	}
+
+	return words << 1;
+}
+
+static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
+{
+	int count;
+	struct ata_port *ap;
+
+	/* We only need to flush incoming data when a command was running */
+	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+		return;
+
+	ap = qc->ap;
+	/* Drain up to 64K of data before we give up this recovery method */
+	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) &&
+			count < 65536; count += 2)
+		ioread32(ap->ioaddr.data_addr);
+
+	/* Can become DEBUG later */
+	if (count)
+		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg,
+			      u32 *val)
+{
+	if (sc_reg > SCR_ACTIVE)
+		return -EINVAL;
+
+	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2));
+	return 0;
+}
+
+static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg,
+			       u32 val)
+{
+	if (sc_reg > SCR_ACTIVE)
+		return -EINVAL;
+
+	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2));
+	return 0;
+}
+
+static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd = ap->bmdma_prd;
+	struct scatterlist *sg;
+	unsigned int si;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, sg_len;
+
+		/*
+		 * Note: h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32)sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		prd[si].addr = cpu_to_le32(addr);
+		prd[si].flags_len = cpu_to_le32(sg_len);
+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
+	}
+
+	/* end-of-table flag */
+	prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+}
+
+static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	sata_rcar_bmdma_fill_sg(qc);
+}
+
+static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE;
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	void __iomem *base = priv->base;
+	u32 dmactl;
+
+	/* load PRD table addr. */
+	mb();   /* make sure PRD table writes are visible to controller */
+	iowrite32(ap->bmdma_prd_dma, base + ATAPI_DTB_ADR_REG);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread32(base + ATAPI_CONTROL1_REG);
+	dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP);
+	if (dmactl & ATAPI_CONTROL1_START) {
+		dmactl &= ~ATAPI_CONTROL1_START;
+		dmactl |= ATAPI_CONTROL1_STOP;
+	}
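+	/* the RW bit selects device-to-memory (read) transfers */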
+	if (!rw)
+		dmactl |= ATAPI_CONTROL1_RW;
+	iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	void __iomem *base = priv->base;
+	u32 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = ioread32(base + ATAPI_CONTROL1_REG);
+	dmactl &= ~ATAPI_CONTROL1_STOP;
+	dmactl |= ATAPI_CONTROL1_START;
+	iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
+}
+
+static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	void __iomem *base = priv->base;
+	u32 dmactl;
+
+	/* force termination of DMA transfer if active */
+	dmactl = ioread32(base + ATAPI_CONTROL1_REG);
+	if (dmactl & ATAPI_CONTROL1_START) {
+		dmactl &= ~ATAPI_CONTROL1_START;
+		dmactl |= ATAPI_CONTROL1_STOP;
+		iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
+	}
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_sff_dma_pause(ap);
+}
+
+static u8 sata_rcar_bmdma_status(struct ata_port *ap)
+{
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	u8 host_stat = 0;
+	u32 status;
+
+	status = ioread32(priv->base + ATAPI_STATUS_REG);
+	if (status & ATAPI_STATUS_DEVINT)
+		host_stat |= ATA_DMA_INTR;
+	if (status & ATAPI_STATUS_ACT)
+		host_stat |= ATA_DMA_ACTIVE;
+
+	return host_stat;
+}
+
+static struct scsi_host_template sata_rcar_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	/*
+	 * This controller allows transfer chunks up to 512MB which cross 64KB
+	 * boundaries, therefore the DMA limits are more relaxed than standard
+	 * ATA SFF.
+	 */
+	.sg_tablesize		= ATA_MAX_PRD,
+	.dma_boundary		= SATA_RCAR_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations sata_rcar_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.freeze			= sata_rcar_freeze,
+	.thaw			= sata_rcar_thaw,
+	.softreset		= sata_rcar_softreset,
+
+	.scr_read		= sata_rcar_scr_read,
+	.scr_write		= sata_rcar_scr_write,
+
+	.sff_dev_select		= sata_rcar_dev_select,
+	.sff_set_devctl		= sata_rcar_set_devctl,
+	.sff_check_status	= sata_rcar_check_status,
+	.sff_check_altstatus	= sata_rcar_check_altstatus,
+	.sff_tf_load		= sata_rcar_tf_load,
+	.sff_tf_read		= sata_rcar_tf_read,
+	.sff_exec_command	= sata_rcar_exec_command,
+	.sff_data_xfer		= sata_rcar_data_xfer,
+	.sff_drain_fifo		= sata_rcar_drain_fifo,
+
+	.qc_prep		= sata_rcar_qc_prep,
+
+	.bmdma_setup		= sata_rcar_bmdma_setup,
+	.bmdma_start		= sata_rcar_bmdma_start,
+	.bmdma_stop		= sata_rcar_bmdma_stop,
+	.bmdma_status		= sata_rcar_bmdma_status,
+};
+
+static void sata_rcar_serr_interrupt(struct ata_port *ap)
+{
+	struct sata_rcar_priv *priv = ap->host->private_data;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	int freeze = 0;
+	u32 serror;
+
+	serror = ioread32(priv->base + SCRSERR_REG);
+	if (!serror)
+		return;
+
+	DPRINTK("SError @host_intr: 0x%x\n", serror);
+
+	/* first, analyze and record host port events */
+	ata_ehi_clear_desc(ehi);
+
+	if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
+		/* Setup a soft-reset EH action */
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, "%s", "hotplug");
+
+		freeze = serror & SERR_COMM_WAKE ? 0 : 1;
+	}
+
+	/* freeze or abort */
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void sata_rcar_ata_interrupt(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	int handled = 0;
+
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc)
+		handled |= ata_bmdma_port_intr(ap, qc);
+
+	/* be sure to clear ATA interrupt */
+	if (!handled)
+		sata_rcar_check_status(ap);
+}
+
+static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct sata_rcar_priv *priv = host->private_data;
+	void __iomem *base = priv->base;
+	unsigned int handled = 0;
+	struct ata_port *ap;
+	u32 sataintstat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	sataintstat = ioread32(base + SATAINTSTAT_REG);
+	sataintstat &= SATA_RCAR_INT_MASK;
+	if (!sataintstat)
+		goto done;
+	/* ack */
+	iowrite32(~sataintstat & priv->sataint_mask, base + SATAINTSTAT_REG);
+
+	ap = host->ports[0];
+
+	if (sataintstat & SATAINTSTAT_ATA)
+		sata_rcar_ata_interrupt(ap);
+
+	if (sataintstat & SATAINTSTAT_SERR)
+		sata_rcar_serr_interrupt(ap);
+
+	handled = 1;
+done:
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void sata_rcar_setup_port(struct ata_host *host)
+{
+	struct ata_port *ap = host->ports[0];
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	struct sata_rcar_priv *priv = host->private_data;
+	void __iomem *base = priv->base;
+
+	ap->ops		= &sata_rcar_port_ops;
+	ap->pio_mask	= ATA_PIO4;
+	ap->udma_mask	= ATA_UDMA6;
+	ap->flags	|= ATA_FLAG_SATA;
+
+	if (priv->type == RCAR_R8A7790_ES1_SATA)
+		ap->flags	|= ATA_FLAG_NO_DIPM;
+
+	ioaddr->cmd_addr = base + SDATA_REG;
+	ioaddr->ctl_addr = base + SSDEVCON_REG;
+	ioaddr->scr_addr = base + SCRSSTS_REG;
+	ioaddr->altstatus_addr = ioaddr->ctl_addr;
+
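+	/* taskfile registers are on a 32-bit stride, hence the << 2 */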
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static void sata_rcar_init_module(struct sata_rcar_priv *priv)
+{
+	void __iomem *base = priv->base;
+	u32 val;
+
+	/* SATA-IP reset state */
+	val = ioread32(base + ATAPI_CONTROL1_REG);
+	val |= ATAPI_CONTROL1_RESET;
+	iowrite32(val, base + ATAPI_CONTROL1_REG);
+
+	/* ISM mode, PRD mode, DTEND flag at bit 0 */
+	val = ioread32(base + ATAPI_CONTROL1_REG);
+	val |= ATAPI_CONTROL1_ISM;
+	val |= ATAPI_CONTROL1_DESE;
+	val |= ATAPI_CONTROL1_DTA32M;
+	iowrite32(val, base + ATAPI_CONTROL1_REG);
+
+	/* Release the SATA-IP from the reset state */
+	val = ioread32(base + ATAPI_CONTROL1_REG);
+	val &= ~ATAPI_CONTROL1_RESET;
+	iowrite32(val, base + ATAPI_CONTROL1_REG);
+
+	/* ack and mask */
+	iowrite32(0, base + SATAINTSTAT_REG);
+	iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+
+	/* enable interrupts */
+	iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
+}
+
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+	struct sata_rcar_priv *priv = host->private_data;
+
+	priv->sataint_mask = SATAINTMASK_ALL_GEN2;
+
+	/* reset and setup phy */
+	switch (priv->type) {
+	case RCAR_GEN1_SATA:
+		priv->sataint_mask = SATAINTMASK_ALL_GEN1;
+		sata_rcar_gen1_phy_init(priv);
+		break;
+	case RCAR_GEN2_SATA:
+	case RCAR_R8A7790_ES1_SATA:
+		sata_rcar_gen2_phy_init(priv);
+		break;
+	case RCAR_GEN3_SATA:
+		break;
+	default:
+		dev_warn(host->dev, "SATA phy is not initialized\n");
+		break;
+	}
+
+	sata_rcar_init_module(priv);
+}
+
+static const struct of_device_id sata_rcar_match[] = {
+	{
+		/* Deprecated by "renesas,sata-r8a7779" */
+		.compatible = "renesas,rcar-sata",
+		.data = (void *)RCAR_GEN1_SATA,
+	},
+	{
+		.compatible = "renesas,sata-r8a7779",
+		.data = (void *)RCAR_GEN1_SATA,
+	},
+	{
+		.compatible = "renesas,sata-r8a7790",
+		.data = (void *)RCAR_GEN2_SATA
+	},
+	{
+		.compatible = "renesas,sata-r8a7790-es1",
+		.data = (void *)RCAR_R8A7790_ES1_SATA
+	},
+	{
+		.compatible = "renesas,sata-r8a7791",
+		.data = (void *)RCAR_GEN2_SATA
+	},
+	{
+		.compatible = "renesas,sata-r8a7793",
+		.data = (void *)RCAR_GEN2_SATA
+	},
+	{
+		.compatible = "renesas,sata-r8a7795",
+		.data = (void *)RCAR_GEN3_SATA
+	},
+	{
+		.compatible = "renesas,rcar-gen2-sata",
+		.data = (void *)RCAR_GEN2_SATA
+	},
+	{
+		.compatible = "renesas,rcar-gen3-sata",
+		.data = (void *)RCAR_GEN3_SATA
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sata_rcar_match);
+
+static int sata_rcar_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_host *host;
+	struct sata_rcar_priv *priv;
+	struct resource *mem;
+	int irq;
+	int ret = 0;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -EINVAL;
+
+	priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		goto err_pm_disable;
+
+	host = ata_host_alloc(dev, 1);
+	if (!host) {
+		dev_err(dev, "ata_host_alloc failed\n");
+		ret = -ENOMEM;
+		goto err_pm_put;
+	}
+
+	host->private_data = priv;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
+		goto err_pm_put;
+	}
+
+	/* setup port */
+	sata_rcar_setup_port(host);
+
+	/* initialize host controller */
+	sata_rcar_init_controller(host);
+
+	ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0,
+				&sata_rcar_sht);
+	if (!ret)
+		return 0;
+
+err_pm_put:
+	pm_runtime_put(dev);
+err_pm_disable:
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int sata_rcar_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct sata_rcar_priv *priv = host->private_data;
+	void __iomem *base = priv->base;
+
+	ata_host_detach(host);
+
+	/* disable interrupts */
+	iowrite32(0, base + ATAPI_INT_ENABLE_REG);
+	/* ack and mask */
+	iowrite32(0, base + SATAINTSTAT_REG);
+	iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sata_rcar_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_rcar_priv *priv = host->private_data;
+	void __iomem *base = priv->base;
+	int ret;
+
+	ret = ata_host_suspend(host, PMSG_SUSPEND);
+	if (!ret) {
+		/* disable interrupts */
+		iowrite32(0, base + ATAPI_INT_ENABLE_REG);
+		/* mask */
+		iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+
+		pm_runtime_put(dev);
+	}
+
+	return ret;
+}
+
+static int sata_rcar_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_rcar_priv *priv = host->private_data;
+	void __iomem *base = priv->base;
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	if (priv->type == RCAR_GEN3_SATA) {
+		sata_rcar_init_module(priv);
+	} else {
+		/* ack and mask */
+		iowrite32(0, base + SATAINTSTAT_REG);
+		iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
+
+		/* enable interrupts */
+		iowrite32(ATAPI_INT_ENABLE_SATAINT,
+			  base + ATAPI_INT_ENABLE_REG);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+
+static int sata_rcar_restore(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	sata_rcar_setup_port(host);
+
+	/* initialize host controller */
+	sata_rcar_init_controller(host);
+
+	ata_host_resume(host);
+
+	return 0;
+}
+
+static const struct dev_pm_ops sata_rcar_pm_ops = {
+	.suspend	= sata_rcar_suspend,
+	.resume		= sata_rcar_resume,
+	.freeze		= sata_rcar_suspend,
+	.thaw		= sata_rcar_resume,
+	.poweroff	= sata_rcar_suspend,
+	.restore	= sata_rcar_restore,
+};
+#endif
+
+static struct platform_driver sata_rcar_driver = {
+	.probe		= sata_rcar_probe,
+	.remove		= sata_rcar_remove,
+	.driver = {
+		.name		= DRV_NAME,
+		.of_match_table	= sata_rcar_match,
+#ifdef CONFIG_PM_SLEEP
+		.pm		= &sata_rcar_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(sata_rcar_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vladimir Barinov");
+MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver");
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
new file mode 100644
index 0000000..ed76f07
--- /dev/null
+++ b/drivers/ata/sata_sil.c
@@ -0,0 +1,825 @@
+/*
+ *  sata_sil.c - Silicon Image SATA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *  		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2005 Red Hat, Inc.
+ *  Copyright 2003 Benjamin Herrenschmidt
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Documentation for SiI 3112:
+ *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
+ *
+ *  Other errata and documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"sata_sil"
+#define DRV_VERSION	"2.4"
+
+#define SIL_DMA_BOUNDARY	0x7fffffffUL
+
+enum {
+	SIL_MMIO_BAR		= 5,
+
+	/*
+	 * host flags
+	 */
+	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
+	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
+	SIL_FLAG_MOD15WRITE	= (1 << 30),
+
+	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA,
+
+	/*
+	 * Controller IDs
+	 */
+	sil_3112		= 0,
+	sil_3112_no_sata_irq	= 1,
+	sil_3512		= 2,
+	sil_3114		= 3,
+
+	/*
+	 * Register offsets
+	 */
+	SIL_SYSCFG		= 0x48,
+
+	/*
+	 * Register bits
+	 */
+	/* SYSCFG */
+	SIL_MASK_IDE0_INT	= (1 << 22),
+	SIL_MASK_IDE1_INT	= (1 << 23),
+	SIL_MASK_IDE2_INT	= (1 << 24),
+	SIL_MASK_IDE3_INT	= (1 << 25),
+	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
+	SIL_MASK_4PORT		= SIL_MASK_2PORT |
+				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
+
+	/* BMDMA/BMDMA2 */
+	SIL_INTR_STEERING	= (1 << 1),
+
+	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
+	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
+	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
+	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
+	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
+	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
+	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
+	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
+	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
+	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */
+
+	/* SIEN */
+	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */
+
+	/*
+	 * Others
+	 */
+	SIL_QUIRK_MOD15WRITE	= (1 << 0),
+	SIL_QUIRK_UDMA5MAX	= (1 << 1),
+};
+
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int sil_pci_device_resume(struct pci_dev *pdev);
+#endif
+static void sil_dev_config(struct ata_device *dev);
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+static void sil_qc_prep(struct ata_queued_cmd *qc);
+static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+static void sil_bmdma_start(struct ata_queued_cmd *qc);
+static void sil_bmdma_stop(struct ata_queued_cmd *qc);
+static void sil_freeze(struct ata_port *ap);
+static void sil_thaw(struct ata_port *ap);
+
+
+static const struct pci_device_id sil_pci_tbl[] = {
+	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
+	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
+	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
+	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
+	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
+	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
+	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
+
+	{ }	/* terminate list */
+};
+
+
+/* TODO firmware versions should be added - eric */
+static const struct sil_drivelist {
+	const char *product;
+	unsigned int quirk;
+} sil_blacklist[] = {
+	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
+	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
+	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
+	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
+	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
+	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
+	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
+	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
+	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
+	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
+	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
+	{ "TOSHIBA MK2561GSYN",	SIL_QUIRK_MOD15WRITE },
+	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
+	{ }
+};
+
+static struct pci_driver sil_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sil_pci_tbl,
+	.probe			= sil_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= sil_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sil_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	/*
+	 * These controllers support Large Block Transfer which allows
+	 * transfer chunks up to 2GB and which cross 64KB boundaries,
+	 * therefore the DMA limits are more relaxed than standard ATA SFF.
+	 */
+	.dma_boundary		= SIL_DMA_BOUNDARY,
+	.sg_tablesize		= ATA_MAX_PRD
+};
+
+static struct ata_port_operations sil_ops = {
+	.inherits		= &ata_bmdma32_port_ops,
+	.dev_config		= sil_dev_config,
+	.set_mode		= sil_set_mode,
+	.bmdma_setup            = sil_bmdma_setup,
+	.bmdma_start            = sil_bmdma_start,
+	.bmdma_stop		= sil_bmdma_stop,
+	.qc_prep		= sil_qc_prep,
+	.freeze			= sil_freeze,
+	.thaw			= sil_thaw,
+	.scr_read		= sil_scr_read,
+	.scr_write		= sil_scr_write,
+};
+
+static const struct ata_port_info sil_port_info[] = {
+	/* sil_3112 */
+	{
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil_ops,
+	},
+	/* sil_3112_no_sata_irq */
+	{
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
+				  SIL_FLAG_NO_SATA_IRQ,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil_ops,
+	},
+	/* sil_3512 */
+	{
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil_ops,
+	},
+	/* sil_3114 */
+	{
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil_ops,
+	},
+};
+
+/* per-port register offsets */
+/* TODO: we can probably calculate rather than use a table */
+static const struct {
+	unsigned long tf;	/* ATA taskfile register block */
+	unsigned long ctl;	/* ATA control/altstatus register block */
+	unsigned long bmdma;	/* DMA register block */
+	unsigned long bmdma2;	/* DMA register block #2 */
+	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
+	unsigned long scr;	/* SATA control register block */
+	unsigned long sien;	/* SATA Interrupt Enable register */
+	unsigned long xfer_mode;/* data transfer mode register */
+	unsigned long sfis_cfg;	/* SATA FIS reception config register */
+} sil_port[] = {
+	/* port 0 ... */
+	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
+	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
+	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
+	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
+	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
+	/* ... port 3 */
+};
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static int slow_down;
+module_param(slow_down, int, 0444);
+MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems by limiting commands to 15 sectors (0=off, 1=on)");
+
+
+static void sil_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+
+	/* clear start/stop bit - can safely always write 0 */
+	iowrite8(0, bmdma2);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_sff_dma_pause(ap);
+}
+
+static void sil_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sil_bmdma_start(struct ata_queued_cmd *qc)
+{
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+	u8 dmactl = ATA_DMA_START;
+
+	/* set transfer direction, start host DMA transaction
+	 * Note: For Large Block Transfer to work, the DMA must be started
+	 * using the bmdma2 register.
+	 */
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, bmdma2);
+}
+
+/* The way God intended PCI IDE scatter/gather lists to look and behave... */
+static void sil_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd, *last_prd = NULL;
+	unsigned int si;
+
+	prd = &ap->bmdma_prd[0];
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		/* Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		u32 addr = (u32) sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		prd->addr = cpu_to_le32(addr);
+		prd->flags_len = cpu_to_le32(sg_len);
+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
+
+		last_prd = prd;
+		prd++;
+	}
+
+	if (likely(last_prd))
+		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static void sil_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	sil_fill_sg(qc);
+}
+
+static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+{
+	u8 cache_line = 0;
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
+	return cache_line;
+}
+
+/**
+ *	sil_set_mode		-	wrap set_mode functions
+ *	@link: link to set up
+ *	@r_failed: returned device when we fail
+ *
+ *	Wrap the libata method for device setup; after the setup we need
+ *	to inspect the results and do some configuration work.
+ */
+
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
+	struct ata_device *dev;
+	u32 tmp, dev_mode[2] = { };
+	int rc;
+
+	rc = ata_do_set_mode(link, r_failed);
+	if (rc)
+		return rc;
+
+	ata_for_each_dev(dev, link, ALL) {
+		if (!ata_dev_enabled(dev))
+			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
+		else if (dev->flags & ATA_DFLAG_PIO)
+			dev_mode[dev->devno] = 1;	/* PIO3/4 */
+		else
+			dev_mode[dev->devno] = 3;	/* UDMA */
+		/* value 2 indicates MDMA */
+	}
+
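+	/* bits 1:0 hold device 0's mode, bits 5:4 device 1's */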
+	tmp = readl(addr);
+	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
+	tmp |= dev_mode[0];
+	tmp |= (dev_mode[1] << 4);
+	writel(tmp, addr);
+	readl(addr);	/* flush */
+	return 0;
+}
+
+static inline void __iomem *sil_scr_addr(struct ata_port *ap,
+					 unsigned int sc_reg)
+{
+	void __iomem *offset = ap->ioaddr.scr_addr;
+
+	switch (sc_reg) {
+	case SCR_STATUS:
+		return offset + 4;
+	case SCR_ERROR:
+		return offset + 8;
+	case SCR_CONTROL:
+		return offset;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	return NULL;
+}
+
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
+
+	if (mmio) {
+		*val = readl(mmio);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
+
+	if (mmio) {
+		writel(val, mmio);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	u8 status;
+
+	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
+		u32 serror = 0xffffffff;
+
+		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
+		 * controllers continue to assert IRQ as long as
+		 * SError bits are pending.  Clear SError immediately.
+		 */
+		sil_scr_read(&ap->link, SCR_ERROR, &serror);
+		sil_scr_write(&ap->link, SCR_ERROR, serror);
+
+		/* Sometimes spurious interrupts occur; double-check
+		 * that it's PHYRDY CHG.
+		 */
+		if (serror & SERR_PHYRDY_CHG) {
+			ap->link.eh_info.serror |= serror;
+			goto freeze;
+		}
+
+		if (!(bmdma2 & SIL_DMA_COMPLETE))
+			return;
+	}
+
+	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+		/* this sometimes happens, just clear IRQ */
+		ap->ops->sff_check_status(ap);
+		return;
+	}
+
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.  No
+		 * need to check ata_is_atapi(qc->tf.protocol) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			goto err_hsm;
+		break;
+	case HSM_ST_LAST:
+		if (ata_is_dma(qc->tf.protocol)) {
+			/* clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (bmdma2 & SIL_DMA_ERROR) {
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto err_hsm;
+	}
+
+	/* check main status, clearing INTRQ */
+	status = ap->ops->sff_check_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto err_hsm;
+
+	/* ack bmdma irq events */
+	ata_bmdma_irq_clear(ap);
+
+	/* kick HSM in the ass */
+	ata_sff_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
+
+	return;
+
+ err_hsm:
+	qc->err_mask |= AC_ERR_HSM;
+ freeze:
+	ata_port_freeze(ap);
+}
+
+static irqreturn_t sil_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+	int handled = 0;
+	int i;
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
+
+		/* turn off SATA_IRQ if not supported */
+		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
+			bmdma2 &= ~SIL_DMA_SATA_IRQ;
+
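+		/* an all-ones read typically means the controller is gone */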
+		if (bmdma2 == 0xffffffff ||
+		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
+			continue;
+
+		sil_host_intr(ap, bmdma2);
+		handled = 1;
+	}
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void sil_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	u32 tmp;
+
+	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
+	writel(0, mmio_base + sil_port[ap->port_no].sien);
+
+	/* plug IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
+	writel(tmp, mmio_base + SIL_SYSCFG);
+	readl(mmio_base + SIL_SYSCFG);	/* flush */
+
+	/* Ensure DMA_ENABLE is off.
+	 *
+	 * This is because the controller will not give us access to the
+	 * taskfile registers while a DMA is in progress.
+	 */
+	iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
+		 ap->ioaddr.bmdma_addr);
+
+	/* According to ata_bmdma_stop, an HDMA transition requires
+	 * one PIO cycle. But we can't read a taskfile register.
+	 */
+	ioread8(ap->ioaddr.bmdma_addr);
+}
+
+static void sil_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	u32 tmp;
+
+	/* clear IRQ */
+	ap->ops->sff_check_status(ap);
+	ata_bmdma_irq_clear(ap);
+
+	/* turn on SATA IRQ if supported */
+	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
+		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
+
+	/* turn on IRQ */
+	tmp = readl(mmio_base + SIL_SYSCFG);
+	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
+	writel(tmp, mmio_base + SIL_SYSCFG);
+}
+
+/**
+ *	sil_dev_config - Apply device/host-specific errata fixups
+ *	@dev: Device to be examined
+ *
+ *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
+ *	device is known to be present, this function is called.
+ *	We apply two errata fixups which are specific to Silicon Image,
+ *	a Seagate and a Maxtor fixup.
+ *
+ *	For certain Seagate devices, we must limit the maximum sectors
+ *	to under 8K.
+ *
+ *	For certain Maxtor devices, we must not program the drive
+ *	beyond udma5.
+ *
+ *	Both fixups are unfairly pessimistic.  As soon as I get more
+ *	information on these errata, I will create a more exhaustive
+ *	list, and apply the fixups to only the specific
+ *	devices/hosts/firmwares that need it.
+ *
+ *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
+ *	The Maxtor quirk is in the blacklist, but I'm keeping the original
+ *	pessimistic fix for the following reasons...
+ *	- There seems to be less info on it, only one device gleaned off the
+ *	Windows driver, maybe only one is affected.  More info would be greatly
+ *	appreciated.
+ *	- But then again UDMA5 is hardly anything to complain about
+ */
+static void sil_dev_config(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
+	unsigned int n, quirks = 0;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	/* This controller doesn't support trim */
+	dev->horkage |= ATA_HORKAGE_NOTRIM;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	for (n = 0; sil_blacklist[n].product; n++)
+		if (!strcmp(sil_blacklist[n].product, model_num)) {
+			quirks = sil_blacklist[n].quirk;
+			break;
+		}
+
+	/* limit requests to 15 sectors */
+	if (slow_down ||
+	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
+	     (quirks & SIL_QUIRK_MOD15WRITE))) {
+		if (print_info)
+			ata_dev_info(dev,
+		"applying Seagate errata fix (mod15write workaround)\n");
+		dev->max_sectors = 15;
+		return;
+	}
+
+	/* limit to udma5 */
+	if (quirks & SIL_QUIRK_UDMA5MAX) {
+		if (print_info)
+			ata_dev_info(dev, "applying Maxtor errata fix %s\n",
+				     model_num);
+		dev->udma_mask &= ATA_UDMA5;
+		return;
+	}
+}
+
+static void sil_init_controller(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+	u8 cls;
+	u32 tmp;
+	int i;
+
+	/* Initialize FIFO PCI bus arbitration */
+	cls = sil_get_device_cache_line(pdev);
+	if (cls) {
+		cls >>= 3;
+		cls++;  /* cls = (line_size/8)+1 */
+		for (i = 0; i < host->n_ports; i++)
+			writew(cls << 8 | cls,
+			       mmio_base + sil_port[i].fifo_cfg);
+	} else
+		dev_warn(&pdev->dev,
+			 "cache line size not set.  Driver may not function\n");
+
+	/* Apply R_ERR on DMA activate FIS errata workaround */
+	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
+		int cnt;
+
+		for (i = 0, cnt = 0; i < host->n_ports; i++) {
+			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
+			if ((tmp & 0x3) != 0x01)
+				continue;
+			if (!cnt)
+				dev_info(&pdev->dev,
+					 "Applying R_ERR on DMA activate FIS errata fix\n");
+			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
+			cnt++;
+		}
+	}
+
+	if (host->n_ports == 4) {
+		/* flip the magic "make 4 ports work" bit */
+		tmp = readl(mmio_base + sil_port[2].bmdma);
+		if ((tmp & SIL_INTR_STEERING) == 0)
+			writel(tmp | SIL_INTR_STEERING,
+			       mmio_base + sil_port[2].bmdma);
+	}
+}
+
+static bool sil_broken_system_poweroff(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id broken_systems[] = {
+		{
+			.ident = "HP Compaq nx6325",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x12UL,
+		},
+
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+	if (dmi) {
+		unsigned long slot = (unsigned long)dmi->driver_data;
+		/* apply the quirk only to on-board controllers */
+		return slot == PCI_SLOT(pdev->devfn);
+	}
+
+	return false;
+}
+
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int board_id = ent->driver_data;
+	struct ata_port_info pi = sil_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int n_ports, rc;
+	unsigned int i;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* allocate host */
+	n_ports = 2;
+	if (board_id == sil_3114)
+		n_ports = 4;
+
+	if (sil_broken_system_poweroff(pdev)) {
+		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
+					ATA_FLAG_NO_HIBERNATE_SPINDOWN;
+		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
+				"on poweroff and hibernation\n");
+	}
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	mmio_base = host->iomap[SIL_MMIO_BAR];
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+
+		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
+		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
+		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
+		ata_sff_std_ports(ioaddr);
+
+		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
+	}
+
+	/* initialize and activate */
+	sil_init_controller(host);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
+				 &sil_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sil_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	sil_init_controller(host);
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+module_pci_driver(sil_pci_driver);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
new file mode 100644
index 0000000..319f517
--- /dev/null
+++ b/drivers/ata/sata_sil24.c
@@ -0,0 +1,1391 @@
+/*
+ * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
+ *
+ * Copyright 2005  Tejun Heo
+ *
+ * Based on preview driver from Silicon Image.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_sil24"
+#define DRV_VERSION	"1.1"
+
+/*
+ * Port request block (PRB) 32 bytes
+ */
+struct sil24_prb {
+	__le16	ctrl;
+	__le16	prot;
+	__le32	rx_cnt;
+	u8	fis[6 * 4];
+};
+
+/*
+ * Scatter gather entry (SGE) 16 bytes
+ */
+struct sil24_sge {
+	__le64	addr;
+	__le32	cnt;
+	__le32	flags;
+};
+
+
+enum {
+	SIL24_HOST_BAR		= 0,
+	SIL24_PORT_BAR		= 2,
+
+	/* sil24 fetches in chunks of 64 bytes.  The first block
+	 * contains the PRB and two SGEs.  Each subsequent block
+	 * consists of four SGEs and is called an SGT.  Calculate the
+	 * number of SGTs that fit into one page.
+	 */
+	SIL24_PRB_SZ		= sizeof(struct sil24_prb)
+				  + 2 * sizeof(struct sil24_sge),
+	SIL24_MAX_SGT		= (PAGE_SIZE - SIL24_PRB_SZ)
+				  / (4 * sizeof(struct sil24_sge)),
+
+	/* This will give us one unused SGE for ATA.  This extra SGE
+	 * will be used to store the CDB for ATAPI devices.
+	 */
+	SIL24_MAX_SGE		= 4 * SIL24_MAX_SGT + 1,
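+	/*
+	 * e.g. with 4 KiB pages: SIL24_PRB_SZ = 32 + 2 * 16 = 64 bytes,
+	 * SIL24_MAX_SGT = (4096 - 64) / 64 = 63 and SIL24_MAX_SGE = 253.
+	 */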
+
+	/*
+	 * Global controller registers (128 bytes @ BAR0)
+	 */
+		/* 32 bit regs */
+	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
+	HOST_CTRL		= 0x40,
+	HOST_IRQ_STAT		= 0x44,
+	HOST_PHY_CFG		= 0x48,
+	HOST_BIST_CTRL		= 0x50,
+	HOST_BIST_PTRN		= 0x54,
+	HOST_BIST_STAT		= 0x58,
+	HOST_MEM_BIST_STAT	= 0x5c,
+	HOST_FLASH_CMD		= 0x70,
+		/* 8 bit regs */
+	HOST_FLASH_DATA		= 0x74,
+	HOST_TRANSITION_DETECT	= 0x75,
+	HOST_GPIO_CTRL		= 0x76,
+	HOST_I2C_ADDR		= 0x78, /* 32 bit */
+	HOST_I2C_DATA		= 0x7c,
+	HOST_I2C_XFER_CNT	= 0x7e,
+	HOST_I2C_CTRL		= 0x7f,
+
+	/* HOST_SLOT_STAT bits */
+	HOST_SSTAT_ATTN		= (1 << 31),
+
+	/* HOST_CTRL bits */
+	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
+	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
+	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
+	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
+	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
+	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */
+
+	/*
+	 * Port registers
+	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
+	 */
+	PORT_REGS_SIZE		= 0x2000,
+
+	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
+	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
+
+	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
+	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
+	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
+	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */
+
+		/* 32 bit regs */
+	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
+	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
+	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
+	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
+	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
+	PORT_ACTIVATE_UPPER_ADDR= 0x101c,
+	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
+	PORT_CMD_ERR		= 0x1024, /* command error number */
+	PORT_FIS_CFG		= 0x1028,
+	PORT_FIFO_THRES		= 0x102c,
+		/* 16 bit regs */
+	PORT_DECODE_ERR_CNT	= 0x1040,
+	PORT_DECODE_ERR_THRESH	= 0x1042,
+	PORT_CRC_ERR_CNT	= 0x1044,
+	PORT_CRC_ERR_THRESH	= 0x1046,
+	PORT_HSHK_ERR_CNT	= 0x1048,
+	PORT_HSHK_ERR_THRESH	= 0x104a,
+		/* 32 bit regs */
+	PORT_PHY_CFG		= 0x1050,
+	PORT_SLOT_STAT		= 0x1800,
+	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+	PORT_CONTEXT		= 0x1e04,
+	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
+	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
+	PORT_SCONTROL		= 0x1f00,
+	PORT_SSTATUS		= 0x1f04,
+	PORT_SERROR		= 0x1f08,
+	PORT_SACTIVE		= 0x1f0c,
+
+	/* PORT_CTRL_STAT bits */
+	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
+	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
+	PORT_CS_INIT		= (1 << 2), /* port initialize */
+	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
+	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
+	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
+	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
+	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
+	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */
+
+	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
+	/* bits[11:0] are masked */
+	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
+	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
+	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
+	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
+	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
+	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
+	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
+	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
+	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
+	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
+	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
+	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */
+
+	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
+				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
+				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
+
+	/* bits[27:16] are unmasked (raw) */
+	PORT_IRQ_RAW_SHIFT	= 16,
+	PORT_IRQ_MASKED_MASK	= 0x7ff,
+	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),
+
+	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
+	PORT_IRQ_STEER_SHIFT	= 30,
+	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),
+
+	/* PORT_CMD_ERR constants */
+	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
+	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
+	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
+	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
+	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
+	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
+	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
+	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
+	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
+	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
+	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
+	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
+	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
+	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
+	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
+	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
+	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
+	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
+	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
+	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
+	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
+	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */
+
+	/* bits of PRB control field */
+	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
+	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
+	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
+	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
+	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */
+
+	/* PRB protocol field */
+	PRB_PROT_PACKET		= (1 << 0),
+	PRB_PROT_TCQ		= (1 << 1),
+	PRB_PROT_NCQ		= (1 << 2),
+	PRB_PROT_READ		= (1 << 3),
+	PRB_PROT_WRITE		= (1 << 4),
+	PRB_PROT_TRANSPARENT	= (1 << 5),
+
+	/*
+	 * Other constants
+	 */
+	SGE_TRM			= (1 << 31), /* Last SGE in chain */
+	SGE_LNK			= (1 << 30), /* linked list
+						Points to SGT, not SGE */
+	SGE_DRD			= (1 << 29), /* discard data read (/dev/null)
+						data address ignored */
+
+	SIL24_MAX_CMDS		= 31,
+
+	/* board id */
+	BID_SIL3124		= 0,
+	BID_SIL3132		= 1,
+	BID_SIL3131		= 2,
+
+	/* host flags */
+	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_AN | ATA_FLAG_PMP,
+	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */
+
+	IRQ_STAT_4PORTS		= 0xf,
+};
+
+struct sil24_ata_block {
+	struct sil24_prb prb;
+	struct sil24_sge sge[SIL24_MAX_SGE];
+};
+
+struct sil24_atapi_block {
+	struct sil24_prb prb;
+	u8 cdb[16];
+	struct sil24_sge sge[SIL24_MAX_SGE];
+};
+
+union sil24_cmd_block {
+	struct sil24_ata_block ata;
+	struct sil24_atapi_block atapi;
+};
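+
+/* A command block must occupy exactly one page; sil24_init_one()
+ * forces a link error if this union is ever sized differently from
+ * PAGE_SIZE.
+ */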
+
+static const struct sil24_cerr_info {
+	unsigned int err_mask, action;
+	const char *desc;
+} sil24_cerr_db[] = {
+	[0]			= { AC_ERR_DEV, 0,
+				    "device error" },
+	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
+				    "device error via D2H FIS" },
+	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
+				    "device error via SDB FIS" },
+	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
+				    "error in data FIS" },
+	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
+				    "failed to transmit command FIS" },
+	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
+				     "protocol mismatch" },
+	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
+				    "data direction mismatch" },
+	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
+				    "ran out of SGEs while writing" },
+	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
+				    "ran out of SGEs while reading" },
+	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
+				    "invalid data direction for ATAPI CDB" },
+	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
+				     "SGT not on qword boundary" },
+	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI target abort while fetching SGT" },
+	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI master abort while fetching SGT" },
+	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI parity error while fetching SGT" },
+	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
+				     "PRB not on qword boundary" },
+	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI target abort while fetching PRB" },
+	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI master abort while fetching PRB" },
+	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI parity error while fetching PRB" },
+	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "undefined error while transferring data" },
+	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI target abort while transferring data" },
+	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI master abort while transferring data" },
+	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
+				    "PCI parity error while transferring data" },
+	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
+				    "FIS received while sending service FIS" },
+};
+
+/*
+ * ap->private_data
+ *
+ * The preview driver always returned 0 for status.  We emulate it
+ * here from the previous interrupt.
+ */
+struct sil24_port_priv {
+	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
+	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
+	int do_port_rst;
+};
+
+static void sil24_dev_config(struct ata_device *dev);
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
+static int sil24_qc_defer(struct ata_queued_cmd *qc);
+static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void sil24_pmp_attach(struct ata_port *ap);
+static void sil24_pmp_detach(struct ata_port *ap);
+static void sil24_freeze(struct ata_port *ap);
+static void sil24_thaw(struct ata_port *ap);
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline);
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline);
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline);
+static void sil24_error_handler(struct ata_port *ap);
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
+static int sil24_port_start(struct ata_port *ap);
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int sil24_pci_device_resume(struct pci_dev *pdev);
+#endif
+#ifdef CONFIG_PM
+static int sil24_port_resume(struct ata_port *ap);
+#endif
+
+static const struct pci_device_id sil24_pci_tbl[] = {
+	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
+	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
+	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
+	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
+	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
+	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
+	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
+
+	{ } /* terminate list */
+};
+
+static struct pci_driver sil24_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sil24_pci_tbl,
+	.probe			= sil24_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= sil24_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sil24_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	.can_queue		= SIL24_MAX_CMDS,
+	.sg_tablesize		= SIL24_MAX_SGE,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.tag_alloc_policy	= BLK_TAG_ALLOC_FIFO,
+};
+
+static struct ata_port_operations sil24_ops = {
+	.inherits		= &sata_pmp_port_ops,
+
+	.qc_defer		= sil24_qc_defer,
+	.qc_prep		= sil24_qc_prep,
+	.qc_issue		= sil24_qc_issue,
+	.qc_fill_rtf		= sil24_qc_fill_rtf,
+
+	.freeze			= sil24_freeze,
+	.thaw			= sil24_thaw,
+	.softreset		= sil24_softreset,
+	.hardreset		= sil24_hardreset,
+	.pmp_softreset		= sil24_softreset,
+	.pmp_hardreset		= sil24_pmp_hardreset,
+	.error_handler		= sil24_error_handler,
+	.post_internal_cmd	= sil24_post_internal_cmd,
+	.dev_config		= sil24_dev_config,
+
+	.scr_read		= sil24_scr_read,
+	.scr_write		= sil24_scr_write,
+	.pmp_attach		= sil24_pmp_attach,
+	.pmp_detach		= sil24_pmp_detach,
+
+	.port_start		= sil24_port_start,
+#ifdef CONFIG_PM
+	.port_resume		= sil24_port_resume,
+#endif
+};
+
+static bool sata_sil24_msi;    /* Enable MSI (default: off) */
+module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
+
+/*
+ * Use bits 30-31 of port_flags to encode available port numbers.
+ * Current maximum is 4.
+ */
+#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
+#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
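+/* Example: SIL24_NPORTS2FLAG(4) stores 3 in bits 30-31, and
+ * SIL24_FLAG2NPORTS() turns that back into 4. */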
+
+static const struct ata_port_info sil24_port_info[] = {
+	/* sil_3124 */
+	{
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
+				  SIL24_FLAG_PCIX_IRQ_WOC,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil24_ops,
+	},
+	/* sil_3132 */
+	{
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil24_ops,
+	},
+	/* sil_3131/sil_3531 */
+	{
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA5,
+		.port_ops	= &sil24_ops,
+	},
+};
+
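+/* The internal (EH) command's tag falls outside the 31 hardware slots,
+ * so it is mapped onto slot 0; libata only issues the internal command
+ * while the port is otherwise idle, so the slot cannot collide with a
+ * normal command.
+ */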
+static int sil24_tag(int tag)
+{
+	if (unlikely(ata_tag_internal(tag)))
+		return 0;
+	return tag;
+}
+
+static unsigned long sil24_port_offset(struct ata_port *ap)
+{
+	return ap->port_no * PORT_REGS_SIZE;
+}
+
+static void __iomem *sil24_port_base(struct ata_port *ap)
+{
+	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
+}
+
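+/* PORT_CTRL_STAT and PORT_CTRL_CLR form a set/clear pair: writing a
+ * PORT_CS_* bit to the former sets it, writing it to the latter clears
+ * it.  sil24_dev_config() below uses this to flip the CDB length bit.
+ */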
+static void sil24_dev_config(struct ata_device *dev)
+{
+	void __iomem *port = sil24_port_base(dev->link->ap);
+
+	if (dev->cdb_len == 16)
+		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
+	else
+		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
+}
+
+static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
+{
+	void __iomem *port = sil24_port_base(ap);
+	struct sil24_prb __iomem *prb;
+	u8 fis[6 * 4];
+
+	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
+	memcpy_fromio(fis, prb->fis, sizeof(fis));
+	ata_tf_from_fis(fis, tf);
+}
+
+static int sil24_scr_map[] = {
+	[SCR_CONTROL]	= 0,
+	[SCR_STATUS]	= 1,
+	[SCR_ERROR]	= 2,
+	[SCR_ACTIVE]	= 3,
+};
+
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
+{
+	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
+
+	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
+{
+	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
+
+	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void sil24_config_port(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+
+	/* configure IRQ WoC */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
+	else
+		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+	/* initialize error thresholds and zero the error counters */
+	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
+	writew(0x8000, port + PORT_CRC_ERR_THRESH);
+	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
+	writew(0x0000, port + PORT_DECODE_ERR_CNT);
+	writew(0x0000, port + PORT_CRC_ERR_CNT);
+	writew(0x0000, port + PORT_HSHK_ERR_CNT);
+
+	/* always use 64bit activation */
+	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
+
+	/* clear port multiplier enable and resume bits */
+	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+}
+
+static void sil24_config_pmp(struct ata_port *ap, int attached)
+{
+	void __iomem *port = sil24_port_base(ap);
+
+	if (attached)
+		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
+	else
+		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
+}
+
+static void sil24_clear_pmp(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+	int i;
+
+	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+
+	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
+		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
+
+		writel(0, pmp_base + PORT_PMP_STATUS);
+		writel(0, pmp_base + PORT_PMP_QACTIVE);
+	}
+}
+
+static int sil24_init_port(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+	struct sil24_port_priv *pp = ap->private_data;
+	u32 tmp;
+
+	/* clear PMP error status */
+	if (sata_pmp_attached(ap))
+		sil24_clear_pmp(ap);
+
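+	/* Request init and wait: ata_wait_register() spins while the
+	 * masked value equals the given value, so the first wait is for
+	 * PORT_CS_INIT to self-clear and the second for PORT_CS_RDY to
+	 * assert.
+	 */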
+	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
+	ata_wait_register(ap, port + PORT_CTRL_STAT,
+			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
+	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
+				PORT_CS_RDY, 0, 10, 100);
+
+	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
+		pp->do_port_rst = 1;
+		ap->link.eh_context.i.action |= ATA_EH_RESET;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
+				 const struct ata_taskfile *tf,
+				 int is_cmd, u32 ctrl,
+				 unsigned long timeout_msec)
+{
+	void __iomem *port = sil24_port_base(ap);
+	struct sil24_port_priv *pp = ap->private_data;
+	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
+	dma_addr_t paddr = pp->cmd_block_dma;
+	u32 irq_enabled, irq_mask, irq_stat;
+	int rc;
+
+	prb->ctrl = cpu_to_le16(ctrl);
+	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
+
+	/* temporarily plug completion and error interrupts */
+	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
+	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
+
+	/*
+	 * The barrier is required to ensure that writes to cmd_block reach
+	 * the memory before the write to PORT_CMD_ACTIVATE.
+	 */
+	wmb();
+	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
+
+	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
+	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
+				     10, timeout_msec);
+
+	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
+	irq_stat >>= PORT_IRQ_RAW_SHIFT;
+
+	if (irq_stat & PORT_IRQ_COMPLETE)
+		rc = 0;
+	else {
+		/* force port into known state */
+		sil24_init_port(ap);
+
+		if (irq_stat & PORT_IRQ_ERROR)
+			rc = -EIO;
+		else
+			rc = -EBUSY;
+	}
+
+	/* restore IRQ enabled */
+	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
+
+	return rc;
+}
+
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	int pmp = sata_srst_pmp(link);
+	unsigned long timeout_msec = 0;
+	struct ata_taskfile tf;
+	const char *reason;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* put the port into known state */
+	if (sil24_init_port(ap)) {
+		reason = "port not ready";
+		goto err;
+	}
+
+	/* do SRST */
+	if (time_after(deadline, jiffies))
+		timeout_msec = jiffies_to_msecs(deadline - jiffies);
+
+	ata_tf_init(link->device, &tf);	/* doesn't really matter */
+	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
+				   timeout_msec);
+	if (rc == -EBUSY) {
+		reason = "timeout";
+		goto err;
+	} else if (rc) {
+		reason = "SRST command error";
+		goto err;
+	}
+
+	sil24_read_tf(ap, 0, &tf);
+	*class = ata_dev_classify(&tf);
+
+	DPRINTK("EXIT, class=%u\n", *class);
+	return 0;
+
+ err:
+	ata_link_err(link, "softreset failed (%s)\n", reason);
+	return -EIO;
+}
+
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *port = sil24_port_base(ap);
+	struct sil24_port_priv *pp = ap->private_data;
+	int did_port_rst = 0;
+	const char *reason;
+	int tout_msec, rc;
+	u32 tmp;
+
+ retry:
+	/* Sometimes, DEV_RST is not enough to recover the controller.
+	 * This happens often after the PMP DMA CS errata.
+	 */
+	if (pp->do_port_rst) {
+		ata_port_warn(ap,
+			      "controller in dubious state, performing PORT_RST\n");
+
+		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
+		ata_msleep(ap, 10);
+		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
+				  10, 5000);
+
+		/* restore port configuration */
+		sil24_config_port(ap);
+		sil24_config_pmp(ap, ap->nr_pmp_links);
+
+		pp->do_port_rst = 0;
+		did_port_rst = 1;
+	}
+
+	/* sil24 does the right thing(tm) without any protection */
+	sata_set_spd(link);
+
+	tout_msec = 100;
+	if (ata_link_online(link))
+		tout_msec = 5000;
+
+	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
+	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
+				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
+				tout_msec);
+
+	/* SStatus oscillates between zero and valid status after
+	 * DEV_RST, debounce it.
+	 */
+	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
+	if (rc) {
+		reason = "PHY debouncing failed";
+		goto err;
+	}
+
+	if (tmp & PORT_CS_DEV_RST) {
+		if (ata_link_offline(link))
+			return 0;
+		reason = "link not ready";
+		goto err;
+	}
+
+	/* Sil24 doesn't store signature FIS after hardreset, so we
+	 * can't wait for BSY to clear.  Some devices take a long time
+	 * to get ready and those devices will choke if we don't wait
+	 * for BSY clearance here.  Tell libata to perform follow-up
+	 * softreset.
+	 */
+	return -EAGAIN;
+
+ err:
+	if (!did_port_rst) {
+		pp->do_port_rst = 1;
+		goto retry;
+	}
+
+	ata_link_err(link, "hardreset failed (%s)\n", reason);
+	return -EIO;
+}
+
+static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+				 struct sil24_sge *sge)
+{
+	struct scatterlist *sg;
+	struct sil24_sge *last_sge = NULL;
+	unsigned int si;
+
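+	/* called only for DMA-mapped commands (ATA_QCFLAG_DMAMAP), so
+	 * the sg list is non-empty and last_sge is non-NULL after the
+	 * loop
+	 */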
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		sge->addr = cpu_to_le64(sg_dma_address(sg));
+		sge->cnt = cpu_to_le32(sg_dma_len(sg));
+		sge->flags = 0;
+
+		last_sge = sge;
+		sge++;
+	}
+
+	last_sge->flags = cpu_to_le32(SGE_TRM);
+}
+
+static int sil24_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+	u8 prot = qc->tf.protocol;
+
+	/*
+	 * There is a bug in the chip:
+	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
+	 * If the host issues a read request for LRAM and SActive registers
+	 * while active commands are available in the port, PRB/SGT data in
+	 * the LRAM can become corrupted. This issue applies only when
+	 * reading from, but not writing to, the LRAM.
+	 *
+	 * Therefore, reading LRAM when there is no particular error [and
+	 * other commands may be outstanding] is prohibited.
+	 *
+	 * To avoid this bug there are two situations where a command must run
+	 * exclusive of any other commands on the port:
+	 *
+	 * - ATAPI commands which check the sense data
+	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
+	 *   set.
+	 */
+	int is_excl = (ata_is_atapi(prot) ||
+		       (qc->flags & ATA_QCFLAG_RESULT_TF));
+
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+		} else
+			return ATA_DEFER_PORT;
+	} else if (unlikely(is_excl)) {
+		ap->excl_link = link;
+		if (ap->nr_active_links)
+			return ATA_DEFER_PORT;
+		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+	}
+
+	return ata_std_qc_defer(qc);
+}
+
+static void sil24_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sil24_port_priv *pp = ap->private_data;
+	union sil24_cmd_block *cb;
+	struct sil24_prb *prb;
+	struct sil24_sge *sge;
+	u16 ctrl = 0;
+
+	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];
+
+	if (!ata_is_atapi(qc->tf.protocol)) {
+		prb = &cb->ata.prb;
+		sge = cb->ata.sge;
+		if (ata_is_data(qc->tf.protocol)) {
+			u16 prot = 0;
+			ctrl = PRB_CTRL_PROTOCOL;
+			if (ata_is_ncq(qc->tf.protocol))
+				prot |= PRB_PROT_NCQ;
+			if (qc->tf.flags & ATA_TFLAG_WRITE)
+				prot |= PRB_PROT_WRITE;
+			else
+				prot |= PRB_PROT_READ;
+			prb->prot = cpu_to_le16(prot);
+		}
+	} else {
+		prb = &cb->atapi.prb;
+		sge = cb->atapi.sge;
+		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
+		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
+
+		if (ata_is_data(qc->tf.protocol)) {
+			if (qc->tf.flags & ATA_TFLAG_WRITE)
+				ctrl = PRB_CTRL_PACKET_WRITE;
+			else
+				ctrl = PRB_CTRL_PACKET_READ;
+		}
+	}
+
+	prb->ctrl = cpu_to_le16(ctrl);
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
+
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		sil24_fill_sg(qc, sge);
+}
+
+static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sil24_port_priv *pp = ap->private_data;
+	void __iomem *port = sil24_port_base(ap);
+	unsigned int tag = sil24_tag(qc->hw_tag);
+	dma_addr_t paddr;
+	void __iomem *activate;
+
+	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
+	activate = port + PORT_CMD_ACTIVATE + tag * 8;
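+	/* Each of the 31 slots has its own 8-byte activation register
+	 * (see PORT_CMD_ACTIVATE); both halves of the command block's
+	 * bus address are written below.
+	 */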
+
+	/*
+	 * The barrier is required to ensure that writes to cmd_block reach
+	 * the memory before the write to PORT_CMD_ACTIVATE.
+	 */
+	wmb();
+	writel((u32)paddr, activate);
+	writel((u64)paddr >> 32, activate + 4);
+
+	return 0;
+}
+
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
+	return true;
+}
+
+static void sil24_pmp_attach(struct ata_port *ap)
+{
+	u32 *gscr = ap->link.device->gscr;
+
+	sil24_config_pmp(ap, 1);
+	sil24_init_port(ap);
+
+	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
+	    sata_pmp_gscr_devid(gscr) == 0x4140) {
+		ata_port_info(ap,
+			"disabling NCQ support due to sil24-mv4140 quirk\n");
+		ap->flags &= ~ATA_FLAG_NCQ;
+	}
+}
+
+static void sil24_pmp_detach(struct ata_port *ap)
+{
+	sil24_init_port(ap);
+	sil24_config_pmp(ap, 0);
+
+	ap->flags |= ATA_FLAG_NCQ;
+}
+
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline)
+{
+	int rc;
+
+	rc = sil24_init_port(link->ap);
+	if (rc) {
+		ata_link_err(link, "hardreset failed (port not ready)\n");
+		return rc;
+	}
+
+	return sata_std_hardreset(link, class, deadline);
+}
+
+static void sil24_freeze(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+
+	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
+	 * PORT_IRQ_ENABLE instead.
+	 */
+	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
+}
+
+static void sil24_thaw(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port + PORT_IRQ_STAT);
+	writel(tmp, port + PORT_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
+}
+
+static void sil24_error_intr(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+	struct sil24_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc = NULL;
+	struct ata_link *link;
+	struct ata_eh_info *ehi;
+	int abort = 0, freeze = 0;
+	u32 irq_stat;
+
+	/* on error, we need to clear IRQ explicitly */
+	irq_stat = readl(port + PORT_IRQ_STAT);
+	writel(irq_stat, port + PORT_IRQ_STAT);
+
+	/* first, analyze and record host port events */
+	link = &ap->link;
+	ehi = &link->eh_info;
+	ata_ehi_clear_desc(ehi);
+
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
+		ata_ehi_push_desc(ehi, "SDB notify");
+		sata_async_notification(ap);
+	}
+
+	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, "%s",
+				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
+				  "PHY RDY changed" : "device exchanged");
+		freeze = 1;
+	}
+
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_RESET;
+		ata_ehi_push_desc(ehi, "unknown FIS");
+		freeze = 1;
+	}
+
+	/* deal with command error */
+	if (irq_stat & PORT_IRQ_ERROR) {
+		const struct sil24_cerr_info *ci = NULL;
+		unsigned int err_mask = 0, action = 0;
+		u32 context, cerr;
+		int pmp;
+
+		abort = 1;
+
+		/* DMA Context Switch Failure in Port Multiplier Mode
+		 * errata.  If we have active commands to 3 or more
+		 * devices, any error condition on active devices can
+		 * corrupt DMA context switching.
+		 */
+		if (ap->nr_active_links >= 3) {
+			ehi->err_mask |= AC_ERR_OTHER;
+			ehi->action |= ATA_EH_RESET;
+			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
+			pp->do_port_rst = 1;
+			freeze = 1;
+		}
+
+		/* find out the offending link and qc */
+		if (sata_pmp_attached(ap)) {
+			context = readl(port + PORT_CONTEXT);
+			pmp = (context >> 5) & 0xf;
+
+			if (pmp < ap->nr_pmp_links) {
+				link = &ap->pmp_link[pmp];
+				ehi = &link->eh_info;
+				qc = ata_qc_from_tag(ap, link->active_tag);
+
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
+						  irq_stat);
+			} else {
+				err_mask |= AC_ERR_HSM;
+				action |= ATA_EH_RESET;
+				freeze = 1;
+			}
+		} else
+			qc = ata_qc_from_tag(ap, link->active_tag);
+
+		/* analyze CMD_ERR */
+		cerr = readl(port + PORT_CMD_ERR);
+		if (cerr < ARRAY_SIZE(sil24_cerr_db))
+			ci = &sil24_cerr_db[cerr];
+
+		if (ci && ci->desc) {
+			err_mask |= ci->err_mask;
+			action |= ci->action;
+			if (action & ATA_EH_RESET)
+				freeze = 1;
+			ata_ehi_push_desc(ehi, "%s", ci->desc);
+		} else {
+			err_mask |= AC_ERR_OTHER;
+			action |= ATA_EH_RESET;
+			freeze = 1;
+			ata_ehi_push_desc(ehi, "unknown command error %d",
+					  cerr);
+		}
+
+		/* record error info */
+		if (qc)
+			qc->err_mask |= err_mask;
+		else
+			ehi->err_mask |= err_mask;
+
+		ehi->action |= action;
+
+		/* if PMP, resume */
+		if (sata_pmp_attached(ap))
+			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
+	}
+
+	/* freeze or abort */
+	if (freeze)
+		ata_port_freeze(ap);
+	else if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
+}
+
+static inline void sil24_host_intr(struct ata_port *ap)
+{
+	void __iomem *port = sil24_port_base(ap);
+	u32 slot_stat, qc_active;
+	int rc;
+
+	/* If PCIX_IRQ_WOC, there's an inherent race window between
+	 * clearing IRQ pending status and reading PORT_SLOT_STAT
+	 * which may cause spurious interrupts afterwards.  This is
+	 * unavoidable and much better than losing interrupts which
+	 * happens if IRQ pending is cleared after reading
+	 * PORT_SLOT_STAT.
+	 */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
+	slot_stat = readl(port + PORT_SLOT_STAT);
+
+	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
+		sil24_error_intr(ap);
+		return;
+	}
+
+	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
+	rc = ata_qc_complete_multiple(ap, qc_active);
+	if (rc > 0)
+		return;
+	if (rc < 0) {
+		struct ata_eh_info *ehi = &ap->link.eh_info;
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_RESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	/* spurious interrupts are expected if PCIX_IRQ_WOC */
+	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
+		ata_port_info(ap,
+			"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
+			slot_stat, ap->link.active_tag, ap->link.sactive);
+}
+
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+	unsigned handled = 0;
+	u32 status;
+	int i;
+
+	status = readl(host_base + HOST_IRQ_STAT);
+
+	if (status == 0xffffffff) {
+		dev_err(host->dev, "IRQ status == 0xffffffff, "
+			"PCI fault or device removal?\n");
+		goto out;
+	}
+
+	if (!(status & IRQ_STAT_4PORTS))
+		goto out;
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++)
+		if (status & (1 << i)) {
+			sil24_host_intr(host->ports[i]);
+			handled++;
+		}
+
+	spin_unlock(&host->lock);
+ out:
+	return IRQ_RETVAL(handled);
+}
+
+static void sil24_error_handler(struct ata_port *ap)
+{
+	struct sil24_port_priv *pp = ap->private_data;
+
+	if (sil24_init_port(ap))
+		ata_eh_freeze_port(ap);
+
+	sata_pmp_error_handler(ap);
+
+	pp->do_port_rst = 0;
+}
+
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
+		ata_eh_freeze_port(ap);
+}
+
+static int sil24_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sil24_port_priv *pp;
+	union sil24_cmd_block *cb;
+	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
+	dma_addr_t cb_dma;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+	memset(cb, 0, cb_size);
+
+	pp->cmd_block = cb;
+	pp->cmd_block_dma = cb_dma;
+
+	ap->private_data = pp;
+
+	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
+	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");
+
+	return 0;
+}
+
+static void sil24_init_controller(struct ata_host *host)
+{
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+	u32 tmp;
+	int i;
+
+	/* GPIO off */
+	writel(0, host_base + HOST_FLASH_CMD);
+
+	/* clear global reset & mask interrupts during initialization */
+	writel(0, host_base + HOST_CTRL);
+
+	/* init ports */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *port = sil24_port_base(ap);
+
+		/* Initial PHY setting */
+		writel(0x20c, port + PORT_PHY_CFG);
+
+		/* Clear port RST */
+		tmp = readl(port + PORT_CTRL_STAT);
+		if (tmp & PORT_CS_PORT_RST) {
+			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
+						PORT_CS_PORT_RST,
+						PORT_CS_PORT_RST, 10, 100);
+			if (tmp & PORT_CS_PORT_RST)
+				dev_err(host->dev,
+					"failed to clear port RST\n");
+		}
+
+		/* configure port */
+		sil24_config_port(ap);
+	}
+
+	/* Turn on interrupts */
+	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+}
+
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
+	struct ata_port_info pi = sil24_port_info[ent->driver_data];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	void __iomem * const *iomap;
+	struct ata_host *host;
+	int rc;
+	u32 tmp;
+
+	/* cause link error if sil24_cmd_block is sized wrongly */
+	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
+		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev,
+				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
+				DRV_NAME);
+	if (rc)
+		return rc;
+	iomap = pcim_iomap_table(pdev);
+
+	/* apply workaround for completion IRQ loss on PCI-X errata */
+	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
+		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
+		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
+			dev_info(&pdev->dev,
+				 "Applying fix for completion IRQ loss on PCI-X errata\n");
+		else
+			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
+	}
+
+	/* allocate and fill host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
+				    SIL24_FLAG2NPORTS(ppi[0]->flags));
+	if (!host)
+		return -ENOMEM;
+	host->iomap = iomap;
+
+	/* configure and activate the device */
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_err(&pdev->dev,
+				"32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	/* Set max read request size to 4096.  This slightly increases
+	 * write throughput for pci-e variants.
+	 */
+	pcie_set_readrq(pdev, 4096);
+
+	sil24_init_controller(host);
+
+	if (sata_sil24_msi && !pci_enable_msi(pdev)) {
+		dev_info(&pdev->dev, "Using MSI\n");
+		pci_intx(pdev, 0);
+	}
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
+				 &sil24_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sil24_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
+		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
+
+	sil24_init_controller(host);
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int sil24_port_resume(struct ata_port *ap)
+{
+	sil24_config_pmp(ap, ap->nr_pmp_links);
+	return 0;
+}
+#endif
+
+module_pci_driver(sil24_pci_driver);
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
new file mode 100644
index 0000000..30f4f35
--- /dev/null
+++ b/drivers/ata/sata_sis.c
@@ -0,0 +1,314 @@
+/*
+ *  sata_sis.c - Silicon Integrated Systems SATA
+ *
+ *  Maintained by:  Uwe Koziolek
+ *  		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2004 Uwe Koziolek
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include "sis.h"
+
+#define DRV_NAME	"sata_sis"
+#define DRV_VERSION	"1.0"
+
+enum {
+	sis_180			= 0,
+	SIS_SCR_PCI_BAR		= 5,
+
+	/* PCI configuration registers */
+	SIS_GENCTL		= 0x54, /* IDE General Control register */
+	SIS_SCR_BASE		= 0xc0, /* sata0 phy SCR registers */
+	SIS180_SATA1_OFS	= 0x10, /* offset from sata0->sata1 phy regs */
+	SIS182_SATA1_OFS	= 0x20, /* offset from sata0->sata1 phy regs */
+	SIS_PMR			= 0x90, /* port mapping register */
+	SIS_PMR_COMBINED	= 0x30,
+
+	/* random bits */
+	SIS_FLAG_CFGSCR		= (1 << 30), /* host flag: SCRs via PCI cfg */
+
+	GENCTL_IOMAPPED_SCR	= (1 << 26), /* if set, SCRs are in IO space */
+};
+
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static const struct pci_device_id sis_pci_tbl[] = {
+	{ PCI_VDEVICE(SI, 0x0180), sis_180 },	/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0181), sis_180 },	/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0182), sis_180 },	/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x0183), sis_180 },	/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x1182), sis_180 },	/* SiS 966/680 */
+	{ PCI_VDEVICE(SI, 0x1183), sis_180 },	/* SiS 966/966L/968/680 */
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver sis_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sis_pci_tbl,
+	.probe			= sis_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sis_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sis_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.scr_read		= sis_scr_read,
+	.scr_write		= sis_scr_write,
+};
+
+static const struct ata_port_info sis_port_info = {
+	.flags		= ATA_FLAG_SATA,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_ops,
+};
+
+MODULE_AUTHOR("Uwe Koziolek");
+MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
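+/* When SCRs live in PCI config space, sata0's block starts at
+ * SIS_SCR_BASE and sata1's sits 0x10 or 0x20 bytes further on,
+ * depending on the chip generation.
+ */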
+static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
+	u8 pmr;
+
+	if (ap->port_no) {
+		switch (pdev->device) {
+		case 0x0180:
+		case 0x0181:
+			pci_read_config_byte(pdev, SIS_PMR, &pmr);
+			if ((pmr & SIS_PMR_COMBINED) == 0)
+				addr += SIS180_SATA1_OFS;
+			break;
+
+		case 0x0182:
+		case 0x0183:
+		case 0x1182:
+			addr += SIS182_SATA1_OFS;
+			break;
+		}
+	}
+	if (link->pmp)
+		addr += 0x10;
+
+	return addr;
+}
+
+static int sis_scr_cfg_read(struct ata_link *link,
+			    unsigned int sc_reg, u32 *val)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
+
+	if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
+		return -EINVAL;
+
+	pci_read_config_dword(pdev, cfg_addr, val);
+	return 0;
+}
+
+static int sis_scr_cfg_write(struct ata_link *link,
+			     unsigned int sc_reg, u32 val)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
+
+	pci_write_config_dword(pdev, cfg_addr, val);
+	return 0;
+}
+
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
+
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+
+	if (ap->flags & SIS_FLAG_CFGSCR)
+		return sis_scr_cfg_read(link, sc_reg, val);
+
+	*val = ioread32(base + sc_reg * 4);
+	return 0;
+}
+
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
+
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+
+	if (ap->flags & SIS_FLAG_CFGSCR)
+		return sis_scr_cfg_write(link, sc_reg, val);
+
+	iowrite32(val, base + (sc_reg * 4));
+	return 0;
+}
+
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct ata_port_info pi = sis_port_info;
+	const struct ata_port_info *ppi[] = { &pi, &pi };
+	struct ata_host *host;
+	u32 genctl, val;
+	u8 pmr;
+	u8 port2_start = 0x20;	/* second port's SCR offset in IO space */
+	int i, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* check and see if the SCRs are in IO space or PCI cfg space */
+	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
+	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
+		pi.flags |= SIS_FLAG_CFGSCR;
+
+	/* if hardware thinks SCRs are in IO space, but there are
+	 * no IO resources assigned, change to PCI cfg space.
+	 */
+	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
+	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
+	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
+		genctl &= ~GENCTL_IOMAPPED_SCR;
+		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
+		pi.flags |= SIS_FLAG_CFGSCR;
+	}
+
+	pci_read_config_byte(pdev, SIS_PMR, &pmr);
+	switch (ent->device) {
+	case 0x0180:
+	case 0x0181:
+
+		/* The PATA-handling is provided by pata_sis */
+		switch (pmr & 0x30) {
+		case 0x10:
+			ppi[1] = &sis_info133_for_sata;
+			break;
+
+		case 0x30:
+			ppi[0] = &sis_info133_for_sata;
+			break;
+		}
+		if ((pmr & SIS_PMR_COMBINED) == 0) {
+			dev_info(&pdev->dev,
+				 "Detected SiS 180/181/964 chipset in SATA mode\n");
+			port2_start = 64;
+		} else {
+			dev_info(&pdev->dev,
+				 "Detected SiS 180/181 chipset in combined mode\n");
+			port2_start = 0;
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
+		}
+		break;
+
+	case 0x0182:
+	case 0x0183:
+		pci_read_config_dword(pdev, 0x6C, &val);
+		if (val & (1L << 31)) {
+			dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
+		} else {
+			dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
+		}
+		break;
+
+	case 0x1182:
+		dev_info(&pdev->dev,
+			 "Detected SiS 1182/966/680 SATA controller\n");
+		pi.flags |= ATA_FLAG_SLAVE_POSS;
+		break;
+
+	case 0x1183:
+		dev_info(&pdev->dev,
+			 "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+		ppi[0] = &sis_info133_for_sata;
+		ppi[1] = &sis_info133_for_sata;
+		break;
+	}
+
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->flags & ATA_FLAG_SATA &&
+		    ap->flags & ATA_FLAG_SLAVE_POSS) {
+			rc = ata_slave_link_init(ap);
+			if (rc)
+				return rc;
+		}
+	}
+
+	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
+		void __iomem *mmio;
+
+		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
+		if (rc)
+			return rc;
+		mmio = host->iomap[SIS_SCR_PCI_BAR];
+
+		host->ports[0]->ioaddr.scr_addr = mmio;
+		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
+	}
+
+	pci_set_master(pdev);
+	pci_intx(pdev, 1);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &sis_sht);
+}
+
+module_pci_driver(sis_pci_driver);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
new file mode 100644
index 0000000..a9d692c
--- /dev/null
+++ b/drivers/ata/sata_svw.c
@@ -0,0 +1,544 @@
+/*
+ *  sata_svw.c - ServerWorks / Apple K2 SATA
+ *
+ *  Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
+ *		   Jeff Garzik <jgarzik@pobox.com>
+ *  		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ *  Bits from Jeff Garzik, Copyright RedHat, Inc.
+ *
+ *  This driver probably works with non-Apple versions of the
+ *  Broadcom chipset...
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi.h>
+#include <linux/libata.h>
+#include <linux/of.h>
+
+#define DRV_NAME	"sata_svw"
+#define DRV_VERSION	"2.3"
+
+enum {
+	/* ap->flags bits */
+	K2_FLAG_SATA_8_PORTS		= (1 << 24),
+	K2_FLAG_NO_ATAPI_DMA		= (1 << 25),
+	K2_FLAG_BAR_POS_3		= (1 << 26),
+
+	/* Taskfile registers offsets */
+	K2_SATA_TF_CMD_OFFSET		= 0x00,
+	K2_SATA_TF_DATA_OFFSET		= 0x00,
+	K2_SATA_TF_ERROR_OFFSET		= 0x04,
+	K2_SATA_TF_NSECT_OFFSET		= 0x08,
+	K2_SATA_TF_LBAL_OFFSET		= 0x0c,
+	K2_SATA_TF_LBAM_OFFSET		= 0x10,
+	K2_SATA_TF_LBAH_OFFSET		= 0x14,
+	K2_SATA_TF_DEVICE_OFFSET	= 0x18,
+	K2_SATA_TF_CMDSTAT_OFFSET	= 0x1c,
+	K2_SATA_TF_CTL_OFFSET		= 0x20,
+
+	/* DMA base */
+	K2_SATA_DMA_CMD_OFFSET		= 0x30,
+
+	/* SCRs base */
+	K2_SATA_SCR_STATUS_OFFSET	= 0x40,
+	K2_SATA_SCR_ERROR_OFFSET	= 0x44,
+	K2_SATA_SCR_CONTROL_OFFSET	= 0x48,
+
+	/* Others */
+	K2_SATA_SICR1_OFFSET		= 0x80,
+	K2_SATA_SICR2_OFFSET		= 0x84,
+	K2_SATA_SIM_OFFSET		= 0x88,
+
+	/* Port stride */
+	K2_SATA_PORT_OFFSET		= 0x100,
+
+	chip_svw4			= 0,
+	chip_svw8			= 1,
+	chip_svw42			= 2,	/* bar 3 */
+	chip_svw43			= 3,	/* bar 5 */
+};
+
+static u8 k2_stat_check_status(struct ata_port *ap);
+
+
+static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	u8 cmnd = qc->scsicmd->cmnd[0];
+
+	if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
+		return -1;	/* ATAPI DMA not supported */
+	else {
+		switch (cmnd) {
+		case READ_10:
+		case READ_12:
+		case READ_16:
+		case WRITE_10:
+		case WRITE_12:
+		case WRITE_16:
+			return 0;
+
+		default:
+			return -1;
+		}
+
+	}
+}
+
+static int k2_sata_scr_read(struct ata_link *link,
+			    unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+
+static int k2_sata_scr_write(struct ata_link *link,
+			     unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+static int k2_sata_softreset(struct ata_link *link,
+			     unsigned int *class, unsigned long deadline)
+{
+	u8 dmactl;
+	void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+	dmactl = readb(mmio + ATA_DMA_CMD);
+
+	/* Clear the start bit */
+	if (dmactl & ATA_DMA_START) {
+		dmactl &= ~ATA_DMA_START;
+		writeb(dmactl, mmio + ATA_DMA_CMD);
+	}
+
+	return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+			     unsigned int *class, unsigned long deadline)
+{
+	u8 dmactl;
+	void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+	dmactl = readb(mmio + ATA_DMA_CMD);
+
+	/* Clear the start bit */
+	if (dmactl & ATA_DMA_START) {
+		dmactl &= ~ATA_DMA_START;
+		writeb(dmactl, mmio + ATA_DMA_CMD);
+	}
+
+	return sata_sff_hardreset(link, class, deadline);
+}
+
+static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		writeb(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
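+	/* Taskfile registers on the K2 are 16 bits wide; for LBA48 the
+	 * HOB value travels in the upper byte of a single 16-bit write.
+	 */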
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writew(tf->feature | (((u16)tf->hob_feature) << 8),
+		       ioaddr->feature_addr);
+		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+		       ioaddr->nsect_addr);
+		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+		       ioaddr->lbal_addr);
+		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+		       ioaddr->lbam_addr);
+		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+		       ioaddr->lbah_addr);
+	} else if (is_addr) {
+		writew(tf->feature, ioaddr->feature_addr);
+		writew(tf->nsect, ioaddr->nsect_addr);
+		writew(tf->lbal, ioaddr->lbal_addr);
+		writew(tf->lbam, ioaddr->lbam_addr);
+		writew(tf->lbah, ioaddr->lbah_addr);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		writeb(tf->device, ioaddr->device_addr);
+
+	ata_wait_idle(ap);
+}
+
+
+static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u16 nsect, lbal, lbam, lbah, feature;
+
+	tf->command = k2_stat_check_status(ap);
+	tf->device = readw(ioaddr->device_addr);
+	feature = readw(ioaddr->error_addr);
+	nsect = readw(ioaddr->nsect_addr);
+	lbal = readw(ioaddr->lbal_addr);
+	lbam = readw(ioaddr->lbam_addr);
+	lbah = readw(ioaddr->lbah_addr);
+
+	tf->feature = feature;
+	tf->nsect = nsect;
+	tf->lbal = lbal;
+	tf->lbam = lbam;
+	tf->lbah = lbah;
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		tf->hob_feature = feature >> 8;
+		tf->hob_nsect = nsect >> 8;
+		tf->hob_lbal = lbal >> 8;
+		tf->hob_lbam = lbam >> 8;
+		tf->hob_lbah = lbah >> 8;
+	}
+}
+
+/**
+ *	k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+
+static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear;
+	 * ATA_DMA_WR makes the engine write to memory (device-to-host)
+	 */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue r/w command if this is not an ATA DMA command */
+	if (qc->tf.protocol != ATA_PROT_DMA)
+		ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ *	k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+
+static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+	/* This works around possible data corruption.
+
+	   On certain SATA controllers, corruption can be seen when the
+	   r/w command is given to the controller before the host DMA is
+	   started.
+
+	   On a Read command, the controller would initiate the
+	   command to the drive even before it sees the DMA
+	   start. When there are very fast drives connected to the
+	   controller, or when the data request hits in the drive
+	   cache, there is the possibility that the drive returns a
+	   part or all of the requested data to the controller before
+	   the DMA start is issued.  In this case, the controller
+	   would become confused as to what to do with the data.  In
+	   the worst case, when all the data is returned back to the
+	   controller, the controller could hang. In other cases it
+	   could return partial data, resulting in data
+	   corruption. This problem has been seen on PPC systems and
+	   can also appear on a system with very fast disks, where
+	   the SATA controller is sitting behind a number of bridges,
+	   and hence there is significant latency between the r/w
+	   command and the start command. */
+	/* issue r/w command if the access is to ATA */
+	if (qc->tf.protocol == ATA_PROT_DMA)
+		ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+
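+/* Taskfile registers are spaced 4 bytes apart (see the offsets above),
+ * with the 8-bit status in the low byte of a 32-bit register, hence
+ * readl() with an implicit truncation to u8.
+ */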
+static u8 k2_stat_check_status(struct ata_port *ap)
+{
+	return readl(ap->ioaddr.status_addr);
+}
+
+static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
+{
+	struct ata_port *ap;
+	struct device_node *np;
+	int index;
+
+	/* Find the ata_port */
+	ap = ata_shost_to_port(shost);
+	if (ap == NULL)
+		return 0;
+
+	/* Find the OF node for the PCI device proper */
+	np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
+	if (np == NULL)
+		return 0;
+
+	/* Match it to a port node */
+	index = (ap == ap->host->ports[0]) ? 0 : 1;
+	for (np = np->child; np != NULL; np = np->sibling) {
+		const u32 *reg = of_get_property(np, "reg", NULL);
+		if (!reg)
+			continue;
+		if (index == *reg) {
+			seq_printf(m, "devspec: %pOF\n", np);
+			break;
+		}
+	}
+	return 0;
+}
+
+static struct scsi_host_template k2_sata_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+	.show_info		= k2_sata_show_info,
+};
+
+
+static struct ata_port_operations k2_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.softreset              = k2_sata_softreset,
+	.hardreset              = k2_sata_hardreset,
+	.sff_tf_load		= k2_sata_tf_load,
+	.sff_tf_read		= k2_sata_tf_read,
+	.sff_check_status	= k2_stat_check_status,
+	.check_atapi_dma	= k2_sata_check_atapi_dma,
+	.bmdma_setup		= k2_bmdma_setup_mmio,
+	.bmdma_start		= k2_bmdma_start_mmio,
+	.scr_read		= k2_sata_scr_read,
+	.scr_write		= k2_sata_scr_write,
+};
+
+static const struct ata_port_info k2_port_info[] = {
+	/* chip_svw4 */
+	{
+		.flags		= ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &k2_sata_ops,
+	},
+	/* chip_svw8 */
+	{
+		.flags		= ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
+				  K2_FLAG_SATA_8_PORTS,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &k2_sata_ops,
+	},
+	/* chip_svw42 */
+	{
+		.flags		= ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &k2_sata_ops,
+	},
+	/* chip_svw43 */
+	{
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &k2_sata_ops,
+	},
+};
+
+static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		= base + K2_SATA_TF_CMD_OFFSET;
+	port->data_addr		= base + K2_SATA_TF_DATA_OFFSET;
+	port->feature_addr	=
+	port->error_addr	= base + K2_SATA_TF_ERROR_OFFSET;
+	port->nsect_addr	= base + K2_SATA_TF_NSECT_OFFSET;
+	port->lbal_addr		= base + K2_SATA_TF_LBAL_OFFSET;
+	port->lbam_addr		= base + K2_SATA_TF_LBAM_OFFSET;
+	port->lbah_addr		= base + K2_SATA_TF_LBAH_OFFSET;
+	port->device_addr	= base + K2_SATA_TF_DEVICE_OFFSET;
+	port->command_addr	=
+	port->status_addr	= base + K2_SATA_TF_CMDSTAT_OFFSET;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + K2_SATA_TF_CTL_OFFSET;
+	port->bmdma_addr	= base + K2_SATA_DMA_CMD_OFFSET;
+	port->scr_addr		= base + K2_SATA_SCR_STATUS_OFFSET;
+}
+
+
+static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] =
+		{ &k2_port_info[ent->driver_data], NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int n_ports, i, rc, bar_pos;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* allocate host */
+	n_ports = 4;
+	if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS)
+		n_ports = 8;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	bar_pos = 5;
+	if (ppi[0]->flags & K2_FLAG_BAR_POS_3)
+		bar_pos = 3;
+	/*
+	 * If this driver happens to only be useful on Apple's K2, then
+	 * we should check that here as it has a normal Serverworks ID
+	 */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Check if we have resources mapped at all (second function may
+	 * have been disabled by firmware)
+	 */
+	if (pci_resource_len(pdev, bar_pos) == 0) {
+		/* In IDE mode we need to pin the device to ensure that
+		 * pcim_release does not clear the busmaster bit in config
+		 * space, since clearing it causes busmaster DMA to fail on
+		 * ports 3 & 4 */
+		pcim_pin_device(pdev);
+		return -ENODEV;
+	}
+
+	/* Request and iomap PCI regions */
+	rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	mmio_base = host->iomap[bar_pos];
+
+	/* Different controllers have different numbers of ports - currently 4 or 8. */
+	/* All ports are on the same function. A multi-function variant is no
+	 * longer available, so this should not be seen in any system. */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int offset = i * K2_SATA_PORT_OFFSET;
+
+		k2_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, bar_pos, -1, "mmio");
+		ata_port_pbar_desc(ap, bar_pos, offset, "port");
+	}
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	/* Clear a magic bit in SCR1 according to Darwin; this helps
+	 * some funky Seagate drives (though so far, it was already
+	 * set by the firmware on the machines I had access to)
+	 */
+	writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
+	       mmio_base + K2_SATA_SICR1_OFFSET);
+
+	/* Clear SATA error & interrupts we don't use */
+	writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
+	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &k2_sata_sht);
+}
+
+/* 0x240 is device ID for Apple K2 device
+ * 0x241 is device ID for Serverworks Frodo4
+ * 0x242 is device ID for Serverworks Frodo8
+ * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
+ * controller
+ */
+static const struct pci_device_id k2_sata_pci_tbl[] = {
+	{ PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw8 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 },
+
+	{ }
+};
+
+static struct pci_driver k2_sata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= k2_sata_pci_tbl,
+	.probe			= k2_sata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+module_pci_driver(k2_sata_pci_driver);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt");
+MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
new file mode 100644
index 0000000..405e606
--- /dev/null
+++ b/drivers/ata/sata_sx4.c
@@ -0,0 +1,1511 @@
+/*
+ *  sata_sx4.c - Promise SATA
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ *  		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available under NDA.
+ *
+ */
+
+/*
+	Theory of operation
+	-------------------
+
+	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
+	engine, DIMM memory, and four ATA engines (one per SATA port).
+	Data is copied to/from DIMM memory by the HDMA engine, before
+	handing off to one (or more) of the ATA engines.  The ATA
+	engines operate solely on DIMM memory.
+
+	The SX4 behaves like a PATA chip, with no SATA controls or
+	knowledge whatsoever, leading to the presumption that
+	PATA<->SATA bridges exist on SX4 boards, external to the
+	PDC20621 chip itself.
+
+	The chip is quite capable, supporting an XOR engine and linked
+	hardware commands (permitting a string of transactions to be
+	submitted and waited-on as a single unit), and an optional
+	microprocessor.
+
+	The limiting factor is largely software.  This Linux driver was
+	written to multiplex the single HDMA engine to copy disk
+	transactions into a fixed DIMM memory space, from where an ATA
+	engine takes over.  As a result, each WRITE looks like this:
+
+		submit HDMA packet to hardware
+		hardware copies data from system memory to DIMM
+		hardware raises interrupt
+
+		submit ATA packet to hardware
+		hardware executes ATA WRITE command, w/ data in DIMM
+		hardware raises interrupt
+
+	and each READ looks like this:
+
+		submit ATA packet to hardware
+		hardware executes ATA READ command, w/ data in DIMM
+		hardware raises interrupt
+
+		submit HDMA packet to hardware
+		hardware copies data from DIMM to system memory
+		hardware raises interrupt
+
+	This is a very slow, lock-step way of doing things that can
+	certainly be improved by motivated kernel hackers.
+
+ */
+
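+/*
+ * In the code below, pdc20621_packet_start() kicks off step one of
+ * either sequence (the HDMA copy for a WRITE, the ATA packet for a
+ * READ), and pdc20621_host_intr() chains into step two when the
+ * first interrupt arrives.
+ */
+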
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "sata_promise.h"
+
+#define DRV_NAME	"sata_sx4"
+#define DRV_VERSION	"0.12"
+
+
+enum {
+	PDC_MMIO_BAR		= 3,
+	PDC_DIMM_BAR		= 4,
+
+	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */
+
+	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
+	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
+	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
+	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */
+
+	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */
+
+	PDC_20621_SEQCTL	= 0x400,
+	PDC_20621_SEQMASK	= 0x480,
+	PDC_20621_GENERAL_CTL	= 0x484,
+	PDC_20621_PAGE_SIZE	= (32 * 1024),
+
+	/* chosen, not constant, values; we design our own DIMM mem map */
+	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
+	PDC_20621_DIMM_BASE	= 0x00200000,
+	PDC_20621_DIMM_DATA	= (64 * 1024),
+	PDC_DIMM_DATA_STEP	= (256 * 1024),
+	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
+	PDC_DIMM_HOST_PRD	= (6 * 1024),
+	PDC_DIMM_HOST_PKT	= (128 * 0),
+	PDC_DIMM_HPKT_PRD	= (128 * 1),
+	PDC_DIMM_ATA_PKT	= (128 * 2),
+	PDC_DIMM_APKT_PRD	= (128 * 3),
+	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
+	PDC_PAGE_WINDOW		= 0x40,
+	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
+				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
+	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
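+	/*
+	 * Layout implied by the constants above: the per-port packet/PRD
+	 * header area sits at PDC_20621_DIMM_BASE +
+	 * portno * PDC_DIMM_WINDOW_STEP (8K apart), while per-port bulk
+	 * data sits at PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+	 * portno * PDC_DIMM_DATA_STEP (256K apart); see the address math
+	 * in pdc20621_ata_sg() and pdc20621_host_sg().
+	 */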
+
+	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */
+
+	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
+				  (1<<23),
+
+	board_20621		= 0,	/* FastTrak S150 SX4 */
+
+	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
+	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
+	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */
+
+	PDC_MAX_HDMA		= 32,
+	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),
+
+	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
+	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
+	PDC_I2C_CONTROL			= 0x48,
+	PDC_I2C_ADDR_DATA		= 0x4C,
+	PDC_DIMM0_CONTROL		= 0x80,
+	PDC_DIMM1_CONTROL		= 0x84,
+	PDC_SDRAM_CONTROL		= 0x88,
+	PDC_I2C_WRITE			= 0,		/* master -> slave */
+	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
+	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
+	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
+	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
+	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
+	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
+	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
+	PDC_DIMM_SPD_ROW_NUM		= 3,
+	PDC_DIMM_SPD_COLUMN_NUM		= 4,
+	PDC_DIMM_SPD_MODULE_ROW		= 5,
+	PDC_DIMM_SPD_TYPE		= 11,
+	PDC_DIMM_SPD_FRESH_RATE		= 12,
+	PDC_DIMM_SPD_BANK_NUM		= 17,
+	PDC_DIMM_SPD_CAS_LATENCY	= 18,
+	PDC_DIMM_SPD_ATTRIBUTE		= 21,
+	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
+	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
+	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
+	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
+	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
+	PDC_CTL_STATUS			= 0x08,
+	PDC_DIMM_WINDOW_CTLR		= 0x0C,
+	PDC_TIME_CONTROL		= 0x3C,
+	PDC_TIME_PERIOD			= 0x40,
+	PDC_TIME_COUNTER		= 0x44,
+	PDC_GENERAL_CTLR		= 0x484,
+	PCI_PLL_INIT			= 0x8A531824,
+	PCI_X_TCOUNT			= 0xEE1E5CFF,
+
+	/* PDC_TIME_CONTROL bits */
+	PDC_TIMER_BUZZER		= (1 << 10),
+	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
+	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
+	PDC_TIMER_ENABLE		= (1 << 7),
+	PDC_TIMER_MASK_INT		= (1 << 5),
+	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
+	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
+					  PDC_TIMER_ENABLE |
+					  PDC_TIMER_MASK_INT,
+};
+
+#define ECC_ERASE_BUF_SZ (128 * 1024)
+
+struct pdc_port_priv {
+	u8			dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
+	u8			*pkt;
+	dma_addr_t		pkt_dma;
+};
+
+struct pdc_host_priv {
+	unsigned int		doing_hdma;
+	unsigned int		hdma_prod;
+	unsigned int		hdma_cons;
+	struct {
+		struct ata_queued_cmd *qc;
+		unsigned int	seq;
+		unsigned long	pkt_ofs;
+	} hdma[32];
+};
+
+
+static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void pdc_error_handler(struct ata_port *ap);
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static int pdc_port_start(struct ata_port *ap);
+static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static unsigned int pdc20621_dimm_init(struct ata_host *host);
+static int pdc20621_detect_dimm(struct ata_host *host);
+static unsigned int pdc20621_i2c_read(struct ata_host *host,
+				      u32 device, u32 subaddr, u32 *pdata);
+static int pdc20621_prog_dimm0(struct ata_host *host);
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_get_from_dimm(struct ata_host *host,
+				   void *psource, u32 offset, u32 size);
+#endif
+static void pdc20621_put_to_dimm(struct ata_host *host,
+				 void *psource, u32 offset, u32 size);
+static void pdc20621_irq_clear(struct ata_port *ap);
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
+static int pdc_softreset(struct ata_link *link, unsigned int *class,
+			 unsigned long deadline);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+
+
+static struct scsi_host_template pdc_sata_sht = {
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+};
+
+/* TODO: inherit from base port_ops after converting to new EH */
+static struct ata_port_operations pdc_20621_ops = {
+	.inherits		= &ata_sff_port_ops,
+
+	.check_atapi_dma	= pdc_check_atapi_dma,
+	.qc_prep		= pdc20621_qc_prep,
+	.qc_issue		= pdc20621_qc_issue,
+
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.softreset		= pdc_softreset,
+	.error_handler		= pdc_error_handler,
+	.lost_interrupt		= ATA_OP_NULL,
+	.post_internal_cmd	= pdc_post_internal_cmd,
+
+	.port_start		= pdc_port_start,
+
+	.sff_tf_load		= pdc_tf_load_mmio,
+	.sff_exec_command	= pdc_exec_command_mmio,
+	.sff_irq_clear		= pdc20621_irq_clear,
+};
+
+static const struct ata_port_info pdc_port_info[] = {
+	/* board_20621 */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
+				  ATA_FLAG_PIO_POLLING,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &pdc_20621_ops,
+	},
+
+};
+
+static const struct pci_device_id pdc_sata_pci_tbl[] = {
+	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pdc_sata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pdc_sata_pci_tbl,
+	.probe			= pdc_sata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+
+static int pdc_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct pdc_port_priv *pp;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	ap->private_data = pp;
+
+	return 0;
+}
+
+static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
+				   unsigned int total_len)
+{
+	u32 addr;
+	unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
+	__le32 *buf32 = (__le32 *) buf;
+
+	/* output ATA packet S/G table */
+	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+	       (PDC_DIMM_DATA_STEP * portno);
+	VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
+	buf32[dw] = cpu_to_le32(addr);
+	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
+
+	VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
+		PDC_20621_DIMM_BASE +
+		       (PDC_DIMM_WINDOW_STEP * portno) +
+		       PDC_DIMM_APKT_PRD,
+		buf32[dw], buf32[dw + 1]);
+}
+
+static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
+				    unsigned int total_len)
+{
+	u32 addr;
+	unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
+	__le32 *buf32 = (__le32 *) buf;
+
+	/* output Host DMA packet S/G table */
+	addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+	       (PDC_DIMM_DATA_STEP * portno);
+
+	buf32[dw] = cpu_to_le32(addr);
+	buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
+
+	VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
+		PDC_20621_DIMM_BASE +
+		       (PDC_DIMM_WINDOW_STEP * portno) +
+		       PDC_DIMM_HPKT_PRD,
+		buf32[dw], buf32[dw + 1]);
+}
+
+static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
+					    unsigned int devno, u8 *buf,
+					    unsigned int portno)
+{
+	unsigned int i, dw;
+	__le32 *buf32 = (__le32 *) buf;
+	u8 dev_reg;
+
+	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
+			       (PDC_DIMM_WINDOW_STEP * portno) +
+			       PDC_DIMM_APKT_PRD;
+	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
+
+	i = PDC_DIMM_ATA_PKT;
+
+	/*
+	 * Set up ATA packet
+	 */
+	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
+		buf[i++] = PDC_PKT_READ;
+	else if (tf->protocol == ATA_PROT_NODATA)
+		buf[i++] = PDC_PKT_NODATA;
+	else
+		buf[i++] = 0;
+	buf[i++] = 0;			/* reserved */
+	buf[i++] = portno + 1;		/* seq. id */
+	buf[i++] = 0xff;		/* delay seq. id */
+
+	/* dimm dma S/G, and next-pkt */
+	dw = i >> 2;
+	if (tf->protocol == ATA_PROT_NODATA)
+		buf32[dw] = 0;
+	else
+		buf32[dw] = cpu_to_le32(dimm_sg);
+	buf32[dw + 1] = 0;
+	i += 8;
+
+	if (devno == 0)
+		dev_reg = ATA_DEVICE_OBS;
+	else
+		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+	/* select device */
+	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+	buf[i++] = dev_reg;
+
+	/* device control register */
+	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
+	buf[i++] = tf->ctl;
+
+	return i;
+}
+
+static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
+				     unsigned int portno)
+{
+	unsigned int dw;
+	u32 tmp;
+	__le32 *buf32 = (__le32 *) buf;
+
+	unsigned int host_sg = PDC_20621_DIMM_BASE +
+			       (PDC_DIMM_WINDOW_STEP * portno) +
+			       PDC_DIMM_HOST_PRD;
+	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
+			       (PDC_DIMM_WINDOW_STEP * portno) +
+			       PDC_DIMM_HPKT_PRD;
+	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
+	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
+
+	dw = PDC_DIMM_HOST_PKT >> 2;
+
+	/*
+	 * Set up Host DMA packet
+	 */
+	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
+		tmp = PDC_PKT_READ;
+	else
+		tmp = 0;
+	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
+	tmp |= (0xff << 24);			/* delay seq. id */
+	buf32[dw + 0] = cpu_to_le32(tmp);
+	buf32[dw + 1] = cpu_to_le32(host_sg);
+	buf32[dw + 2] = cpu_to_le32(dimm_sg);
+	buf32[dw + 3] = 0;
+
+	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
+		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
+			PDC_DIMM_HOST_PKT,
+		buf32[dw + 0],
+		buf32[dw + 1],
+		buf32[dw + 2],
+		buf32[dw + 3]);
+}
+
+static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	struct ata_port *ap = qc->ap;
+	struct pdc_port_priv *pp = ap->private_data;
+	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+	unsigned int portno = ap->port_no;
+	unsigned int i, si, idx, total_len = 0, sgt_len;
+	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
+
+	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
+
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	/*
+	 * Build S/G table
+	 */
+	idx = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+		total_len += sg_dma_len(sg);
+	}
+	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
+	sgt_len = idx * 4;
+
+	/*
+	 * Build ATA, host DMA packets
+	 */
+	pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
+	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
+
+	pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
+	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48)
+		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
+	else
+		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
+
+	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
+
+	/* copy three S/G tables and two packets to DIMM MMIO window */
+	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
+		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
+	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
+		    PDC_DIMM_HOST_PRD,
+		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
+
+	/* force host FIFO dump */
+	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
+
+	readl(dimm_mmio);	/* MMIO PCI posting flush */
+
+	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
+}
+
+static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pdc_port_priv *pp = ap->private_data;
+	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+	unsigned int portno = ap->port_no;
+	unsigned int i;
+
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48)
+		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
+	else
+		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
+
+	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
+
+	/* copy the ATA packet header to the DIMM MMIO window */
+	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
+		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
+
+	/* force host FIFO dump */
+	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
+
+	readl(dimm_mmio);	/* MMIO PCI posting flush */
+
+	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+}
+
+static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+{
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		pdc20621_dma_prep(qc);
+		break;
+	case ATA_PROT_NODATA:
+		pdc20621_nodata_prep(qc);
+		break;
+	default:
+		break;
+	}
+}
+
+static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
+				 unsigned int seq,
+				 u32 pkt_ofs)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
+
+	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
+	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
+}
+
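+/*
+ * The single HDMA copy engine is multiplexed in software: push_hdma()
+ * below submits directly while the engine is idle, otherwise it queues
+ * the request in the 32-entry ring (hdma_prod/hdma_cons, wrapped by
+ * PDC_HDMA_Q_MASK); pop_hdma(), called from interrupt context, submits
+ * the next queued packet or marks the engine idle once the ring drains.
+ */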
+static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
+				unsigned int seq,
+				u32 pkt_ofs)
+{
+	struct ata_port *ap = qc->ap;
+	struct pdc_host_priv *pp = ap->host->private_data;
+	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
+
+	if (!pp->doing_hdma) {
+		__pdc20621_push_hdma(qc, seq, pkt_ofs);
+		pp->doing_hdma = 1;
+		return;
+	}
+
+	pp->hdma[idx].qc = qc;
+	pp->hdma[idx].seq = seq;
+	pp->hdma[idx].pkt_ofs = pkt_ofs;
+	pp->hdma_prod++;
+}
+
+static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pdc_host_priv *pp = ap->host->private_data;
+	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
+
+	/* if nothing on queue, we're done */
+	if (pp->hdma_prod == pp->hdma_cons) {
+		pp->doing_hdma = 0;
+		return;
+	}
+
+	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
+			     pp->hdma[idx].pkt_ofs);
+	pp->hdma_cons++;
+}
+
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int port_no = ap->port_no;
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+
+	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
+	dimm_mmio += PDC_DIMM_HOST_PKT;
+
+	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
+	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
+	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
+	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
+}
+#else
+static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
+#endif /* ATA_VERBOSE_DEBUG */
+
+static void pdc20621_packet_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host *host = ap->host;
+	unsigned int port_no = ap->port_no;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 seq = (u8) (port_no + 1);
+	unsigned int port_ofs;
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+	wmb();			/* flush PRD, pkt writes */
+
+	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
+
+	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
+	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
+		seq += 4;
+
+		pdc20621_dump_hdma(qc);
+		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
+		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
+			port_ofs + PDC_DIMM_HOST_PKT,
+			port_ofs + PDC_DIMM_HOST_PKT,
+			seq);
+	} else {
+		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
+
+		writel(port_ofs + PDC_DIMM_ATA_PKT,
+		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
+			port_ofs + PDC_DIMM_ATA_PKT,
+			port_ofs + PDC_DIMM_ATA_PKT,
+			seq);
+	}
+}
+
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
+{
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			break;
+		/*FALLTHROUGH*/
+	case ATA_PROT_DMA:
+		pdc20621_packet_start(qc);
+		return 0;
+
+	case ATAPI_PROT_DMA:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+
+	return ata_sff_qc_issue(qc);
+}
+
+static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
+					  struct ata_queued_cmd *qc,
+					  unsigned int doing_hdma,
+					  void __iomem *mmio)
+{
+	unsigned int port_no = ap->port_no;
+	unsigned int port_ofs =
+		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
+	u8 status;
+	unsigned int handled = 0;
+
+	VPRINTK("ENTER\n");
+
+	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
+	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+
+		/* step two - DMA from DIMM to host */
+		if (doing_hdma) {
+			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
+				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+			/* get drive status; clear intr; complete txn */
+			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+			ata_qc_complete(qc);
+			pdc20621_pop_hdma(qc);
+		}
+
+		/* step one - exec ATA command */
+		else {
+			u8 seq = (u8) (port_no + 1 + 4);
+			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
+				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+
+			/* submit hdma pkt */
+			pdc20621_dump_hdma(qc);
+			pdc20621_push_hdma(qc, seq,
+					   port_ofs + PDC_DIMM_HOST_PKT);
+		}
+		handled = 1;
+
+	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */
+
+		/* step one - DMA from host to DIMM */
+		if (doing_hdma) {
+			u8 seq = (u8) (port_no + 1);
+			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
+				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+
+			/* submit ata pkt */
+			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
+			writel(port_ofs + PDC_DIMM_ATA_PKT,
+			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+		}
+
+		/* step two - execute ATA command */
+		else {
+			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
+				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+			/* get drive status; clear intr; complete txn */
+			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+			ata_qc_complete(qc);
+			pdc20621_pop_hdma(qc);
+		}
+		handled = 1;
+
+	/* command completion, but no data xfer */
+	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
+
+		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+		qc->err_mask |= ac_err_mask(status);
+		ata_qc_complete(qc);
+		handled = 1;
+
+	} else {
+		ap->stats.idle_irq++;
+	}
+
+	return handled;
+}
+
+static void pdc20621_irq_clear(struct ata_port *ap)
+{
+	ioread8(ap->ioaddr.status_addr);
+}
+
+static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct ata_port *ap;
+	u32 mask = 0;
+	unsigned int i, tmp, port_no;
+	unsigned int handled = 0;
+	void __iomem *mmio_base;
+
+	VPRINTK("ENTER\n");
+
+	if (!host || !host->iomap[PDC_MMIO_BAR]) {
+		VPRINTK("QUICK EXIT\n");
+		return IRQ_NONE;
+	}
+
+	mmio_base = host->iomap[PDC_MMIO_BAR];
+
+	/* reading should also clear interrupts */
+	mmio_base += PDC_CHIP0_OFS;
+	mask = readl(mmio_base + PDC_20621_SEQMASK);
+	VPRINTK("mask == 0x%x\n", mask);
+
+	if (mask == 0xffffffff) {
+		VPRINTK("QUICK EXIT 2\n");
+		return IRQ_NONE;
+	}
+	mask &= 0xffff;		/* only 16 tags possible */
+	if (!mask) {
+		VPRINTK("QUICK EXIT 3\n");
+		return IRQ_NONE;
+	}
+
+	spin_lock(&host->lock);
+
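+	/*
+	 * SEQ IDs 1-4 belong to the per-port ATA engines and 5-8 to the
+	 * corresponding HDMA completions (pdc20621_packet_start() adds 4
+	 * to the sequence for host-DMA packets), so (i > 4) below tells
+	 * pdc20621_host_intr() which phase just finished.
+	 */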
+	for (i = 1; i < 9; i++) {
+		port_no = i - 1;
+		if (port_no > 3)
+			port_no -= 4;
+		if (port_no >= host->n_ports)
+			ap = NULL;
+		else
+			ap = host->ports[port_no];
+		tmp = mask & (1 << i);
+		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
+		if (tmp && ap) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+				handled += pdc20621_host_intr(ap, qc, (i > 4),
+							      mmio_base);
+		}
+	}
+
+	spin_unlock(&host->lock);
+
+	VPRINTK("mask == 0x%x\n", mask);
+
+	VPRINTK("EXIT\n");
+
+	return IRQ_RETVAL(handled);
+}
+
+static void pdc_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
+
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp |= PDC_MASK_INT;
+	tmp &= ~PDC_DMA_ENABLE;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	/* FIXME: start HDMA engine, if zero ATA engines running */
+
+	/* clear IRQ */
+	ioread8(ap->ioaddr.status_addr);
+
+	/* turn IRQ back on */
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp &= ~PDC_MASK_INT;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_reset_port(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+	unsigned int i;
+	u32 tmp;
+
+	/* FIXME: handle HDMA copy engine */
+
+	for (i = 11; i > 0; i--) {
+		tmp = readl(mmio);
+		if (tmp & PDC_RESET)
+			break;
+
+		udelay(100);
+
+		tmp |= PDC_RESET;
+		writel(tmp, mmio);
+	}
+
+	tmp &= ~PDC_RESET;
+	writel(tmp, mmio);
+	readl(mmio);	/* flush */
+}
+
+static int pdc_softreset(struct ata_link *link, unsigned int *class,
+			 unsigned long deadline)
+{
+	pdc_reset_port(link->ap);
+	return ata_sff_softreset(link, class, deadline);
+}
+
+static void pdc_error_handler(struct ata_port *ap)
+{
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		pdc_reset_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		pdc_reset_port(ap);
+}
+
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	u8 *scsicmd = qc->scsicmd->cmnd;
+	int pio = 1; /* atapi dma off by default */
+
+	/* Whitelist commands that may use DMA. */
+	switch (scsicmd[0]) {
+	case WRITE_12:
+	case WRITE_10:
+	case WRITE_6:
+	case READ_12:
+	case READ_10:
+	case READ_6:
+	case 0xad: /* READ_DVD_STRUCTURE */
+	case 0xbe: /* READ_CD */
+		pio = 0;
+	}
+	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
+	if (scsicmd[0] == WRITE_10) {
+		unsigned int lba =
+			(scsicmd[2] << 24) |
+			(scsicmd[3] << 16) |
+			(scsicmd[4] << 8) |
+			scsicmd[5];
+		if (lba >= 0xFFFF4FA2)
+			pio = 1;
+	}
+	return pio;
+}
+
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATAPI_PROT_DMA);
+	ata_sff_tf_load(ap, tf);
+}
+
+
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATAPI_PROT_DMA);
+	ata_sff_exec_command(ap, tf);
+}
+
+
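+/*
+ * The PDC20621 lays out the SFF taskfile registers in MMIO space at
+ * 32-bit strides (offsets 0x0-0x1c), with control/altstatus further
+ * out at offset 0x38.
+ */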
+static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		= base;
+	port->data_addr		= base;
+	port->feature_addr	=
+	port->error_addr	= base + 0x4;
+	port->nsect_addr	= base + 0x8;
+	port->lbal_addr		= base + 0xc;
+	port->lbam_addr		= base + 0x10;
+	port->lbah_addr		= base + 0x14;
+	port->device_addr	= base + 0x18;
+	port->command_addr	=
+	port->status_addr	= base + 0x1c;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + 0x38;
+}
+
+
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
+				   u32 offset, u32 size)
+{
+	u32 window_size;
+	u16 idx;
+	u8 page_mask;
+	long dist;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	page_mask = 0x00;
+	window_size = 0x2000 * 4; /* 32K byte uchar size */
+	idx = (u16) (offset / window_size);
+
+	writel(0x01, mmio + PDC_GENERAL_CTLR);
+	readl(mmio + PDC_GENERAL_CTLR);
+	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+	readl(mmio + PDC_DIMM_WINDOW_CTLR);
+
+	offset -= (idx * window_size);
+	idx++;
+	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
+		(long) (window_size - offset);
+	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
+
+	psource += dist;
+	size -= dist;
+	for (; (long) size >= (long) window_size ;) {
+		writel(0x01, mmio + PDC_GENERAL_CTLR);
+		readl(mmio + PDC_GENERAL_CTLR);
+		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+		readl(mmio + PDC_DIMM_WINDOW_CTLR);
+		memcpy_fromio(psource, dimm_mmio, window_size / 4);
+		psource += window_size;
+		size -= window_size;
+		idx++;
+	}
+
+	if (size) {
+		writel(0x01, mmio + PDC_GENERAL_CTLR);
+		readl(mmio + PDC_GENERAL_CTLR);
+		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+		readl(mmio + PDC_DIMM_WINDOW_CTLR);
+		memcpy_fromio(psource, dimm_mmio, size / 4);
+	}
+}
+#endif
+
+
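+/*
+ * DIMM memory is reached through a single 32K window in the DIMM BAR;
+ * the window is repositioned by writing a page index to
+ * PDC_DIMM_WINDOW_CTLR, so copies larger than one window are chunked:
+ * a partial leading piece, whole-window pieces, then the remainder.
+ */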
+static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
+				 u32 offset, u32 size)
+{
+	u32 window_size;
+	u16 idx;
+	u8 page_mask;
+	long dist;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	page_mask = 0x00;
+	window_size = 0x2000 * 4;       /* 32K byte uchar size */
+	idx = (u16) (offset / window_size);
+
+	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+	readl(mmio + PDC_DIMM_WINDOW_CTLR);
+	offset -= (idx * window_size);
+	idx++;
+	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
+		(long) (window_size - offset);
+	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
+	writel(0x01, mmio + PDC_GENERAL_CTLR);
+	readl(mmio + PDC_GENERAL_CTLR);
+
+	psource += dist;
+	size -= dist;
+	for (; (long) size >= (long) window_size ;) {
+		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+		readl(mmio + PDC_DIMM_WINDOW_CTLR);
+		memcpy_toio(dimm_mmio, psource, window_size / 4);
+		writel(0x01, mmio + PDC_GENERAL_CTLR);
+		readl(mmio + PDC_GENERAL_CTLR);
+		psource += window_size;
+		size -= window_size;
+		idx++;
+	}
+
+	if (size) {
+		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+		readl(mmio + PDC_DIMM_WINDOW_CTLR);
+		memcpy_toio(dimm_mmio, psource, size / 4);
+		writel(0x01, mmio + PDC_GENERAL_CTLR);
+		readl(mmio + PDC_GENERAL_CTLR);
+	}
+}
+
+
+static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
+				      u32 subaddr, u32 *pdata)
+{
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	u32 i2creg  = 0;
+	u32 status;
+	u32 count = 0;
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	i2creg |= device << 24;
+	i2creg |= subaddr << 16;
+
+	/* Set the device and subaddress */
+	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
+	readl(mmio + PDC_I2C_ADDR_DATA);
+
+	/* Write Control to perform read operation, mask int */
+	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
+	       mmio + PDC_I2C_CONTROL);
+
+	for (count = 0; count <= 1000; count ++) {
+		status = readl(mmio + PDC_I2C_CONTROL);
+		if (status & PDC_I2C_COMPLETE) {
+			status = readl(mmio + PDC_I2C_ADDR_DATA);
+			break;
+		} else if (count == 1000)
+			return 0;
+	}
+
+	*pdata = (status >> 8) & 0x000000ff;
+	return 1;
+}
+
+
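+/*
+ * Probe the DIMM's SPD EEPROM: byte 126 reports the rated frequency
+ * directly (100 means a 100 MHz part); otherwise byte 9 - the minimum
+ * SDRAM cycle time, presumably encoded in BCD tenths of nanoseconds -
+ * at or below 0x75 (7.5 ns) indicates a 133 MHz capable module.
+ */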
+static int pdc20621_detect_dimm(struct ata_host *host)
+{
+	u32 data = 0;
+	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
+		if (data == 100)
+			return 100;
+	} else
+		return 0;
+
+	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
+		if (data <= 0x75)
+			return 133;
+	} else
+		return 0;
+
+	return 0;
+}
+
+
+static int pdc20621_prog_dimm0(struct ata_host *host)
+{
+	u32 spd0[50];
+	u32 data = 0;
+	int size, i;
+	u8 bdimmsize;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	static const struct {
+		unsigned int reg;
+		unsigned int ofs;
+	} pdc_i2c_read_data [] = {
+		{ PDC_DIMM_SPD_TYPE, 11 },
+		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
+		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
+		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
+		{ PDC_DIMM_SPD_ROW_NUM, 3 },
+		{ PDC_DIMM_SPD_BANK_NUM, 17 },
+		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
+		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
+		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
+		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
+		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
+		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
+	};
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
+		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+				  pdc_i2c_read_data[i].reg,
+				  &spd0[pdc_i2c_read_data[i].ofs]);
+
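+	/*
+	 * Pack the SPD geometry and timing into the DIMM0 control word.
+	 * The exact field layout is chip-specific (the docs are under
+	 * NDA), but from the shifts: column/row/bank counts in the low
+	 * bits, tRP and related delays rounded up to 10ns units above
+	 * bit 8, and the CAS latency select in bits 15:14.
+	 */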
+	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
+	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+		((((spd0[27] + 9) / 10) - 1) << 8) ;
+	data |= (((((spd0[29] > spd0[28])
+		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
+	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
+
+	if (spd0[18] & 0x08)
+		data |= ((0x03) << 14);
+	else if (spd0[18] & 0x04)
+		data |= ((0x02) << 14);
+	else if (spd0[18] & 0x01)
+		data |= ((0x01) << 14);
+	else
+		data |= (0 << 14);
+
+	/*
+	 * Calculate bDIMMSize (log2 of the module size, so the size is a
+	 * power of two) from the SPD geometry, and merge the resulting
+	 * size in MB into the start/end address field of the control word.
+	 */
+
+	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
+	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
+	data |= (((size / 16) - 1) << 16);
+	data |= (0 << 23);
+	data |= 8;
+	writel(data, mmio + PDC_DIMM0_CONTROL);
+	readl(mmio + PDC_DIMM0_CONTROL);
+	return size;
+}
+
+
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
+{
+	u32 data, spd0;
+	int error, i;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	/*
+	  Set To Default : DIMM Module Global Control Register (0x022259F1)
+	  DIMM Arbitration Disable (bit 20)
+	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
+	  Refresh Enable (bit 17)
+	*/
+
+	data = 0x022259F1;
+	writel(data, mmio + PDC_SDRAM_CONTROL);
+	readl(mmio + PDC_SDRAM_CONTROL);
+
+	/* Turn on ECC if the DIMM supports it (SPD memory type 0x02) */
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
+	if (spd0 == 0x02) {
+		data |= (0x01 << 16);
+		writel(data, mmio + PDC_SDRAM_CONTROL);
+		readl(mmio + PDC_SDRAM_CONTROL);
+		printk(KERN_ERR "Local DIMM ECC Enabled\n");
+	}
+
+	/* DIMM Initialization Select/Enable (bit 18/19) */
+	data &= (~(1<<18));
+	data |= (1<<19);
+	writel(data, mmio + PDC_SDRAM_CONTROL);
+
+	error = 1;
+	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
+		data = readl(mmio + PDC_SDRAM_CONTROL);
+		if (!(data & (1<<19))) {
+			error = 0;
+			break;
+		}
+		msleep(i*100);
+	}
+	return error;
+}
+
+
+static unsigned int pdc20621_dimm_init(struct ata_host *host)
+{
+	int speed, size, length;
+	u32 addr, spd0, pci_status;
+	u32 time_period = 0;
+	u32 tcount = 0;
+	u32 ticks = 0;
+	u32 clock = 0;
+	u32 fparam = 0;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	/* Initialize PLL based upon PCI Bus Frequency */
+
+	/* Initialize Time Period Register */
+	writel(0xffffffff, mmio + PDC_TIME_PERIOD);
+	time_period = readl(mmio + PDC_TIME_PERIOD);
+	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
+
+	/* Enable timer */
+	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
+	readl(mmio + PDC_TIME_CONTROL);
+
+	/* Wait 3 seconds */
+	msleep(3000);
+
+	/*
+	   When timer is enabled, counter is decreased every internal
+	   clock cycle.
+	*/
+
+	tcount = readl(mmio + PDC_TIME_COUNTER);
+	VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
+
+	/*
+	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
+	   register should be >= (0xffffffff - 3x10^8).
+	*/
+	if (tcount >= PCI_X_TCOUNT) {
+		ticks = (time_period - tcount);
+		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
+
+		clock = (ticks / 300000);
+		VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
+
+		clock = (clock * 33);
+		VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
+
+		/* PLL F Param (bit 22:16) */
+		fparam = (1400000 / clock) - 2;
+		VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
+
+		/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
+		pci_status = (0x8a001824 | (fparam << 16));
+	} else
+		pci_status = PCI_PLL_INIT;
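+	/*
+	 * Worked example (a sketch, assuming a 33 MHz internal clock):
+	 * over 3 seconds the counter decrements roughly 1x10^8 times,
+	 * so clock = ticks / 300000 = ~330 (i.e. 10 x MHz), scaled by
+	 * 33 to ~10890, giving fparam = 1400000 / 10890 - 2 = ~126,
+	 * which fits the 7-bit F field in bits 22:16.
+	 */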
+
+	/* Initialize PLL. */
+	VPRINTK("pci_status: 0x%x\n", pci_status);
+	writel(pci_status, mmio + PDC_CTL_STATUS);
+	readl(mmio + PDC_CTL_STATUS);
+
+	/*
+	   Read SPD of DIMM by I2C interface,
+	   and program the DIMM Module Controller.
+	*/
+	if (!(speed = pdc20621_detect_dimm(host))) {
+		printk(KERN_ERR "Detect Local DIMM Fail\n");
+		return 1;	/* DIMM error */
+	}
+	VPRINTK("Local DIMM Speed = %d\n", speed);
+
+	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
+	size = pdc20621_prog_dimm0(host);
+	VPRINTK("Local DIMM Size = %dMB\n", size);
+
+	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
+	if (pdc20621_prog_dimm_global(host)) {
+		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
+		return 1;
+	}
+
+#ifdef ATA_VERBOSE_DEBUG
+	{
+		u8 test_pattern1[40] =
+			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
+			'N','o','t',' ','Y','e','t',' ',
+			'D','e','f','i','n','e','d',' ',
+			'1','.','1','0',
+			'9','8','0','3','1','6','1','2',0,0};
+		u8 test_pattern2[40] = {0};
+
+		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
+
+		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
+				       40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+
+		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+	}
+#endif
+
+	/* ECC initialization. */
+
+	if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+			       PDC_DIMM_SPD_TYPE, &spd0)) {
+		pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+		       PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+		return 1;
+	}
+	if (spd0 == 0x02) {
+		void *buf;
+		VPRINTK("Start ECC initialization\n");
+		addr = 0;
+		length = size * 1024 * 1024;
+		buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
+		if (!buf)
+			return 1;
+		while (addr < length) {
+			pdc20621_put_to_dimm(host, buf, addr,
+					     ECC_ERASE_BUF_SZ);
+			addr += ECC_ERASE_BUF_SZ;
+		}
+		kfree(buf);
+		VPRINTK("Finish ECC initialization\n");
+	}
+	return 0;
+}
+
+
+static void pdc_20621_init(struct ata_host *host)
+{
+	u32 tmp;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+	/* hard-code chip #0 */
+	mmio += PDC_CHIP0_OFS;
+
+	/*
+	 * Select page 0x40 for our 32k DIMM window
+	 */
+	tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
+	tmp |= PDC_PAGE_WINDOW;	/* page 40h; arbitrarily selected */
+	writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
+
+	/*
+	 * Reset Host DMA
+	 */
+	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
+	tmp |= PDC_RESET;
+	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
+	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
+
+	udelay(10);
+
+	tmp = readl(mmio + PDC_HDMA_CTLSTAT);
+	tmp &= ~PDC_RESET;
+	writel(tmp, mmio + PDC_HDMA_CTLSTAT);
+	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
+}
+
+static int pdc_sata_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] =
+		{ &pdc_port_info[ent->driver_data], NULL };
+	struct ata_host *host;
+	struct pdc_host_priv *hpriv;
+	int i, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* allocate host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+
+	host->private_data = hpriv;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
+				DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < 4; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
+		unsigned int offset = 0x200 + i * 0x80;
+
+		pdc_sata_setup_port(&ap->ioaddr, base + offset);
+
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
+	}
+
+	/* configure and activate */
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	if (pdc20621_dimm_init(host))
+		return -ENOMEM;
+	pdc_20621_init(host);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
+				 IRQF_SHARED, &pdc_sata_sht);
+}
+
+module_pci_driver(pdc_sata_pci_driver);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Promise SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
new file mode 100644
index 0000000..4f6e8d8
--- /dev/null
+++ b/drivers/ata/sata_uli.c
@@ -0,0 +1,245 @@
+/*
+ *  sata_uli.c - ULi Electronics SATA
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_uli"
+#define DRV_VERSION	"1.3"
+
+enum {
+	uli_5289		= 0,
+	uli_5287		= 1,
+	uli_5281		= 2,
+
+	uli_max_ports		= 4,
+
+	/* PCI configuration registers */
+	ULI5287_BASE		= 0x90, /* sata0 phy SCR registers */
+	ULI5287_OFFS		= 0x10, /* offset from sata0->sata1 phy regs */
+	ULI5281_BASE		= 0x60, /* sata0 phy SCR registers */
+	ULI5281_OFFS		= 0x60, /* offset from sata0->sata1 phy regs */
+};
+
+struct uli_priv {
+	unsigned int		scr_cfg_addr[uli_max_ports];
+};
+
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static const struct pci_device_id uli_pci_tbl[] = {
+	{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
+	{ PCI_VDEVICE(AL, 0x5287), uli_5287 },
+	{ PCI_VDEVICE(AL, 0x5281), uli_5281 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver uli_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= uli_pci_tbl,
+	.probe			= uli_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static struct scsi_host_template uli_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations uli_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.scr_read		= uli_scr_read,
+	.scr_write		= uli_scr_write,
+	.hardreset		= ATA_OP_NULL,
+};
+
+static const struct ata_port_info uli_port_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
+	.pio_mask       = ATA_PIO4,
+	.udma_mask      = ATA_UDMA6,
+	.port_ops       = &uli_ops,
+};
+
+
+MODULE_AUTHOR("Peer Chen");
+MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
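+/*
+ * On these ULi parts the SATA PHY SCR registers (SStatus, SError,
+ * SControl) are not memory-mapped: they sit in PCI configuration space
+ * at a per-port base recorded in scr_cfg_addr[] at probe time, four
+ * bytes per register.
+ */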
+static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
+{
+	struct uli_priv *hpriv = ap->host->private_data;
+	return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
+}
+
+static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
+	u32 val;
+
+	pci_read_config_dword(pdev, cfg_addr, &val);
+	return val;
+}
+
+static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
+
+	pci_write_config_dword(pdev, cfg_addr, val);
+}
+
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+
+	*val = uli_scr_cfg_read(link, sc_reg);
+	return 0;
+}
+
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
+		return -EINVAL;
+
+	uli_scr_cfg_write(link, sc_reg, val);
+	return 0;
+}
+
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	struct ata_host *host;
+	struct uli_priv *hpriv;
+	void __iomem * const *iomap;
+	struct ata_ioports *ioaddr;
+	int n_ports, rc;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	n_ports = 2;
+	if (board_idx == uli_5287)
+		n_ports = 4;
+
+	/* allocate the host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	/* the first two ports are standard SFF */
+	rc = ata_pci_sff_init_host(host);
+	if (rc)
+		return rc;
+
+	ata_pci_bmdma_init(host);
+
+	iomap = host->iomap;
+
+	switch (board_idx) {
+	case uli_5287:
+		/* If there are four, the last two live right after
+		 * the standard SFF ports.
+		 */
+		hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+		hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
+
+		ioaddr = &host->ports[2]->ioaddr;
+		ioaddr->cmd_addr = iomap[0] + 8;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = (void __iomem *)
+			((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
+		ioaddr->bmdma_addr = iomap[4] + 16;
+		hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
+		ata_sff_std_ports(ioaddr);
+
+		ata_port_desc(host->ports[2],
+			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 0) + 8,
+			((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
+			(unsigned long long)pci_resource_start(pdev, 4) + 16);
+
+		ioaddr = &host->ports[3]->ioaddr;
+		ioaddr->cmd_addr = iomap[2] + 8;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = (void __iomem *)
+			((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
+		ioaddr->bmdma_addr = iomap[4] + 24;
+		hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
+		ata_sff_std_ports(ioaddr);
+
+		ata_port_desc(host->ports[3],
+			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 2) + 8,
+			((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
+			(unsigned long long)pci_resource_start(pdev, 4) + 24);
+
+		break;
+
+	case uli_5289:
+		hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+		hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
+		break;
+
+	case uli_5281:
+		hpriv->scr_cfg_addr[0] = ULI5281_BASE;
+		hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	pci_set_master(pdev);
+	pci_intx(pdev, 1);
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+				 IRQF_SHARED, &uli_sht);
+}
+
+module_pci_driver(uli_pci_driver);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
new file mode 100644
index 0000000..93b8d78
--- /dev/null
+++ b/drivers/ata/sata_via.c
@@ -0,0 +1,783 @@
+/*
+ *  sata_via.c - VIA Serial ATA controllers
+ *
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
+ * 		   Please ALWAYS copy linux-ide@vger.kernel.org
+ *		   on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Hardware documentation available under NDA.
+ *
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_via"
+#define DRV_VERSION	"2.6"
+
+/*
+ * vt8251 is different from VIA's other SATA controllers.  It has two
+ * channels, and each channel has both a Master and a Slave slot.
+ */
+enum board_ids_enum {
+	vt6420,
+	vt6421,
+	vt8251,
+};
+
+enum {
+	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
+	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
+	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
+	SVIA_MISC_3		= 0x46,	/* Miscellaneous Control III */
+	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA / cable detect */
+	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */
+
+	PORT0			= (1 << 1),
+	PORT1			= (1 << 0),
+	ALL_PORTS		= PORT0 | PORT1,
+
+	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
+
+	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
+
+	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
+};
+
+struct svia_priv {
+	bool			wd_workaround;
+};
+
+static int vt6420_hotplug;
+module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
+MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
+
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev);
+#endif
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
+static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
+static void svia_noop_freeze(struct ata_port *ap);
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
+static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
+static int vt6421_pata_cable_detect(struct ata_port *ap);
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_error_handler(struct ata_port *ap);
+
+static const struct pci_device_id svia_pci_tbl[] = {
+	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */
+	{ PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */
+	{ PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */
+	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
+	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver svia_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= svia_pci_tbl,
+	.probe			= svia_init_one,
+#ifdef CONFIG_PM_SLEEP
+	.suspend		= ata_pci_device_suspend,
+	.resume			= svia_pci_device_resume,
+#endif
+	.remove			= ata_pci_remove_one,
+};
+
+static struct scsi_host_template svia_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations svia_base_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.sff_tf_load		= svia_tf_load,
+};
+
+static struct ata_port_operations vt6420_sata_ops = {
+	.inherits		= &svia_base_ops,
+	.freeze			= svia_noop_freeze,
+	.prereset		= vt6420_prereset,
+	.bmdma_start		= vt6420_bmdma_start,
+};
+
+static struct ata_port_operations vt6421_pata_ops = {
+	.inherits		= &svia_base_ops,
+	.cable_detect		= vt6421_pata_cable_detect,
+	.set_piomode		= vt6421_set_pio_mode,
+	.set_dmamode		= vt6421_set_dma_mode,
+};
+
+static struct ata_port_operations vt6421_sata_ops = {
+	.inherits		= &svia_base_ops,
+	.scr_read		= svia_scr_read,
+	.scr_write		= svia_scr_write,
+	.error_handler		= vt6421_error_handler,
+};
+
+static struct ata_port_operations vt8251_ops = {
+	.inherits		= &svia_base_ops,
+	.hardreset		= sata_std_hardreset,
+	.scr_read		= vt8251_scr_read,
+	.scr_write		= vt8251_scr_write,
+};
+
+static const struct ata_port_info vt6420_port_info = {
+	.flags		= ATA_FLAG_SATA,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &vt6420_sata_ops,
+};
+
+static const struct ata_port_info vt6421_sport_info = {
+	.flags		= ATA_FLAG_SATA,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &vt6421_sata_ops,
+};
+
+static const struct ata_port_info vt6421_pport_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	/* No MWDMA */
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &vt6421_pata_ops,
+};
+
+static const struct ata_port_info vt8251_port_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
+	.pio_mask	= ATA_PIO4,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &vt8251_ops,
+};
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
+	return 0;
+}
+
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
+	return 0;
+}
+
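+/*
+ * The vt8251 likewise keeps its SCRs in PCI configuration space, but
+ * packed: SStatus and SControl are single bytes per device slot (at
+ * 0xA0 + slot and 0xA4 + slot), and SError is a full dword at
+ * 0xB0 + slot * 4, so the 32-bit SCR values are assembled from, or
+ * scattered into, the DET/SPD/IPM bit fields by hand.
+ */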
+static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	int slot = 2 * link->ap->port_no + link->pmp;
+	u32 v = 0;
+	u8 raw;
+
+	switch (scr) {
+	case SCR_STATUS:
+		pci_read_config_byte(pdev, 0xA0 + slot, &raw);
+
+		/* read the DET field, bits 0 and 1 of the config byte */
+		v |= raw & 0x03;
+
+		/* read the SPD field, bit 4 of the config byte */
+		if (raw & (1 << 4))
+			v |= 0x02 << 4;
+		else
+			v |= 0x01 << 4;
+
+		/* read the IPM field, bits 2 and 3 of the config byte,
+		 * and place it in bits 8-11 of SStatus where IPM lives
+		 */
+		v |= ipm_tbl[(raw >> 2) & 0x3] << 8;
+		break;
+
+	case SCR_ERROR:
+		/* devices other than the 5287 use 0xA8 as the base */
+		WARN_ON(pdev->device != 0x5287);
+		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
+		break;
+
+	case SCR_CONTROL:
+		pci_read_config_byte(pdev, 0xA4 + slot, &raw);
+
+		/* read the DET field, bits 0 and 1 */
+		v |= ((raw & 0x02) << 1) | (raw & 0x01);
+
+		/* read the IPM field, bits 2 and 3 */
+		v |= ((raw >> 2) & 0x03) << 8;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	*val = v;
+	return 0;
+}
+
+static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	int slot = 2 * link->ap->port_no + link->pmp;
+	u32 v = 0;
+
+	switch (scr) {
+	case SCR_ERROR:
+		/* devices other than the 5287 use 0xA8 as the base */
+		WARN_ON(pdev->device != 0x5287);
+		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
+		return 0;
+
+	case SCR_CONTROL:
+		/* set the DET field */
+		v |= ((val & 0x4) >> 1) | (val & 0x1);
+
+		/* set the IPM field */
+		v |= ((val >> 8) & 0x3) << 2;
+
+		pci_write_config_byte(pdev, 0xA4 + slot, v);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ *	svia_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	This works around an erratum in VIA chipsets: the chip resets
+ *	the device register after the IEN bit in the ctl register is
+ *	changed, so the device register must be rewritten whenever ctl
+ *	changes.
+ */
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_taskfile ttf;
+
+	if (tf->ctl != ap->last_ctl)  {
+		ttf = *tf;
+		ttf.flags |= ATA_TFLAG_DEVICE;
+		tf = &ttf;
+	}
+	ata_sff_tf_load(ap, tf);
+}
+
+static void svia_noop_freeze(struct ata_port *ap)
+{
+	/* Some VIA controllers choke if ATA_NIEN is manipulated in
+	 * certain ways.  Leave it alone and just clear any pending IRQ.
+	 */
+	ap->ops->sff_check_status(ap);
+	ata_bmdma_irq_clear(ap);
+}
+
+/**
+ *	vt6420_prereset - prereset for vt6420
+ *	@link: target ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SCR registers on vt6420 are unreliable and may hang the whole
+ *	machine if accessed with the wrong timing.  To avoid such a
+ *	catastrophe, vt6420 doesn't provide generic SCR access
+ *	operations; instead, it uses SStatus and SControl only during
+ *	boot probing, in a controlled way.
+ *
+ *	As the old (pre-EH-update) probing code is proven to work, we
+ *	strictly follow that access pattern.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &ap->link.eh_context;
+	unsigned long timeout = jiffies + (HZ * 5);
+	u32 sstatus, scontrol;
+	int online;
+
+	/* don't do any SCR stuff if we're not loading */
+	if (!(ap->pflags & ATA_PFLAG_LOADING))
+		goto skip_scr;
+
+	/* Resume phy.  This is the old SATA resume sequence */
+	svia_scr_write(link, SCR_CONTROL, 0x300);
+	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
+
+	/* wait for phy to become ready, if necessary */
+	do {
+		ata_msleep(link->ap, 200);
+		svia_scr_read(link, SCR_STATUS, &sstatus);
+		if ((sstatus & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	/* open code sata_print_link_status() */
+	svia_scr_read(link, SCR_STATUS, &sstatus);
+	svia_scr_read(link, SCR_CONTROL, &scontrol);
+
+	online = (sstatus & 0xf) == 0x3;
+
+	ata_port_info(ap,
+		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+		      online ? "up" : "down", sstatus, scontrol);
+
+	/* SStatus is read one more time */
+	svia_scr_read(link, SCR_STATUS, &sstatus);
+
+	if (!online) {
+		/* tell EH to bail */
+		ehc->i.action &= ~ATA_EH_RESET;
+		return 0;
+	}
+
+ skip_scr:
+	/* wait for !BSY */
+	ata_sff_wait_ready(link, deadline);
+
+	return 0;
+}
+
+static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	if ((qc->tf.command == ATA_CMD_PACKET) &&
+	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
+		/* Prevents corruption on some ATAPI burners */
+		ata_sff_pause(ap);
+	}
+	ata_bmdma_start(qc);
+}
+
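+/*
+ * Cable detection: bit 4 of the vt6421 UDMA timing register appears to
+ * latch the cable strap - set means only a 40-wire cable was detected,
+ * otherwise assume an 80-wire cable.
+ */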
+static int vt6421_pata_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
+	if (tmp & 0x10)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
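+/*
+ * The PIO/UDMA helpers below program raw 8-bit timing register values,
+ * indexed by (mode - XFER_PIO_0) and (mode - XFER_UDMA_0) respectively.
+ * Each device has its own timing byte, adjacent in config space, hence
+ * the "- adev->devno" addressing.
+ */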
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
+	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
+			      pio_bits[adev->pio_mode - XFER_PIO_0]);
+}
+
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
+	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
+			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
+}
+
+static const unsigned int svia_bar_sizes[] = {
+	8, 4, 8, 4, 16, 256
+};
+
+static const unsigned int vt6421_bar_sizes[] = {
+	16, 16, 16, 16, 32, 128
+};
+
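+/*
+ * Per-port SCR blocks in BAR 5: vt6420 ports are spaced 128 bytes
+ * apart, vt6421 ports 64 bytes apart, as encoded in the two helpers
+ * below.
+ */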
+static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
+{
+	return addr + (port * 128);
+}
+
+static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
+{
+	return addr + (port * 64);
+}
+
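+/*
+ * vt6421 BAR layout as used below: BARs 0-2 hold the per-port taskfile
+ * registers (ctl at +8 within each block), BAR 4 holds the BMDMA
+ * registers (8 bytes per port) and BAR 5 holds the SCRs.
+ */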
+static void vt6421_init_addrs(struct ata_port *ap)
+{
+	void __iomem * const * iomap = ap->host->iomap;
+	void __iomem *reg_addr = iomap[ap->port_no];
+	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ioaddr->cmd_addr = reg_addr;
+	ioaddr->altstatus_addr =
+	ioaddr->ctl_addr = (void __iomem *)
+		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
+	ioaddr->bmdma_addr = bmdma_addr;
+	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
+
+	ata_sff_std_ports(ioaddr);
+
+	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
+	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
+}
+
+static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
+	struct ata_host *host;
+	int rc;
+
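+	/*
+	 * Note: this patches the shared, static vt6420_sata_ops, so
+	 * enabling hotplug affects every vt6420 in the system; that is
+	 * fine because the flag itself is driver-global.
+	 */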
+	if (vt6420_hotplug) {
+		ppi[0]->port_ops->scr_read = svia_scr_read;
+		ppi[0]->port_ops->scr_write = svia_scr_write;
+	}
+
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	*r_host = host;
+
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
+		return rc;
+	}
+
+	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
+	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);
+
+	return 0;
+}
+
+static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+	const struct ata_port_info *ppi[] =
+		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
+	struct ata_host *host;
+	int i, rc;
+
+	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate host\n");
+		return -ENOMEM;
+	}
+
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
+			rc);
+		return rc;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < host->n_ports; i++)
+		vt6421_init_addrs(host->ports[i]);
+
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
+	struct ata_host *host;
+	int i, rc;
+
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	*r_host = host;
+
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
+		return rc;
+	}
+
+	/* The 8251 exposes four SATA ports as master/slave pairs on two channels */
+	for (i = 0; i < host->n_ports; i++)
+		ata_slave_link_init(host->ports[i]);
+
+	return 0;
+}
+
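+/*
+ * Set Rx52[2], raising the FIFO flow-control watermark; see the WD
+ * drive workaround discussion in svia_configure() below.
+ */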
+static void svia_wd_fix(struct pci_dev *pdev)
+{
+	u8 tmp8;
+
+	pci_read_config_byte(pdev, 0x52, &tmp8);
+	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
+}
+
+static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
+
+	/* if the IRQ was not handled, it might be a hotplug IRQ */
+	if (rc != IRQ_HANDLED) {
+		u32 serror;
+		unsigned long flags;
+
+		spin_lock_irqsave(&host->lock, flags);
+		/* check for hotplug on port 0 */
+		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
+		if (serror & SERR_PHYRDY_CHG) {
+			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
+			ata_port_freeze(host->ports[0]);
+			rc = IRQ_HANDLED;
+		}
+		/* check for hotplug on port 1 */
+		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
+		if (serror & SERR_PHYRDY_CHG) {
+			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
+			ata_port_freeze(host->ports[1]);
+			rc = IRQ_HANDLED;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+
+	return rc;
+}
+
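+/*
+ * The magic SError value 0x1000500 checked below seems to decode to
+ * SERR_TRANS_ST_ERROR | SERR_PROTOCOL | SERR_DATA, the signature left
+ * behind by the FIFO overflow described in svia_configure().
+ */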
+static void vt6421_error_handler(struct ata_port *ap)
+{
+	struct svia_priv *hpriv = ap->host->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 serror;
+
+	/* see svia_configure() for description */
+	if (!hpriv->wd_workaround) {
+		svia_scr_read(&ap->link, SCR_ERROR, &serror);
+		if (serror == 0x1000500) {
+			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
+			svia_wd_fix(pdev);
+			hpriv->wd_workaround = true;
+			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
+		}
+	}
+
+	ata_sff_error_handler(ap);
+}
+
+static void svia_configure(struct pci_dev *pdev, int board_id,
+			   struct svia_priv *hpriv)
+{
+	u8 tmp8;
+
+	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
+	dev_info(&pdev->dev, "routed to hard irq line %d\n",
+		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
+
+	/* make sure SATA channels are enabled */
+	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
+	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
+		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
+			(int)tmp8);
+		tmp8 |= ALL_PORTS;
+		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
+	}
+
+	/* make sure interrupts for each channel are sent to us */
+	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
+	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
+		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
+			(int) tmp8);
+		tmp8 |= ALL_PORTS;
+		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
+	}
+
+	/* make sure native mode is enabled */
+	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
+	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
+		dev_dbg(&pdev->dev,
+			"enabling SATA channel native mode (0x%x)\n",
+			(int) tmp8);
+		tmp8 |= NATIVE_MODE_ALL;
+		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
+	}
+
+	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
+		/* enable IRQ on hotplug */
+		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+			dev_dbg(&pdev->dev,
+				"enabling SATA hotplug (0x%x)\n",
+				(int) tmp8);
+			tmp8 |= SATA_HOTPLUG;
+			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+		}
+	}
+
+	/*
+	 * vt6420/1 has problems talking to some drives.  The following
+	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
+	 *
+	 * When the host issues HOLD, the device may send up to 20 DW of
+	 * data before acknowledging it with HOLDA, and the host should
+	 * be able to buffer them in its FIFO.  Unfortunately, some WD
+	 * drives send up to 40 DW before acknowledging HOLD and, in the
+	 * default configuration, this ends up overflowing the vt6421's
+	 * FIFO, making the controller abort the transaction with R_ERR.
+	 *
+	 * Rx52[2] is the enable bit for the internal 128 DW FIFO
+	 * flow-control watermark adjusting mechanism.  Its default value
+	 * of 0 means the host issues HOLD to the device when the
+	 * remaining FIFO size goes below 32 DW; setting it to 1 moves
+	 * the watermark to 64 DW.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+	 * http://article.gmane.org/gmane.linux.ide/46352
+	 * http://thread.gmane.org/gmane.linux.kernel/1062139
+	 *
+	 * As the fix slows down data transfer, apply it only when the
+	 * error actually appears - see vt6421_error_handler().  Always
+	 * apply the fix on vt6420, as we don't know whether SCR_ERROR
+	 * can be read safely there.
+	 */
+	if (board_id == vt6420) {
+		svia_wd_fix(pdev);
+		hpriv->wd_workaround = true;
+	}
+}
+
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	unsigned int i;
+	int rc;
+	struct ata_host *host = NULL;
+	int board_id = (int) ent->driver_data;
+	const unsigned *bar_sizes;
+	struct svia_priv *hpriv;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	if (board_id == vt6421)
+		bar_sizes = &vt6421_bar_sizes[0];
+	else
+		bar_sizes = &svia_bar_sizes[0];
+
+	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
+		if ((pci_resource_start(pdev, i) == 0) ||
+		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
+			dev_err(&pdev->dev,
+				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
+				i,
+				(unsigned long long)pci_resource_start(pdev, i),
+				(unsigned long long)pci_resource_len(pdev, i));
+			return -ENODEV;
+		}
+
+	switch (board_id) {
+	case vt6420:
+		rc = vt6420_prepare_host(pdev, &host);
+		break;
+	case vt6421:
+		rc = vt6421_prepare_host(pdev, &host);
+		break;
+	case vt8251:
+		rc = vt8251_prepare_host(pdev, &host);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	svia_configure(pdev, board_id, hpriv);
+
+	pci_set_master(pdev);
+	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
+		return ata_host_activate(host, pdev->irq, vt642x_interrupt,
+					 IRQF_SHARED, &svia_sht);
+	else
+		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+					 IRQF_SHARED, &svia_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = pci_get_drvdata(pdev);
+	struct svia_priv *hpriv = host->private_data;
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (hpriv->wd_workaround)
+		svia_wd_fix(pdev);
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+module_pci_driver(svia_pci_driver);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
new file mode 100644
index 0000000..9648127
--- /dev/null
+++ b/drivers/ata/sata_vsc.c
@@ -0,0 +1,443 @@
+/*
+ *  sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
+ *
+ *  Maintained by:  Jeremy Higdon @ SGI
+ * 		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2004 SGI
+ *
+ *  Bits from Jeff Garzik, Copyright RedHat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/driver-api/libata.rst
+ *
+ *  Vitesse hardware documentation presumably available under NDA.
+ *  Intel 31244 (same hardware interface) documentation presumably
+ *  available from http://developer.intel.com/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sata_vsc"
+#define DRV_VERSION	"2.3"
+
+enum {
+	VSC_MMIO_BAR			= 0,
+
+	/* Interrupt register offsets (from chip base address) */
+	VSC_SATA_INT_STAT_OFFSET	= 0x00,
+	VSC_SATA_INT_MASK_OFFSET	= 0x04,
+
+	/* Taskfile registers offsets */
+	VSC_SATA_TF_CMD_OFFSET		= 0x00,
+	VSC_SATA_TF_DATA_OFFSET		= 0x00,
+	VSC_SATA_TF_ERROR_OFFSET	= 0x04,
+	VSC_SATA_TF_FEATURE_OFFSET	= 0x06,
+	VSC_SATA_TF_NSECT_OFFSET	= 0x08,
+	VSC_SATA_TF_LBAL_OFFSET		= 0x0c,
+	VSC_SATA_TF_LBAM_OFFSET		= 0x10,
+	VSC_SATA_TF_LBAH_OFFSET		= 0x14,
+	VSC_SATA_TF_DEVICE_OFFSET	= 0x18,
+	VSC_SATA_TF_STATUS_OFFSET	= 0x1c,
+	VSC_SATA_TF_COMMAND_OFFSET	= 0x1d,
+	VSC_SATA_TF_ALTSTATUS_OFFSET	= 0x28,
+	VSC_SATA_TF_CTL_OFFSET		= 0x29,
+
+	/* DMA base */
+	VSC_SATA_UP_DESCRIPTOR_OFFSET	= 0x64,
+	VSC_SATA_UP_DATA_BUFFER_OFFSET	= 0x6C,
+	VSC_SATA_DMA_CMD_OFFSET		= 0x70,
+
+	/* SCRs base */
+	VSC_SATA_SCR_STATUS_OFFSET	= 0x100,
+	VSC_SATA_SCR_ERROR_OFFSET	= 0x104,
+	VSC_SATA_SCR_CONTROL_OFFSET	= 0x108,
+
+	/* Port stride */
+	VSC_SATA_PORT_OFFSET		= 0x200,
+
+	/* Error interrupt status bit offsets */
+	VSC_SATA_INT_ERROR_CRC		= 0x40,
+	VSC_SATA_INT_ERROR_T		= 0x20,
+	VSC_SATA_INT_ERROR_P		= 0x10,
+	VSC_SATA_INT_ERROR_R		= 0x8,
+	VSC_SATA_INT_ERROR_E		= 0x4,
+	VSC_SATA_INT_ERROR_M		= 0x2,
+	VSC_SATA_INT_PHY_CHANGE		= 0x1,
+	VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC  | VSC_SATA_INT_ERROR_T | \
+			      VSC_SATA_INT_ERROR_P    | VSC_SATA_INT_ERROR_R | \
+			      VSC_SATA_INT_ERROR_E    | VSC_SATA_INT_ERROR_M | \
+			      VSC_SATA_INT_PHY_CHANGE),
+};
+
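+/*
+ * Each port owns a 0x200-byte register block: port i lives at
+ * (i + 1) * VSC_SATA_PORT_OFFSET from the BAR 0 base, with the
+ * chip-global interrupt status/mask registers at the very start of
+ * the BAR.
+ */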
+static int vsc_sata_scr_read(struct ata_link *link,
+			     unsigned int sc_reg, u32 *val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+
+static int vsc_sata_scr_write(struct ata_link *link,
+			      unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL)
+		return -EINVAL;
+	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
+}
+
+
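+/*
+ * There is one 8-bit interrupt mask per port, at
+ * VSC_SATA_INT_MASK_OFFSET + port_no.  Freezing writes 0 (mask
+ * everything) and thawing writes 0xff (unmask everything); bit 7 of
+ * the mask tracks ATA_NIEN, see vsc_intr_mask_update().
+ */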
+static void vsc_freeze(struct ata_port *ap)
+{
+	void __iomem *mask_addr;
+
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+	writeb(0, mask_addr);
+}
+
+
+static void vsc_thaw(struct ata_port *ap)
+{
+	void __iomem *mask_addr;
+
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+	writeb(0xff, mask_addr);
+}
+
+
+static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
+{
+	void __iomem *mask_addr;
+	u8 mask;
+
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+	mask = readb(mask_addr);
+	if (ctl & ATA_NIEN)
+		mask |= 0x80;
+	else
+		mask &= 0x7F;
+	writeb(mask, mask_addr);
+}
+
+
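+/*
+ * The taskfile registers on this chip are 16 bits wide, which lets the
+ * LBA48 "hob" values ride in the high byte of a single writew().  For
+ * example, nsect = 0x34 with hob_nsect = 0x12 becomes
+ * writew(0x34 | (0x12 << 8), ioaddr->nsect_addr), handing the hardware
+ * both the current and previous-value bytes at once.
+ */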
+static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	/*
+	 * The only thing the ctl register is used for is SRST.
+	 * That is not enabled or disabled via tf_load.
+	 * However, if ATA_NIEN is changed, then we need to change
+	 * the interrupt register.
+	 */
+	if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
+		ap->last_ctl = tf->ctl;
+		vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
+	}
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writew(tf->feature | (((u16)tf->hob_feature) << 8),
+		       ioaddr->feature_addr);
+		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+		       ioaddr->nsect_addr);
+		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+		       ioaddr->lbal_addr);
+		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+		       ioaddr->lbam_addr);
+		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+		       ioaddr->lbah_addr);
+	} else if (is_addr) {
+		writew(tf->feature, ioaddr->feature_addr);
+		writew(tf->nsect, ioaddr->nsect_addr);
+		writew(tf->lbal, ioaddr->lbal_addr);
+		writew(tf->lbam, ioaddr->lbam_addr);
+		writew(tf->lbah, ioaddr->lbah_addr);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE)
+		writeb(tf->device, ioaddr->device_addr);
+
+	ata_wait_idle(ap);
+}
+
+
+static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u16 nsect, lbal, lbam, lbah, feature;
+
+	tf->command = ata_sff_check_status(ap);
+	tf->device = readw(ioaddr->device_addr);
+	feature = readw(ioaddr->error_addr);
+	nsect = readw(ioaddr->nsect_addr);
+	lbal = readw(ioaddr->lbal_addr);
+	lbam = readw(ioaddr->lbam_addr);
+	lbah = readw(ioaddr->lbah_addr);
+
+	tf->feature = feature;
+	tf->nsect = nsect;
+	tf->lbal = lbal;
+	tf->lbam = lbam;
+	tf->lbah = lbah;
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		tf->hob_feature = feature >> 8;
+		tf->hob_nsect = nsect >> 8;
+		tf->hob_lbal = lbal >> 8;
+		tf->hob_lbam = lbam >> 8;
+		tf->hob_lbah = lbah >> 8;
+	}
+}
+
+static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
+{
+	if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void vsc_port_intr(u8 port_status, struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	int handled = 0;
+
+	if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
+		vsc_error_intr(port_status, ap);
+		return;
+	}
+
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
+		handled = ata_bmdma_port_intr(ap, qc);
+
+	/* We received an interrupt during a polled command or some
+	 * other spurious condition.  Interrupt reporting with this
+	 * hardware is fairly reliable, so it is safe to simply clear
+	 * the interrupt.
+	 */
+	if (unlikely(!handled))
+		ap->ops->sff_check_status(ap);
+}
+
+/*
+ * vsc_sata_interrupt
+ *
+ * Read the chip-wide interrupt status register and service the ports
+ * that have interrupts pending.  The 32-bit register packs one status
+ * byte per port (port 0 in bits 0-7, port 1 in bits 8-15, and so on),
+ * which is what the "(status >> (8 * i)) & 0xff" extraction relies on.
+ */
+static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	u32 status;
+
+	status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
+
+	if (unlikely(status == 0xffffffff || status == 0)) {
+		if (status)
+			dev_err(host->dev,
+				"IRQ status == 0xffffffff, PCI fault or device removal?\n");
+		goto out;
+	}
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		u8 port_status = (status >> (8 * i)) & 0xff;
+		if (port_status) {
+			vsc_port_intr(port_status, host->ports[i]);
+			handled++;
+		}
+	}
+
+	spin_unlock(&host->lock);
+out:
+	return IRQ_RETVAL(handled);
+}
+
+
+static struct scsi_host_template vsc_sata_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+static struct ata_port_operations vsc_sata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	/* The IRQ handling is not quite standard SFF behaviour so we
+	   cannot use the default lost interrupt handler */
+	.lost_interrupt		= ATA_OP_NULL,
+	.sff_tf_load		= vsc_sata_tf_load,
+	.sff_tf_read		= vsc_sata_tf_read,
+	.freeze			= vsc_freeze,
+	.thaw			= vsc_thaw,
+	.scr_read		= vsc_sata_scr_read,
+	.scr_write		= vsc_sata_scr_write,
+};
+
+static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		= base + VSC_SATA_TF_CMD_OFFSET;
+	port->data_addr		= base + VSC_SATA_TF_DATA_OFFSET;
+	port->error_addr	= base + VSC_SATA_TF_ERROR_OFFSET;
+	port->feature_addr	= base + VSC_SATA_TF_FEATURE_OFFSET;
+	port->nsect_addr	= base + VSC_SATA_TF_NSECT_OFFSET;
+	port->lbal_addr		= base + VSC_SATA_TF_LBAL_OFFSET;
+	port->lbam_addr		= base + VSC_SATA_TF_LBAM_OFFSET;
+	port->lbah_addr		= base + VSC_SATA_TF_LBAH_OFFSET;
+	port->device_addr	= base + VSC_SATA_TF_DEVICE_OFFSET;
+	port->status_addr	= base + VSC_SATA_TF_STATUS_OFFSET;
+	port->command_addr	= base + VSC_SATA_TF_COMMAND_OFFSET;
+	port->altstatus_addr	= base + VSC_SATA_TF_ALTSTATUS_OFFSET;
+	port->ctl_addr		= base + VSC_SATA_TF_CTL_OFFSET;
+	port->bmdma_addr	= base + VSC_SATA_DMA_CMD_OFFSET;
+	port->scr_addr		= base + VSC_SATA_SCR_STATUS_OFFSET;
+	writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
+	writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
+}
+
+
+static int vsc_sata_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	static const struct ata_port_info pi = {
+		.flags		= ATA_FLAG_SATA,
+		.pio_mask	= ATA_PIO4,
+		.mwdma_mask	= ATA_MWDMA2,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &vsc_sata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int i, rc;
+	u8 cls;
+
+	ata_print_version_once(&pdev->dev, DRV_VERSION);
+
+	/* allocate host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+	if (!host)
+		return -ENOMEM;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* check if we have needed resource mapped */
+	if (pci_resource_len(pdev, 0) == 0)
+		return -ENODEV;
+
+	/* map IO regions and initialize host accordingly */
+	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	mmio_base = host->iomap[VSC_MMIO_BAR];
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
+
+		vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
+	}
+
+	/*
+	 * Use a 32-bit DMA mask, because 64-bit address support is poor.
+	 */
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
+	/*
+	 * Due to a chip bug, a cache line size of zero can't be used, so
+	 * program a sane value if the register was left at zero.
+	 */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
+	if (cls == 0x00)
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
+
+	if (pci_enable_msi(pdev) == 0)
+		pci_intx(pdev, 0);
+
+	/*
+	 * Config offset 0x98 is "Extended Control and Status Register 0"
+	 * Default value is (1 << 28).  All bits except bit 28 are reserved in
+	 * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
+	 * If bit 28 is clear, each port has its own LED.
+	 */
+	pci_write_config_dword(pdev, 0x98, 0);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
+				 IRQF_SHARED, &vsc_sata_sht);
+}
+
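+/*
+ * Both IDs below are additionally matched by class: 0x010600 with mask
+ * 0xFFFFFF is the full "SATA controller, vendor-specific interface"
+ * class code, so only functions in that class bind here.
+ */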
+static const struct pci_device_id vsc_sata_pci_tbl[] = {
+	{ PCI_VENDOR_ID_VITESSE, 0x7174,
+	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+	{ PCI_VENDOR_ID_INTEL, 0x3200,
+	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver vsc_sata_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= vsc_sata_pci_tbl,
+	.probe			= vsc_sata_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+module_pci_driver(vsc_sata_pci_driver);
+
+MODULE_AUTHOR("Jeremy Higdon");
+MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sis.h b/drivers/ata/sis.h
new file mode 100644
index 0000000..0be4969
--- /dev/null
+++ b/drivers/ata/sis.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+struct ata_port_info;
+
+/* pata_sis.c */
+extern const struct ata_port_info sis_info133_for_sata;